Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,37 @@
#
# Makefile for arch/parisc/kernel
#
extra-y := head.o vmlinux.lds
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
topology.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_cache.o = -pg
CFLAGS_REMOVE_irq.o = -pg
CFLAGS_REMOVE_pacache.o = -pg
CFLAGS_REMOVE_perf.o = -pg
CFLAGS_REMOVE_traps.o = -pg
CFLAGS_REMOVE_unaligned.o = -pg
CFLAGS_REMOVE_unwind.o = -pg
endif
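# For reference, kbuild honours these removals in scripts/Makefile.lib,
# roughly:
#   _c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
# so the objects listed above are compiled without -pg and the ftrace
# machinery never traces its own low-level helpers.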
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
obj-$(CONFIG_STACKTRACE)+= stacktrace.o
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o


@@ -0,0 +1,301 @@
/*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
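/* The DEFINE() entries below come from <linux/kbuild.h>; each expands
 * roughly to
 *	asm volatile("\n->" #sym " %0 " #val : : "i" (val));
 * so the compiler's assembly output carries "->SYM value" marker lines
 * that the build post-processes into asm-offsets.h #defines.
 */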
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/uaccess.h>
#ifdef CONFIG_64BIT
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64
#endif
#define FRAME_ALIGN 64
/* Add FRAME_SIZE to the size x and align it to y. All definitions
* that use align_frame will include space for a frame.
*/
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
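/* Worked example, assuming the 64-bit FRAME_SIZE of 128:
 * align_frame(100, 64) = (100 + 128 + 63) - ((100 + 63) % 64)
 *                      = 291 - 35 = 256,
 * the smallest multiple of 64 that still leaves room for a full
 * 128-byte frame above the first 100 bytes.
 */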
int main(void)
{
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
BLANK();
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(TASK_SZ, sizeof(struct task_struct));
/* TASK_SZ_ALGN includes space for a stack frame. */
DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
BLANK();
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
/* PT_SZ_ALGN includes space for a stack frame. */
DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
/* THREAD_SZ_ALGN includes space for a stack frame. */
DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
BLANK();
DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
BLANK();
return 0;
}


@@ -0,0 +1,81 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
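/* Each asm-generic/audit_*.h header expands to a comma-separated list
 * of __NR_* constants, so after preprocessing a class array is just a
 * ~0U-terminated list of syscall numbers, e.g. (illustrative contents):
 *
 *	static unsigned write_class[] = { __NR_truncate, __NR_rename,
 *					  ..., ~0U };
 *
 * The audit core walks each registered list up to the sentinel.
 */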
int audit_classify_arch(int arch)
{
#ifdef CONFIG_COMPAT
if (arch == AUDIT_ARCH_PARISC)
return 1;
#endif
return 0;
}
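/* Classification codes shared with the common audit code: 0 = native,
 * 1 = 32-bit compat, 2 = open-like, 3 = openat-like, 4 = socketcall,
 * 5 = execve. Only the open/openat/execve classes matter here.
 */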
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_COMPAT
extern int parisc32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_PARISC)
return parisc32_classify_syscall(syscall);
#endif
switch (syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_execve:
return 5;
default:
return 0;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
extern __u32 parisc32_dir_class[];
extern __u32 parisc32_write_class[];
extern __u32 parisc32_read_class[];
extern __u32 parisc32_chattr_class[];
extern __u32 parisc32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);


@@ -0,0 +1,104 @@
/*
* Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels
*
* Copyright (C) 2000 John Marvin
* Copyright (C) 2000 Hewlett Packard Co.
*
* Heavily inspired from various other efforts to do the same thing
* (ia64,sparc64/mips64)
*/
/* Make sure include/asm-parisc/elf.h does the right thing */
#define ELF_CLASS ELFCLASS32
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
{ int i; \
for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
} \
dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
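/* Layout of the 32-bit gregset built above: slots 0-31 hold the
 * general registers, 32-39 the space registers, 40-47 the IA queues
 * and fault registers saved in pt_regs, and 48-63 control registers
 * read live with mfctl() (cr22 = IPSW, cr0 = recovery counter,
 * cr24-cr31 = temporaries, cr8/9/12/13 = protection IDs, cr10 = CCR,
 * cr15 = EIEM).
 */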
typedef unsigned int elf_greg_t;
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h> /* struct compat_timeval */
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
u16 pr_uid;
u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define init_elf_binfmt init_elf32_binfmt
#define ELF_PLATFORM ("PARISC32\0")
/*
* We should probably use this macro to set a flag somewhere to indicate
* this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
* could set a processor dependent flag in the thread_struct.
*/
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex) \
set_thread_flag(TIF_32BIT); \
current->thread.map_base = DEFAULT_MAP_BASE32; \
current->thread.task_size = DEFAULT_TASK_SIZE32 \
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
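/* e.g. with HZ == 250, a cputime of 375 jiffies becomes
 * tv_sec = 375 / 250 = 1 and tv_usec = (375 % 250) * 4000 = 500000.
 */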
#include "../../../fs/binfmt_elf.c"

arch/parisc/kernel/cache.c

@@ -0,0 +1,584 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
* Copyright (C) 1999 SuSE GmbH Nuernberg
* Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
*
* Cache and TLB management
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We put a spinlock around all TLB flushes to
* ensure this.
*/
DEFINE_SPINLOCK(pa_tlb_lock);
struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif
void
flush_cache_all_local(void)
{
flush_instruction_cache_local(NULL);
flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);
/* Virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
unsigned long pfn = pte_pfn(*ptep);
struct page *page;
/* We don't have pte special. As a result, we can be called with
an invalid pfn and we don't need to flush the kernel dcache page.
This occurs with FireGL card in C8000. */
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
show_cache_info(struct seq_file *m)
{
char buf[32];
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
if (cache_info.dc_loop != 1)
snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
((cache_info.dc_loop == 1) ? "direct mapped" : buf));
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
cache_info.it_size,
cache_info.dt_size,
cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
);
#ifndef CONFIG_PA20
/* BTLB - Block TLB */
if (btlb_info.max_size==0) {
seq_printf(m, "BTLB\t\t: not supported\n" );
} else {
seq_printf(m,
"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
btlb_info.max_size, (int)4096,
btlb_info.max_size>>8,
btlb_info.fixed_range_info.num_i,
btlb_info.fixed_range_info.num_d,
btlb_info.fixed_range_info.num_comb,
btlb_info.variable_range_info.num_i,
btlb_info.variable_range_info.num_d,
btlb_info.variable_range_info.num_comb
);
}
#endif
}
void __init
parisc_cache_init(void)
{
if (pdc_cache_info(&cache_info) < 0)
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
printk("ic_size %lx dc_size %lx it_size %lx\n",
cache_info.ic_size,
cache_info.dc_size,
cache_info.it_size);
printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
cache_info.dc_base,
cache_info.dc_stride,
cache_info.dc_count,
cache_info.dc_loop);
printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
*(unsigned long *) (&cache_info.dc_conf),
cache_info.dc_conf.cc_alias,
cache_info.dc_conf.cc_block,
cache_info.dc_conf.cc_line,
cache_info.dc_conf.cc_shift);
printk(" wt %d sh %d cst %d hv %d\n",
cache_info.dc_conf.cc_wt,
cache_info.dc_conf.cc_sh,
cache_info.dc_conf.cc_cst,
cache_info.dc_conf.cc_hv);
printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
cache_info.ic_base,
cache_info.ic_stride,
cache_info.ic_count,
cache_info.ic_loop);
printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
*(unsigned long *) (&cache_info.ic_conf),
cache_info.ic_conf.cc_alias,
cache_info.ic_conf.cc_block,
cache_info.ic_conf.cc_line,
cache_info.ic_conf.cc_shift);
printk(" wt %d sh %d cst %d hv %d\n",
cache_info.ic_conf.cc_wt,
cache_info.ic_conf.cc_sh,
cache_info.ic_conf.cc_cst,
cache_info.ic_conf.cc_hv);
printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
cache_info.dt_conf.tc_sh,
cache_info.dt_conf.tc_page,
cache_info.dt_conf.tc_cst,
cache_info.dt_conf.tc_aid,
cache_info.dt_conf.tc_pad1);
printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
cache_info.it_conf.tc_sh,
cache_info.it_conf.tc_page,
cache_info.it_conf.tc_cst,
cache_info.it_conf.tc_aid,
cache_info.it_conf.tc_pad1);
#endif
split_tlb = 0;
if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
if (cache_info.dt_conf.tc_sh == 2)
printk(KERN_WARNING "Unexpected TLB configuration. "
"Will flush I/D separately (could be optimized).\n");
split_tlb = 1;
}
/* "New and Improved" version from Jim Hull
* (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
* The following CAFL_STRIDE is an optimized version, see
* http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
* http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
*/
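/* Both forms are the same power-of-two arithmetic:
 *	(1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
 *		== cc_line << ((cc_block - 1) + 4 + cc_shift)
 *		== cc_line << (3 + cc_block + cc_shift)
 */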
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
#ifndef CONFIG_PA20
if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
#endif
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
}
}
void disable_sr_hashing(void)
{
int srhash_type, retval;
unsigned long space_bits;
switch (boot_cpu_data.cpu_type) {
case pcx: /* We shouldn't get this far. setup.c should prevent it. */
BUG();
return;
case pcxs:
case pcxt:
case pcxt_:
srhash_type = SRHASH_PCXST;
break;
case pcxl:
srhash_type = SRHASH_PCXL;
break;
case pcxl2: /* pcxl2 doesn't support space register hashing */
return;
default: /* Currently all PA2.0 machines use the same ins. sequence */
srhash_type = SRHASH_PA20;
break;
}
disable_sr_hashing_asm(srhash_type);
retval = pdc_spaceid_bits(&space_bits);
/* If this procedure isn't implemented, don't panic. */
if (retval < 0 && retval != PDC_BAD_OPTION)
panic("pdc_spaceid_bits call failed.\n");
if (space_bits != 0)
panic("SpaceID hashing is still on!\n");
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long physaddr)
{
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_icache_page_asm(physaddr, vmaddr);
preempt_enable();
}
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
return;
}
flush_kernel_dcache_page(page);
if (!mapping)
return;
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
/* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */
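/* (On parisc SHM_COLOUR is 4 MB, so "congruently mapped" means equal
 * modulo 4 MB; the colour comparison in the loop below relies on that
 * arrangement.) */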
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
/* The TLB is the engine of coherence on parisc: The
* CPU is entitled to speculate any page with a TLB
* mapping, so here we kill the mapping then flush the
* page along a special flush only alias mapping.
* This guarantees that the page is no-longer in the
* cache for any process and nor may it be
* speculatively read in (until the user or kernel
* specifically accesses it, of course) */
flush_tlb_page(mpnt, addr);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
if (old_addr)
printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
alltime = mfctl(16);
flush_data_cache();
alltime = mfctl(16) - alltime;
size = (unsigned long)(_end - _text);
rangetime = mfctl(16);
flush_kernel_dcache_range((unsigned long)_text, size);
rangetime = mfctl(16) - rangetime;
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
/* Racy, but if we see an intermediate value, it's ok too... */
parisc_cache_flush_threshold = size * alltime / rangetime;
parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
if (!parisc_cache_flush_threshold)
parisc_cache_flush_threshold = FLUSH_THRESHOLD;
if (parisc_cache_flush_threshold > cache_info.dc_size)
parisc_cache_flush_threshold = cache_info.dc_size;
printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
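/* Illustrative numbers: if the whole-cache flush took 1,000,000 cycles
 * and flushing an 8 MB kernel range took 4,000,000 cycles, the
 * break-even threshold is 8 MB * 1,000,000 / 4,000,000 = 2 MB; ranges
 * larger than that are cheaper to handle as a full flush.
 */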
extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(void *addr)
{
unsigned long flags;
flush_kernel_dcache_page_asm(addr);
purge_tlb_start(flags);
pdtlb_kernel(addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
{
/* Copy using kernel mapping. No coherency is needed (all in
kunmap) for the `to' page. However, the `from' page needs to
be flushed through a mapping equivalent to the user mapping
before it can be accessed through the kernel mapping. */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
preempt_enable();
copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags;
/* Note: purge_tlb_entries can be called at startup with
no context. */
purge_tlb_start(flags);
mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);
void __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
unsigned long npages;
npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
unsigned long flags;
purge_tlb_start(flags);
mtsp(sid, 1);
if (split_tlb) {
while (npages--) {
pdtlb(start);
pitlb(start);
start += PAGE_SIZE;
}
} else {
while (npages--) {
pdtlb(start);
start += PAGE_SIZE;
}
}
purge_tlb_end(flags);
}
}
static void cacheflush_h_tmp_function(void *dummy)
{
flush_cache_all_local();
}
void flush_cache_all(void)
{
on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long usize = 0;
for (vma = mm->mmap; vma; vma = vma->vm_next)
usize += vma->vm_end - vma->vm_start;
return usize;
}
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
pte_t *ptep = NULL;
if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd_t *pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
ptep = pte_offset_map(pmd, addr);
}
}
return ptep;
}
void flush_cache_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
pgd_t *pgd;
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}
if (mm->context == mfsp(3)) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
if ((vma->vm_flags & VM_EXEC) == 0)
continue;
flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
}
return;
}
pgd = mm->pgd;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long addr;
for (addr = vma->vm_start; addr < vma->vm_end;
addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
if ((end - start) < parisc_cache_flush_threshold)
flush_user_dcache_range_asm(start,end);
else
flush_data_cache();
}
void
flush_user_icache_range(unsigned long start, unsigned long end)
{
if ((end - start) < parisc_cache_flush_threshold)
flush_user_icache_range_asm(start,end);
else
flush_instruction_cache();
}
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
unsigned long addr;
pgd_t *pgd;
BUG_ON(!vma->vm_mm->context);
if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}
if (vma->vm_mm->context == mfsp(3)) {
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
return;
}
pgd = vma->vm_mm->pgd;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
BUG_ON(!vma->vm_mm->context);
if (pfn_valid(pfn)) {
flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}


@@ -0,0 +1,40 @@
#include <asm/unistd.h>
unsigned int parisc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned int parisc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned int parisc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned int parisc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned int parisc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int parisc32_classify_syscall(unsigned syscall)
{
switch (syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_execve:
return 5;
default:
return 1;
}
}


@@ -0,0 +1,911 @@
/*
* drivers.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 1999 The Puffin Group
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
* Copyright (c) 2001 Helge Deller <deller@gmx.de>
* Copyright (c) 2001,2002 Ryan Bradetich
* Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
*
* The file handles registering devices and drivers, then matching them.
* It's the closest we get to a dating agency.
*
* If you're thinking about modifying this file, here are some gotchas to
* bear in mind:
* - 715/Mirage device paths have a dummy device between Lasi and its children
* - The EISA adapter may show up as a sibling or child of Wax
* - Dino has an optionally functional serial port. If firmware enables it,
* it shows up as a child of Dino. If firmware disables it, the buswalk
* finds it and it shows up as a child of Cujo
* - Dino has both parisc and pci devices as children
* - parisc devices are discovered in a random order, including children
* before parents in some cases.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/export.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */
struct hppa_dma_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
.init_name = "parisc",
};
static inline int check_dev(struct device *dev)
{
if (dev->bus == &parisc_bus_type) {
struct parisc_device *pdev;
pdev = to_parisc_device(dev);
return pdev->id.hw_type != HPHW_FAULTY;
}
return 1;
}
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath);
struct recurse_struct {
void * obj;
int (*fn)(struct device *, void *);
};
static int descend_children(struct device * dev, void * data)
{
struct recurse_struct * recurse_data = (struct recurse_struct *)data;
if (recurse_data->fn(dev, recurse_data->obj))
return 1;
else
return device_for_each_child(dev, recurse_data, descend_children);
}
/**
* for_each_padev - Iterate over all devices in the tree
* @fn: Function to call for each device.
* @data: Data to pass to the called function.
*
* This performs a depth-first traversal of the tree, calling the
* function passed for each node. It calls the function for parents
* before children.
*/
static int for_each_padev(int (*fn)(struct device *, void *), void * data)
{
struct recurse_struct recurse_data = {
.obj = data,
.fn = fn,
};
return device_for_each_child(&root, &recurse_data, descend_children);
}
/**
* match_device - Report whether this driver can handle this device
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
for (ids = driver->id_table; ids->sversion; ids++) {
if ((ids->sversion != SVERSION_ANY_ID) &&
(ids->sversion != dev->id.sversion))
continue;
if ((ids->hw_type != HWTYPE_ANY_ID) &&
(ids->hw_type != dev->id.hw_type))
continue;
if ((ids->hversion != HVERSION_ANY_ID) &&
(ids->hversion != dev->id.hversion))
continue;
return 1;
}
return 0;
}
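/* The id_table is terminated by an entry whose sversion is 0. An
 * illustrative table (field order: hw_type, hversion_rev, hversion,
 * sversion; the sversion value here is hypothetical):
 *
 *	static const struct parisc_device_id example_ids[] = {
 *		{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
 *		{ 0, }
 *	};
 */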
static int parisc_driver_probe(struct device *dev)
{
int rc;
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
rc = pa_drv->probe(pa_dev);
if (!rc)
pa_dev->driver = pa_drv;
return rc;
}
static int parisc_driver_remove(struct device *dev)
{
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
if (pa_drv->remove)
pa_drv->remove(pa_dev);
return 0;
}
/**
* register_parisc_driver - Register this driver if it can handle a device
* @driver: the PA-RISC driver to try
*/
int register_parisc_driver(struct parisc_driver *driver)
{
/* FIXME: we need this because apparently the sti
* driver can be registered twice */
if(driver->drv.name) {
printk(KERN_WARNING
"BUG: skipping previously registered driver %s\n",
driver->name);
return 1;
}
if (!driver->probe) {
printk(KERN_WARNING
"BUG: driver %s has no probe routine\n",
driver->name);
return 1;
}
driver->drv.bus = &parisc_bus_type;
/* We install our own probe and remove routines */
WARN_ON(driver->drv.probe != NULL);
WARN_ON(driver->drv.remove != NULL);
driver->drv.name = driver->name;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(register_parisc_driver);
struct match_count {
struct parisc_driver * driver;
int count;
};
static int match_and_count(struct device * dev, void * data)
{
struct match_count * m = data;
struct parisc_device * pdev = to_parisc_device(dev);
if (check_dev(dev)) {
if (match_device(m->driver, pdev))
m->count++;
}
return 0;
}
/**
* count_parisc_driver - count # of devices this driver would match
* @driver: the PA-RISC driver to try
*
* Used by IOMMU support to "guess" the right size IOPdir.
* Formula is something like memsize/(num_iommu * entry_size).
*/
int count_parisc_driver(struct parisc_driver *driver)
{
struct match_count m = {
.driver = driver,
.count = 0,
};
for_each_padev(match_and_count, &m);
return m.count;
}
/**
* unregister_parisc_driver - Unregister this driver from the list of drivers
* @driver: the PA-RISC driver to unregister
*/
int unregister_parisc_driver(struct parisc_driver *driver)
{
driver_unregister(&driver->drv);
return 0;
}
EXPORT_SYMBOL(unregister_parisc_driver);
struct find_data {
unsigned long hpa;
struct parisc_device * dev;
};
static int find_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
struct find_data * d = (struct find_data*)data;
if (check_dev(dev)) {
if (pdev->hpa.start == d->hpa) {
d->dev = pdev;
return 1;
}
}
return 0;
}
static struct parisc_device *find_device_by_addr(unsigned long hpa)
{
struct find_data d = {
.hpa = hpa,
};
int ret;
ret = for_each_padev(find_device, &d);
return ret ? d.dev : NULL;
}
/**
* find_pa_parent_type - Find a parent of a specific type
* @dev: The device to start searching from
* @type: The device type to search for.
*
* Walks up the device tree looking for a device of the specified type.
* If it finds it, it returns it. If not, it returns NULL.
*/
const struct parisc_device *
find_pa_parent_type(const struct parisc_device *padev, int type)
{
const struct device *dev = &padev->dev;
while (dev != &root) {
struct parisc_device *candidate = to_parisc_device(dev);
if (candidate->id.hw_type == type)
return candidate;
dev = dev->parent;
}
return NULL;
}
/*
* get_node_path fills in @path with the firmware path to the device.
* Note that if @node is a parisc device, we don't fill in the 'mod' field.
* This is because both callers pass the parent and fill in the mod
* themselves. If @node is a PCI device, we do fill it in, even though this
* is inconsistent.
*/
static void get_node_path(struct device *dev, struct hardware_path *path)
{
int i = 5;
memset(&path->bc, -1, 6);
if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->mod = PCI_FUNC(devfn);
path->bc[i--] = PCI_SLOT(devfn);
dev = dev->parent;
}
while (dev != &root) {
if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
} else if (dev->bus == &parisc_bus_type) {
path->bc[i--] = to_parisc_device(dev)->hw_path;
}
dev = dev->parent;
}
}
static char *print_hwpath(struct hardware_path *path, char *output)
{
int i;
for (i = 0; i < 6; i++) {
if (path->bc[i] == -1)
continue;
output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
}
output += sprintf(output, "%u", (unsigned char) path->mod);
return output;
}
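/* e.g. path->bc = { -1, -1, -1, 10, 0, 4 } with path->mod = 2 prints
 * "10/0/4/2".
 */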
/**
* print_pa_hwpath - Returns hardware path for PA devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PA device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pa_hwpath(struct parisc_device *dev, char *output)
{
struct hardware_path path;
get_node_path(dev->dev.parent, &path);
path.mod = dev->hw_path;
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pa_hwpath);
#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
/**
* get_pci_node_path - Determines the hardware path for a PCI device
* @pdev: The device to return the path for
* @path: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the hardware_path structure with the route to
* the specified PCI device. This structure is suitable for passing to
* PDC calls.
*/
void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
{
get_node_path(&pdev->dev, path);
}
EXPORT_SYMBOL(get_pci_node_path);
/**
* print_pci_hwpath - Returns hardware path for PCI devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PCI device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pci_hwpath(struct pci_dev *dev, char *output)
{
struct hardware_path path;
get_pci_node_path(dev, &path);
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pci_hwpath);
#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
static void setup_bus_id(struct parisc_device *padev)
{
struct hardware_path path;
char name[28];
char *output = name;
int i;
get_node_path(padev->dev.parent, &path);
for (i = 0; i < 6; i++) {
if (path.bc[i] == -1)
continue;
output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
}
sprintf(output, "%u", (unsigned char) padev->hw_path);
dev_set_name(&padev->dev, name);
}
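/* e.g. a device whose parent path is 10/0 and whose own hw_path is 4
 * is named "10:0:4", the id that appears under /sys/bus/parisc/devices.
 */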
struct parisc_device * create_tree_node(char id, struct device *parent)
{
struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->hw_path = id;
dev->id.hw_type = HPHW_FAULTY;
dev->dev.parent = parent;
setup_bus_id(dev);
dev->dev.bus = &parisc_bus_type;
dev->dma_mask = 0xffffffffUL; /* PARISC devices are 32-bit */
/* make the generic dma mask a pointer to the parisc one */
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.coherent_dma_mask = dev->dma_mask;
if (device_register(&dev->dev)) {
kfree(dev);
return NULL;
}
return dev;
}
struct match_id_data {
char id;
struct parisc_device * dev;
};
static int match_by_id(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
struct match_id_data * d = data;
if (pdev->hw_path == d->id) {
d->dev = pdev;
return 1;
}
return 0;
}
/**
* alloc_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @id: the element of the module path for this entry
*
* Checks all the children of @parent for a matching @id. If none
* found, it allocates a new device and returns it.
*/
static struct parisc_device * alloc_tree_node(struct device *parent, char id)
{
struct match_id_data d = {
.id = id,
};
if (device_for_each_child(parent, &d, match_by_id))
return d.dev;
else
return create_tree_node(id, parent);
}
static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
}
return alloc_tree_node(parent, modpath->mod);
}
struct parisc_device *
alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
{
int status;
unsigned long bytecnt;
u8 iodc_data[32];
struct parisc_device *dev;
const char *name;
/* Check to make sure this device has not already been added - Ryan */
if (find_device_by_addr(hpa) != NULL)
return NULL;
status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
if (status != PDC_OK)
return NULL;
dev = create_parisc_device(mod_path);
if (dev->id.hw_type != HPHW_FAULTY) {
printk(KERN_ERR "Two devices have hardware path [%s]. "
"IODC data for second device: "
"%02x%02x%02x%02x%02x%02x\n"
"Rearranging GSC cards sometimes helps\n",
parisc_pathname(dev), iodc_data[0], iodc_data[1],
iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
return NULL;
}
dev->id.hw_type = iodc_data[3] & 0x1f;
dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
dev->id.hversion_rev = iodc_data[1] & 0x0f;
dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
(iodc_data[5] << 8) | iodc_data[6];
dev->hpa.name = parisc_pathname(dev);
dev->hpa.start = hpa;
/* This is awkward. The STI spec says that gfx devices may occupy
* 32MB or 64MB. Unfortunately, we don't know how to tell whether
* it's the former or the latter. Assumptions either way can hurt us.
*/
if (hpa == 0xf4000000 || hpa == 0xf8000000) {
dev->hpa.end = hpa + 0x03ffffff;
} else if (hpa == 0xf6000000 || hpa == 0xfa000000) {
dev->hpa.end = hpa + 0x01ffffff;
} else {
dev->hpa.end = hpa + 0xfff;
}
dev->hpa.flags = IORESOURCE_MEM;
name = parisc_hardware_description(&dev->id);
if (name) {
strlcpy(dev->name, name, sizeof(dev->name));
}
/* Silently fail things like mouse ports which are subsumed within
* the keyboard controller
*/
if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
printk("Unable to claim HPA %lx for device %s\n",
hpa, name);
return dev;
}
static int parisc_generic_match(struct device *dev, struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
static ssize_t make_modalias(struct device *dev, char *buf)
{
const struct parisc_device *padev = to_parisc_device(dev);
const struct parisc_device_id *id = &padev->id;
return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
(u32)id->sversion);
}
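/* e.g. hw_type 0x0a, hversion 0x004, rev 0x00, sversion 0x0008a yields
 * "parisc:t0Ahv0004rev00sv0000008A".
 */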
static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const struct parisc_device *padev;
char modalias[40];
if (!dev)
return -ENODEV;
padev = to_parisc_device(dev);
if (!padev)
return -ENODEV;
if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
return -ENOMEM;
make_modalias(dev, modalias);
if (add_uevent_var(env, "MODALIAS=%s", modalias))
return -ENOMEM;
return 0;
}
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct parisc_device *padev = to_parisc_device(dev); \
return sprintf(buf, format_string, padev->field); \
}
#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
pa_dev_attr(irq, irq, "%u\n");
pa_dev_attr_id(hw_type, "0x%02x\n");
pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
pa_dev_attr_id(hversion, "0x%03x\n");
pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return make_modalias(dev, buf);
}
static struct device_attribute parisc_device_attrs[] = {
__ATTR_RO(irq),
__ATTR_RO(hw_type),
__ATTR_RO(rev),
__ATTR_RO(hversion),
__ATTR_RO(sversion),
__ATTR_RO(modalias),
__ATTR_NULL,
};
struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
.uevent = parisc_uevent,
.dev_attrs = parisc_device_attrs,
.probe = parisc_driver_probe,
.remove = parisc_driver_remove,
};
/**
* register_parisc_device - Locate a driver to manage this device.
* @dev: The parisc device.
*
* Search the driver list for a driver that is willing to manage
* this device.
*/
int register_parisc_device(struct parisc_device *dev)
{
if (!dev)
return 0;
if (dev->driver)
return 1;
return 0;
}
/**
* match_pci_device - Matches a pci device against a given hardware path
* entry.
* @dev: the generic device (known to be contained by a pci_dev).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_pci_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct pci_dev *pdev = to_pci_dev(dev);
int id;
if (index == 5) {
/* we are at the end of the path, and on the actual device */
unsigned int devfn = pdev->devfn;
return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
(modpath->mod == PCI_FUNC(devfn)));
}
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}
/**
* match_parisc_device - Matches a parisc device against a given hardware
* path entry.
* @dev: the generic device (known to be contained by a parisc_device).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_parisc_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct parisc_device *curr = to_parisc_device(dev);
char id = (index == 6) ? modpath->mod : modpath->bc[index];
return (curr->hw_path == id);
}
struct parse_tree_data {
int index;
struct hardware_path * modpath;
struct device * dev;
};
static int check_parent(struct device * dev, void * data)
{
struct parse_tree_data * d = data;
if (check_dev(dev)) {
if (dev->bus == &parisc_bus_type) {
if (match_parisc_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev_is_pci(dev)) {
if (match_pci_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev->bus == NULL) {
/* we are on a bus bridge */
struct device *new = parse_tree_node(dev, d->index, d->modpath);
if (new)
d->dev = new;
}
}
return d->dev != NULL;
}
/**
* parse_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @index: the current BC index
* @modpath: the hardware_path struct to match a device against
* @return: The corresponding device if found, NULL otherwise.
*
* Checks all the children of @parent for a matching @id. If none
* found, it returns NULL.
*/
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
{
struct parse_tree_data d = {
.index = index,
.modpath = modpath,
};
struct recurse_struct recurse_data = {
.obj = &d,
.fn = check_parent,
};
if (device_for_each_child(parent, &recurse_data, descend_children))
/* nothing */;
return d.dev;
}
/**
* hwpath_to_device - Finds the generic device corresponding to a given hardware path.
* @modpath: the hardware path.
* @return: The target device, NULL if not found.
*/
struct device *hwpath_to_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = parse_tree_node(parent, i, modpath);
if (!parent)
return NULL;
}
if (dev_is_pci(parent)) /* pci devices already parse MOD */
return parent;
else
return parse_tree_node(parent, 6, modpath);
}
EXPORT_SYMBOL(hwpath_to_device);
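/*
 * Usage sketch (hypothetical path, assuming the usual hardware_path
 * layout with unused bus-converter slots set to -1 so they are
 * skipped):
 *
 *	struct hardware_path hwpath = {
 *		.bc  = { -1, -1, -1, -1, 8, 0 },
 *		.mod = 1,
 *	};
 *	struct device *dev = hwpath_to_device(&hwpath);
 *
 * This looks up hardware path 8/0/1; a NULL return means no device
 * in the io-tree matched.
 */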
/**
* device_to_hwpath - Populates the hwpath corresponding to the given device.
* @param dev the target device
* @param path pointer to a previously allocated hwpath struct to be filled in
*/
void device_to_hwpath(struct device *dev, struct hardware_path *path)
{
struct parisc_device *padev;
if (dev->bus == &parisc_bus_type) {
padev = to_parisc_device(dev);
get_node_path(dev->parent, path);
path->mod = padev->hw_path;
} else if (dev_is_pci(dev)) {
get_node_path(dev, path);
}
}
EXPORT_SYMBOL(device_to_hwpath);
#define BC_PORT_MASK 0x8
#define BC_LOWER_PORT 0x8
#define BUS_CONVERTER(dev) \
((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
#define IS_LOWER_PORT(dev) \
((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \
& BC_PORT_MASK) == BC_LOWER_PORT)
#define MAX_NATIVE_DEVICES 64
#define NATIVE_DEVICE_OFFSET 0x1000
#define FLEX_MASK F_EXTEND(0xfffc0000)
#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW)
#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH)
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
void walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;
if (!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
return;
if (dev->id.hw_type == HPHW_IOA) {
io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
} else {
io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
io_io_high = (READ_IO_IO_HIGH(dev) + ~FLEX_MASK) & FLEX_MASK;
}
walk_native_bus(io_io_low, io_io_high, &dev->dev);
}
/**
* walk_native_bus -- Probe a bus for devices
* @io_io_low: Base address of this bus.
* @io_io_high: Last address of this bus.
* @parent: The parent bus device.
*
* A native bus (eg Runway or GSC) may have up to 64 devices on it,
* spaced at intervals of 0x1000 bytes. PDC may not inform us of these
* devices, so we have to probe for them. Unfortunately, we may find
* devices which are not physically connected (such as extra serial &
* keyboard ports). This problem is not yet solved.
*/
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent)
{
int i, devices_found = 0;
unsigned long hpa = io_io_low;
struct hardware_path path;
get_node_path(parent, &path);
do {
for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
struct parisc_device *dev;
/* Was the device already added by Firmware? */
dev = find_device_by_addr(hpa);
if (!dev) {
path.mod = i;
dev = alloc_pa_dev(hpa, &path);
if (!dev)
continue;
register_parisc_device(dev);
devices_found++;
}
walk_lower_bus(dev);
}
} while(!devices_found && hpa < io_io_high);
}
#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)
/**
* walk_central_bus - Find devices attached to the central bus
*
* PDC doesn't tell us about all devices in the system. This routine
* finds devices connected to the central bus.
*/
void walk_central_bus(void)
{
walk_native_bus(CENTRAL_BUS_ADDR,
CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
&root);
}
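/*
 * Worked example: walk_central_bus() above probes the 64 slots
 * CENTRAL_BUS_ADDR, CENTRAL_BUS_ADDR + 0x1000, ...,
 * CENTRAL_BUS_ADDR + 63 * 0x1000, i.e. 0xfff80000 through 0xfffbf000
 * (sign-extended by F_EXTEND() on 64-bit kernels).
 */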
static void print_parisc_device(struct parisc_device *dev)
{
char hw_path[64];
static int count;
print_pa_hwpath(dev, hw_path);
printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
if (dev->num_addrs) {
int k;
printk(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
printk("0x%lx ", dev->addr[k]);
}
printk("\n");
}
/**
* init_parisc_bus - Some preparation to be done before inventory
*/
void init_parisc_bus(void)
{
if (bus_register(&parisc_bus_type))
panic("Could not register PA-RISC bus type\n");
if (device_register(&root))
panic("Could not register PA-RISC root device\n");
get_device(&root);
}
static int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
if (check_dev(dev))
print_parisc_device(pdev);
return 0;
}
/**
* print_parisc_devices - Print out a list of devices found in this system
*/
void print_parisc_devices(void)
{
for_each_padev(print_one_device, NULL);
}

2218
arch/parisc/kernel/entry.S Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

185
arch/parisc/kernel/ftrace.c Normal file
View file

@ -0,0 +1,185 @@
/*
* Code for tracing calls in Linux kernel.
* Copyright (C) 2009 Helge Deller <deller@gmx.de>
*
* based on code for x86 which is:
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
*
* future possible enhancements:
* - add CONFIG_DYNAMIC_FTRACE
* - add CONFIG_STACK_TRACER
*/
#include <linux/init.h>
#include <linux/ftrace.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Add a function return address to the trace stack on thread info.*/
static int push_return_trace(unsigned long ret, unsigned long long time,
unsigned long func, int *depth)
{
int index;
if (!current->ret_stack)
return -EBUSY;
/* The return trace stack is full */
if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
atomic_inc(&current->trace_overrun);
return -EBUSY;
}
index = ++current->curr_ret_stack;
barrier();
current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = time;
*depth = index;
return 0;
}
/* Retrieve a function return address from the trace stack on thread info.*/
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
int index;
index = current->curr_ret_stack;
if (unlikely(index < 0)) {
ftrace_graph_stop();
WARN_ON(1);
/* Might as well panic, otherwise we have nowhere to go */
*ret = (unsigned long)
dereference_function_descriptor(&panic);
return;
}
*ret = current->ret_stack[index].ret;
trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime;
trace->overrun = atomic_read(&current->trace_overrun);
trace->depth = index;
barrier();
current->curr_ret_stack--;
}
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
unsigned long ftrace_return_to_handler(unsigned long retval0,
unsigned long retval1)
{
struct ftrace_graph_ret trace;
unsigned long ret;
pop_return_trace(&trace, &ret);
trace.rettime = local_clock();
ftrace_graph_return(&trace);
if (unlikely(!ret)) {
ftrace_graph_stop();
WARN_ON(1);
/* Might as well panic. What else to do? */
ret = (unsigned long)
dereference_function_descriptor(&panic);
}
/* HACK: we hand over the old function's return values
in %r23 and %r24. Assembly in entry.S will take care
of moving those to their final registers %ret0 and %ret1 */
asm( "copy %0, %%r23 \n\t"
"copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
return ret;
}
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
unsigned long long calltime;
struct ftrace_graph_ent trace;
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
*parent = (unsigned long)
dereference_function_descriptor(&return_to_handler);
if (unlikely(!__kernel_text_address(old))) {
ftrace_graph_stop();
*parent = old;
WARN_ON(1);
return;
}
calltime = local_clock();
if (push_return_trace(old, calltime,
self_addr, &trace.depth) == -EBUSY) {
*parent = old;
return;
}
trace.func = self_addr;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
*parent = old;
}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
void ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3)
{
extern ftrace_func_t ftrace_trace_function;
if (ftrace_trace_function != ftrace_stub) {
ftrace_trace_function(parent, self_addr);
return;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (ftrace_graph_entry && ftrace_graph_return) {
unsigned long sp;
unsigned long *parent_rp;
asm volatile ("copy %%r30, %0" : "=r"(sp));
/* sanity check: is the stack pointer we got from the
assembler function in entry.S within a reasonable
range of the current stack pointer? */
if ((sp - org_sp_gr3) > 0x400)
return;
/* calculate pointer to %rp in stack */
parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
/* sanity check: parent_rp should hold parent */
if (*parent_rp != parent)
return;
prepare_ftrace_return(parent_rp, self_addr);
return;
}
#endif
}

File diff suppressed because it is too large

360
arch/parisc/kernel/head.S Normal file
View file

@ -0,0 +1,360 @@
/* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
* Copyright 1999 SuSE GmbH (Philipp Rumpf)
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
* Copyright (C) 2001 Grant Grundler (Hewlett Packard)
* Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
*
* Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
*/
#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <linux/linkage.h>
#include <linux/init.h>
.level LEVEL
__INITDATA
ENTRY(boot_args)
.word 0 /* arg0 */
.word 0 /* arg1 */
.word 0 /* arg2 */
.word 0 /* arg3 */
END(boot_args)
__HEAD
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
.proc
.callinfo
/* Make sure sr4-sr7 are set to zero for the kernel address space */
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Clear BSS (shouldn't the boot loader do this?) */
.import __bss_start,data
.import __bss_stop,data
load32 PA(__bss_start),%r3
load32 PA(__bss_stop),%r4
$bss_loop:
cmpb,<<,n %r3,%r4,$bss_loop
stw,ma %r0,4(%r3)
/* Save away the arguments the boot loader passed in (32 bit args) */
load32 PA(boot_args),%r1
stw,ma %arg0,4(%r1)
stw,ma %arg1,4(%r1)
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
/* Initialize startup VM. Just map first 8/16 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
#if PT_NLEVELS == 3
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
/* 2-level page table, so pmd == pgd */
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Fill in pmd with enough pte directories */
load32 PA(pg0),%r1
SHRREG %r1,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
ldi ASM_PT_INITIAL,%r1
1:
stw %r3,0(%r4)
ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
#if PT_NLEVELS == 3
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Now initialize the PTEs themselves. We use RWX for
* everything ... it will get remapped correctly later */
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1
$pgt_fill_loop:
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
addib,> -1,%r11,$pgt_fill_loop
nop
/* Load the return address...er...crash 'n burn */
copy %r0,%r2
/* And the RFI Target address too */
load32 start_parisc,%r11
/* And the initial task pointer */
load32 init_thread_union,%r6
mtctl %r6,%cr30
/* And the stack pointer too */
ldo THREAD_SZ_ALGN(%r6),%sp
#ifdef CONFIG_SMP
/* Set the smp rendezvous address into page zero.
** It would be safer to do this in init_smp_config() but
** it's just way easier to deal with here because
** of 64-bit function ptrs and the address is local to this file.
*/
load32 PA(smp_slave_stext),%r10
stw %r10,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
/* FALLTHROUGH */
.procend
/*
** Code Common to both Monarch and Slave processors.
** Entry:
**
** 1.1:
** %r11 must contain RFI target address.
** %r25/%r26 args to pass to target function
** %r2 in case rfi target decides it didn't like something
**
** 2.0w:
** %r3 PDCE_PROC address
** %r11 RFI target address
**
** Caller must init: SR4-7, %sp, %r10, %cr24/25,
*/
common_stext:
.proc
.callinfo
#else
/* Clear PDC entry point - we won't use it */
stw %r0,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
#ifdef CONFIG_64BIT
tophys_r1 %sp
/* Save the rfi target address */
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
std %r11, TASK_PT_GR11(%r10)
/* Switch to wide mode: Superdome doesn't support narrow PDC
** calls.
*/
1: mfia %rp /* clear upper part of pcoq */
ldo 2f-1b(%rp),%rp
depdi 0,31,32,%rp
bv (%rp)
ssm PSW_SM_W,%r0
/* Set Wide mode as the "Default" (eg for traps)
** First trap occurs *right* after (or part of) rfi for slave CPUs.
** Someday, palo might not do this for the Monarch either.
*/
2:
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
mfctl %cr30,%r6 /* PCX-W2 firmware bug */
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
load32 PA(stext_pdc_ret), %rp
bv (%r3)
copy %r0,%arg3
stext_pdc_ret:
mtctl %r6,%cr30 /* restore task thread info */
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
ldd TASK_PT_GR11(%r10), %r11
tovirt_r1 %sp
#endif
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
mtsp %r0,%sr2
mtsp %r0,%sr3
/* Initialize Protection Registers */
mtctl %r0,%cr8
mtctl %r0,%cr9
mtctl %r0,%cr12
mtctl %r0,%cr13
/* Initialize the global data pointer */
loadgp
/* Set up our interrupt table. HPMCs might not work after this!
*
* We need to install the correct iva for PA1.1 or PA2.0. The
* following short sequence of instructions can determine this
* (without being illegal on a PA1.1 machine).
*/
#ifndef CONFIG_64BIT
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
mfctl,w %cr11,%r10
.level 1.1
comib,<>,n 0,%r10,$is_pa20
ldil L%PA(fault_vector_11),%r10
b $install_iva
ldo R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
.level LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
$install_iva:
mtctl %r10,%cr14
b aligned_rfi /* Prepare to RFI! Man all the cannons! */
nop
.align 128
aligned_rfi:
pcxt_ssm_bug
rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */
/* Don't need NOPs, have 8 compliant insn before rfi */
mtctl %r0,%cr17 /* Clear IIASQ tail */
mtctl %r0,%cr17 /* Clear IIASQ head */
/* Load RFI target into PC queue */
mtctl %r11,%cr18 /* IIAOQ head */
ldo 4(%r11),%r11
mtctl %r11,%cr18 /* IIAOQ tail */
load32 KERNEL_PSW,%r10
mtctl %r10,%ipsw
/* Jump through hyperspace to Virt Mode */
rfi
nop
.procend
#ifdef CONFIG_SMP
.import smp_init_current_idle_task,data
.import smp_callin,code
#ifndef CONFIG_64BIT
smp_callin_rtn:
.proc
.callinfo
break 1,1 /* Break if returned from start_secondary */
nop
nop
.procend
#endif /*!CONFIG_64BIT*/
/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
.proc
.callinfo
/*
** Initialize Space registers
*/
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
LDREG 0(%sp),%sp /* load task address */
tophys_r1 %sp
LDREG TASK_THREAD_INFO(%sp),%sp
mtctl %sp,%cr30 /* store in cr30 */
ldo THREAD_SZ_ALGN(%sp),%sp
/* point CPU to kernel page tables */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
#ifdef CONFIG_64BIT
/* Setup PDCE_PROC entry */
copy %arg0,%r3
#else
/* Load RFI *return* address in case smp_callin bails */
load32 smp_callin_rtn,%r2
#endif
/* Load RFI target address. */
load32 smp_callin,%r11
/* ok...common code can handle the rest */
b common_stext
nop
.procend
#endif /* CONFIG_SMP */
ENDPROC(parisc_kernel_start)
#ifndef CONFIG_64BIT
.section .data..read_mostly
.align 4
.export $global$,data
.type $global$,@object
.size $global$,4
$global$:
.word 0
#endif /*!CONFIG_64BIT*/

305
arch/parisc/kernel/hpmc.S Normal file
View file

@ -0,0 +1,305 @@
/*
* HPMC (High Priority Machine Check) handler.
*
* Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
* Copyright (C) 2000 Hewlett-Packard (John Marvin)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* This HPMC handler retrieves the HPMC pim data, resets IO and
* returns to the default trap handler with code set to 1 (HPMC).
* The default trap handler calls handle interruption, which
* does a stack and register dump. This at least allows kernel
* developers to get back to C code in virtual mode, where they
* have the option to examine and print values from memory that
* would help in debugging an HPMC caused by a software bug.
*
* There is more to do here:
*
* 1) On MP systems we need to synchronize processors
* before calling pdc/iodc.
* 2) We should be checking the system state and not
* returning to the fault handler if things are really
* bad.
*
*/
.level 1.1
.data
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <linux/linkage.h>
/*
* stack for os_hpmc, the HPMC handler.
* buffer for IODC procedures (for the HPMC handler).
*
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
.align 4096
hpmc_stack:
.block 16384
#define HPMC_IODC_BUF_SIZE 0x8000
.align 4096
hpmc_iodc_buf:
.block HPMC_IODC_BUF_SIZE
.align 8
hpmc_raddr:
.block 128
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
.align 8
ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
END(hpmc_pim_data)
.text
.import intr_save, code
ENTRY(os_hpmc)
.os_hpmc:
/*
* registers modified:
*
* Using callee saves registers without saving them. The
* original values are in the pim dump if we need them.
*
* r2 (rp) return pointer
* r3 address of PDCE_PROC
* r4 scratch
* r5 scratch
* r23 (arg3) procedure arg
* r24 (arg2) procedure arg
* r25 (arg1) procedure arg
* r26 (arg0) procedure arg
* r30 (sp) stack pointer
*
* registers read:
*
* r26 contains address of PDCE_PROC on entry
* r28 (ret0) return value from procedure
*/
copy arg0, %r3 /* save address of PDCE_PROC */
/*
* disable nested HPMCs
*
* Increment os_hpmc checksum to invalidate it.
* Do this before turning the PSW M bit off.
*/
mfctl %cr14, %r4
ldw 52(%r4),%r5
addi 1,%r5,%r5
stw %r5,52(%r4)
/* MP_FIXME: synchronize all processors. */
/* Setup stack pointer. */
load32 PA(hpmc_stack),sp
ldo 128(sp),sp /* leave room for arguments */
/*
* Most PDC routines require that the M bit be off.
* So turn on the Q bit and turn off the M bit.
*/
ldo 8(%r0),%r4 /* PSW Q on, PSW M off */
mtctl %r4,ipsw
mtctl %r0,pcsq
mtctl %r0,pcsq
load32 PA(os_hpmc_1),%r4
mtctl %r4,pcoq
ldo 4(%r4),%r4
mtctl %r4,pcoq
rfi
nop
os_hpmc_1:
/* Call PDC_PIM to get HPMC pim info */
/*
* Note that on some newer boxes, PDC_PIM must be called
* before PDC_IO if you want IO to be reset. PDC_PIM sets
* a flag that PDC_IO examines.
*/
ldo PDC_PIM(%r0), arg0
ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */
load32 PA(hpmc_raddr),arg2
load32 PA(hpmc_pim_data),arg3
load32 HPMC_PIM_DATA_SIZE,%r4
stw %r4,-52(sp)
ldil L%PA(os_hpmc_2), rp
bv (r3) /* call pdce_proc */
ldo R%PA(os_hpmc_2)(rp), rp
os_hpmc_2:
comib,<> 0,ret0, os_hpmc_fail
/* Reset IO by calling the hversion dependent PDC_IO routine */
ldo PDC_IO(%r0),arg0
ldo 0(%r0),arg1 /* log IO errors */
ldo 0(%r0),arg2 /* reserved */
ldo 0(%r0),arg3 /* reserved */
stw %r0,-52(sp) /* reserved */
ldil L%PA(os_hpmc_3),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_3)(rp),rp
os_hpmc_3:
/* FIXME? Check for errors from PDC_IO (-1 might be OK) */
/*
* Initialize the IODC console device (HPA, SPA, path etc.
* are stored on page 0).
*/
/*
* Load IODC into hpmc_iodc_buf by calling PDC_IODC.
* Note that PDC_IODC handles flushing the appropriate
* data and instruction cache lines.
*/
ldo PDC_IODC(%r0),arg0
ldo PDC_IODC_READ(%r0),arg1
load32 PA(hpmc_raddr),arg2
ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
ldo PDC_IODC_RI_INIT(%r0),%r4
stw %r4,-52(sp)
load32 PA(hpmc_iodc_buf),%r4
stw %r4,-56(sp)
load32 HPMC_IODC_BUF_SIZE,%r4
stw %r4,-60(sp)
ldil L%PA(os_hpmc_4),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_4)(rp),rp
os_hpmc_4:
comib,<> 0,ret0,os_hpmc_fail
/* Call the entry init (just loaded by PDC_IODC) */
ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */
ldo ENTRY_INIT_MOD_DEV(%r0), arg1
ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */
depi 0,31,11,arg2 /* clear bits 21-31 */
ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
load32 PA(hpmc_raddr),%r4
stw %r4, -52(sp)
stw %r0, -56(sp) /* HV */
stw %r0, -60(sp) /* HV */
stw %r0, -64(sp) /* HV */
stw %r0, -68(sp) /* lang, must be zero */
load32 PA(hpmc_iodc_buf),%r5
ldil L%PA(os_hpmc_5),rp
bv (%r5)
ldo R%PA(os_hpmc_5)(rp),rp
os_hpmc_5:
comib,<> 0,ret0,os_hpmc_fail
/* Prepare to call intr_save */
/*
* Load kernel page directory (load into user also, since
* we don't intend to ever return to user land anyway)
*/
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
/* Clear sr4-sr7 */
mtsp %r0, %sr4
mtsp %r0, %sr5
mtsp %r0, %sr6
mtsp %r0, %sr7
tovirt_r1 %r30 /* make sp virtual */
rsm 8,%r0 /* Clear Q bit */
ldi 1,%r8 /* Set trap code to "1" for HPMC */
load32 PA(intr_save),%r1
be 0(%sr7,%r1)
nop
os_hpmc_fail:
/*
* Reset the system
*
* Some systems may lockup from a broadcast reset, so try the
* hversion PDC_BROADCAST_RESET() first.
* MP_FIXME: reset all processors if more than one central bus.
*/
/* PDC_BROADCAST_RESET() */
ldo PDC_BROADCAST_RESET(%r0),arg0
ldo 0(%r0),arg1 /* do reset */
ldil L%PA(os_hpmc_6),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_6)(rp),rp
os_hpmc_6:
/*
* possible return values:
* -1 non-existent procedure
* -2 non-existent option
* -16 unaligned stack
*
* If call returned, do a broadcast reset.
*/
ldil L%0xfffc0000,%r4 /* IO_BROADCAST */
ldo 5(%r0),%r5
stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */
b .
nop
ENDPROC(os_hpmc)
.os_hpmc_end:
nop
.data
.align 4
.export os_hpmc_size
os_hpmc_size:
.word .os_hpmc_end-.os_hpmc

622
arch/parisc/kernel/inventory.c Normal file
View file

@ -0,0 +1,622 @@
/*
* inventory.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
* Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
*
* These are the routines to discover what hardware exists in this box.
* This task is complicated by there being 3 different ways of
* performing an inventory, depending largely on the age of the box.
* The recommended way to do this is to check to see whether the machine
* is a `Snake' first, then try System Map, then try PAT. We try System
* Map before checking for a Snake -- this probably doesn't cause any
* problems, but...
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
/*
** Debug options
** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT
int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
void __init setup_pdc(void)
{
long status;
unsigned int bus_id;
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
struct pdc_model model;
#ifdef CONFIG_64BIT
struct pdc_pat_cell_num cell_info;
#endif
/* Determine the pdc "type" used on this machine */
printk(KERN_INFO "Determining PDC firmware type: ");
status = pdc_system_map_find_mods(&module_result, &module_path, 0);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_SYSTEM_MAP;
printk("System Map.\n");
return;
}
/*
* If the machine doesn't support PDC_SYSTEM_MAP then either it
* is a pdc pat box, or it is an older box. All 64 bit capable
* machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
*/
/*
* TODO: We should test for 64 bit capability and give a
* clearer message.
*/
#ifdef CONFIG_64BIT
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_PAT;
printk("64 bit PAT.\n");
return;
}
#endif
/* Check the CPU's bus ID. There's probably a better test. */
status = pdc_model_info(&model);
bus_id = (model.hversion >> (4 + 7)) & 0x1f;
switch (bus_id) {
case 0x4: /* 720, 730, 750, 735, 755 */
case 0x6: /* 705, 710 */
case 0x7: /* 715, 725 */
case 0x8: /* 745, 747, 742 */
case 0xA: /* 712 and similar */
case 0xC: /* 715/64, at least */
pdc_type = PDC_TYPE_SNAKE;
printk("Snake.\n");
return;
default: /* Everything else */
printk("Unsupported.\n");
panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
}
}
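/*
 * Illustrative arithmetic (the hversion value is hypothetical): for
 * model.hversion == 0x3820, (0x3820 >> 11) & 0x1f == 0x7, which the
 * switch above classifies as a Snake (715/725 class) box.
 */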
#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
unsigned long pages4k)
{
/* Rather than aligning and potentially throwing away
* memory, we'll assume that any ranges are already
* nicely aligned with any reasonable page size, and
* panic if they are not (it's more likely that the
* pdc info is bad in this case).
*/
if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
|| ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
panic("Memory range doesn't align with page size!\n");
}
pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}
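/*
 * Worked example (illustrative): PDC always reports 4k pages.  With a
 * 4k kernel PAGE_SIZE, PDC_PAGE_ADJ_SHIFT is 0 and pages4k is used
 * unchanged; with a hypothetical 16k PAGE_SIZE it would be 2, so a
 * range of 0x400 4k-pages becomes 0x100 kernel pages.
 */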
static void __init pagezero_memconfig(void)
{
unsigned long npages;
/* Use the 32 bit information from page zero to create a single
* entry in the pmem_ranges[] table.
*
* We currently don't support machines with contiguous memory
* >= 4 Gb, who report that memory using 64 bit only fields
* on page zero. It's not worth doing until it can be tested,
* and it is not clear we can support those machines for other
* reasons.
*
* If that support is done in the future, this is where it
* should be done.
*/
npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
set_pmem_entry(pmem_ranges,0UL,npages);
npmem_ranges = 1;
}
#ifdef CONFIG_64BIT
/* All of the PDC PAT specific code is 64-bit only */
/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs. This view will cause an invalid
** argument error for all other cell module types.
**
*/
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
unsigned long bytecnt;
unsigned long temp; /* 64-bit scratch value */
long status; /* PDC return value status */
struct parisc_device *dev;
pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
if (!pa_pdc_cell)
panic("couldn't allocate memory for PDC_PAT_CELL!");
/* return cell module (PA or Processor view) */
status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
PA_VIEW, pa_pdc_cell);
if (status != PDC_OK) {
/* no more cell modules or error */
kfree(pa_pdc_cell);
return status;
}
temp = pa_pdc_cell->cba;
dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
if (!dev) {
kfree(pa_pdc_cell);
return PDC_OK;
}
/* alloc_pa_dev sets dev->hpa */
/*
** save parameters in the parisc_device
** (The idea being the device driver will call pdc_pat_cell_module()
** and store the results in its own data structure.)
*/
dev->pcell_loc = pcell_loc;
dev->mod_index = mod_index;
/* save generic info returned from the call */
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
#ifdef DEBUG_PAT
pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
/* dump what we see so far... */
switch (PAT_GET_ENTITY(dev->mod_info)) {
unsigned long i;
case PAT_ENTITY_PROC:
printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
pa_pdc_cell->mod[0]);
break;
case PAT_ENTITY_MEM:
printk(KERN_DEBUG
"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
pa_pdc_cell->mod[2]);
break;
case PAT_ENTITY_CA:
printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
break;
case PAT_ENTITY_PBC:
printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
goto print_ranges;
case PAT_ENTITY_SBA:
printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
goto print_ranges;
case PAT_ENTITY_LBA:
printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
print_ranges:
pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
IO_VIEW, &io_pdc_cell);
printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
printk(KERN_DEBUG
" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
i, pa_pdc_cell->mod[2 + i * 3], /* type */
pa_pdc_cell->mod[3 + i * 3], /* start */
pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
printk(KERN_DEBUG
" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
i, io_pdc_cell.mod[2 + i * 3], /* type */
io_pdc_cell.mod[3 + i * 3], /* start */
io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
}
printk(KERN_DEBUG "\n");
break;
}
#endif /* DEBUG_PAT */
kfree(pa_pdc_cell);
return PDC_OK;
}
/* pat pdc can return information about a variety of different
* types of memory (e.g. firmware, I/O, etc.) but we only care about
* the usable physical ram right now. Since the firmware specific
* information is allocated on the stack, we'll be generous, in
* case there is a lot of other information we don't care about.
*/
#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
static void __init pat_memconfig(void)
{
unsigned long actual_len;
struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
physmem_range_t *pmem_ptr;
long status;
int entries;
unsigned long length;
int i;
length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
if ((status != PDC_OK)
|| ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
/* The above pdc call shouldn't fail, but, just in
* case, just use the PAGE0 info.
*/
printk("\n\n\n");
printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
"All memory may not be used!\n\n\n");
pagezero_memconfig();
return;
}
entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
if (entries > PAT_MAX_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory may not be used!\n");
}
/* Copy information into the firmware independent pmem_ranges
* array, skipping types we don't care about. Notice we said
* "may" above. We'll use all the entries that were returned.
*/
npmem_ranges = 0;
mtbl_ptr = mem_table;
pmem_ptr = pmem_ranges; /* Global firmware independent table */
for (i = 0; i < entries; i++,mtbl_ptr++) {
if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
|| (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
|| (mtbl_ptr->pages == 0)
|| ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
continue;
}
if (npmem_ranges == MAX_PHYSMEM_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory will not be used!\n");
break;
}
set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
npmem_ranges++;
}
}
static int __init pat_inventory(void)
{
int status;
ulong mod_index = 0;
struct pdc_pat_cell_num cell_info;
/*
** Note: Prelude (and its successors: Lclass, A400/500) only
** implement PDC_PAT_CELL sub-options 0 and 2.
*/
status = pdc_pat_cell_get_number(&cell_info);
if (status != PDC_OK) {
return 0;
}
#ifdef DEBUG_PAT
printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
cell_info.cell_loc);
#endif
while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
mod_index++;
}
return mod_index;
}
/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
struct pdc_memory_table_raddr r_addr;
struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
struct pdc_memory_table *mtbl_ptr;
physmem_range_t *pmem_ptr;
long status;
int entries;
int i;
status = pdc_mem_mem_table(&r_addr,mem_table,
(unsigned long)MAX_PHYSMEM_RANGES);
if (status != PDC_OK) {
/* The above pdc call only works on boxes with sprockets
* firmware (newer B,C,J class). Other non PAT PDC machines
* do support more than 3.75 Gb of memory, but we don't
* support them yet.
*/
pagezero_memconfig();
return;
}
if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory will not be used!\n");
}
entries = (int)r_addr.entries_returned;
npmem_ranges = 0;
mtbl_ptr = mem_table;
pmem_ptr = pmem_ranges; /* Global firmware independent table */
for (i = 0; i < entries; i++,mtbl_ptr++) {
set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
npmem_ranges++;
}
}
#else /* !CONFIG_64BIT */
#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()
#endif /* !CONFIG_64BIT */
#ifndef CONFIG_PA20
/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
struct pdc_module_path *module_path)
{
struct parisc_device *dev;
int status = pdc_mem_map_hpa(r_addr, module_path);
if (status != PDC_OK)
return NULL;
dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
if (dev == NULL)
return NULL;
register_parisc_device(dev);
return dev;
}
/**
* snake_inventory
*
* Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
* To use it, we initialise the mod_path.bc to 0xff and try all values of
* mod to get the HPA for the top-level devices. Bus adapters may have
* sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
* module, then trying all possible functions.
*/
static void __init snake_inventory(void)
{
int mod;
for (mod = 0; mod < 16; mod++) {
struct parisc_device *dev;
struct pdc_module_path module_path;
struct pdc_memory_map r_addr;
unsigned int func;
memset(module_path.path.bc, 0xff, 6);
module_path.path.mod = mod;
dev = legacy_create_device(&r_addr, &module_path);
if ((!dev) || (dev->id.hw_type != HPHW_BA))
continue;
memset(module_path.path.bc, 0xff, 4);
module_path.path.bc[4] = mod;
for (func = 0; func < 16; func++) {
module_path.path.bc[5] = 0;
module_path.path.mod = func;
legacy_create_device(&r_addr, &module_path);
}
}
}
#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif /* CONFIG_PA20 */
/* Common 32/64 bit based code goes here */
/**
* add_system_map_addresses - Add additional addresses to the parisc device.
* @dev: The parisc device.
* @num_addrs: The number of addresses to add.
* @module_instance: The system_map module instance.
*
* This function adds any additional addresses reported by the system_map
* firmware to the parisc device.
*/
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
int module_instance)
{
int i;
long status;
struct pdc_system_map_addr_info addr_result;
dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
__FILE__, __func__);
return;
}
for(i = 1; i <= num_addrs; ++i) {
status = pdc_system_map_find_addrs(&addr_result,
module_instance, i);
if(PDC_OK == status) {
dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
dev->num_addrs++;
} else {
printk(KERN_WARNING
"Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
status, i);
}
}
}
/**
* system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
*
* This function attempts to retrieve and register all the devices firmware
* knows about via the SYSTEM_MAP PDC call.
*/
static void __init system_map_inventory(void)
{
int i;
long status = PDC_OK;
for (i = 0; i < 256; i++) {
struct parisc_device *dev;
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
status = pdc_system_map_find_mods(&module_result,
&module_path, i);
if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
break;
if (status != PDC_OK)
continue;
dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
if (!dev)
continue;
register_parisc_device(dev);
/* if available, get the additional addresses for a module */
if (!module_result.add_addrs)
continue;
add_system_map_addresses(dev, module_result.add_addrs, i);
}
walk_central_bus();
return;
}
void __init do_memory_inventory(void)
{
switch (pdc_type) {
case PDC_TYPE_PAT:
pat_memconfig();
break;
case PDC_TYPE_SYSTEM_MAP:
sprockets_memconfig();
break;
case PDC_TYPE_SNAKE:
pagezero_memconfig();
return;
default:
panic("Unknown PDC type!\n");
}
if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
printk(KERN_WARNING "Bad memory configuration returned!\n");
printk(KERN_WARNING "Some memory may not be used!\n");
pagezero_memconfig();
}
}
void __init do_device_inventory(void)
{
printk(KERN_INFO "Searching for devices...\n");
init_parisc_bus();
switch (pdc_type) {
case PDC_TYPE_PAT:
pat_inventory();
break;
case PDC_TYPE_SYSTEM_MAP:
system_map_inventory();
break;
case PDC_TYPE_SNAKE:
snake_inventory();
break;
default:
panic("Unknown PDC type!\n");
}
printk(KERN_INFO "Found devices:\n");
print_parisc_devices();
}

600
arch/parisc/kernel/irq.c Normal file
View file

@ -0,0 +1,600 @@
/*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
* Copyright (C) 1999-2000 Grant Grundler
* Copyright (c) 2005 Matthew Wilcox
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/ldcw.h>
#undef PARISC_IRQ_CR16_COUNTS
extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);
#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;
/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
static void cpu_mask_irq(struct irq_data *d)
{
unsigned long eirr_bit = EIEM_MASK(d->irq);
cpu_eiem &= ~eirr_bit;
/* Do nothing on the other CPUs. If they get this interrupt,
* the & cpu_eiem in do_cpu_irq_mask() ensures they won't
* handle it, and the set_eiem() at the bottom will ensure it
* then gets disabled */
}
static void __cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem |= eirr_bit;
/* This is just a simple NOP IPI. But what it does is cause
* all the other CPUs to do a set_eiem(cpu_eiem) at the end
* of the interrupt handler */
smp_send_all_nop();
}
static void cpu_unmask_irq(struct irq_data *d)
{
__cpu_unmask_irq(d->irq);
}
void cpu_ack_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* Clear in EIEM so we can no longer process */
per_cpu(local_ack_eiem, cpu) &= ~mask;
/* disable the interrupt */
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
/* and now ack it */
mtctl(mask, 23);
}
void cpu_eoi_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* set it in the eiems---it's no longer in process */
per_cpu(local_ack_eiem, cpu) |= mask;
/* enable the interrupt */
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
if (irqd_is_per_cpu(d))
return -EINVAL;
/* whatever mask they set, we just allow one CPU */
cpu_dest = cpumask_first_and(dest, cpu_online_mask);
return cpu_dest;
}
static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
bool force)
{
int cpu_dest;
cpu_dest = cpu_check_affinity(d, dest);
if (cpu_dest < 0)
return -1;
cpumask_copy(d->affinity, dest);
return 0;
}
#endif
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
.irq_mask = cpu_mask_irq,
.irq_unmask = cpu_unmask_irq,
.irq_ack = cpu_ack_irq,
.irq_eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = cpu_set_affinity_irq,
#endif
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
.irq_retrigger = NULL,
};
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
* /proc/interrupts printing for arch specific interrupts
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
seq_printf(p, "%*s: ", prec, "STK");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
seq_printf(p, "%*s: ", prec, "IST");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
seq_puts(p, " Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
seq_puts(p, " Rescheduling interrupts\n");
#endif
seq_printf(p, "%*s: ", prec, "UAH");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
seq_puts(p, " Unaligned access handler traps\n");
seq_printf(p, "%*s: ", prec, "FPA");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
seq_puts(p, " Floating point assist traps\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_puts(p, " TLB shootdowns\n");
return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(j)
seq_printf(p, " CPU%d", j);
#ifdef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
struct irq_desc *desc = irq_to_desc(i);
struct irqaction *action;
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
while ((action = action->next))
seq_printf(p, ", %s", action->name);
#else
for ( ;action; action = action->next) {
unsigned int k, avg, min, max;
min = max = action->cr16_hist[0];
for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
int hist = action->cr16_hist[k];
if (hist) {
avg += hist;
} else
break;
if (hist > max) max = hist;
if (hist < min) min = hist;
}
avg /= k;
seq_printf(p, " %s[%d/%d/%d]", action->name,
min,avg,max);
}
#endif
seq_putc(p, '\n');
skip:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
if (i == NR_IRQS)
arch_show_interrupts(p, 3);
return 0;
}
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
if (irq_has_action(irq))
return -EBUSY;
if (irq_get_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
/* for iosapic interrupts */
if (type) {
irq_set_chip_and_handler(irq, type, handle_percpu_irq);
irq_set_chip_data(irq, data);
__cpu_unmask_irq(irq);
}
return 0;
}
int txn_claim_irq(int irq)
{
return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}
/*
* The bits_wide parameter accommodates the limitations of the HW/SW which
* use these bits:
* Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
* V-class (EPIC): 6 bits
* N/L/A-class (iosapic): 8 bits
* PCI 2.2 MSI: 16 bits
* Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
*
* On the service provider side:
* o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
* o PA 2.0 wide mode 6-bits (per processor)
* o IA64 8-bits (0-256 total)
*
* So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
* by the processor...and the N/L-class I/O subsystem supports more bits than
* PA2.0 has. The first case is the problem.
*/
int txn_alloc_irq(unsigned int bits_wide)
{
int irq;
/* never return irq 0 cause that's the interval timer */
for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
if (cpu_claim_irq(irq, NULL, NULL) < 0)
continue;
if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
continue;
return irq;
}
/* unlikely, but be prepared */
return -1;
}
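/*
 * Example (illustrative): a legacy GSC device passes bits_wide == 5,
 * so only IRQs whose offset from CPU_IRQ_BASE is below 1 << 5 == 32
 * survive the range check above and can be returned.
 */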
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
struct irq_data *d = irq_get_irq_data(irq);
cpumask_copy(d->affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
}
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
while ((next_cpu < nr_cpu_ids) &&
(!per_cpu(cpu_data, next_cpu).txn_addr ||
!cpu_online(next_cpu)))
next_cpu++;
if (next_cpu >= nr_cpu_ids)
next_cpu = 0; /* nothing else, assign monarch */
return txn_affinity_addr(virt_irq, next_cpu);
}
unsigned int txn_alloc_data(unsigned int virt_irq)
{
return virt_irq - CPU_IRQ_BASE;
}
static inline int eirr_to_irq(unsigned long eirr)
{
int bit = fls_long(eirr);
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
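/*
 * Worked example: the interval timer owns the most significant EIRR
 * bit, so for eirr == 1UL << (BITS_PER_LONG - 1), fls_long() returns
 * BITS_PER_LONG and eirr_to_irq() yields exactly TIMER_IRQ.
 */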
#ifdef CONFIG_IRQSTACKS
/*
* IRQ STACK - used for irq handler
*/
#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
volatile unsigned int slock[4];
volatile unsigned int lock[1];
};
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
int sysctl_panic_on_stackoverflow = 1;
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN (256*6)
/* Our stack starts directly behind the thread_info struct. */
unsigned long stack_start = (unsigned long) current_thread_info();
unsigned long sp = regs->gr[30];
unsigned long stack_usage;
unsigned int *last_usage;
int cpu = smp_processor_id();
/* if sr7 != 0, we interrupted a userspace process which we do not want
* to check for stack overflow. We will only check the kernel stack. */
if (regs->sr[7])
return;
/* calculate kernel stack usage */
stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
if (likely(stack_usage <= THREAD_SIZE))
goto check_kernel_stack; /* found kernel stack */
/* check irq stack usage */
stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
stack_usage = sp - stack_start;
last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
return;
pr_emerg("stackcheck: %s will most likely overflow irq stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
goto panic_check;
check_kernel_stack:
#endif
/* check kernel stack usage */
last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
return;
pr_emerg("stackcheck: %s will most likely overflow kernel stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + THREAD_SIZE);
#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
#endif
}
#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
static void execute_on_irq_stack(void *func, unsigned long param1)
{
union irq_stack_union *union_ptr;
unsigned long irq_stack;
volatile unsigned int *irq_stack_in_use;
union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
irq_stack = (unsigned long) &union_ptr->stack;
irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
64); /* align for stack frame usage */
/* We may be called recursively. If we are already using the irq stack,
* just continue to use it. Use spinlocks to serialize
* the irq stack usage.
*/
irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
if (!__ldcw(irq_stack_in_use)) {
void (*direct_call)(unsigned long p1) = func;
/* We are using the IRQ stack already.
* Do direct call on current stack. */
direct_call(param1);
return;
}
/* This is where we switch to the IRQ stack. */
call_on_stack(param1, func, irq_stack);
/* free up irq stack usage. */
*irq_stack_in_use = 1;
}
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
struct pt_regs *old_regs;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
struct irq_desc *desc;
cpumask_t dest;
#endif
old_regs = set_irq_regs(regs);
local_irq_disable();
irq_enter();
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
if (!eirr_val)
goto set_out;
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
desc = irq_to_desc(irq);
cpumask_copy(&dest, desc->irq_data.affinity);
if (irqd_is_per_cpu(&desc->irq_data) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
per_cpu(cpu_data, cpu).hpa);
goto set_out;
}
#endif
stack_overflow_check(regs);
#ifdef CONFIG_IRQSTACKS
execute_on_irq_stack(&generic_handle_irq, irq);
#else
generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */
out:
irq_exit();
set_irq_regs(old_regs);
return;
set_out:
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
goto out;
}
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
.flags = IRQF_PERCPU,
};
#endif
static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
irq_set_chip_and_handler(i, &cpu_interrupt_type,
handle_percpu_irq);
}
irq_set_handler(TIMER_IRQ, handle_percpu_irq);
setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
irq_set_handler(IPI_IRQ, handle_percpu_irq);
setup_irq(IPI_IRQ, &ipi_action);
#endif
}
void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
if (!cpu_eiem) {
claim_cpu_irqs();
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
}
#else
claim_cpu_irqs();
cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}

944
arch/parisc/kernel/module.c Normal file
View file

@ -0,0 +1,944 @@
/* Kernel dynamically loadable module help for PARISC.
*
* The best reference for this stuff is probably the Processor-
* Specific ELF Supplement for PA-RISC:
* http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf
*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 2003 Randolph Chung <tausq at debian . org>
* Copyright (C) 2008 Helge Deller <deller@gmx.de>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* Notes:
* - PLT stub handling
* On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
* ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
* fail to reach their PLT stub if we only create one big stub array for
* all sections at the beginning of the core or init section.
* Instead we now insert individual PLT stub entries directly in front
* of the code sections where the stubs are actually called.
* This reduces the distance between the PCREL location and the stub entry
* so that the relocations can be fulfilled.
* While calculating the final layout of the kernel module in memory, the
* kernel module loader calls arch_mod_section_prepend() to request the
* amount of memory to be reserved in front of each individual section.
*
* - SEGREL32 handling
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
* if (in_init(me, (void *)val))
* val -= (uint32_t)me->module_init;
* else
* val -= (uint32_t)me->module_core;
* However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset.
*
* The unwind table mechanism has the ability to specify an offset for
* the unwind table; however, because we split off the init functions into
* a different piece of memory, it is not possible to do this using a
* single offset. Instead, we use the above hack for now.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/unwind.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
#define RELOC_REACHABLE(val, bits) \
(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
0 : 1)
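/* Worked example (editor's note): the branch offsets checked with this
 * macro have already been divided by 4 as unsigned values, which is what
 * the "+2" in the shift above compensates for.  Taking the 32-bit case
 * with bits == 17: a forward word offset up to 0xffff passes (sign bit 16
 * clear, val >> 17 == 0), and a short backward branch of -16 bytes gives
 * val = 0xfffffff0 / 4 = 0x3ffffffc, whose upper bits
 * val >> 17 == 0x1fff match (~0U) >> 19, so it is reachable too. */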
#define CHECK_RELOC(val, bits) \
if (!RELOC_REACHABLE(val, bits)) { \
printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
me->name, strtab + sym->st_name, (unsigned long)val, bits); \
return -ENOEXEC; \
}
/* Maximum number of GOT entries. We use a long displacement ldd from
* the bottom of the table, which has a maximum signed displacement of
* 0x3fff; however, since we're only going forward, this becomes
* 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
* at most 1023 entries.
* To overcome this 14bit displacement with some kernel modules, we'll
 * use instead the unusual 16-bit displacement method (see reassemble_16a)
* which gives us a maximum positive displacement of 0x7fff, and as such
* allows us to allocate up to 4095 GOT entries. */
#define MAX_GOTS 4095
/* three functions to determine where in the module core
* or init pieces the location is */
static inline int in_init(struct module *me, void *loc)
{
return (loc >= me->module_init &&
loc <= (me->module_init + me->init_size));
}
static inline int in_core(struct module *me, void *loc)
{
return (loc >= me->module_core &&
loc <= (me->module_core + me->core_size));
}
static inline int in_local(struct module *me, void *loc)
{
return in_init(me, loc) || in_core(me, loc);
}
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
};
struct stub_entry {
Elf32_Word insns[2]; /* each stub entry has two insns */
};
#else
struct got_entry {
Elf64_Addr addr;
};
struct stub_entry {
Elf64_Word insns[4]; /* each stub entry has four insns */
};
#endif
/* Field selection types defined by hppa */
#define rnd(x) (((x)+0x1000)&~0x1fff)
/* fsel: full 32 bits */
#define fsel(v,a) ((v)+(a))
/* lsel: select left 21 bits */
#define lsel(v,a) (((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a) (((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a) (((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
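/* Editor's example: these selectors split a 32-bit value into the 21-bit
 * left part loaded by ldil and the 11-bit right part added by ldo/be.
 * For v + a = 0x12345678, lsel() gives 0x2468a and rsel() gives 0x678,
 * and (0x2468a << 11) + 0x678 reconstructs the value; lrsel()/rrsel()
 * perform the same split with the addend rounded to the nearest 8k so
 * that the pair still sums to v + a. */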
#define mask(x,sz) ((x) & ~((1<<(sz))-1))
/* The reassemble_* functions prepare an immediate value for
insertion into an opcode. pa-risc uses all sorts of weird bitfields
in the instruction to hold the value. */
static inline int sign_unext(int x, int len)
{
int len_ones;
len_ones = (1 << len) - 1;
return x & len_ones;
}
static inline int low_sign_unext(int x, int len)
{
int sign, temp;
sign = (x >> (len-1)) & 1;
temp = sign_unext(x, len-1);
return (temp << 1) | sign;
}
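/* Editor's example: PA-RISC "low sign" fields keep the sign bit in the
 * least significant position, so low_sign_unext(8, 5) == 0b10000
 * (8 << 1 | 0) while low_sign_unext(-2, 5) == 0b11101 (0xe << 1 | 1). */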
static inline int reassemble_14(int as14)
{
return (((as14 & 0x1fff) << 1) |
((as14 & 0x2000) >> 13));
}
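/* Editor's example: the im14 field of ldo/ldw is low-sign encoded, so
 * reassemble_14(-4) == 0x3ff9: the low 13 bits (0x1ffc) move up one
 * position and the sign bit of the 14-bit value lands in bit 0. */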
static inline int reassemble_16a(int as16)
{
int s, t;
/* Unusual 16-bit encoding, for wide mode only. */
t = (as16 << 1) & 0xffff;
s = (as16 & 0x8000);
return (t ^ s ^ (s >> 1)) | (s >> 15);
}
static inline int reassemble_17(int as17)
{
return (((as17 & 0x10000) >> 16) |
((as17 & 0x0f800) << 5) |
((as17 & 0x00400) >> 8) |
((as17 & 0x003ff) << 3));
}
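/* Editor's note, tracing the shuffles above: the 17-bit word displacement
 * of bl/be is scattered so that bits 0..9 land at instruction bits 3..12,
 * bit 10 at bit 2, bits 11..15 at bits 16..20, and the sign bit at bit 0;
 * e.g. reassemble_17(1) == 0x8 and reassemble_17(0x10000) == 0x1. */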
static inline int reassemble_21(int as21)
{
return (((as21 & 0x100000) >> 20) |
((as21 & 0x0ffe00) >> 8) |
((as21 & 0x000180) << 7) |
((as21 & 0x00007c) << 14) |
((as21 & 0x000003) << 12));
}
static inline int reassemble_22(int as22)
{
return (((as22 & 0x200000) >> 21) |
((as22 & 0x1f0000) << 5) |
((as22 & 0x00f800) << 5) |
((as22 & 0x000400) >> 8) |
((as22 & 0x0003ff) << 3));
}
void *module_alloc(unsigned long size)
{
/* using RWX means less protection for modules, but it's
* easier than trying to map the text, data, init_text and
* init_data correctly */
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL | __GFP_HIGHMEM,
PAGE_KERNEL_RWX, NUMA_NO_NODE,
__builtin_return_address(0));
}
#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
return 0;
}
static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
return 0;
}
static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF32_R_TYPE(rela->r_info)) {
case R_PARISC_PCREL17F:
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
#else
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_LTOFF21L:
case R_PARISC_LTOFF14R:
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_FPTR64:
cnt++;
}
}
return cnt;
}
static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
#endif
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
kfree(mod->arch.section);
mod->arch.section = NULL;
vfree(module_region);
}
/* Additional bytes needed in front of individual sections */
unsigned int arch_mod_section_prepend(struct module *mod,
unsigned int section)
{
/* size needed for all stubs of this section (including
* one additional for correct alignment of the stubs) */
return (mod->arch.section[section].stub_entries + 1)
* sizeof(struct stub_entry);
}
#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
CONST Elf_Shdr *sechdrs,
CONST char *secstrings,
struct module *me)
{
unsigned long gots = 0, fdescs = 0, len;
unsigned int i;
len = hdr->e_shnum * sizeof(me->arch.section[0]);
me->arch.section = kzalloc(len, GFP_KERNEL);
if (!me->arch.section)
return -ENOMEM;
for (i = 1; i < hdr->e_shnum; i++) {
const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
unsigned int count, s;
if (strncmp(secstrings + sechdrs[i].sh_name,
".PARISC.unwind", 14) == 0)
me->arch.unwind_section = i;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
		/* some of these counts are not relevant on 32-bit or
		 * 64-bit respectively; we leave them here to make the
		 * code common. the compiler will do its thing and
		 * optimize out the stuff we don't need
		 */
gots += count_gots(rels, nrels);
fdescs += count_fdescs(rels, nrels);
/* XXX: By sorting the relocs and finding duplicate entries
* we could reduce the number of necessary stubs and save
* some memory. */
count = count_stubs(rels, nrels);
if (!count)
continue;
/* so we need relocation stubs. reserve necessary memory. */
/* sh_info gives the section for which we need to add stubs. */
s = sechdrs[i].sh_info;
/* each code section should only have one relocation section */
WARN_ON(me->arch.section[s].stub_entries);
/* store number of stubs we need for this section */
me->arch.section[s].stub_entries += count;
}
/* align things a bit */
me->core_size = ALIGN(me->core_size, 16);
me->arch.got_offset = me->core_size;
me->core_size += gots * sizeof(struct got_entry);
me->core_size = ALIGN(me->core_size, 16);
me->arch.fdesc_offset = me->core_size;
me->core_size += fdescs * sizeof(Elf_Fdesc);
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
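	/* Editor's note: the offsets recorded above are relative to
	 * module_core, so get_got()/get_fdesc() can locate both tables at
	 * module_core + *_offset later on, and the module's gp (see
	 * get_fdesc() and register_unwind_table()) is simply the GOT base. */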
return 0;
}
#ifdef CONFIG_64BIT
static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
{
unsigned int i;
struct got_entry *got;
value += addend;
BUG_ON(value == 0);
got = me->module_core + me->arch.got_offset;
for (i = 0; got[i].addr; i++)
if (got[i].addr == value)
goto out;
BUG_ON(++me->arch.got_count > me->arch.got_max);
got[i].addr = value;
out:
DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
value);
return i * sizeof(struct got_entry);
}
#endif /* CONFIG_64BIT */
#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
return 0;
}
/* Look for existing fdesc entry. */
while (fdesc->addr) {
if (fdesc->addr == value)
return (Elf_Addr)fdesc;
fdesc++;
}
BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);
/* Create new one */
fdesc->addr = value;
fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
return (Elf_Addr)fdesc;
}
#endif /* CONFIG_64BIT */
enum elf_stub_type {
ELF_STUB_GOT,
ELF_STUB_MILLI,
ELF_STUB_DIRECT,
};
static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
{
struct stub_entry *stub;
int __maybe_unused d;
/* initialize stub_offset to point in front of the section */
if (!me->arch.section[targetsec].stub_offset) {
loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
sizeof(struct stub_entry);
/* get correct alignment for the stubs */
loc0 = ALIGN(loc0, sizeof(struct stub_entry));
me->arch.section[targetsec].stub_offset = loc0;
}
/* get address of stub entry */
stub = (void *) me->arch.section[targetsec].stub_offset;
me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
/* do not write outside available stub area */
BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
* ldil L'XXX,%r1
* be,n R'XXX(%sr4,%r1)
*/
//value = *(unsigned long *)((value + addend) & ~3); /* why? */
stub->insns[0] = 0x20200000; /* ldil L'XXX,%r1 */
stub->insns[1] = 0xe0202002; /* be,n R'XXX(%sr4,%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);
#else
/* for 64-bit we have three kinds of stubs:
* for normal function calls:
* ldd 0(%dp),%dp
* ldd 10(%dp), %r1
* bve (%r1)
* ldd 18(%dp), %dp
*
* for millicode:
* ldil 0, %r1
* ldo 0(%r1), %r1
* ldd 10(%r1), %r1
* bve,n (%r1)
*
* for direct branches (jumps between different section of the
* same module):
* ldil 0, %r1
* ldo 0(%r1), %r1
* bve,n (%r1)
*/
switch (stub_type) {
case ELF_STUB_GOT:
d = get_got(me, value, addend);
if (d <= 15) {
/* Format 5 */
stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
stub->insns[0] |= low_sign_unext(d, 5) << 16;
} else {
/* Format 3 */
stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
stub->insns[0] |= reassemble_16a(d);
}
stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
stub->insns[2] = 0xe820d000; /* bve (%r1) */
stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
break;
case ELF_STUB_MILLI:
stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
stub->insns[2] = 0x50210020; /* ldd 10(%r1),%r1 */
stub->insns[3] = 0xe820d002; /* bve,n (%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_14(rrsel(value, addend));
break;
case ELF_STUB_DIRECT:
stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
stub->insns[2] = 0xe820d002; /* bve,n (%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_14(rrsel(value, addend));
break;
}
#endif
return (Elf_Addr)stub;
}
#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
Elf32_Word *loc;
Elf32_Addr val;
Elf32_Sword addend;
Elf32_Addr dot;
Elf_Addr loc0;
unsigned int targetsec = sechdrs[relsec].sh_info;
//unsigned long dp = (unsigned long)$global$;
register unsigned long dp asm ("r27");
DEBUGP("Applying relocate section %u to %u\n", relsec,
targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
/* This is the start of the target section */
loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
if (!sym->st_value) {
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
dot = (Elf32_Addr)loc & ~0x03;
val = sym->st_value;
addend = rel[i].r_addend;
#if 0
#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
strtab + sym->st_name,
(uint32_t)loc, val, addend,
r(R_PARISC_PLABEL32)
r(R_PARISC_DIR32)
r(R_PARISC_DIR21L)
r(R_PARISC_DIR14R)
r(R_PARISC_SEGREL32)
r(R_PARISC_DPREL21L)
r(R_PARISC_DPREL14R)
r(R_PARISC_PCREL17F)
r(R_PARISC_PCREL22F)
"UNKNOWN");
#undef r
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_PARISC_PLABEL32:
/* 32-bit function address */
/* no function descriptors... */
*loc = fsel(val, addend);
break;
case R_PARISC_DIR32:
/* direct 32-bit ref */
*loc = fsel(val, addend);
break;
case R_PARISC_DIR21L:
/* left 21 bits of effective address */
val = lrsel(val, addend);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_DIR14R:
/* right 14 bits of effective address */
val = rrsel(val, addend);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_SEGREL32:
/* 32-bit segment relative address */
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
*loc = fsel(val, addend);
break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_DPREL14R:
/* right 14 bit of relative address */
val = rrsel(val - dp, addend);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL17F:
/* 17-bit PC relative address */
/* calculate direct call offset */
val += addend;
val = (val - dot - 8)/4;
if (!RELOC_REACHABLE(val, 17)) {
/* direct distance too far, create
* stub entry instead */
val = get_stub(me, sym->st_value, addend,
ELF_STUB_DIRECT, loc0, targetsec);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 17);
}
*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
break;
case R_PARISC_PCREL22F:
/* 22-bit PC relative address; only defined for pa20 */
/* calculate direct call offset */
val += addend;
val = (val - dot - 8)/4;
if (!RELOC_REACHABLE(val, 22)) {
/* direct distance too far, create
* stub entry instead */
val = get_stub(me, sym->st_value, addend,
ELF_STUB_DIRECT, loc0, targetsec);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 22);
}
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#else
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
int i;
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
Elf64_Word *loc;
Elf64_Xword *loc64;
Elf64_Addr val;
Elf64_Sxword addend;
Elf64_Addr dot;
Elf_Addr loc0;
unsigned int targetsec = sechdrs[relsec].sh_info;
DEBUGP("Applying relocate section %u to %u\n", relsec,
targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
/* This is the start of the target section */
loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
if (!sym->st_value) {
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
dot = (Elf64_Addr)loc & ~0x03;
loc64 = (Elf64_Xword *)loc;
val = sym->st_value;
addend = rel[i].r_addend;
#if 0
#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
strtab + sym->st_name,
loc, val, addend,
r(R_PARISC_LTOFF14R)
r(R_PARISC_LTOFF21L)
r(R_PARISC_PCREL22F)
r(R_PARISC_DIR64)
r(R_PARISC_SEGREL32)
r(R_PARISC_FPTR64)
"UNKNOWN");
#undef r
#endif
switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_PARISC_LTOFF21L:
/* LT-relative; left 21 bits */
val = get_got(me, val, addend);
DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
val = lrsel(val, 0);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_LTOFF14R:
/* L(ltoff(val+addend)) */
/* LT-relative; right 14 bits */
val = get_got(me, val, addend);
val = rrsel(val, 0);
DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL22F:
/* PC-relative; 22 bits */
DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
val += addend;
/* can we reach it locally? */
if (in_local(me, (void *)val)) {
/* this is the case where the symbol is local
* to the module, but in a different section,
* so stub the jump in case it's more than 22
* bits away */
val = (val - dot - 8)/4;
if (!RELOC_REACHABLE(val, 22)) {
/* direct distance too far, create
* stub entry instead */
val = get_stub(me, sym->st_value,
addend, ELF_STUB_DIRECT,
loc0, targetsec);
} else {
/* Ok, we can reach it directly. */
val = sym->st_value;
val += addend;
}
} else {
val = sym->st_value;
if (strncmp(strtab + sym->st_name, "$$", 2)
== 0)
val = get_stub(me, val, addend, ELF_STUB_MILLI,
loc0, targetsec);
else
val = get_stub(me, val, addend, ELF_STUB_GOT,
loc0, targetsec);
}
DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
strtab + sym->st_name, loc, sym->st_value,
addend, val);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 22);
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
case R_PARISC_DIR64:
/* 64-bit effective address */
*loc64 = val + addend;
break;
case R_PARISC_SEGREL32:
/* 32-bit segment relative address */
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
*loc = fsel(val, addend);
break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if(in_local(me, (void *)(val + addend))) {
*loc64 = get_fdesc(me, val+addend);
DEBUGP("FDESC for %s at %p points to %lx\n",
strtab + sym->st_name, *loc64,
((Elf_Fdesc *)*loc64)->addr);
} else {
/* if the symbol is not local to this
* module then val+addend is a pointer
* to the function descriptor */
DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
*loc64 = val + addend;
}
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#endif
static void
register_unwind_table(struct module *me,
const Elf_Shdr *sechdrs)
{
unsigned char *table, *end;
unsigned long gp;
if (!me->arch.unwind_section)
return;
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
end = table + sechdrs[me->arch.unwind_section].sh_size;
gp = (Elf_Addr)me->module_core + me->arch.got_offset;
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
}
static void
deregister_unwind_table(struct module *me)
{
if (me->arch.unwind)
unwind_table_remove(me->arch.unwind);
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
int i;
unsigned long nsyms;
const char *strtab = NULL;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
Elf_Fdesc *entry;
u32 *addr;
entry = (Elf_Fdesc *)me->init;
printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
entry->gp, entry->addr);
addr = (u32 *)entry->addr;
printk("INSNS: %x %x %x %x\n",
addr[0], addr[1], addr[2], addr[3]);
printk("got entries used %ld, gots max %ld\n"
"fdescs used %ld, fdescs max %ld\n",
me->arch.got_count, me->arch.got_max,
me->arch.fdesc_count, me->arch.fdesc_max);
#endif
register_unwind_table(me, sechdrs);
/* haven't filled in me->symtab yet, so have to find it
* ourselves */
for (i = 1; i < hdr->e_shnum; i++) {
if(sechdrs[i].sh_type == SHT_SYMTAB
&& (sechdrs[i].sh_flags & SHF_ALLOC)) {
int strindex = sechdrs[i].sh_link;
/* FIXME: AWFUL HACK
* The cast is to drop the const from
* the sechdrs pointer */
symhdr = (Elf_Shdr *)&sechdrs[i];
strtab = (char *)sechdrs[strindex].sh_addr;
break;
}
}
DEBUGP("module %s: strtab %p, symhdr %p\n",
me->name, strtab, symhdr);
if(me->arch.got_count > MAX_GOTS) {
printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
me->name, me->arch.got_count, MAX_GOTS);
return -EINVAL;
}
kfree(me->arch.section);
me->arch.section = NULL;
/* no symbol table */
if(symhdr == NULL)
return 0;
oldptr = (void *)symhdr->sh_addr;
newptr = oldptr + 1; /* we start counting at 1 */
nsyms = symhdr->sh_size / sizeof(Elf_Sym);
DEBUGP("OLD num_symtab %lu\n", nsyms);
for (i = 1; i < nsyms; i++) {
oldptr++; /* note, count starts at 1 so preincrement */
if(strncmp(strtab + oldptr->st_name,
".L", 2) == 0)
continue;
if(newptr != oldptr)
*newptr++ = *oldptr;
else
newptr++;
}
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
}

View file

@@ -0,0 +1,49 @@
/*
* linux/arch/parisc/kernel/pa7300lc.c
* - PA7300LC-specific functions
*
* Copyright (C) 2000 Philipp Rumpf */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
/* CPU register indices */
#define MIOC_STATUS 0xf040
#define MIOC_CONTROL 0xf080
#define MDERRADD 0xf0e0
#define DMAERR 0xf0e8
#define DIOERR 0xf0ec
#define HIDMAMEM 0xf0f4
/* this returns the HPA of the CPU it was called on */
static u32 cpu_hpa(void)
{
return 0xfffb0000;
}
static void pa7300lc_lpmc(int code, struct pt_regs *regs)
{
u32 hpa;
printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id());
show_regs(regs);
hpa = cpu_hpa();
printk(KERN_WARNING
"MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n"
"MDERRADD %08x\n" "DMAERR %08x\n"
"DIOERR %08x\n" "HIDMAMEM %08x\n",
gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),
gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),
gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));
}
void pa7300lc_init(void)
{
cpu_lpmc = pa7300lc_lpmc;
}

arch/parisc/kernel/pacache.S
File diff suppressed because it is too large (1294 lines added)

View file

@@ -0,0 +1,164 @@
/*
* Architecture-specific kernel symbols
*
* Copyright (C) 2000-2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Dave Kennedy
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
* Copyright (C) 2002-2007 Helge Deller <deller with parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/string.h>
EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u32);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(__xchg64);
EXPORT_SYMBOL(__cmpxchg_u64);
#endif
#include <asm/uaccess.h>
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
/* Global fixups */
extern void fixup_get_user_skip_1(void);
extern void fixup_get_user_skip_2(void);
extern void fixup_put_user_skip_1(void);
extern void fixup_put_user_skip_2(void);
EXPORT_SYMBOL(fixup_get_user_skip_1);
EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);
EXPORT_SYMBOL(fixup_put_user_skip_2);
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
EXPORT_SYMBOL($global$);
#endif
#include <asm/io.h>
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);
extern void $$divI(void);
extern void $$divU(void);
extern void $$remI(void);
extern void $$remU(void);
extern void $$mulI(void);
extern void $$divU_3(void);
extern void $$divU_5(void);
extern void $$divU_6(void);
extern void $$divU_9(void);
extern void $$divU_10(void);
extern void $$divU_12(void);
extern void $$divU_7(void);
extern void $$divU_14(void);
extern void $$divU_15(void);
extern void $$divI_3(void);
extern void $$divI_5(void);
extern void $$divI_6(void);
extern void $$divI_7(void);
extern void $$divI_9(void);
extern void $$divI_10(void);
extern void $$divI_12(void);
extern void $$divI_14(void);
extern void $$divI_15(void);
EXPORT_SYMBOL($$divI);
EXPORT_SYMBOL($$divU);
EXPORT_SYMBOL($$remI);
EXPORT_SYMBOL($$remU);
EXPORT_SYMBOL($$mulI);
EXPORT_SYMBOL($$divU_3);
EXPORT_SYMBOL($$divU_5);
EXPORT_SYMBOL($$divU_6);
EXPORT_SYMBOL($$divU_9);
EXPORT_SYMBOL($$divU_10);
EXPORT_SYMBOL($$divU_12);
EXPORT_SYMBOL($$divU_7);
EXPORT_SYMBOL($$divU_14);
EXPORT_SYMBOL($$divU_15);
EXPORT_SYMBOL($$divI_3);
EXPORT_SYMBOL($$divI_5);
EXPORT_SYMBOL($$divI_6);
EXPORT_SYMBOL($$divI_7);
EXPORT_SYMBOL($$divI_9);
EXPORT_SYMBOL($$divI_10);
EXPORT_SYMBOL($$divI_12);
EXPORT_SYMBOL($$divI_14);
EXPORT_SYMBOL($$divI_15);
extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
#ifdef CONFIG_64BIT
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __umoddi3(void);
extern void __moddi3(void);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__moddi3);
#endif
#ifndef CONFIG_64BIT
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
#ifdef CONFIG_DISCONTIGMEM
#include <asm/mmzone.h>
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfnnid_map);
#endif
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
/* from pacache.S -- needed for clear/copy_page */
EXPORT_SYMBOL(clear_page_asm);
EXPORT_SYMBOL(copy_page_asm);

View file

@@ -0,0 +1,596 @@
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h> /* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h> /* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
/*
** Dump a hex representation of the resource map.
*/
#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
u_long *res_ptr = (unsigned long *)pcxl_res_map;
u_long i = 0;
printk("res_map: ");
for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
printk("%08lx ", *res_ptr);
printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
static int pa11_dma_supported( struct device *dev, u64 mask)
{
return 1;
}
static inline int map_pte_uncached(pte_t * pte,
unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
{
unsigned long end;
unsigned long orig_vaddr = vaddr;
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
unsigned long flags;
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
(*paddr_ptr) += PAGE_SIZE;
pte++;
} while (vaddr < end);
return 0;
}
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
{
unsigned long end;
unsigned long orig_vaddr = vaddr;
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
pte_t * pte = pte_alloc_kernel(pmd, vaddr);
if (!pte)
return -ENOMEM;
if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
return -ENOMEM;
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
orig_vaddr += PMD_SIZE;
pmd++;
} while (vaddr < end);
return 0;
}
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
unsigned long paddr)
{
pgd_t * dir;
unsigned long end = vaddr + size;
dir = pgd_offset_k(vaddr);
do {
pmd_t *pmd;
pmd = pmd_alloc(NULL, dir, vaddr);
if (!pmd)
return -ENOMEM;
if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
return -ENOMEM;
vaddr = vaddr + PGDIR_SIZE;
dir++;
} while (vaddr && (vaddr < end));
return 0;
}
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
unsigned long size)
{
pte_t * pte;
unsigned long end;
unsigned long orig_vaddr = vaddr;
if (pmd_none(*pmd))
return;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
return;
}
pte = pte_offset_map(pmd, vaddr);
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
unsigned long flags;
pte_t page = *pte;
pte_clear(&init_mm, vaddr, pte);
purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
pte++;
if (pte_none(page) || pte_present(page))
continue;
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
} while (vaddr < end);
}
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
unsigned long size)
{
pmd_t * pmd;
unsigned long end;
unsigned long orig_vaddr = vaddr;
if (pgd_none(*dir))
return;
if (pgd_bad(*dir)) {
pgd_ERROR(*dir);
pgd_clear(dir);
return;
}
pmd = pmd_offset(dir, vaddr);
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
orig_vaddr += PMD_SIZE;
pmd++;
} while (vaddr < end);
}
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
pgd_t * dir;
unsigned long end = vaddr + size;
dir = pgd_offset_k(vaddr);
do {
unmap_uncached_pmd(dir, vaddr, end - vaddr);
vaddr = vaddr + PGDIR_SIZE;
dir++;
} while (vaddr && (vaddr < end));
}
#define PCXL_SEARCH_LOOP(idx, mask, size) \
for(; res_ptr < res_end; ++res_ptr) \
{ \
if(0 == ((*res_ptr) & mask)) { \
*res_ptr |= mask; \
idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
pcxl_res_hint = idx + (size >> 3); \
goto resource_found; \
} \
}
#define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
PCXL_SEARCH_LOOP(idx, mask, size); \
res_ptr = (u##size *)&pcxl_res_map[0]; \
PCXL_SEARCH_LOOP(idx, mask, size); \
}
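/*
** Editor's example, tracing the macros above: a 3-page request builds
** mask = 0x7 (see pcxl_alloc_range below) and, with size == 8, scans the
** resource map one byte at a time for a byte whose low three bits are
** clear, claiming them with a single *res_ptr |= mask.  Mappings are
** therefore chunk-granular: each u8/u16/u32 chunk of the map holds at
** most one live mapping at a time.
*/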
unsigned long
pcxl_alloc_range(size_t size)
{
int res_idx;
u_long mask, flags;
unsigned int pages_needed = size >> PAGE_SHIFT;
mask = (u_long) -1L;
mask >>= BITS_PER_LONG - pages_needed;
DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
size, pages_needed, mask);
spin_lock_irqsave(&pcxl_res_lock, flags);
if(pages_needed <= 8) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
} else if(pages_needed <= 16) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
} else if(pages_needed <= 32) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
} else {
panic("%s: pcxl_alloc_range() Too many pages to map.\n",
__FILE__);
}
dump_resmap();
panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
__FILE__);
resource_found:
DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
res_idx, mask, pcxl_res_hint);
pcxl_used_pages += pages_needed;
pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
spin_unlock_irqrestore(&pcxl_res_lock, flags);
dump_resmap();
/*
** return the corresponding vaddr in the pcxl dma map
*/
return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
#define PCXL_FREE_MAPPINGS(idx, m, size) \
u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
/* BUG_ON((*res_ptr & m) != m); */ \
*res_ptr &= ~m;
/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
u_long mask, flags;
unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
unsigned int pages_mapped = size >> PAGE_SHIFT;
mask = (u_long) -1L;
mask >>= BITS_PER_LONG - pages_mapped;
DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
res_idx, size, pages_mapped, mask);
spin_lock_irqsave(&pcxl_res_lock, flags);
if(pages_mapped <= 8) {
PCXL_FREE_MAPPINGS(res_idx, mask, 8);
} else if(pages_mapped <= 16) {
PCXL_FREE_MAPPINGS(res_idx, mask, 16);
} else if(pages_mapped <= 32) {
PCXL_FREE_MAPPINGS(res_idx, mask, 32);
} else {
panic("%s: pcxl_free_range() Too many pages to unmap.\n",
__FILE__);
}
pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
spin_unlock_irqrestore(&pcxl_res_lock, flags);
dump_resmap();
}
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
u_long i = 0;
unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */
seq_printf(m, "\nDMA Mapping Area size : %d bytes (%ld pages)\n",
PCXL_DMA_MAP_SIZE, total_pages);
seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);
seq_puts(m, " total: free: used: % used:\n");
seq_printf(m, "blocks %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
(pcxl_used_bytes * 100) / pcxl_res_size);
seq_printf(m, "pages %8ld %8ld %8ld %8ld%%\n", total_pages,
total_pages - pcxl_used_pages, pcxl_used_pages,
(pcxl_used_pages * 100 / total_pages));
#if 0
seq_puts(m, "\nResource bitmap:");
for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
if ((i & 7) == 0)
seq_puts(m,"\n ");
seq_printf(m, "%s %08lx", buf, *res_ptr);
}
#endif
seq_putc(m, '\n');
return 0;
}
static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_pcxl_dma_show, NULL);
}
static const struct file_operations proc_pcxl_dma_ops = {
.owner = THIS_MODULE,
.open = proc_pcxl_dma_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init
pcxl_dma_init(void)
{
if (pcxl_dma_start == 0)
return 0;
spin_lock_init(&pcxl_res_lock);
pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
pcxl_res_hint = 0;
pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
get_order(pcxl_res_size));
memset(pcxl_res_map, 0, pcxl_res_size);
proc_gsc_root = proc_mkdir("gsc", NULL);
if (!proc_gsc_root)
printk(KERN_WARNING
"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
else {
struct proc_dir_entry* ent;
ent = proc_create("pcxl_dma", 0, proc_gsc_root,
&proc_pcxl_dma_ops);
if (!ent)
printk(KERN_WARNING
"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
}
return 0;
}
__initcall(pcxl_dma_init);
static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
unsigned long vaddr;
unsigned long paddr;
int order;
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
vaddr = pcxl_alloc_range(size);
paddr = __get_free_pages(flag, order);
flush_kernel_dcache_range(paddr, size);
paddr = __pa(paddr);
map_uncached_pages(vaddr, size, paddr);
*dma_handle = (dma_addr_t) paddr;
#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
if (!dev || *dev->coherent_dma_mask < 0xffffffff)
gfp |= GFP_DMA;
#endif
return (void *)vaddr;
}
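/*
** Editor's note: this routine backs the alloc_consistent hook of
** pcxl_dma_ops below -- backing pages come from the normal page
** allocator, get flushed out of the data cache, and are then re-mapped
** uncached inside the pcxl dma window; the caller sees the uncached
** virtual address while *dma_handle carries the physical address for
** the device.
*/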
static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
int order;
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
unmap_uncached_pages((unsigned long)vaddr, size);
pcxl_free_range((unsigned long)vaddr, size);
free_pages((unsigned long)__va(dma_handle), order);
}
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
}
static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
if (direction == DMA_TO_DEVICE)
return;
/*
* For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
* pci_dma_sync_single_* has been called and the buffer reused.
*/
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
return;
}
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sglist++ ) {
unsigned long vaddr = sg_virt_addr(sglist);
sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sglist) = sglist->length;
flush_kernel_dcache_range(vaddr, sglist->length);
}
return nents;
}
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
if (direction == DMA_TO_DEVICE)
return;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
return;
}
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for (i = 0; i < nents; i++, sglist++ )
flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
struct hppa_dma_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc_consistent = pa11_dma_alloc_consistent,
.alloc_noncoherent = pa11_dma_alloc_consistent,
.free_consistent = pa11_dma_free_consistent,
.map_single = pa11_dma_map_single,
.unmap_single = pa11_dma_unmap_single,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
static void *fail_alloc_consistent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return NULL;
}
static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
void *addr;
addr = (void *)__get_free_pages(flag, get_order(size));
if (addr)
*dma_handle = (dma_addr_t)virt_to_phys(addr);
return addr;
}
static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t iova)
{
free_pages((unsigned long)vaddr, get_order(size));
return;
}
struct hppa_dma_ops pcx_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc_consistent = fail_alloc_consistent,
.alloc_noncoherent = pa11_dma_alloc_noncoherent,
.free_consistent = pa11_dma_free_noncoherent,
.map_single = pa11_dma_map_single,
.unmap_single = pa11_dma_unmap_single,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};

arch/parisc/kernel/pci.c
View file

@@ -0,0 +1,299 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH
* Copyright (C) 1999-2001 Hewlett-Packard Company
* Copyright (C) 1999-2001 Grant Grundler
*/
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(x...) printk(KERN_DEBUG x)
#else
# define DBGC(x...)
#endif
#if DEBUG_RESOURCES
#define DBG_RES(x...) printk(KERN_DEBUG x)
#else
#define DBG_RES(x...)
#endif
/* To be used as: mdelay(pci_post_reset_delay);
*
* post_reset is the time the kernel should stall to prevent anyone from
* accessing the PCI bus once #RESET is de-asserted.
* PCI spec somewhere says 1 second but with multi-PCI bus systems,
* this makes the boot time much longer than necessary.
* 20ms seems to work for all the HP PCI implementations to date.
*
* #define pci_post_reset_delay 50
*/
struct pci_port_ops *pci_port __read_mostly;
struct pci_bios_ops *pci_bios __read_mostly;
static int pci_hba_count __read_mostly;
/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
#define PCI_HBA_MAX 32
static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;
/********************************************************************
**
** I/O port space support
**
*********************************************************************/
/* EISA port numbers and PCI port numbers share the same interface. Some
* machines have both EISA and PCI adapters installed. Rather than turn
* pci_port into an array, we reserve bus 0 for EISA and call the EISA
* routines if the access is to a port on bus 0. We don't want to fix
* EISA and ISA drivers which assume port space is <= 0xffff.
*/
#ifdef CONFIG_EISA
#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
#else
#define EISA_IN(size)
#define EISA_OUT(size)
#endif
#define PCI_PORT_IN(type, size) \
u##size in##type (int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
EISA_IN(size); \
if (!parisc_pci_hba[b]) return (u##size) -1; \
return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
} \
EXPORT_SYMBOL(in##type);
PCI_PORT_IN(b, 8)
PCI_PORT_IN(w, 16)
PCI_PORT_IN(l, 32)
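/* Editor's note: the three instantiations above expand to the usual
 * inb()/inw()/inl() entry points, e.g. PCI_PORT_IN(b, 8) becomes
 *	u8 inb(int addr)
 * which decodes the HBA index from the upper bits of the port number
 * (PCI_PORT_HBA), diverts bus 0 to the EISA helpers when configured,
 * and otherwise dispatches through pci_port->inb(). */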
#define PCI_PORT_OUT(type, size) \
void out##type (u##size d, int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
EISA_OUT(size); \
if (!parisc_pci_hba[b]) return; \
pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
} \
EXPORT_SYMBOL(out##type);
PCI_PORT_OUT(b, 8)
PCI_PORT_OUT(w, 16)
PCI_PORT_OUT(l, 32)
/*
* BIOS32 replacement.
*/
static int __init pcibios_init(void)
{
if (!pci_bios)
return -1;
if (pci_bios->init) {
pci_bios->init();
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
/* Set the CLS for PCI as early as possible. */
pci_cache_line_size = pci_dfl_cache_line_size;
return 0;
}
/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
if (pci_bios->fixup_bus) {
pci_bios->fixup_bus(bus);
} else {
printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
}
}
/*
* Called by pci_set_master() - a driver interface.
*
* Legacy PDC guarantees to set:
* Map Memory BAR's into PA IO space.
* Map Expansion ROM BAR into one common PA IO space per bus.
* Map IO BAR's into PCI IO space.
* Command (see below)
* Cache Line Size
* Latency Timer
* Interrupt Line
* PPB: secondary latency timer, io/mmio base/limit,
* bus numbers, bridge control
*
*/
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
/* If someone already mucked with this, don't touch it. */
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat >= 16) return;
/*
** HP generally has fewer devices on the bus than other architectures.
** The upper byte of the word written below is PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
(0x80 << 8) | pci_cache_line_size);
}
void __init pcibios_init_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
unsigned short bridge_ctl;
/* We deal only with pci controllers and pci-pci bridges. */
if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;
/* PCI-PCI bridge - set the cache line and default latency
(32) for primary and secondary buses. */
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}
/*
 * pcibios_align_resource() is called every time generic PCI code
 * wants to generate a new address. While looking for an available
 * address, each candidate is first "aligned" and then checked for
 * availability until a match is found.
*
* Since we are just checking candidates, don't use any fields other
* than res->start.
*/
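/* Editor's example: if max(alignment, align) is 0x1000, then
 * mask == 0xfff and a candidate res->start of 0x12345 is rounded up
 * to 0x13000 by the add-and-clear sequence below. */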
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t alignment)
{
resource_size_t mask, align, start = res->start;
DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
pci_name(((struct pci_dev *) data)),
res->parent, res->start, res->end,
(int) res->flags, size, alignment);
/* If it's not IO, then it's gotta be MEM */
align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
/* Align to largest of MIN or input size */
mask = max(alignment, align) - 1;
start += mask;
start &= ~mask;
return start;
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/*
* I/O space can be accessed via normal processor loads and stores on
* this platform but for now we elect not to do this and portable
* drivers should not do this anyway.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
if (write_combine)
return -EINVAL;
/*
* Ignore write-combine; for now only return uncached mappings.
*/
prot = pgprot_val(vma->vm_page_prot);
prot |= _PAGE_NO_CACHE;
vma->vm_page_prot = __pgprot(prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
* We enable the port IO and memory IO bits if the device has any BARs of
* that type, and we enable the PERR and SERR bits unconditionally.
* Drivers that do not need parity (eg graphics and possibly networking)
* can clear these bits if they want.
*/
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
u16 cmd, old_cmd;
err = pci_enable_resources(dev, mask);
if (err < 0)
return err;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
#if 0
/* If bridge/bus controller has FBB enabled, child must too. */
if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
cmd |= PCI_COMMAND_FAST_BACK;
#endif
if (cmd != old_cmd) {
dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n",
old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/* PA-RISC specific */
void pcibios_register_hba(struct pci_hba_data *hba)
{
if (pci_hba_count >= PCI_HBA_MAX) {
printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
return;
}
parisc_pci_hba[pci_hba_count] = hba;
hba->hba_num = pci_hba_count++;
}
subsys_initcall(pcibios_init);

View file

@@ -0,0 +1,302 @@
/*
* interfaces to Chassis Codes via PDC (firmware)
*
* Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
* Copyright (C) 2002-2006 Thibaut VARENE <varenet@parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* TODO: poll chassis warns, trigger (configurable) machine shutdown when
* needed.
* Find out how to get Chassis warnings out of PAT boxes?
*/
#undef PDC_CHASSIS_DEBUG
#ifdef PDC_CHASSIS_DEBUG
#define DPRINTK(fmt, args...) printk(fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cache.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#define PDC_CHASSIS_VER "0.05"
#ifdef CONFIG_PDC_CHASSIS
static unsigned int pdc_chassis_enabled __read_mostly = 1;
/**
* pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
 * @str: configuration param: 0 to disable chassis log
* @return 1
*/
static int __init pdc_chassis_setup(char *str)
{
/*panic_timeout = simple_strtoul(str, NULL, 0);*/
get_option(&str, &pdc_chassis_enabled);
return 1;
}
__setup("pdcchassis=", pdc_chassis_setup);
/**
* pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
* @pdc_chassis_old: 1 if old pdc chassis style
*
* Currently, only E class and A180 are known to work with this.
* Inspired by Christoph Plattner
*/
#if 0
static void __init pdc_chassis_checkold(void)
{
switch(CPU_HVERSION) {
case 0x480: /* E25 */
case 0x481: /* E35 */
case 0x482: /* E45 */
case 0x483: /* E55 */
case 0x516: /* A180 */
break;
default:
break;
}
DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old);
}
#endif
/**
* pdc_chassis_panic_event() - Called by the panic handler.
*
* As soon as a panic occurs, we should inform the PDC.
*/
static int pdc_chassis_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
return NOTIFY_DONE;
}
static struct notifier_block pdc_chassis_panic_block = {
.notifier_call = pdc_chassis_panic_event,
.priority = INT_MAX,
};
/**
 * pdc_chassis_reboot_event() - Called by the reboot handler.
*
* As soon as a reboot occurs, we should inform the PDC.
*/
static int pdc_chassis_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
return NOTIFY_DONE;
}
static struct notifier_block pdc_chassis_reboot_block = {
.notifier_call = pdc_chassis_reboot_event,
.priority = INT_MAX,
};
#endif /* CONFIG_PDC_CHASSIS */
/**
* parisc_pdc_chassis_init() - Called at boot time.
*/
void __init parisc_pdc_chassis_init(void)
{
#ifdef CONFIG_PDC_CHASSIS
if (likely(pdc_chassis_enabled)) {
DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
		/* Let's see if we have something to handle... */
printk(KERN_INFO "Enabling %s chassis codes support v%s\n",
is_pdc_pat() ? "PDC_PAT" : "regular",
PDC_CHASSIS_VER);
/* initialize panic notifier chain */
atomic_notifier_chain_register(&panic_notifier_list,
&pdc_chassis_panic_block);
/* initialize reboot notifier chain */
register_reboot_notifier(&pdc_chassis_reboot_block);
}
#endif /* CONFIG_PDC_CHASSIS */
}
/**
* pdc_chassis_send_status() - Sends a predefined message to the chassis,
* and changes the front panel LEDs according to the new system state
 * @message: predefined message to send
 *
 * Only machines with 64-bit PDC PAT and those reported in
 * pdc_chassis_checkold() are supported at the moment.
*
* returns 0 if no error, -1 if no supported PDC is present or invalid message,
* else returns the appropriate PDC error code.
*
* For a list of predefined messages, see asm-parisc/pdc_chassis.h
*/
int pdc_chassis_send_status(int message)
{
	/* Maybe we should do this another way? */
int retval = 0;
#ifdef CONFIG_PDC_CHASSIS
if (likely(pdc_chassis_enabled)) {
DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
switch(message) {
case PDC_CHASSIS_DIRECT_BSTART:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
break;
case PDC_CHASSIS_DIRECT_BCOMPLETE:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
break;
case PDC_CHASSIS_DIRECT_SHUTDOWN:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
break;
case PDC_CHASSIS_DIRECT_PANIC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
break;
case PDC_CHASSIS_DIRECT_LPMC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
break;
case PDC_CHASSIS_DIRECT_HPMC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
break;
default:
retval = -1;
}
} else retval = -1;
#else
if (1) {
switch (message) {
case PDC_CHASSIS_DIRECT_BSTART:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_INIT));
break;
case PDC_CHASSIS_DIRECT_BCOMPLETE:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
break;
case PDC_CHASSIS_DIRECT_SHUTDOWN:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
break;
case PDC_CHASSIS_DIRECT_HPMC:
case PDC_CHASSIS_DIRECT_PANIC:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
break;
case PDC_CHASSIS_DIRECT_LPMC:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
break;
default:
retval = -1;
}
} else retval = -1;
#endif /* CONFIG_64BIT */
} /* if (pdc_chassis_enabled) */
#endif /* CONFIG_PDC_CHASSIS */
return retval;
}
#ifdef CONFIG_PDC_CHASSIS_WARN
#ifdef CONFIG_PROC_FS
static int pdc_chassis_warn_show(struct seq_file *m, void *v)
{
unsigned long warn;
u32 warnreg;
if (pdc_chassis_warn(&warn) != PDC_OK)
return -EIO;
warnreg = (warn & 0xFFFFFFFF);
if ((warnreg >> 24) & 0xFF)
seq_printf(m, "Chassis component failure! (eg fan or PSU): 0x%.2x\n",
(warnreg >> 24) & 0xFF);
seq_printf(m, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK");
seq_printf(m, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK");
seq_printf(m, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" : "OK");
return 0;
}
static int pdc_chassis_warn_open(struct inode *inode, struct file *file)
{
return single_open(file, pdc_chassis_warn_show, NULL);
}
static const struct file_operations pdc_chassis_warn_fops = {
.open = pdc_chassis_warn_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init pdc_chassis_create_procfs(void)
{
unsigned long test;
int ret;
ret = pdc_chassis_warn(&test);
if ((ret == PDC_BAD_PROC) || (ret == PDC_BAD_OPTION)) {
/* seems that some boxes (eg L1000) do not implement this */
printk(KERN_INFO "Chassis warnings not supported.\n");
return 0;
}
printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n",
PDC_CHASSIS_VER);
proc_create("chassis", 0400, NULL, &pdc_chassis_warn_fops);
return 0;
}
__initcall(pdc_chassis_create_procfs);
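/*
 * With the procfs entry in place, the warn register can be inspected from
 * user space; illustrative output (the lines come straight from
 * pdc_chassis_warn_show() above):
 *
 *   $ cat /proc/chassis
 *   Battery: OK
 *   Temp low: OK
 *   Temp mid: OK
 */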
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_PDC_CHASSIS_WARN */

View file

@ -0,0 +1,282 @@
/*
* PDC Console support - ie use firmware to dump text via boot console
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* The PDC console is a simple console, which can be used for debugging
* boot related problems on HP PA-RISC machines. It is also useful when no
* other console works.
*
* This code uses the ROM (=PDC) based functions to read and write characters
* from and to PDC's boot path.
*/
/* Define EARLY_BOOTUP_DEBUG to debug kernel-related boot problems.
 * On production kernels EARLY_BOOTUP_DEBUG should be undefined. */
#define EARLY_BOOTUP_DEBUG
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/tty.h>
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static DEFINE_SPINLOCK(pdc_console_lock);
static struct console pdc_cons;
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
int i = 0;
unsigned long flags;
spin_lock_irqsave(&pdc_console_lock, flags);
do {
i += pdc_iodc_print(s + i, count - i);
} while (i < count);
spin_unlock_irqrestore(&pdc_console_lock, flags);
}
int pdc_console_poll_key(struct console *co)
{
int c;
unsigned long flags;
spin_lock_irqsave(&pdc_console_lock, flags);
c = pdc_iodc_getc();
spin_unlock_irqrestore(&pdc_console_lock, flags);
return c;
}
static int pdc_console_setup(struct console *co, char *options)
{
return 0;
}
#if defined(CONFIG_PDC_CONSOLE)
#include <linux/vt_kern.h>
#include <linux/tty_flip.h>
#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)	/* poll input every 30 ms */
static void pdc_console_poll(unsigned long unused);
static DEFINE_TIMER(pdc_console_timer, pdc_console_poll, 0, 0);
static struct tty_port tty_port;
static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
{
tty_port_tty_set(&tty_port, tty);
mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
return 0;
}
static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
del_timer_sync(&pdc_console_timer);
tty_port_tty_set(&tty_port, NULL);
}
}
static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
pdc_console_write(NULL, buf, count);
return count;
}
static int pdc_console_tty_write_room(struct tty_struct *tty)
{
return 32768; /* no limit, no buffer used */
}
static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
{
return 0; /* no buffer */
}
static const struct tty_operations pdc_console_tty_ops = {
.open = pdc_console_tty_open,
.close = pdc_console_tty_close,
.write = pdc_console_tty_write,
.write_room = pdc_console_tty_write_room,
.chars_in_buffer = pdc_console_tty_chars_in_buffer,
};
static void pdc_console_poll(unsigned long unused)
{
int data, count = 0;
while (1) {
data = pdc_console_poll_key(NULL);
if (data == -1)
break;
tty_insert_flip_char(&tty_port, data & 0xFF, TTY_NORMAL);
count++;
}
if (count)
tty_flip_buffer_push(&tty_port);
if (pdc_cons.flags & CON_ENABLED)
mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
}
static struct tty_driver *pdc_console_tty_driver;
static int __init pdc_console_tty_driver_init(void)
{
int err;
/* Check if the console driver is still registered.
* It is unregistered if the pdc console was not selected as the
* primary console. */
struct console *tmp;
console_lock();
for_each_console(tmp)
if (tmp == &pdc_cons)
break;
console_unlock();
if (!tmp) {
printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
return -ENODEV;
}
printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
pdc_cons.flags &= ~CON_BOOT;
pdc_console_tty_driver = alloc_tty_driver(1);
if (!pdc_console_tty_driver)
return -ENOMEM;
tty_port_init(&tty_port);
pdc_console_tty_driver->driver_name = "pdc_cons";
pdc_console_tty_driver->name = "ttyB";
pdc_console_tty_driver->major = MUX_MAJOR;
pdc_console_tty_driver->minor_start = 0;
pdc_console_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
pdc_console_tty_driver->init_termios = tty_std_termios;
pdc_console_tty_driver->flags = TTY_DRIVER_REAL_RAW |
TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(pdc_console_tty_driver, &pdc_console_tty_ops);
tty_port_link_device(&tty_port, pdc_console_tty_driver, 0);
err = tty_register_driver(pdc_console_tty_driver);
if (err) {
printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
tty_port_destroy(&tty_port);
return err;
}
return 0;
}
module_init(pdc_console_tty_driver_init);
static struct tty_driver *pdc_console_device(struct console *c, int *index)
{
*index = c->index;
return pdc_console_tty_driver;
}
#else
#define pdc_console_device NULL
#endif
static struct console pdc_cons = {
.name = "ttyB",
.write = pdc_console_write,
.device = pdc_console_device,
.setup = pdc_console_setup,
.flags = CON_BOOT | CON_PRINTBUFFER,
.index = -1,
};
static int pdc_console_initialized;
static void pdc_console_init_force(void)
{
if (pdc_console_initialized)
return;
++pdc_console_initialized;
/* If the console is duplex then copy the COUT parameters to CIN. */
if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));
/* register the pdc console */
register_console(&pdc_cons);
}
void __init pdc_console_init(void)
{
#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE)
pdc_console_init_force();
#endif
#ifdef EARLY_BOOTUP_DEBUG
printk(KERN_INFO "Initialized PDC Console for debugging.\n");
#endif
}
/*
* Used for emergencies. Currently only used if an HPMC occurs. If an
* HPMC occurs, it is possible that the current console may not be
* properly initialised after the PDC IO reset. This routine unregisters
* all of the current consoles, reinitializes the pdc console and
* registers it.
*/
void pdc_console_restart(void)
{
struct console *console;
if (pdc_console_initialized)
return;
/* If we've already seen the output, don't bother to print it again */
if (console_drivers != NULL)
pdc_cons.flags &= ~CON_PRINTBUFFER;
while ((console = console_drivers) != NULL)
unregister_console(console_drivers);
/* force registering the pdc console */
pdc_console_init_force();
}

851
arch/parisc/kernel/perf.c Normal file
View file

@ -0,0 +1,851 @@
/*
* Parisc performance counters
* Copyright (C) 2001 Randolph Chung <tausq@debian.org>
*
* This code is derived, with permission, from HP/UX sources.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Edited comment from original sources:
*
* This driver programs the PCX-U/PCX-W performance counters
* on the PA-RISC 2.0 chips. The driver keeps all images now
* internally to the kernel to hopefully eliminate the possibility
* of a bad image halting the CPU. Also, there are different
* images for the PCX-W and later chips vs the PCX-U chips.
*
* Only 1 process is allowed to access the driver at any time,
* so the only protection that is needed is at open and close.
* A variable "perf_enabled" is used to hold the state of the
* driver. The spinlock "perf_lock" is used to protect the
* modification of the state during open/close operations so
* multiple processes don't get into the driver simultaneously.
*
* This driver accesses the processor directly vs going through
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
* on every box.
*/
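/*
 * A hedged user-space sketch of the intended access pattern (the device
 * node name and image number below are illustrative assumptions): download
 * a 32-bit image selector with write(), start/stop the counters via ioctl,
 * and read back the four 32-bit counts that PA_PERF_OFF copies out.
 */
#if 0
static void example_measure(void)
{
	int fd = open("/dev/perf", O_RDWR);	/* assumed node for perf_dev */
	uint32_t image = (CUDA_INTF << 16) | 0;	/* interface type | image # */
	uint32_t counts[4];

	write(fd, &image, sizeof(image));	/* download the image */
	ioctl(fd, PA_PERF_ON);			/* start counting */
	/* ... run the workload being measured ... */
	ioctl(fd, PA_PERF_OFF, counts);		/* stop and fetch counters */
	close(fd);
}
#endif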
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/perf.h>
#include <asm/parisc-device.h>
#include <asm/processor.h>
#include <asm/runway.h>
#include <asm/io.h> /* for __raw_read() */
#include "perf_images.h"
#define MAX_RDR_WORDS 24
#define PERF_VERSION 2 /* derived from hpux's PI v2 interface */
/* definition of RDR regs */
struct rdr_tbl_ent {
uint16_t width;
uint8_t num_words;
uint8_t write_control;
};
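/*
 * Example reading of the tables below: the PCX-W entry { 19, 1, 8 } for
 * RDR 0 describes a register 19 bits wide that is shifted through one
 * 64-bit word, with its writable bits given by the mask at byte offset 8
 * into bitmask_array (see perf_bitmasks further down).
 */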
static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static spinlock_t perf_lock;
struct parisc_device *cpu_device __read_mostly;
/* RDRs to write for PCX-W */
static const int perf_rdrs_W[] =
{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
/* RDRs to write for PCX-U */
static const int perf_rdrs_U[] =
{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
/* RDR register descriptions for PCX-W */
static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
{ 19, 1, 8 }, /* RDR 0 */
{ 16, 1, 16 }, /* RDR 1 */
{ 72, 2, 0 }, /* RDR 2 */
{ 81, 2, 0 }, /* RDR 3 */
{ 328, 6, 0 }, /* RDR 4 */
{ 160, 3, 0 }, /* RDR 5 */
{ 336, 6, 0 }, /* RDR 6 */
{ 164, 3, 0 }, /* RDR 7 */
{ 0, 0, 0 }, /* RDR 8 */
{ 35, 1, 0 }, /* RDR 9 */
{ 6, 1, 0 }, /* RDR 10 */
{ 18, 1, 0 }, /* RDR 11 */
{ 13, 1, 0 }, /* RDR 12 */
{ 8, 1, 0 }, /* RDR 13 */
{ 8, 1, 0 }, /* RDR 14 */
{ 8, 1, 0 }, /* RDR 15 */
{ 1530, 24, 0 }, /* RDR 16 */
{ 16, 1, 0 }, /* RDR 17 */
{ 4, 1, 0 }, /* RDR 18 */
{ 0, 0, 0 }, /* RDR 19 */
{ 152, 3, 24 }, /* RDR 20 */
{ 152, 3, 24 }, /* RDR 21 */
{ 233, 4, 48 }, /* RDR 22 */
{ 233, 4, 48 }, /* RDR 23 */
{ 71, 2, 0 }, /* RDR 24 */
{ 71, 2, 0 }, /* RDR 25 */
{ 11, 1, 0 }, /* RDR 26 */
{ 18, 1, 0 }, /* RDR 27 */
{ 128, 2, 0 }, /* RDR 28 */
{ 0, 0, 0 }, /* RDR 29 */
{ 16, 1, 0 }, /* RDR 30 */
{ 16, 1, 0 }, /* RDR 31 */
};
/* RDR register descriptions for PCX-U */
static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
{ 19, 1, 8 }, /* RDR 0 */
{ 32, 1, 16 }, /* RDR 1 */
{ 20, 1, 0 }, /* RDR 2 */
{ 0, 0, 0 }, /* RDR 3 */
{ 344, 6, 0 }, /* RDR 4 */
{ 176, 3, 0 }, /* RDR 5 */
{ 336, 6, 0 }, /* RDR 6 */
{ 0, 0, 0 }, /* RDR 7 */
{ 0, 0, 0 }, /* RDR 8 */
{ 0, 0, 0 }, /* RDR 9 */
{ 28, 1, 0 }, /* RDR 10 */
{ 33, 1, 0 }, /* RDR 11 */
{ 0, 0, 0 }, /* RDR 12 */
{ 230, 4, 0 }, /* RDR 13 */
{ 32, 1, 0 }, /* RDR 14 */
{ 128, 2, 0 }, /* RDR 15 */
{ 1494, 24, 0 }, /* RDR 16 */
{ 18, 1, 0 }, /* RDR 17 */
{ 4, 1, 0 }, /* RDR 18 */
{ 0, 0, 0 }, /* RDR 19 */
{ 158, 3, 24 }, /* RDR 20 */
{ 158, 3, 24 }, /* RDR 21 */
{ 194, 4, 48 }, /* RDR 22 */
{ 194, 4, 48 }, /* RDR 23 */
{ 71, 2, 0 }, /* RDR 24 */
{ 71, 2, 0 }, /* RDR 25 */
{ 28, 1, 0 }, /* RDR 26 */
{ 33, 1, 0 }, /* RDR 27 */
{ 88, 2, 0 }, /* RDR 28 */
{ 32, 1, 0 }, /* RDR 29 */
{ 24, 1, 0 }, /* RDR 30 */
{ 16, 1, 0 }, /* RDR 31 */
};
/*
* A non-zero write_control in the above tables is a byte offset into
* this array.
*/
static const uint64_t perf_bitmasks[] = {
0x0000000000000000ul, /* first dbl word must be zero */
0xfdffe00000000000ul, /* RDR0 bitmask */
0x003f000000000000ul, /* RDR1 bitmask */
0x00fffffffffffffful, /* RDR20-RDR21 bitmask (152 bits) */
0xfffffffffffffffful,
0xfffffffc00000000ul,
0xfffffffffffffffful, /* RDR22-RDR23 bitmask (233 bits) */
0xfffffffffffffffful,
0xfffffffffffffffcul,
0xff00000000000000ul
};
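/*
 * Worked example of the offset: RDR 0 above has write_control 8, so
 * perf_write_image() indexes bitmask_array[8 >> 3] == bitmask_array[1],
 * the RDR0 bitmask. RDR 20/21 use offset 24 -> index 3, the first dword
 * of the 3-dword RDR20-RDR21 mask.
 */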
/*
 * Write control bitmasks for the PA-8700 (Piranha) processor, since
 * a few of the RDR layouts changed slightly.
 */
static const uint64_t perf_bitmasks_piranha[] = {
0x0000000000000000ul, /* first dbl word must be zero */
0xfdffe00000000000ul, /* RDR0 bitmask */
0x003f000000000000ul, /* RDR1 bitmask */
0x00fffffffffffffful, /* RDR20-RDR21 bitmask (158 bits) */
0xfffffffffffffffful,
0xfffffffc00000000ul,
0xfffffffffffffffful, /* RDR22-RDR23 bitmask (210 bits) */
0xfffffffffffffffful,
0xfffffffffffffffful,
0xfffc000000000000ul
};
static const uint64_t *bitmask_array; /* array of bitmasks to use */
/******************************************************************************
* Function Prototypes
*****************************************************************************/
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);
/* External Assembly Routines */
extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters (void);
extern void perf_intrigue_disable_perf_counters (void);
/******************************************************************************
* Function Definitions
*****************************************************************************/
/*
* configure:
*
* Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
{
long error;
uint32_t raddr[4];
/* Stop the counters */
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
return -EINVAL;
}
printk("Preparing to write image\n");
/* Write the image to the chip */
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
return -EINVAL;
}
printk("Preparing to start counters\n");
/* Start the counters */
perf_start_counters();
return sizeof(uint32_t);	/* perf_write() returns this as its byte count */
}
/*
* Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
static int perf_open(struct inode *inode, struct file *file)
{
spin_lock(&perf_lock);
if (perf_enabled) {
spin_unlock(&perf_lock);
return -EBUSY;
}
perf_enabled = 1;
spin_unlock(&perf_lock);
return 0;
}
/*
* Close the device.
*/
static int perf_release(struct inode *inode, struct file *file)
{
spin_lock(&perf_lock);
perf_enabled = 0;
spin_unlock(&perf_lock);
return 0;
}
/*
* Read does nothing for this driver
*/
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
return 0;
}
/*
* write:
*
* This routine downloads the image to the chip. It must be
* called on the processor that the download should happen
* on.
*/
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
size_t image_size;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (count != sizeof(uint32_t))
return -EIO;
if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
return -EFAULT;	/* copy_from_user() returns bytes left, not an errno */
/* Get the interface type and test type */
interface_type = (image_type >> 16) & 0xffff;
test = (image_type & 0xffff);
/* Make sure everything makes sense */
/* First check the machine type is correct for
the requested image */
if (((perf_processor_interface == CUDA_INTF) &&
(interface_type != CUDA_INTF)) ||
((perf_processor_interface == ONYX_INTF) &&
(interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
((interface_type == ONYX_INTF) &&
(test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
}
/*
* Patch the images that need to know the IVA addresses.
*/
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
extern void $i_itlb_miss_2_0();
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
/*
* We can only use the lower 32-bits, the upper 32-bits should be 0
* anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
uint32_t IVAaddress = (uint32_t)&PA2_0_iva;
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[BIG_CPI][17] = itlb_addr;
onyx_images[PANIC][15] &= 0xffffff00; /* clear last 2 bytes */
onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24)); /* set 2 bytes */
onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
/* Unknown type */
}
#endif
}
/*
 * ioctl routine
 * All routines affect the processor that they are executed on. Thus you
 * must be running on the processor that you wish to change.
 */
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long error_start;
uint32_t raddr[4];
int error = 0;
switch (cmd) {
case PA_PERF_ON:
/* Start the counters */
perf_start_counters();
break;
case PA_PERF_OFF:
error_start = perf_stop_counters(raddr);
if (error_start != 0) {
printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
error = -EFAULT;
break;
}
/* copy out the Counters */
if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
}
break;
case PA_PERF_VERSION:
/* Return the version # */
error = put_user(PERF_VERSION, (int __user *)arg);
break;
default:
error = -ENOTTY;
}
return error;
}
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.open = perf_open,
.release = perf_release
};
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
&perf_fops
};
/*
* Initialize the module
*/
static int __init perf_init(void)
{
int ret;
/* Determine correct processor interface to use */
bitmask_array = perf_bitmasks;
if (boot_cpu_data.cpu_type == pcxu ||
boot_cpu_data.cpu_type == pcxu_) {
perf_processor_interface = ONYX_INTF;
} else if (boot_cpu_data.cpu_type == pcxw ||
boot_cpu_data.cpu_type == pcxw_ ||
boot_cpu_data.cpu_type == pcxw2 ||
boot_cpu_data.cpu_type == mako ||
boot_cpu_data.cpu_type == mako2) {
perf_processor_interface = CUDA_INTF;
if (boot_cpu_data.cpu_type == pcxw2 ||
boot_cpu_data.cpu_type == mako ||
boot_cpu_data.cpu_type == mako2)
bitmask_array = perf_bitmasks_piranha;
} else {
perf_processor_interface = UNKNOWN_INTF;
printk("Performance monitoring counters not supported on this processor\n");
return -ENODEV;
}
ret = misc_register(&perf_dev);
if (ret) {
printk(KERN_ERR "Performance monitoring counters: "
"cannot register misc device.\n");
return ret;
}
/* Patch the images to match the system */
perf_patch_images();
spin_lock_init(&perf_lock);
/* TODO: this only lets us access the first cpu.. what to do for SMP? */
cpu_device = per_cpu(cpu_data, 0).dev;
printk("Performance monitoring counters enabled for %s\n",
per_cpu(cpu_data, 0).dev->name);
return 0;
}
/*
* perf_start_counters(void)
*
* Start the counters.
*/
static void perf_start_counters(void)
{
/* Enable performance monitor counters */
perf_intrigue_enable_perf_counters();
}
/*
* perf_stop_counters
*
* Stop the performance counters and save counts
* in a per_processor array.
*/
static int perf_stop_counters(uint32_t *raddr)
{
uint64_t userbuf[MAX_RDR_WORDS];
/* Disable performance counters */
perf_intrigue_disable_perf_counters();
if (perf_processor_interface == ONYX_INTF) {
uint64_t tmp64;
/*
* Read the counters
*/
if (!perf_rdr_read_ubuf(16, userbuf))
return -13;
/* Counter0 is bits 1398 to 1429 */
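/* (Bit 1398 sits in dword 21 at bit 1398 - 21*64 = 54, so the counter
 * spans the low-order 10 bits of userbuf[21] and the high-order 22 bits
 * of userbuf[22]; the shifts below reassemble them into 32 bits.) */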
tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
/* OR sticky0 (bit 1430) to counter0 bit 32 */
tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
raddr[0] = (uint32_t)tmp64;
/* Counter1 is bits 1431 to 1462 */
tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
/* OR sticky1 (bit 1463) to counter1 bit 32 */
tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
raddr[1] = (uint32_t)tmp64;
/* Counter2 is bits 1464 to 1495 */
tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
raddr[3] = (uint32_t)tmp64;
/*
* Zero out the counters
*/
/*
* The counters and sticky-bits comprise the last 132 bits
* (1398 - 1529) of RDR16 on a U chip. We'll zero these
* out the easy way: zero out last 10 bits of dword 21,
* all of dword 22 and 58 bits (plus 6 don't care bits) of
* dword 23.
*/
userbuf[21] &= 0xfffffffffffffc00ul; /* 0 to last 10 bits */
userbuf[22] = 0;
userbuf[23] = 0;
/*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
perf_rdr_write(16, userbuf);
} else {
/*
* Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
/*
* Clear out the counters
*/
perf_rdr_clear(15);
/*
* Copy the counters
*/
raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
return 0;
}
/*
* perf_rdr_get_entry
*
* Retrieve a pointer to the description of what this
* RDR contains.
*/
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
{
if (perf_processor_interface == ONYX_INTF) {
return &perf_rdr_tbl_U[rdr_num];
} else {
return &perf_rdr_tbl_W[rdr_num];
}
}
/*
* perf_rdr_read_ubuf
*
* Read the RDR value into the buffer specified.
*/
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
{
uint64_t data, data_mask = 0;
uint32_t width, xbits, i;
const struct rdr_tbl_ent *tentry;
tentry = perf_rdr_get_entry(rdr_num);
if ((width = tentry->width) == 0)
return 0;
/* Clear out buffer */
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
}
/* Check whether the width is an even multiple of 64 bits */
if ((xbits = width & 0x03f) != 0) {
data_mask = 1;
data_mask <<= (64 - xbits);
data_mask--;
}
/* Grab all of the data */
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
data = perf_rdr_shift_in_U(rdr_num, width);
} else {
data = perf_rdr_shift_in_W(rdr_num, width);
}
if (xbits) {
buffer[i] |= (data << (64 - xbits));
if (i) {
buffer[i-1] |= ((data >> xbits) & data_mask);
}
} else {
buffer[i] = data;
}
}
return 1;
}
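/*
 * Worked example for the xbits handling above: RDR 0 on PCX-W is 19 bits
 * wide, so xbits = 19 and data_mask covers the low 64 - 19 = 45 bits.
 * The single shifted-in word lands in buffer[0] as (data << 45), i.e.
 * left-justified with the unused low-order bits clear.
 */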
/*
* perf_rdr_clear
*
* Zero out the given RDR register
*/
static int perf_rdr_clear(uint32_t rdr_num)
{
const struct rdr_tbl_ent *tentry;
int32_t i;
tentry = perf_rdr_get_entry(rdr_num);
if (tentry->width == 0) {
return -1;
}
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_shift_out_U(rdr_num, 0UL);
} else {
perf_rdr_shift_out_W(rdr_num, 0UL);
}
}
return 0;
}
/*
* perf_write_image
*
* Write the given image out to the processor
*/
static int perf_write_image(uint64_t *memaddr)
{
uint64_t buffer[MAX_RDR_WORDS];
uint64_t *bptr;
uint32_t dwords;
const uint32_t *intrigue_rdr;
const uint64_t *intrigue_bitmask;
uint64_t tmp64;
void __iomem *runway;
const struct rdr_tbl_ent *tentry;
int i;
/* Clear out counters */
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_clear(16);
/* Toggle performance monitor */
perf_intrigue_enable_perf_counters();
perf_intrigue_disable_perf_counters();
intrigue_rdr = perf_rdrs_U;
} else {
perf_rdr_clear(15);
intrigue_rdr = perf_rdrs_W;
}
/* Write all RDRs */
while (*intrigue_rdr != -1) {
tentry = perf_rdr_get_entry(*intrigue_rdr);
perf_rdr_read_ubuf(*intrigue_rdr, buffer);
bptr = &buffer[0];
dwords = tentry->num_words;
if (tentry->write_control) {
intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
while (dwords--) {
tmp64 = *intrigue_bitmask & *memaddr++;
tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
*bptr++ = tmp64;
}
} else {
while (dwords--) {
*bptr++ = *memaddr++;
}
}
perf_rdr_write(*intrigue_rdr, buffer);
intrigue_rdr++;
}
/*
* Now copy out the Runway stuff which is not in RDRs
*/
if (cpu_device == NULL) {
printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
return -1;
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
return 0;
}
/*
* perf_rdr_write
*
* Write the given RDR register with the contents
* of the given buffer.
*/
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
{
const struct rdr_tbl_ent *tentry;
int32_t i;
printk("perf_rdr_write\n");
tentry = perf_rdr_get_entry(rdr_num);
if (tentry->width == 0)
	return;
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
}
}
printk("perf_rdr_write done\n");
}
module_init(perf_init);

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,307 @@
/*
* PARISC Architecture-dependent parts of process handling
* based on the work for i386
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
#include <asm/unwind.h>
#include <asm/sections.h>
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) that may cause a broadcast reset to lock up
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
/*
** If user has modified the Firmware Selftest Bitmap,
** run the tests specified in the bitmap after the
** system is rebooted w/PDC_DO_RESET.
**
** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
**
** Using "directed resets" at each processor with the MEM_TOC
** vector cleared will also avoid running destructive
** memory self tests. (Not implemented yet)
*/
if (ftc_bitmap) {
pdc_do_firm_test_reset(ftc_bitmap);
}
#endif
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* "Normal" system reset */
pdc_do_reset();
/* Nope...box should reset with just CMD_RESET now */
gsc_writel(CMD_RESET, COMMAND_GLOBAL);
/* Wait for RESET to lay us to rest. */
while (1) ;
}
void machine_halt(void)
{
/*
** The LED/ChassisCodes are updated by the led_halt()
** function, called by the reboot notifier chain.
*/
}
void (*chassis_power_off)(void);
/*
* This routine is called from sys_reboot to actually turn off the
* machine
*/
void machine_power_off(void)
{
/* If there is a registered power off handler, call it. */
if (chassis_power_off)
chassis_power_off();
/* Put the soft power button back under hardware control.
* If the user had already pressed the power button, the
* following call will immediately power off. */
pdc_soft_power_button(0);
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
}
void flush_thread(void)
{
/* Only needs to handle fpu stuff or perf monitors.
** REVISIT: several arches implement a "lazy fpu state".
*/
}
void release_thread(struct task_struct *dead_task)
{
}
/*
* Fill in the FPU structure for a core dump.
*/
int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
if (regs == NULL)
return 0;
memcpy(r, regs->fr, sizeof *r);
return 1;
}
int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
memcpy(r, tsk->thread.regs.fr, sizeof(*r));
return 1;
}
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
/* We have to use void * instead of a function pointer, because
* function pointers aren't a pointer to the function on 64-bit.
* Make them const so the compiler knows they live in .text */
extern void * const ret_from_kernel_thread;
extern void * const child_return;
#ifdef CONFIG_HPUX
extern void * const hpux_child_return;
#endif
if (unlikely(p->flags & PF_KTHREAD)) {
memset(cregs, 0, sizeof(struct pt_regs));
if (!usp) /* idle thread */
return 0;
/* kernel thread */
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &ret_from_kernel_thread;
/*
* Copy function and argument to be called from
* ret_from_kernel_thread.
*/
#ifdef CONFIG_64BIT
cregs->gr[27] = ((unsigned long *)usp)[3];
cregs->gr[26] = ((unsigned long *)usp)[2];
#else
cregs->gr[26] = usp;
#endif
cregs->gr[25] = arg;
} else {
/* user thread */
/* usp must be word aligned. This also prevents users from
* passing in the value 1 (which is the signal for a special
* return for a kernel thread) */
if (usp) {
usp = ALIGN(usp, 4);
if (likely(usp))
cregs->gr[30] = usp;
}
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
BUG();
#endif
} else {
cregs->kpc = (unsigned long) &child_return;
}
/* Setup thread TLS area from the 4th parameter in clone */
if (clone_flags & CLONE_SETTLS)
cregs->cr27 = cregs->gr[23];
}
return 0;
}
unsigned long thread_saved_pc(struct task_struct *t)
{
return t->thread.regs.kpc;
}
unsigned long
get_wchan(struct task_struct *p)
{
struct unwind_frame_info info;
unsigned long ip;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* These bracket the sleeping functions..
*/
unwind_frame_init_from_blocked_task(&info, p);
do {
if (unwind_once(&info) < 0)
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
return 0;
}
#ifdef CONFIG_64BIT
void *dereference_function_descriptor(void *ptr)
{
Elf64_Fdesc *desc = ptr;
void *p;
if (!probe_kernel_address(&desc->addr, p))
ptr = p;
return ptr;
}
#endif
static inline unsigned long brk_rnd(void)
{
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
else
return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
}
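/*
 * Worked out with 4kB pages (PAGE_SHIFT == 12): 0x7ff << 12 spans just
 * under 8MB for 32-bit tasks, and 0x3ffff << 12 just under 1GB for
 * 64-bit tasks, matching the comment above.
 */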
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
if (ret < mm->brk)
return mm->brk;
return ret;
}

View file

@ -0,0 +1,435 @@
/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
* Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
*
* Initial PA-RISC Version: 04-23-1999 by Helge Deller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/param.h>
#include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
extern int update_cr16_clocksource(void); /* from time.c */
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
** Consolidate per CPU initialization into (mostly) one module.
** Monarch CPU will initialize boot_cpu_data which shouldn't
** change once the system has booted.
**
** The callback *should* do per-instance initialization of
** everything including the monarch. "Per CPU" init code in
** setup.c:start_parisc() has migrated here and start_parisc()
** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
**
** The goal of consolidating CPU initialization into one place is
** to make sure all CPUs get initialized the same way.
** The code path not shared is how PDC hands control of the CPU to the OS.
** The initialization of OS data structures is the same (done below).
*/
/**
* init_cpu_profiler - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
*/
static void
init_percpu_prof(unsigned long cpunum)
{
struct cpuinfo_parisc *p;
p = &per_cpu(cpu_data, cpunum);
p->prof_counter = 1;
p->prof_multiplier = 1;
}
/**
* processor_probe - Determine if processor driver should claim this device.
* @dev: The device which has been found.
*
* Determine if processor driver should claim this chip (return 0) or not
* (return 1). If so, initialize the chip and tell other partners in crime
* they have work to do.
*/
static int processor_probe(struct parisc_device *dev)
{
unsigned long txn_addr;
unsigned long cpuid;
struct cpuinfo_parisc *p;
#ifdef CONFIG_SMP
if (num_online_cpus() >= nr_cpu_ids) {
printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
return 1;
}
#else
if (boot_cpu_data.cpu_count > 0) {
printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
return 1;
}
#endif
/* Set the logical CPU ID and update the global counter.
 * May get overwritten by PAT code.
 */
cpuid = boot_cpu_data.cpu_count;
txn_addr = dev->hpa.start; /* for legacy PDC */
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
ulong status;
unsigned long bytecnt;
pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
struct pdc_pat_cpu_num cpu_info;
#endif
pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
if (!pa_pdc_cell)
panic("couldn't allocate memory for PDC_PAT_CELL!");
status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
dev->mod_index, PA_VIEW, pa_pdc_cell);
BUG_ON(PDC_OK != status);
/* verify it's the same as what do_pat_inventory() found */
BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */
kfree(pa_pdc_cell);
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
* of cpuid is for physical CPUs and we just don't care yet.
* We'll care when we need to query PAT PDC about a CPU *after*
* boot time (ie shutdown a CPU from an OS perspective).
*/
/* get the cpu number */
status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
BUG_ON(PDC_OK != status);
if (cpu_info.cpu_num >= NR_CPUS) {
printk(KERN_WARNING "IGNORING CPU at 0x%x,"
" cpu_slot_id > NR_CPUS"
" (%ld > %d)\n",
dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
/* Ignore CPU since it will only crash */
boot_cpu_data.cpu_count--;
return 1;
} else {
cpuid = cpu_info.cpu_num;
}
#endif
}
#endif
p = &per_cpu(cpu_data, cpuid);
boot_cpu_data.cpu_count++;
/* initialize counters - CPU 0 gets it_value set in time_init() */
if (cpuid)
memset(p, 0, sizeof(struct cpuinfo_parisc));
p->loops_per_jiffy = loops_per_jiffy;
p->dev = dev; /* Save IODC data in case we need it */
p->hpa = dev->hpa.start; /* save CPU hpa */
p->cpuid = cpuid; /* save CPU id */
p->txn_addr = txn_addr; /* save CPU IRQ address */
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
** for boot_cpu by the above memset().
*/
init_percpu_prof(cpuid);
#endif
/*
** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
** OS control. RENDEZVOUS is the default state - see mem_set above.
** p->state = STATE_RENDEZVOUS;
*/
#if 0
/* CPU 0 IRQ table is statically allocated/initialized */
if (cpuid) {
struct irqaction actions[];
/*
** itimer and ipi IRQ handlers are statically initialized in
** arch/parisc/kernel/irq.c. ie Don't need to register them.
*/
actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
if (!actions) {
/* not getting its own table, share with monarch */
actions = cpu_irq_actions[0];
}
cpu_irq_actions[cpuid] = actions;
}
#endif
/*
* Bring this CPU up now! (ignore bootstrap cpuid == 0)
*/
#ifdef CONFIG_SMP
if (cpuid) {
set_cpu_present(cpuid, true);
cpu_up(cpuid);
}
#endif
/* If we've registered more than one cpu,
* we'll use the jiffies clocksource since cr16
* is not synchronized between CPUs.
*/
update_cr16_clocksource();
return 0;
}
/**
* collect_boot_cpu_data - Fill the boot_cpu_data structure.
*
* This function collects and stores the generic processor information
* in the boot_cpu_data structure.
*/
void __init collect_boot_cpu_data(void)
{
memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK)
printk(KERN_INFO
"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK)
printk(KERN_INFO "vers %08lx\n",
boot_cpu_data.pdc.versions);
if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK)
printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
(boot_cpu_data.pdc.cpuid >> 5) & 127,
boot_cpu_data.pdc.cpuid & 31,
boot_cpu_data.pdc.cpuid);
if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
printk(KERN_INFO "capabilities 0x%lx\n",
boot_cpu_data.pdc.capabilities);
if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
printk(KERN_INFO "model %s\n",
boot_cpu_data.pdc.sys_model_name);
boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;
boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
}
/**
* init_per_cpu - Handle individual processor initializations.
* @cpunum: logical processor number.
*
* This function handles initialization for *every* CPU
* in the system:
*
* o Set "default" CPU width for trap handlers
*
* o Enable FP coprocessor
* REVISIT: this could be done in the "code 22" trap handler.
* (frowand's idea - that way we know which processes need FP
* registers saved on the interrupt stack.)
* NEWS FLASH: wide kernels need FP coprocessor enabled to handle
* formatted printing of %lx for example (double divides I think)
*
* o Enable CPU profiling hooks.
*/
int init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
set_firmware_width();
ret = pdc_coproc_cfg(&coproc_cfg);
if (ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
/* FWIW, FP rev/model is a more accurate way to determine
** CPU type. CPU rev/model has some ambiguous cases.
*/
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
/*
** store status register to stack (hopefully aligned)
** and clear the T-bit.
*/
asm volatile ("fstd %fr0,8(%sp)");
} else {
printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef CONFIG_64BIT
"Halting Machine - FP required\n"
#endif
, coproc_cfg.ccr_functional);
#ifdef CONFIG_64BIT
mdelay(100); /* previous chars get pushed to console */
panic("FP CoProc not reported");
#endif
}
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
return ret;
}
/*
* Display CPU info for all CPUs.
*/
int
show_cpuinfo (struct seq_file *m, void *v)
{
unsigned long cpu;
for_each_online_cpu(cpu) {
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
if (0 == cpuinfo->hpa)
continue;
#endif
seq_printf(m, "processor\t: %lu\n"
"cpu family\t: PA-RISC %s\n",
cpu, boot_cpu_data.family_name);
seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
/* cpu MHz */
seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
seq_printf(m, "capabilities\t:");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
seq_puts(m, " os32");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
seq_puts(m, " os64");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
seq_puts(m, " iopdir_fdc");
switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
case PDC_MODEL_NVA_SUPPORTED:
seq_puts(m, " nva_supported");
break;
case PDC_MODEL_NVA_SLOW:
seq_puts(m, " nva_slow");
break;
case PDC_MODEL_NVA_UNSUPPORTED:
seq_puts(m, " needs_equivalent_aliasing");
break;
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
boot_cpu_data.pdc.sys_model_name,
cpuinfo->dev ?
cpuinfo->dev->name : "Unknown");
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
boot_cpu_data.hversion,
boot_cpu_data.sversion );
/* print cachesize info */
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
cpuinfo->loops_per_jiffy / (500000 / HZ),
(cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
}
return 0;
}
static const struct parisc_device_id processor_tbl[] = {
{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
{ 0, }
};
static struct parisc_driver cpu_driver = {
.name = "CPU",
.id_table = processor_tbl,
.probe = processor_probe
};
/**
* processor_init - Processor initialization procedure.
*
* Register this driver.
*/
void __init processor_init(void)
{
register_parisc_driver(&cpu_driver);
}

305
arch/parisc/kernel/ptrace.c Normal file
View file

@ -0,0 +1,305 @@
/*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
* Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
* Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
* Copyright (C) 2008 Helge Deller <deller@gmx.de>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB)
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
/* make sure the trap bits are not set */
pa_psw(task)->r = 0;
pa_psw(task)->t = 0;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
/*
* The following functions are called by ptrace_resume() when
* enabling or disabling single/block tracing.
*/
void user_disable_single_step(struct task_struct *task)
{
ptrace_disable(task);
}
void user_enable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
if (pa_psw(task)->n) {
struct siginfo si;
/* Nullified, just crank over the queue. */
task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
pa_psw(task)->n = 0;
pa_psw(task)->x = 0;
pa_psw(task)->y = 0;
pa_psw(task)->z = 0;
pa_psw(task)->b = 0;
ptrace_disable(task);
/* Don't wake up the task, but let the
parent know something happened. */
si.si_code = TRAP_TRACE;
si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
si.si_errno = 0;
force_sig_info(SIGTRAP, &si, task);
/* notify_parent(task, SIGCHLD); */
return;
}
/* Enable recovery counter traps. The recovery counter
* itself will be set to zero on a task switch. If the
* task is suspended on a syscall then the syscall return
* path will overwrite the recovery counter with a suitable
* value such that it traps once back in user space. We
* disable interrupts in the tasks PSW here also, to avoid
* interrupts while the recovery counter is decrementing.
*/
pa_psw(task)->r = 1;
pa_psw(task)->t = 0;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
void user_enable_block_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
set_tsk_thread_flag(task, TIF_BLOCKSTEP);
/* Enable taken branch trap. */
pa_psw(task)->r = 0;
pa_psw(task)->t = 1;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long tmp;
long ret = -EIO;
switch (request) {
/* Read the word at location addr in the USER area. For ptraced
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR:
if ((addr & (sizeof(unsigned long)-1)) ||
addr >= sizeof(struct pt_regs))
break;
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
ret = put_user(tmp, (unsigned long __user *) data);
break;
/* Write the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall.
FIXME. There is a problem at the moment in that r3-r18 are only
saved if the process is ptraced on syscall entry, and even then
those values are overwritten by actual register values on syscall
exit. */
case PTRACE_POKEUSR:
/* Some register values written here may be ignored in
* entry.S:syscall_restore_rfi; e.g. iaoq is written with
* r31/r31+4, and not with the values in pt_regs.
*/
if (addr == PT_PSW) {
/* Allow writing to Nullify, Divide-step-correction,
* and carry/borrow bits.
* BEWARE, if you set N, and then single step, it won't
* stop on the nullified instruction.
*/
data &= USER_PSW_BITS;
task_regs(child)->gr[0] &= ~USER_PSW_BITS;
task_regs(child)->gr[0] |= data;
ret = 0;
break;
}
if ((addr & (sizeof(unsigned long)-1)) ||
addr >= sizeof(struct pt_regs))
break;
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
addr == PT_SAR) {
*(unsigned long *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#ifdef CONFIG_COMPAT
/* This function is needed to translate 32 bit pt_regs offsets in to
* 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
* will request offset 12 if it wants gr3, but the lower 32 bits of
* the 64 bit kernels view of gr3 will be at offset 28 (3*8 + 4).
* This code relies on a 32 bit pt_regs being comprised of 32 bit values
* except for the fp registers which (a) are 64 bits, and (b) follow
* the gr registers at the start of pt_regs. The 32 bit pt_regs should
* be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
* being 64 bit in both cases.
*/
static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
if (offset < 0)
return sizeof(struct pt_regs);
else if (offset <= 32*4) /* gr[0..31] */
return offset * 2 + 4;
else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
return offset + 32*4;
else if (offset < sizeof(struct pt_regs)/2 + 32*4)
return offset * 2 + 4 - 32*8;
else
return sizeof(struct pt_regs);
}
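/*
 * Worked example of the translation: a 32-bit debugger asking for gr3
 * passes offset 3*4 = 12, which the first branch maps to 12*2 + 4 = 28,
 * the low word of the 64-bit gr3 slot (3*8 + 4). An fp register such as
 * fr1, at 32-bit offset 32*4 + 8 = 136, just gets 32*4 added and lands
 * at 264 = 32*8 + 8, the same fr1 slot in the wide pt_regs.
 */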
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data)
{
compat_uint_t tmp;
long ret = -EIO;
switch (request) {
case PTRACE_PEEKUSR:
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
if (addr >= sizeof(struct pt_regs))
break;
tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
break;
/* Write the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall.
FIXME. There is a problem at the moment in that r3-r18 are only
saved if the process is ptraced on syscall entry, and even then
those values are overwritten by actual register values on syscall
exit. */
case PTRACE_POKEUSR:
/* Some register values written here may be ignored in
* entry.S:syscall_restore_rfi; e.g. iaoq is written with
* r31/r31+4, and not with the values in pt_regs.
*/
if (addr == PT_PSW) {
/* Since PT_PSW==0, it is valid for 32 bit processes
* under 64 bit kernels as well.
*/
ret = arch_ptrace(child, request, addr, data);
} else {
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
if (addr >= sizeof(struct pt_regs))
break;
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
*(__u64 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
addr == PT_SAR+4) {
/* Zero the top 32 bits */
*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
*(__u32 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
}
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#endif
long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
/* Do the secure computing check first. */
secure_computing_strict(regs->gr[20]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
ret = -1L;
#ifdef CONFIG_64BIT
if (!is_compat_task())
audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
regs->gr[24], regs->gr[23]);
else
#endif
audit_syscall_entry(regs->gr[20] & 0xffffffff,
regs->gr[26] & 0xffffffff,
regs->gr[25] & 0xffffffff,
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
return ret ? : regs->gr[20];
}
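/* Register conventions assumed above (a summary, for reference): on
 * parisc the syscall number travels in gr[20] and the first four
 * arguments in gr[26]..gr[23].  Returning -1 feeds an invalid syscall
 * number back to the syscall path, so the traced call should fail
 * instead of running.
 */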
void do_syscall_trace_exit(struct pt_regs *regs)
{
int stepping = test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP);
audit_syscall_exit(regs);
if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, stepping);
}

arch/parisc/kernel/real2.S Normal file
@@ -0,0 +1,304 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
*
*/
#include <asm/pdc.h>
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <linux/linkage.h>
.section .bss
.export pdc_result
.export pdc_result2
.align 8
pdc_result:
.block ASM_PDC_RESULT_SIZE
pdc_result2:
.block ASM_PDC_RESULT_SIZE
.export real_stack
.export real32_stack
.export real64_stack
.align 64
real_stack:
real32_stack:
real64_stack:
.block 8192
#ifdef CONFIG_64BIT
# define REG_SZ 8
#else
# define REG_SZ 4
#endif
#define N_SAVED_REGS 9
save_cr_space:
.block REG_SZ * N_SAVED_REGS
save_cr_end:
/************************ 32-bit real-mode calls ***********************/
/* This can be called in both narrow and wide kernels */
.text
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
* unsigned int iodc_fn)
* sp is value of stack pointer to adopt before calling PDC (virt)
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
ENTRY(real32_call_asm)
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef CONFIG_64BIT
callee_save
ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
STREG %r27, -1*REG_SZ(%sp)
STREG %r29, -2*REG_SZ(%sp)
#endif
STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
/* save iodc_fn */
copy %arg2, %r31
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldw 0(%arg1), %arg0 /* note overwriting arg0 */
ldw -8(%arg1), %arg2
ldw -12(%arg1), %arg3
ldw -4(%arg1), %arg1 /* obviously must do this one last! */
tophys_r1 %sp
b,l rfi_virt2real,%r2
nop
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
#ifdef CONFIG_64BIT
rsm PSW_SM_W, %r0 /* go narrow */
#endif
load32 PA(ric_ret), %r2
bv 0(%r31)
nop
ric_ret:
#ifdef CONFIG_64BIT
ssm PSW_SM_W, %r0 /* go wide */
#endif
/* restore CRs before going virtual in case we page fault */
b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
nop
b,l rfi_real2virt,%r2
nop
tovirt_r1 %sp
LDREG -REG_SZ(%sp), %sp /* restore SP */
#ifdef CONFIG_64BIT
LDREG -1*REG_SZ(%sp), %r27
LDREG -2*REG_SZ(%sp), %r29
ldo -2*REG_SZ(%sp), %sp
callee_rest
#endif
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
ENDPROC(real32_call_asm)
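/* Illustrative C-side call (a sketch; the actual wrappers live in
 * firmware.c): the caller stores its outgoing argument words in a save
 * area and hands over the dedicated real-mode stack declared above,
 * along the lines of
 *
 *	ret = real32_call_asm(stack_ptr, saved_args, iodc_fn);
 *
 * where stack_ptr, saved_args and iodc_fn are placeholders for the
 * caller's actual values.
 */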
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
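/* For example (illustrative): PUSH_CR(%cr24, %r28) expands to
 *	mfctl %cr24, %r1
 *	STREG,ma %r1, REG_SZ(%r28)
 * so each control register is staged through %r1 and stored with a
 * post-increment, walking %r28 forward through save_cr_space, while
 * POP_CR uses pre-decrement loads to walk backward from save_cr_end.
 */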
.text
save_control_regs:
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
PUSH_CR(%cr25, %r28)
PUSH_CR(%cr26, %r28)
PUSH_CR(%cr27, %r28)
PUSH_CR(%cr28, %r28)
PUSH_CR(%cr29, %r28)
PUSH_CR(%cr30, %r28)
PUSH_CR(%cr31, %r28)
PUSH_CR(%cr15, %r28)
bv 0(%r2)
nop
restore_control_regs:
load32 PA(save_cr_end), %r26
POP_CR(%cr15, %r26)
POP_CR(%cr31, %r26)
POP_CR(%cr30, %r26)
POP_CR(%cr29, %r26)
POP_CR(%cr28, %r26)
POP_CR(%cr27, %r26)
POP_CR(%cr26, %r26)
POP_CR(%cr25, %r26)
POP_CR(%cr24, %r26)
bv 0(%r2)
nop
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
.text
.align 128
rfi_virt2real:
/* switch to real mode... */
rsm PSW_SM_I,%r0
load32 PA(rfi_v2r_1), %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 REAL_MODE_PSW, %r1
mtctl %r1, %cr22
rfi
nop
nop
nop
nop
nop
nop
nop
nop
rfi_v2r_1:
tophys_r1 %r2
bv 0(%r2)
nop
.text
.align 128
rfi_real2virt:
rsm PSW_SM_I,%r0
load32 (rfi_r2v_1), %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 KERNEL_PSW, %r1
mtctl %r1, %cr22
rfi
nop
nop
nop
nop
nop
nop
nop
nop
rfi_r2v_1:
tovirt_r1 %r2
bv 0(%r2)
nop
#ifdef CONFIG_64BIT
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
.text
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
* sp is value of stack pointer to adopt before calling PDC (virt)
* arg0p points to where saved arg values may be found
* fn is the function to call (historically an IODC entry point)
*/
ENTRY(real64_call_asm)
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
/* save fn */
copy %arg2, %r31
/* set up the new ap */
ldo 64(%arg1), %r29
/* load up the arg registers from the saved arg area */
/* 64-bit calling convention passes the first 8 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
ldd 2*REG_SZ(%arg1), %arg2
ldd 3*REG_SZ(%arg1), %arg3
ldd 4*REG_SZ(%arg1), %r22
ldd 5*REG_SZ(%arg1), %r21
ldd 6*REG_SZ(%arg1), %r20
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
tophys_r1 %sp
b,l rfi_virt2real,%r2
nop
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
load32 PA(r64_ret), %r2
bv 0(%r31)
nop
r64_ret:
/* restore CRs before going virtual in case we page fault */
b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
nop
b,l rfi_real2virt,%r2
nop
tovirt_r1 %sp
ldd -8(%sp), %sp /* restore SP */
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
ENDPROC(real64_call_asm)
#endif
.text
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
ENTRY(__canonicalize_funcptr_for_compare)
#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28
ENDPROC(__canonicalize_funcptr_for_compare)

arch/parisc/kernel/setup.c Normal file
@@ -0,0 +1,402 @@
/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
* Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
*
* Initial PA-RISC Version: 04-23-1999 by Helge Deller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#define PCI_DEBUG
#include <linux/pci.h>
#undef PCI_DEBUG
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <asm/machdep.h> /* for pa7300lc_init() proto */
#include <asm/pdc_chassis.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/unwind.h>
static char __initdata command_line[COMMAND_LINE_SIZE];
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA))
int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
EXPORT_SYMBOL(parisc_bus_is_phys);
#endif
void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
/* Collect stuff passed in from the boot loader */
/* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */
if (boot_args[0] < 64) {
/* called from hpux boot loader */
boot_command_line[0] = '\0';
} else {
strlcpy(boot_command_line, (char *)__va(boot_args[1]),
COMMAND_LINE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
{
initrd_start = (unsigned long)__va(boot_args[2]);
initrd_end = (unsigned long)__va(boot_args[3]);
}
#endif
}
strcpy(command_line, boot_command_line);
*cmdline_p = command_line;
}
#ifdef CONFIG_PA11
void __init dma_ops_init(void)
{
switch (boot_cpu_data.cpu_type) {
case pcx:
/*
* We've got way too many dependencies on 1.1 semantics
* to support 1.0 boxes at this point.
*/
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
case pcxs:
case pcxt:
hppa_dma_ops = &pcx_dma_ops;
break;
case pcxl2:
pa7300lc_init();
case pcxl: /* falls through */
hppa_dma_ops = &pcxl_dma_ops;
break;
default:
break;
}
}
#endif
extern int init_per_cpu(int cpuid);
extern void collect_boot_cpu_data(void);
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_64BIT
extern int parisc_narrow_firmware;
#endif
unwind_init();
init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
#ifdef CONFIG_64BIT
printk(KERN_INFO "The 64-bit Kernel has started...\n");
#else
printk(KERN_INFO "The 32-bit Kernel has started...\n");
#endif
printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
pdc_console_init();
#ifdef CONFIG_64BIT
if(parisc_narrow_firmware) {
printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
}
#endif
setup_pdc();
setup_cmdline(cmdline_p);
collect_boot_cpu_data();
do_memory_inventory(); /* probe for physical memory */
parisc_cache_init();
paging_init();
#ifdef CONFIG_CHASSIS_LCD_LED
/* initialize the LCD/LED after boot_cpu_data is available ! */
led_init(); /* LCD/LED initialization */
#endif
#ifdef CONFIG_PA11
dma_ops_init();
#endif
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con; /* we use do_take_over_console() later ! */
#endif
}
/*
* Display CPU info for all CPUs.
* for parisc this is in processor.c
*/
extern int show_cpuinfo (struct seq_file *m, void *v);
static void *
c_start (struct seq_file *m, loff_t *pos)
{
/* Looks like the caller will call repeatedly until we return
* 0, signaling EOF perhaps. This could be used to sequence
* through CPUs for example. Since we print all cpu info in our
* show_cpuinfo() disregarding 'pos' (which I assume is 'v' above)
* we only allow for one "position". */
return ((long)*pos < 1) ? (void *)1 : NULL;
}
static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void
c_stop (struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo
};
static void __init parisc_proc_mkdir(void)
{
/*
** Can't call proc_mkdir() until after proc_root_init() has been
** called by start_kernel(). In other words, this code can't
** live in arch/.../setup.c because start_parisc() calls
** start_kernel().
*/
switch (boot_cpu_data.cpu_type) {
case pcxl:
case pcxl2:
if (NULL == proc_gsc_root)
{
proc_gsc_root = proc_mkdir("bus/gsc", NULL);
}
break;
case pcxt_:
case pcxu:
case pcxu_:
case pcxw:
case pcxw_:
case pcxw2:
if (NULL == proc_runway_root)
{
proc_runway_root = proc_mkdir("bus/runway", NULL);
}
break;
case mako:
case mako2:
if (NULL == proc_mckinley_root)
{
proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
}
break;
default:
/* FIXME: this was added to prevent the compiler
* complaining about missing pcx, pcxs and pcxt
* I'm assuming they have neither gsc nor runway */
break;
}
}
static struct resource central_bus = {
.name = "Central Bus",
.start = F_EXTEND(0xfff80000),
.end = F_EXTEND(0xfffaffff),
.flags = IORESOURCE_MEM,
};
static struct resource local_broadcast = {
.name = "Local Broadcast",
.start = F_EXTEND(0xfffb0000),
.end = F_EXTEND(0xfffdffff),
.flags = IORESOURCE_MEM,
};
static struct resource global_broadcast = {
.name = "Global Broadcast",
.start = F_EXTEND(0xfffe0000),
.end = F_EXTEND(0xffffffff),
.flags = IORESOURCE_MEM,
};
static int __init parisc_init_resources(void)
{
int result;
result = request_resource(&iomem_resource, &central_bus);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, central_bus.name);
return result;
}
result = request_resource(&iomem_resource, &local_broadcast);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %saddress space!\n",
__FILE__, local_broadcast.name);
return result;
}
result = request_resource(&iomem_resource, &global_broadcast);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, global_broadcast.name);
return result;
}
return 0;
}
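/* Once this has run, /proc/iomem should show something like (illustrative;
 * 32-bit addresses shown, F_EXTEND() widens them on a 64-bit kernel):
 *	fff80000-fffaffff : Central Bus
 *	fffb0000-fffdffff : Local Broadcast
 *	fffe0000-ffffffff : Global Broadcast
 */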
extern void gsc_init(void);
extern void processor_init(void);
extern void ccio_init(void);
extern void hppb_init(void);
extern void dino_init(void);
extern void iosapic_init(void);
extern void lba_init(void);
extern void sba_init(void);
extern void eisa_init(void);
static int __init parisc_init(void)
{
u32 osid = (OS_ID_LINUX << 16);
parisc_proc_mkdir();
parisc_init_resources();
do_device_inventory(); /* probe for hardware */
parisc_pdc_chassis_init();
/* set up a new LED state on systems shipped with an LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
/* tell PDC we're Linux. Nevermind failure. */
pdc_stable_write(0x40, &osid, sizeof(osid));
processor_init();
#ifdef CONFIG_SMP
pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
num_online_cpus(), num_present_cpus(),
#else
pr_info("CPU(s): 1 x %s at %d.%06d MHz\n",
#endif
boot_cpu_data.cpu_name,
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
parisc_setup_cache_timing();
/* These are in a non-obvious order, will fix when we have an iotree */
#if defined(CONFIG_IOSAPIC)
iosapic_init();
#endif
#if defined(CONFIG_IOMMU_SBA)
sba_init();
#endif
#if defined(CONFIG_PCI_LBA)
lba_init();
#endif
/* CCIO before any potential subdevices */
#if defined(CONFIG_IOMMU_CCIO)
ccio_init();
#endif
/*
* Need to register Asp & Wax before the EISA adapters for the IRQ
* regions. EISA must come before PCI to be sure it gets IRQ region
* 0.
*/
#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
gsc_init();
#endif
#ifdef CONFIG_EISA
eisa_init();
#endif
#if defined(CONFIG_HPPB)
hppb_init();
#endif
#if defined(CONFIG_GSC_DINO)
dino_init();
#endif
#ifdef CONFIG_CHASSIS_LCD_LED
register_led_regions(); /* register LED port info in procfs */
#endif
return 0;
}
arch_initcall(parisc_init);
void start_parisc(void)
{
extern void start_kernel(void);
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
cpunum = smp_processor_id();
set_firmware_width_unlocked();
ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
if (ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10);
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
asm volatile ("fstd %fr0,8(%sp)");
} else {
panic("must have an fpu to boot linux");
}
start_kernel();
// not reached
}

arch/parisc/kernel/signal.c Normal file
@@ -0,0 +1,575 @@
/*
* linux/arch/parisc/kernel/signal.c: Architecture-specific signal
* handling support.
*
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
* Copyright (C) 2000 Linuxcare, Inc.
*
* Based on the ia64, i386, and alpha versions.
*
* Like the IA-64, we are a recent enough port (we are *starting*
* with glibc2.2) that we do not need to support the old non-realtime
* Linux signals. Therefore we don't. HP/UX signals will go in
* arch/parisc/hpux/signal.c when we figure out how to do them.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_COMPAT
#include "signal32.h"
#endif
#define DEBUG_SIG 0
#define DEBUG_SIG_LEVEL 2
#if DEBUG_SIG
#define DBG(LEVEL, ...) \
((DEBUG_SIG_LEVEL >= LEVEL) \
? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif
/* gcc will complain if a pointer is cast to an integer of different
* size. If you really need to do this (and we do for an ELF32 user
* application in an ELF64 kernel) then you have to do a cast to an
* integer of the same size first. The A() macro accomplishes
* this. */
#define A(__x) ((unsigned long)(__x))
/*
* Do a signal return - restore sigcontext.
*/
/* Trampoline for calling rt_sigreturn() */
#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */
#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
#define INSN_NOP 0x08000240 /* nop */
/* For debugging */
#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
long err = 0;
err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
err |= __get_user(regs->sar, &sc->sc_sar);
DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n",
regs->iaoq[0],regs->iaoq[1]);
DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
}
void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
compat_sigset_t compat_set;
struct compat_rt_sigframe __user * compat_frame;
if (is_compat_task())
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* Unwind the user stack to get the rt_sigframe structure. */
frame = (struct rt_sigframe __user *)
(usp - sigframe_size);
DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
regs->orig_r28 = 1; /* no restarts for sigreturn */
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
goto give_sigsegv;
sigset_32to64(&set,&compat_set);
} else
#endif
{
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto give_sigsegv;
}
set_current_blocked(&set);
/* Good thing we saved the old gr[30], eh? */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
&compat_frame->uc.uc_mcontext);
// FIXME: Load upper half from register file
if (restore_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs))
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &compat_frame->uc.uc_stack);
if (compat_restore_altstack(&compat_frame->uc.uc_stack))
goto give_sigsegv;
} else
#endif
{
DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n",
&frame->uc.uc_mcontext);
if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &frame->uc.uc_stack);
if (restore_altstack(&frame->uc.uc_stack))
goto give_sigsegv;
}
/* If we are on the syscall path IAOQ will not be restored, and
* if we are on the interrupt path we must not corrupt gr31.
*/
if (in_syscall)
regs->gr[31] = regs->iaoq[0];
#if DEBUG_SIG
DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]);
show_regs(regs);
#endif
return;
give_sigsegv:
DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
force_sig(SIGSEGV, current);
return;
}
/*
* Set up a signal frame.
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
/*FIXME: ELF32 vs. ELF64 has different frame_size, but since we
don't use the parameter it doesn't matter */
DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
(unsigned long)ka, sp, frame_size);
/* Align alternate stack and reserve 64 bytes for the signal
handler's frame marker. */
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
return (void __user *) sp; /* Stacks grow up. Fun. */
}
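/* Worked example (illustrative): with sas_ss_sp == 0x61000 the
 * expression (0x61000 + 0x7f) & ~0x3f yields 0x61040, i.e. the frame
 * starts one full 64-byte line above the base of the alternate stack;
 * the upward rounding is what reserves the frame-marker space mentioned
 * above.
 */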
static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
{
unsigned long flags = 0;
long err = 0;
if (on_sig_stack((unsigned long) sc))
flags |= PARISC_SC_FLAG_ONSTACK;
if (in_syscall) {
flags |= PARISC_SC_FLAG_IN_SYSCALL;
/* regs->iaoq is undefined in the syscall return path */
err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n",
regs->gr[31], regs->gr[31]+4);
} else {
err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n",
regs->iaoq[0], regs->iaoq[1]);
}
err |= __put_user(flags, &sc->sc_flags);
err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
err |= __put_user(regs->sar, &sc->sc_sar);
DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
}
static long
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
int in_syscall)
{
struct rt_sigframe __user *frame;
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
int err = 0;
#ifdef CONFIG_64BIT
struct compat_rt_sigframe __user * compat_frame;
compat_sigset_t compat_set;
#endif
usp = (regs->gr[30] & ~(0x01UL));
/*FIXME: frame_size parameter is unused, remove it. */
frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
DBG(1,"SETUP_RT_FRAME: START\n");
DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info);
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
err |= copy_siginfo_to_user32(&compat_frame->info, &ksig->info);
err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs, in_syscall);
sigset_64to32(&compat_set,set);
err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
} else
#endif
{
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
/* FIXME: Should probably be converted as well for the compat case */
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
}
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. The first words of tramp are used to
save the previous sigrestartblock trampoline that might be
on the stack. We start the sigreturn trampoline at
SIGRESTARTBLOCK_TRAMP+X. */
err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
err |= __put_user(INSN_LDI_R20,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
err |= __put_user(INSN_BLE_SR2_R0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
#if DEBUG_SIG
/* Assert that we're flushing in the correct space... */
{
unsigned long sid;
asm ("mfsp %%sr3,%0" : "=r" (sid));
DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
sid, frame->tramp);
}
#endif
flush_user_dcache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[TRAMP_SIZE]);
flush_user_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[TRAMP_SIZE]);
/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
* TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
* So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
*/
rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
if (err)
return -EFAULT;
haddr = A(ksig->ka.sa.sa_handler);
/* The sa_handler may be a pointer to a function descriptor */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
#endif
if (haddr & PA_PLABEL_FDESC) {
Elf32_Fdesc fdesc;
Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
return -EFAULT;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
}
#ifdef CONFIG_64BIT
} else {
Elf64_Fdesc fdesc;
Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
return -EFAULT;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
haddr, regs->gr[19], in_syscall);
}
#endif
/* The syscall return path will create IAOQ values from r31.
*/
sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
if (is_compat_task())
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
sigframe_size |= 1;
#endif
} else {
unsigned long psw = USER_PSW;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
psw |= PSW_W;
#endif
/* If we are singlestepping, arrange a trap to be delivered
when we return to userspace. Note the semantics -- we
should trap before the first insn in the handler is
executed. Ref:
http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
*/
if (pa_psw(current)->r) {
pa_psw(current)->r = 0;
psw |= PSW_R;
mtctl(-1, 0);
}
regs->gr[0] = psw;
regs->iaoq[0] = haddr | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
}
regs->gr[2] = rp; /* userland return pointer */
regs->gr[26] = ksig->sig; /* signal number */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
} else
#endif
{
regs->gr[25] = A(&frame->info); /* siginfo pointer */
regs->gr[24] = A(&frame->uc); /* ucontext pointer */
}
DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n",
regs->gr[30], sigframe_size,
regs->gr[30] + sigframe_size);
/* Raise the user stack pointer to make a proper call frame. */
regs->gr[30] = (A(frame) + sigframe_size);
DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
current->comm, current->pid, frame, regs->gr[30],
regs->iaoq[0], regs->iaoq[1], rp);
return 0;
}
/*
* OK, we're invoking a handler.
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
{
int ret;
sigset_t *oldset = sigmask_to_save();
DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
ksig->sig, ksig->ka, ksig->info, oldset, regs);
/* Set up the stack frame */
ret = setup_rt_frame(ksig, oldset, regs, in_syscall);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP));
DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
regs->gr[28]);
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
if (regs->orig_r28)
return;
regs->orig_r28 = 1; /* no more restarts */
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
DBG(1,"ERESTARTSYS: putting -EINTR\n");
regs->gr[28] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* A syscall is just a branch, so all
* we have to do is fiddle the return pointer.
*/
regs->gr[31] -= 8; /* delayed branching */
break;
}
}
static inline void
insert_restart_trampoline(struct pt_regs *regs)
{
if (regs->orig_r28)
return;
regs->orig_r28 = 1; /* no more restarts */
switch(regs->gr[28]) {
case -ERESTART_RESTARTBLOCK: {
/* Restart the system call - no handlers present */
unsigned int *usp = (unsigned int *)regs->gr[30];
/* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*
* 0: <return address (orig r31)>
* 4: <2nd half for 64-bit>
* 8: ldw 0(%sp), %r31
* 12: be 0x100(%sr2, %r0)
* 16: ldi __NR_restart_syscall, %r20
*/
#ifdef CONFIG_64BIT
put_user(regs->gr[31] >> 32, &usp[0]);
put_user(regs->gr[31] & 0xffffffff, &usp[1]);
put_user(0x0fc010df, &usp[2]);
#else
put_user(regs->gr[31], &usp[0]);
put_user(0x0fc0109f, &usp[2]);
#endif
put_user(0xe0008200, &usp[3]);
put_user(0x34140000, &usp[4]);
/* Stack is 64-byte aligned, and we only need
* to flush 1 cache line.
* Flushing one cacheline is cheap.
* "sync" on bigger (> 4 way) boxes is not.
*/
flush_user_dcache_range(regs->gr[30], regs->gr[30] + 4);
flush_user_icache_range(regs->gr[30], regs->gr[30] + 4);
regs->gr[31] = regs->gr[30] + 8;
return;
}
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR: {
/* Hooray for delayed branching. We don't
* have to restore %r20 (the system call
* number) because it gets loaded in the delay
* slot of the branch external instruction.
*/
regs->gr[31] -= 8;
return;
}
default:
break;
}
}
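/* Note (illustrative decode, not in the original source): 0x34140000 is
 * "ldi 0,%r20", and __NR_restart_syscall is syscall number 0 on parisc,
 * so the final trampoline word loads exactly the number promised in the
 * layout comment above (compare INSN_LDI_R20 == 0x3414015a, which
 * encodes "ldi 173,%r20" for __NR_rt_sigreturn).
 */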
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* We need to be able to restore the syscall arguments (r21-r26) to
* restart syscalls. Thus, the syscall path should save them in the
* pt_regs structure (it's okay to do so since they are caller-save
* registers). As noted below, the syscall number gets restored for
* us due to the magic of delayed branching.
*/
asmlinkage void
do_signal(struct pt_regs *regs, long in_syscall)
{
struct ksignal ksig;
DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
regs, regs->sr[7], in_syscall);
if (get_signal(&ksig)) {
DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
/* Restart a system call if necessary. */
if (in_syscall)
syscall_restart(regs, &ksig.ka);
handle_signal(&ksig, regs, in_syscall);
return;
}
/* Did we come from a system call? */
if (in_syscall)
insert_restart_trampoline(regs);
DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
regs->gr[28]);
restore_saved_sigmask();
}
void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
if (test_thread_flag(TIF_SIGPENDING))
do_signal(regs, in_syscall);
if (test_thread_flag(TIF_NOTIFY_RESUME)) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}

arch/parisc/kernel/signal32.c Normal file
@@ -0,0 +1,377 @@
/* Signal support for 32-bit kernel builds
*
* Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org>
*
* Code was mostly borrowed from kernel/signal.c.
* See kernel/signal.c for additional Copyrights.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include "signal32.h"
#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2
#if DEBUG_COMPAT_SIG
#define DBG(LEVEL, ...) \
((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \
? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif
inline void
sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
{
s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
}
inline void
sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
{
s32->sig[0] = s64->sig[0] & 0xffffffffUL;
s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
}
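/* Example (illustrative): signals 1..32 live in bits 0..31 of the 64-bit
 * sig[0], so sigset_64to32() leaves them in the 32-bit sig[0] and moves
 * signals 33..64 (the realtime range) into sig[1]; sigset_32to64() is
 * the exact inverse.
 */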
long
restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
struct pt_regs *regs)
{
long err = 0;
compat_uint_t compat_reg;
compat_uint_t compat_regt;
int regn;
/* When loading 32-bit values into 64-bit registers make
sure to clear the upper 32-bits */
DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs);
DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc));
for(regn=0; regn < 32; regn++){
err |= __get_user(compat_reg,&sc->sc_gr[regn]);
regs->gr[regn] = compat_reg;
/* Load upper half */
err |= __get_user(compat_regt,&rf->rf_gr[regn]);
regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n",
regn, regs->gr[regn], compat_regt, compat_reg);
}
DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr));
/* XXX: BE WARNED FR's are 64-BIT! */
err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
/* Better safe than sorry, pass __get_user two things of
the same size and let gcc do the upward conversion to
64-bits */
err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
/* Load upper half */
err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n",
&sc->sc_iaoq[0], compat_reg);
err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
/* Load upper half */
err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n",
&sc->sc_iaoq[1],compat_reg);
DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n",
regs->iaoq[0],regs->iaoq[1]);
err |= __get_user(compat_reg, &sc->sc_iasq[0]);
/* Load the upper half for iasq */
err |= __get_user(compat_regt, &rf->rf_iasq[0]);
regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);
err |= __get_user(compat_reg, &sc->sc_iasq[1]);
/* Load the upper half for iasq */
err |= __get_user(compat_regt, &rf->rf_iasq[1]);
regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n",
regs->iasq[0],regs->iasq[1]);
err |= __get_user(compat_reg, &sc->sc_sar);
/* Load the upper half for sar */
err |= __get_user(compat_regt, &rf->rf_sar);
regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;
DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);
DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);
DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);
return err;
}
/*
* Set up the sigcontext structure for this process.
* This is not an easy task if the kernel is 64-bit, it will require
* that we examine the process personality to determine if we need to
* truncate for a 32-bit userspace.
*/
long
setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
struct pt_regs *regs, int in_syscall)
{
compat_int_t flags = 0;
long err = 0;
compat_uint_t compat_reg;
compat_uint_t compat_regb;
int regn;
if (on_sig_stack((unsigned long) sc))
flags |= PARISC_SC_FLAG_ONSTACK;
if (in_syscall) {
DBG(1,"setup_sigcontext32: in_syscall\n");
flags |= PARISC_SC_FLAG_IN_SYSCALL;
/* Truncate gr31 */
compat_reg = (compat_uint_t)(regs->gr[31]);
/* regs->iaoq is undefined in the syscall return path */
err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
&sc->sc_iaoq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->gr[31]+4);
err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
&sc->sc_iaoq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
/* Truncate sr3 */
compat_reg = (compat_uint_t)(regs->sr[3]);
err |= __put_user(compat_reg, &sc->sc_iasq[0]);
err |= __put_user(compat_reg, &sc->sc_iasq[1]);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->sr[3] >> 32);
err |= __put_user(compat_reg, &rf->rf_iasq[0]);
err |= __put_user(compat_reg, &rf->rf_iasq[1]);
DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
regs->gr[31], regs->gr[31]+4);
} else {
compat_reg = (compat_uint_t)(regs->iaoq[0]);
err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
&sc->sc_iaoq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->iaoq[1]);
err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
&sc->sc_iaoq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->iasq[0]);
err |= __put_user(compat_reg, &sc->sc_iasq[0]);
DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n",
&sc->sc_iasq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iasq[0] >> 32);
err |= __put_user(compat_reg, &rf->rf_iasq[0]);
DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
compat_reg = (compat_uint_t)(regs->iasq[1]);
err |= __put_user(compat_reg, &sc->sc_iasq[1]);
DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n",
&sc->sc_iasq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->iasq[1] >> 32);
err |= __put_user(compat_reg, &rf->rf_iasq[1]);
DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
/* Print out the IAOQ for debugging */
DBG(1,"setup_sigcontext32: ia0q %#lx / %#lx\n",
regs->iaoq[0], regs->iaoq[1]);
}
err |= __put_user(flags, &sc->sc_flags);
DBG(1,"setup_sigcontext32: Truncating general registers.\n");
for(regn=0; regn < 32; regn++){
/* Truncate a general register */
compat_reg = (compat_uint_t)(regs->gr[regn]);
err |= __put_user(compat_reg, &sc->sc_gr[regn]);
/* Store upper half */
compat_regb = (compat_uint_t)(regs->gr[regn] >> 32);
err |= __put_user(compat_regb, &rf->rf_gr[regn]);
/* DEBUG: Write out the "upper / lower" register data */
DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn,
compat_regb, compat_reg);
}
/* Copy the floating point registers (same size)
XXX: BE WARNED FR's are 64-BIT! */
DBG(1,"setup_sigcontext32: Copying from regs to sc, "
"sc->sc_fr size = %#lx, regs->fr size = %#lx\n",
sizeof(regs->fr), sizeof(sc->sc_fr));
err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
compat_reg = (compat_uint_t)(regs->sar);
err |= __put_user(compat_reg, &sc->sc_sar);
DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->sar >> 32);
err |= __put_user(compat_reg, &rf->rf_sar);
DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg);
DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]);
return err;
}
int
copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
{
compat_uptr_t addr;
int err;
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
return -EFAULT;
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
if (to->si_code < 0)
err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (to->si_code >> 16) {
case __SI_CHLD >> 16:
err |= __get_user(to->si_utime, &from->si_utime);
err |= __get_user(to->si_stime, &from->si_stime);
err |= __get_user(to->si_status, &from->si_status);
default:
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
break;
case __SI_FAULT >> 16:
err |= __get_user(addr, &from->si_addr);
to->si_addr = compat_ptr(addr);
break;
case __SI_POLL >> 16:
err |= __get_user(to->si_band, &from->si_band);
err |= __get_user(to->si_fd, &from->si_fd);
break;
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
err |= __get_user(to->si_int, &from->si_int);
break;
}
}
return err;
}
int
copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
{
compat_uptr_t addr;
compat_int_t val;
int err;
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
/* If you change siginfo_t structure, please be sure
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member.
This routine must convert siginfo from 64bit to 32bit as well
at the same time. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
if (from->si_code < 0)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
default:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
addr = ptr_to_compat(from->si_addr);
err |= __put_user(addr, &to->si_addr);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
val = (compat_int_t)from->si_int;
err |= __put_user(val, &to->si_int);
break;
case __SI_RT >> 16: /* Not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
val = (compat_int_t)from->si_int;
err |= __put_user(val, &to->si_int);
break;
}
}
return err;
}

arch/parisc/kernel/signal32.h Normal file
@@ -0,0 +1,91 @@
/*
* Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2003 Carlos O'Donell <carlos at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PARISC64_KERNEL_SIGNAL32_H
#define _PARISC64_KERNEL_SIGNAL32_H
#include <linux/compat.h>
/* 32-bit ucontext as seen from a 64-bit kernel */
struct compat_ucontext {
compat_uint_t uc_flags;
compat_uptr_t uc_link;
compat_stack_t uc_stack; /* struct compat_sigaltstack (12 bytes)*/
/* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
compat_uint_t pad[1];
struct compat_sigcontext uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
/* ELF32 signal handling */
int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
/* In a deft move of uber-hackery, we decide to carry the top half of all
* 64-bit registers in a non-portable, non-ABI, hidden structure.
* Userspace can read the hidden structure if it *wants* but it is never
* guaranteed to be in the same place. In fact the uc_sigmask from the
* ucontext_t structure may push the hidden register file downwards.
*/
struct compat_regfile {
/* Upper half of all the 64-bit registers that were truncated
on a copy to a 32-bit userspace */
compat_int_t rf_gr[32];
compat_int_t rf_iasq[2];
compat_int_t rf_iaoq[2];
compat_int_t rf_sar;
};
#define COMPAT_SIGRETURN_TRAMP 4
#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
COMPAT_SIGRESTARTBLOCK_TRAMP)
struct compat_rt_sigframe {
/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
Secondary to that it must protect the ERESTART_RESTARTBLOCK
trampoline we left on the stack (we were bad and didn't
change sp so we could run really fast.) */
compat_uint_t tramp[COMPAT_TRAMP_SIZE];
compat_siginfo_t info;
struct compat_ucontext uc;
/* Hidden location of truncated registers, *must* be last. */
struct compat_regfile regs;
};
/*
* The 32-bit ABI wants at least 48 bytes for a function call frame:
* 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
* which Linux/parisc uses is sp-20 for the saved return pointer...)
* Then, the stack pointer must be rounded to a cache line (64 bytes).
*/
#define SIGFRAME32 64
#define FUNCTIONCALLFRAME32 48
#define PARISC_RT_SIGFRAME_SIZE32 (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
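/* Worked example (illustrative): if sizeof(struct compat_rt_sigframe)
 * were 724, then 724 + 48 + 64 = 836 and 836 & -64 = 832, i.e. the frame
 * plus the 48-byte call frame rounded up to the next 64-byte boundary.
 */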
void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
long restore_sigcontext32(struct compat_sigcontext __user *sc,
struct compat_regfile __user *rf,
struct pt_regs *regs);
long setup_sigcontext32(struct compat_sigcontext __user *sc,
struct compat_regfile __user *rf,
struct pt_regs *regs, int in_syscall);
#endif

arch/parisc/kernel/smp.c Normal file
@@ -0,0 +1,430 @@
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...) \
if (lvl >= smp_debug_lvl) \
printk(printargs);
#else
#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
static volatile int cpu_now_booting;
static int parisc_max_cpus = 1;
static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
IPI_NOP=0,
IPI_RESCHEDULE=1,
IPI_CALL_FUNC,
IPI_CPU_START,
IPI_CPU_STOP,
IPI_CPU_TEST
};
/********** SMP inter processor interrupt and communication routines */
#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
** *May* need this "hook" to register IPI handler
** once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
if(cpu_online(cpuid) )
{
switch_to_idle_task(current);
}
return;
}
#endif
/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
/* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */
set_cpu_online(smp_processor_id(), false);
local_irq_disable();
for (;;)
;
}
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
int this_cpu = smp_processor_id();
struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
unsigned long ops;
unsigned long flags;
for (;;) {
spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
spin_lock_irqsave(lock, flags);
ops = p->pending_ipi;
p->pending_ipi = 0;
spin_unlock_irqrestore(lock, flags);
mb(); /* Order bit clearing and data access. */
if (!ops)
break;
while (ops) {
unsigned long which = ffz(~ops);
ops &= ~(1 << which);
switch (which) {
case IPI_NOP:
smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
break;
case IPI_RESCHEDULE:
smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
inc_irq_stat(irq_resched_count);
scheduler_ipi();
break;
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
generic_smp_call_function_interrupt();
break;
case IPI_CPU_START:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
break;
case IPI_CPU_STOP:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
halt_processor();
break;
case IPI_CPU_TEST:
smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
break;
default:
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which);
return IRQ_NONE;
} /* Switch */
/* let in any pending interrupts */
local_irq_enable();
local_irq_disable();
} /* while (ops) */
}
return IRQ_HANDLED;
}
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
unsigned long flags;
spin_lock_irqsave(lock, flags);
p->pending_ipi |= 1 << op;
gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
spin_unlock_irqrestore(lock, flags);
}
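/* The gsc_writel() above is what actually raises the interrupt: writing
 * the EIR bit number of IPI_IRQ to the target CPU's HPA sets that bit in
 * its External Interrupt Request register, and the bit set in
 * pending_ipi beforehand tells ipi_interrupt() why it fired (a summary
 * of the mechanism, not new behaviour).
 */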
static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
int cpu;
for_each_cpu(cpu, mask)
ipi_send(cpu, op);
}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
BUG_ON(dest_cpu == NO_PROC_ID);
ipi_send(dest_cpu, op);
}
static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
int i;
for_each_online_cpu(i) {
if (i != smp_processor_id())
send_IPI_single(i, op);
}
}
inline void
smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
static inline void
smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
void
smp_send_all_nop(void)
{
send_IPI_allbutself(IPI_NOP);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_IPI_single(cpu, IPI_CALL_FUNC);
}
/*
* Called by secondaries to update state and initialize CPU registers.
*/
static void __init
smp_cpu_init(int cpunum)
{
extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
/* Set modes and Enable floating point coprocessor */
(void) init_per_cpu(cpunum);
disable_sr_hashing();
mb();
/* Well, support 2.4 linux scheme as well. */
if (cpu_online(cpunum)) {
extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
}
notify_cpu_starting(cpunum);
set_cpu_online(cpunum, true);
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
start_cpu_itimer();
}
/*
* Slaves start using C here. Indirectly called from smp_slave_stext.
* Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/
void __init smp_callin(void)
{
int slave_id = cpu_now_booting;
smp_cpu_init(slave_id);
preempt_disable();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
local_irq_enable(); /* Interrupts have been off until now */
cpu_startup_entry(CPUHP_ONLINE);
/* NOTREACHED */
panic("smp_callin() AAAAaaaaahhhh....\n");
}
/*
* Bring one cpu online.
*/
int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
task_thread_info(idle)->cpu = cpuid;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
cpu_now_booting = cpuid;
/*
** boot strap code needs to know the task address since
** it also contains the process stack.
*/
smp_init_current_idle_task = idle;
mb();
printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
/*
** This gets PDC to release the CPU from a very tight loop.
**
** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
** is executed after receiving the rendezvous signal (an interrupt to
** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
** contents of memory are valid."
*/
gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
mb();
/*
* OK, wait a bit for that CPU to finish staggering about.
* Slave will set a bit when it reaches smp_cpu_init().
* Once the "monarch CPU" sees the bit change, it can move on.
*/
for (timeout = 0; timeout < 10000; timeout++) {
if (cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
smp_init_current_idle_task = NULL;
goto alive;
}
udelay(100);
barrier();
}
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
alive:
/* Remember the Slave data */
smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld us\n",
cpuid, timeout * 100);
return 0;
}
void __init smp_prepare_boot_cpu(void)
{
int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
/* Setup BSP mappings */
printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
set_cpu_online(bootstrap_processor, true);
set_cpu_present(bootstrap_processor, true);
}
/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpu;
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(ipi_lock, cpu));
init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
if (!max_cpus)
printk(KERN_INFO "SMP mode deactivated.\n");
}
void smp_cpus_done(unsigned int cpu_max)
{
return;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
if (cpu != 0 && cpu < parisc_max_cpus)
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
#endif

View file

@@ -0,0 +1,63 @@
/*
* Stack trace management functions
*
* Copyright (C) 2009 Helge Deller <deller@gmx.de>
* based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
* and parisc unwind functions by Randolph Chung <tausq@debian.org>
*
* TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
*/
#include <linux/module.h>
#include <linux/stacktrace.h>
#include <asm/unwind.h>
static void dump_trace(struct task_struct *task, struct stack_trace *trace)
{
struct unwind_frame_info info;
/* initialize unwind info */
if (task == current) {
unsigned long sp;
struct pt_regs r;
HERE:
asm volatile ("copy %%r30, %0" : "=r"(sp));
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long)&&HERE;
r.gr[2] = (unsigned long)__builtin_return_address(0);
r.gr[30] = sp;
unwind_frame_init(&info, task, &r);
} else {
unwind_frame_init_from_blocked_task(&info, task);
}
/* unwind stack and save entries in stack_trace struct */
trace->nr_entries = 0;
while (trace->nr_entries < trace->max_entries) {
if (unwind_once(&info) < 0 || info.ip == 0)
break;
if (__kernel_text_address(info.ip))
trace->entries[trace->nr_entries++] = info.ip;
}
}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
dump_trace(current, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
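/*
 * Illustrative sketch, not part of the original file: typical in-kernel
 * usage of the interface exported above. Caller and buffer size are
 * hypothetical.
 */
static void __maybe_unused show_my_backtrace(void)
{
unsigned long entries[16];
struct stack_trace trace = {
.max_entries = ARRAY_SIZE(entries),
.entries = entries,
};
save_stack_trace(&trace); /* fills entries[] via dump_trace() */
print_stack_trace(&trace, 0); /* generic printer from linux/stacktrace.h */
}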

View file

@@ -0,0 +1,391 @@
/*
* PARISC specific syscalls
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
/* we construct an artificial offset for the mapping based on the physical
* address of the kernel mapping variable */
#define GET_LAST_MMAP(filp) \
(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val) \
{ /* nothing */ }
static int get_offset(unsigned int last_mmap)
{
return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}
static unsigned long shared_align_offset(unsigned int last_mmap,
unsigned long pgoff)
{
return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}
static inline unsigned long COLOR_ALIGN(unsigned long addr,
unsigned int last_mmap, unsigned long pgoff)
{
unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
unsigned long off = (SHM_COLOUR-1) &
(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
return base + off;
}
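/*
 * Why this matters: PA-RISC data caches are virtually indexed, so two
 * mappings of the same page stay coherent only if their virtual
 * addresses are congruent modulo SHM_COLOUR. The helpers above bias
 * the address search so that shared mappings of a file land at the
 * same offset within an SHM_COLOUR-sized window.
 */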
/*
* Top of mmap area (just below the process stack).
*/
static unsigned long mmap_upper_limit(void)
{
unsigned long stack_base;
/* Limit stack size - see setup_arg_pages() in fs/exec.c */
stack_base = rlimit_max(RLIMIT_STACK);
if (stack_base > STACK_SIZE_MAX)
stack_base = STACK_SIZE_MAX;
return PAGE_ALIGN(STACK_TOP - stack_base);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
if (len > task_size)
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
last_mmap = GET_LAST_MMAP(filp);
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
if (addr) {
if (do_color_align && last_mmap)
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
goto found_addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_legacy_base;
info.high_limit = mmap_upper_limit();
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
found_addr:
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
return addr;
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
#ifdef CONFIG_64BIT
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
#endif
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
last_mmap = GET_LAST_MMAP(filp);
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
/* requesting a specific address */
if (addr) {
if (do_color_align && last_mmap)
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
goto found_addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
goto found_addr;
VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
found_addr:
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
return addr;
}
static int mmap_is_legacy(void)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
/* parisc stack always grows up - so an unlimited stack should
* not be an indicator to use the legacy memory layout.
* if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
* return 1;
*/
return sysctl_legacy_va_layout;
}
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
/*
* 8 bits of randomness in 32bit mmaps, 20 address space bits
* 28 bits of randomness in 64bit mmaps, 40 address space bits
*/
if (current->flags & PF_RANDOMIZE) {
if (is_32bit_task())
rnd = get_random_int() % (1<<8);
else
rnd = get_random_int() % (1<<28);
}
return rnd << PAGE_SHIFT;
}
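/*
 * Worked example (assuming PAGE_SHIFT == 12): a 32-bit task gets rnd
 * in [0, 255] pages, i.e. up to ~1 MB of slide (the "20 address space
 * bits" above); a 64-bit task gets rnd in [0, 2^28) pages, i.e. up to
 * ~1 TB (the "40 address space bits").
 */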
static unsigned long mmap_legacy_base(void)
{
return TASK_UNMAPPED_BASE + mmap_rnd();
}
/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
mm->mmap_legacy_base = mmap_legacy_base();
mm->mmap_base = mmap_upper_limit();
if (mmap_is_legacy()) {
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
return sys_mmap_pgoff(addr, len, prot, flags, fd,
pgoff >> (PAGE_SHIFT - 12));
}
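/*
 * Worked example: userspace always passes pgoff in 4 kB units. With a
 * 4 kB kernel PAGE_SIZE the shift is zero; on a (hypothetical) 64 kB
 * page build, PAGE_SHIFT - 12 == 4, so sixteen user units make up one
 * kernel page.
 */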
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
return sys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
}
/* Fucking broken ABI */
#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
unsigned int high, unsigned int low)
{
return sys_truncate(path, (long)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
unsigned int high, unsigned int low)
{
return sys_ftruncate(fd, (long)high << 32 | low);
}
/* stubs for the benefit of the syscall_table since truncate64 and truncate
* are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return sys_fcntl(fd, cmd, arg);
}
#else
asmlinkage long parisc_truncate64(const char __user * path,
unsigned int high, unsigned int low)
{
return sys_truncate64(path, (loff_t)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
unsigned int high, unsigned int low)
{
return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif
asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
unsigned int high, unsigned int low)
{
return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
size_t count, unsigned int high, unsigned int low)
{
return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
size_t count)
{
return sys_readahead(fd, (loff_t)high << 32 | low, count);
}
asmlinkage long parisc_fadvise64_64(int fd,
unsigned int high_off, unsigned int low_off,
unsigned int high_len, unsigned int low_len, int advice)
{
return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
(loff_t)high_len << 32 | low_len, advice);
}
asmlinkage long parisc_sync_file_range(int fd,
u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
unsigned int flags)
{
return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}
asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
u32 lenhi, u32 lenlo)
{
return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
((u64)lenhi << 32) | lenlo);
}
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
{
return -ENOMEM;
}
asmlinkage int sys_free_hugepages(unsigned long addr)
{
return -EINVAL;
}
long parisc_personality(unsigned long personality)
{
long err;
if (personality(current->personality) == PER_LINUX32
&& personality(personality) == PER_LINUX)
personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
if (personality(err) == PER_LINUX32)
err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
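/*
 * Example of the mapping above: a 32-bit task on a 64-bit kernel runs
 * with PER_LINUX32, so uname(2) reports the 32-bit machine string. If
 * it then calls personality(PER_LINUX), the wrapper quietly keeps
 * PER_LINUX32 in force while reporting PER_LINUX back to the caller.
 */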

View file

@@ -0,0 +1,33 @@
/*
* sys_parisc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 2000-2001 Hewlett Packard Company
* Copyright (C) 2000 John Marvin
* Copyright (C) 2001 Matthew Wilcox
* Copyright (C) 2014 Helge Deller <deller@gmx.de>
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment. Based heavily on sys_ia32.c and sys_sparc32.c.
*/
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
int r22, int r21, int r20)
{
printk(KERN_ERR "%s(%d): Unimplemented 32 on 64 syscall #%d!\n",
current->comm, current->pid, r20);
return -ENOSYS;
}
asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
const char __user * pathname)
{
return sys_fanotify_mark(fanotify_fd, flags,
((__u64)mask1 << 32) | mask0,
dfd, pathname);
}

View file

@@ -0,0 +1,945 @@
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* System call entry code / Linux gateway page
* Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
* Licensed under the GNU GPL.
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
/*
How does the Linux gateway page on PA-RISC work?
------------------------------------------------
The Linux gateway page on PA-RISC is "special".
It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
terminology it's Execute, promote to PL0) in the page map. So anything
executing on this page executes with kernel level privilege (there's more to it
than that: to have this happen, you also have to use a branch with a ,gate
completer to activate the privilege promotion). The upshot is that everything
that runs on the gateway page runs at kernel privilege but with the current
user process address space (although you have access to kernel space via %sr2).
For the 0x100 syscall entry, we redo the space registers to point to the kernel
address space (preserving the user address space in %sr3), move to wide mode if
required, save the user registers and branch into the kernel syscall entry
point. For all the other functions, we execute at kernel privilege but don't
flip address spaces. The basic upshot of this is that these code snippets are
executed atomically (because the kernel can't be pre-empted) and they may
perform architecturally forbidden (to PL3) operations (like setting control
registers).
*/
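/*
 * Illustrative sketch, not part of the original file: how a 32-bit
 * userland binary might enter the kernel through this page. The gate
 * instruction at linux_gateway_entry below performs the privilege
 * promotion; userspace just branches to offset 0x100 in the gateway
 * space. The function name and clobber list are assumptions in the
 * spirit of glibc's syscall stubs, kept inside this comment so the
 * assembly file stays well-formed:
 *
 * static inline long gateway_syscall1(long nr, long arg0)
 * {
 * 	register long ret  asm("r28");        // return value / -errno
 * 	register long scno asm("r20") = nr;   // syscall number
 * 	register long a0   asm("r26") = arg0; // first argument
 *
 * 	asm volatile(
 * 		"ble 0x100(%%sr2, %%r0)\n"    // branch to gateway entry
 * 		" nop\n"                      // delay slot
 * 		: "=r" (ret), "+r" (scno), "+r" (a0)
 * 		:
 * 		: "r1", "r2", "r21", "r22", "r23", "r24", "r25",
 * 		  "r29", "r31", "memory");
 *
 * 	return ret;
 * }
 */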
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/linkage.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
* userspace application.
*/
#define KILL_INSN break 0,0
.level LEVEL
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
* dereferenced, so null pointers will still fault. We start
* the actual entry point at 0x100. We put break instructions
* at the beginning of the page to trap null indirect function
* pointers.
*/
.align PAGE_SIZE
ENTRY(linux_gateway_page)
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
KILL_INSN
.endr
/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (3)
lws_entry:
gate lws_start, %r0 /* increase privilege */
depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
/* Fill from 0xb8 to 0xe0 */
.rept 10
KILL_INSN
.endr
/* This function MUST be located at 0xe0 for glibc's threading
mechanism to work. DO NOT MOVE THIS CODE EVER! */
set_thread_pointer:
gate .+8, %r0 /* increase privilege */
depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
be 0(%sr7,%r31) /* return to user space */
mtctl %r26, %cr27 /* move arg0 to the control register */
/* Increase the chance of trapping if random jumps occur to this
address, fill from 0xf0 to 0x100 */
.rept 4
KILL_INSN
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
.align 256
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
mfsp %sr7,%r1 /* save user sr7 */
mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
* since we don't support wide userland processes. We could
* also save the current SM other than in r0 and restore it on
* exit from the syscall, and also use that value to know
* whether to do narrow or wide syscalls. -PB
*/
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
b,n 1f
/* The top halves of argument registers must be cleared on syscall
* entry from narrow executable.
*/
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
depdi 0, 31, 32, %r23
depdi 0, 31, 32, %r22
depdi 0, 31, 32, %r21
1:
#endif
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
xor %r1,%r30,%r30
ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
/* N.B.: It is critical that we don't set sr7 to 0 until r30
* contains a valid kernel stack pointer. It is also
* critical that we don't start using the kernel stack
* until after sr7 has been set to 0.
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
/* Save some registers for sigcontext and potential task
switch (see entry.S for the details of which ones are
saved/restored). TASK_PT_PSW is zeroed so we can see whether
a process is on a syscall or not. For an interrupt the real
PSW value is stored. This is needed for gdb and sys_ptrace. */
STREG %r0, TASK_PT_PSW(%r1)
STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
STREG %r19, TASK_PT_GR19(%r1)
LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
#ifdef CONFIG_64BIT
extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
#if 0
xor %r19,%r2,%r2 /* clear bottom bit */
depd,z %r19,1,1,%r19
std %r19,TASK_PT_PSW(%r1)
#endif
#endif
STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
STREG %r21, TASK_PT_GR21(%r1)
STREG %r22, TASK_PT_GR22(%r1)
STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
STREG %r27, TASK_PT_GR27(%r1) /* user dp */
STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
STREG %r0, TASK_PT_ORIG_R28(%r1) /* don't prohibit restarts */
STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
save_fp %r27 /* or potential task switch */
mfctl %cr11, %r27 /* i.e. SAR */
STREG %r27, TASK_PT_SAR(%r1)
loadgp
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
copy %r19,%r2 /* W bit back to r2 */
#else
/* no need to save these on stack in wide mode because the first 8
* args are passed in registers */
stw %r22, -52(%r30) /* 5th argument */
stw %r21, -56(%r30) /* 6th argument */
#endif
/* Are we being ptraced? */
mfctl %cr30, %r1
LDREG TI_FLAGS(%r1),%r1
ldi _TIF_SYSCALL_TRACE_MASK, %r19
and,COND(=) %r1, %r19, %r0
b,n .Ltracesys
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
ldil L%sys_call_table, %r1
ldo R%sys_call_table(%r1), %r19
#endif
comiclr,>> __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
* when not in_syscall, then we want to return via syscall_exit_rfi,
* not syscall_exit. Signal no. in r20, in_syscall in r25 (see
* trampoline code in signal.c).
*/
ldi __NR_rt_sigreturn,%r2
comb,= %r2,%r20,.Lrt_sigreturn
.Lin_syscall:
ldil L%syscall_exit,%r2
be 0(%sr7,%r19)
ldo R%syscall_exit(%r2),%r2
.Lrt_sigreturn:
comib,<> 0,%r25,.Lin_syscall
ldil L%syscall_exit_rfi,%r2
be 0(%sr7,%r19)
ldo R%syscall_exit_rfi(%r2),%r2
/* Note! Because we are not running where we were linked, any
calls to functions external to this file must be indirect. To
be safe, we apply the opposite rule to functions within this
file, with local labels given to them to ensure correctness. */
.Lsyscall_nosys:
syscall_nosys:
ldil L%syscall_exit,%r1
be R%syscall_exit(%sr7,%r1)
ldo -ENOSYS(%r0),%r28 /* set errno */
/* Warning! This trace code is a virtual duplicate of the code above so be
* sure to maintain both! */
.Ltracesys:
tracesys:
/* Need to save more registers so the debugger can see where we
* are. This saves only the lower 8 bits of PSW, so that the C
* bit is still clear on syscalls, and the D bit is set if this
* full register save path has been executed. We check the D
* bit on syscall_return_rfi to determine which registers to
* restore. An interrupt results in a full PSW saved with the
* C bit set, a non-traced syscall entry results in C and D clear
* in the saved PSW.
*/
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
ssm 0,%r2
STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */
mfsp %sr0,%r2
STREG %r2,TASK_PT_SR0(%r1)
mfsp %sr1,%r2
STREG %r2,TASK_PT_SR1(%r1)
mfsp %sr2,%r2
STREG %r2,TASK_PT_SR2(%r1)
mfsp %sr3,%r2
STREG %r2,TASK_PT_SR3(%r1)
STREG %r2,TASK_PT_SR4(%r1)
STREG %r2,TASK_PT_SR5(%r1)
STREG %r2,TASK_PT_SR6(%r1)
STREG %r2,TASK_PT_SR7(%r1)
STREG %r2,TASK_PT_IASQ0(%r1)
STREG %r2,TASK_PT_IASQ1(%r1)
LDREG TASK_PT_GR31(%r1),%r2
STREG %r2,TASK_PT_IAOQ0(%r1)
ldo 4(%r2),%r2
STREG %r2,TASK_PT_IAOQ1(%r1)
ldo TASK_REGS(%r1),%r2
/* reg_save %r2 */
STREG %r3,PT_GR3(%r2)
STREG %r4,PT_GR4(%r2)
STREG %r5,PT_GR5(%r2)
STREG %r6,PT_GR6(%r2)
STREG %r7,PT_GR7(%r2)
STREG %r8,PT_GR8(%r2)
STREG %r9,PT_GR9(%r2)
STREG %r10,PT_GR10(%r2)
STREG %r11,PT_GR11(%r2)
STREG %r12,PT_GR12(%r2)
STREG %r13,PT_GR13(%r2)
STREG %r14,PT_GR14(%r2)
STREG %r15,PT_GR15(%r2)
STREG %r16,PT_GR16(%r2)
STREG %r17,PT_GR17(%r2)
STREG %r18,PT_GR18(%r2)
/* Finished saving things for the debugger */
copy %r2,%r26
ldil L%do_syscall_trace_enter,%r1
ldil L%tracesys_next,%r2
be R%do_syscall_trace_enter(%sr7,%r1)
ldo R%tracesys_next(%r2),%r2
tracesys_next:
/* do_syscall_trace_enter either returned the syscallno, or -1L,
* so we skip restoring the PT_GR20 below, since we pulled it from
* task->thread.regs.gr[20] above.
*/
copy %ret0,%r20
ldil L%sys_call_table,%r1
ldo R%sys_call_table(%r1), %r19
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
stw %r22, -52(%r30) /* 5th argument */
stw %r21, -56(%r30) /* 6th argument */
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
* when not in_syscall, then we want to return via syscall_exit_rfi,
* not syscall_exit. Signal no. in r20, in_syscall in r25 (see
* trampoline code in signal.c).
*/
ldi __NR_rt_sigreturn,%r2
comb,= %r2,%r20,.Ltrace_rt_sigreturn
.Ltrace_in_syscall:
ldil L%tracesys_exit,%r2
be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2
/* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */
tracesys_exit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldo TASK_REGS(%r1),%r26
bl do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */
ldil L%syscall_exit,%r1
be,n R%syscall_exit(%sr7,%r1)
.Ltrace_rt_sigreturn:
comib,<> 0,%r25,.Ltrace_in_syscall
ldil L%tracesys_sigexit,%r2
be 0(%sr7,%r19)
ldo R%tracesys_sigexit(%r2),%r2
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
bl do_syscall_trace_exit,%r2
ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
be,n R%syscall_exit_rfi(%sr7,%r1)
/*********************************************************
32/64-bit Light-Weight-Syscall ABI
* - Indicates a hint for userspace inline asm
implementations.
Syscall number (caller-saves)
- %r20
* In asm clobber.
Argument registers (caller-saves)
- %r26, %r25, %r24, %r23, %r22
* In asm input.
Return registers (caller-saves)
- %r28 (return), %r21 (errno)
* In asm output.
Caller-saves registers
- %r1, %r27, %r29
- %r2 (return pointer)
- %r31 (ble link register)
* In asm clobber.
Callee-saves registers
- %r3-%r18
- %r30 (stack pointer)
* Not in asm clobber.
If userspace is 32-bit:
Callee-saves registers
- %r19 (32-bit PIC register)
Differences from 32-bit calling convention:
- Syscall number in %r20
- Additional argument register %r22 (arg4)
- Callee-saves %r19.
If userspace is 64-bit:
Callee-saves registers
- %r27 (64-bit PIC register)
Differences from 64-bit calling convention:
- Syscall number in %r20
- Additional argument register %r22 (arg4)
- Callee-saves %r27.
Error codes returned by entry path:
ENOSYS - r20 was an invalid LWS number.
*********************************************************/
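/*
 * Illustrative sketch, not part of the original file: a 32-bit userland
 * wrapper for LWS CAS following the ABI above. The wrapper name and
 * clobber list are assumptions; glibc carries the real version. Kept
 * inside this comment so the assembly file stays well-formed:
 *
 * static inline long lws_cas32(volatile unsigned int *mem,
 *                              unsigned int oldval, unsigned int newval)
 * {
 * 	register unsigned long r26 asm("r26") = (unsigned long) mem;
 * 	register unsigned long r25 asm("r25") = oldval;
 * 	register unsigned long r24 asm("r24") = newval;
 * 	register long prev asm("r28");   // previous value at *mem
 * 	register long err  asm("r21");   // 0 or -errno
 *
 * 	asm volatile(
 * 		"ble 0xb0(%%sr2, %%r0)\n"    // LWS gateway entry
 * 		" ldi 0, %%r20\n"            // delay slot: LWS index 0 = 32-bit CAS
 * 		: "=r" (prev), "=r" (err), "+r" (r26), "+r" (r25), "+r" (r24)
 * 		:
 * 		: "r1", "r2", "r20", "r22", "r23", "r29", "r31", "memory");
 *
 * 	// -EAGAIN means the hashed lock was contended: caller should retry.
 * 	return err ? err : (prev != oldval);
 * }
 */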
lws_start:
#ifdef CONFIG_64BIT
/* FIXME: If we are a 64-bit kernel just
* turn this on unconditionally.
*/
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
/* Clip LWS number to a 32-bit value always */
depdi 0, 31, 32, %r20
#endif
/* Is the lws entry number valid? */
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* WARNING: Trashing sr2 and sr3 */
mfsp %sr7,%r1 /* get userspace into sr3 */
mtsp %r1,%sr3
mtsp %r0,%sr2 /* get kernel space into sr2 */
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */
/* Jump to lws, lws table pointers already relocated */
be,n 0(%sr2,%r21)
lws_exit_nosys:
ldo -ENOSYS(%r0),%r21 /* set errno */
/* Fall through: Return to userspace */
lws_exit:
#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
* of sp. Extract it and reset W if it is zero */
extrd,u,*<> %r30,63,1,%r1
rsm PSW_SM_W, %r0
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
be,n 0(%sr7, %r31)
/***************************************************
Implementing 32bit CAS as an atomic operation:
%r26 - Address to examine
%r25 - Old value to check (old)
%r24 - New value to set (new)
%r28 - Return prev through this register.
%r21 - Kernel error code
If debugging is DISabled:
%r21 has the following meanings:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
If debugging is enabled:
EDEADLOCK - CAS called recursively.
EAGAIN && r28 == 1 - CAS is busy. Lock contended.
EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
EFAULT - Read or write failed.
Scratch: r20, r28, r1
****************************************************/
/* Do not enable LWS debugging */
#define ENABLE_LWS_DEBUG 0
/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
* have 64-bit input registers, and calling
* the 64-bit LWS CAS returns ENOSYS.
*/
b,n lws_exit_nosys
#endif
/* ELF32 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
/* Clip all the input registers */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
#endif
lws_compare_and_swap:
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
/* Extract four bits from r26 and hash lock (Bits 4-7) */
extru %r26, 27, 4, %r20
/* Find the lock to use: the hash index is one of 0 to
15, multiplied by 16 (to keep each lock 16-byte aligned),
then added to the lock table base. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
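/* Worked example: a word at an address ending in 0x5A hashes to index
(0x5A >> 4) & 0xf == 5, i.e. the sixth 16-byte slot in lws_lock_start. */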
# if ENABLE_LWS_DEBUG
/*
DEBUG, check for deadlock!
If the thread register values are the same
then we were the one that locked it last and
this is a recursive call that will deadlock.
We *must* give up this call and fail.
*/
ldw 4(%sr2,%r20), %r28 /* Load thread register */
/* WARNING: If cr27 cycles to the same value we have problems */
mfctl %cr27, %r21 /* Get current thread register */
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
b lws_exit /* Return error! */
ldo -EDEADLOCK(%r0), %r21
cas_lock:
cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
ldo 1(%r0), %r28 /* 1st case */
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
rsm PSW_SM_I, %r0 /* Disable interrupts */
/* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
/*
prev = *addr;
if ( prev == old )
*addr = new;
return prev;
*/
/* NOTES:
This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page,
so this process is never scheduled off
or ever sent any signal of any sort,
thus it is wholly atomic from userspace's
perspective.
*/
cas_action:
#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
/* DEBUG */
mfctl %cr27, %r1
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
1: ldw,ma 0(%sr3,%r26), %r28
sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%sr3,%r26)
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
#endif
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
3:
/* Error occurred on load or store */
/* Free lock */
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
ssm PSW_SM_I, %r0
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
nop
nop
nop
/* Two exception table entries, one for the load,
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
/***************************************************
New CAS implementation which uses pointers and variable size
information. The values pointed to by old and new MUST NOT change
while performing CAS. The lock only protects the value at %r26.
%r26 - Address to examine
%r25 - Pointer to the value to check (old)
%r24 - Pointer to the value to set (new)
%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
%r28 - Return non-zero on failure
%r21 - Kernel error code
%r21 has the following meanings:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
****************************************************/
/* ELF32 Process entry path */
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
/* Clip the input registers */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
depdi 0, 31, 32, %r23
#endif
/* Check the validity of the size argument */
subi,>>= 4, %r23, %r0
b,n lws_exit_nosys
/* Jump to the functions which will load the old and new values into
registers depending on their size */
shlw %r23, 2, %r29
blr %r29, %r0
nop
/* 8bit load */
4: ldb 0(%sr3,%r25), %r25
b cas2_lock_start
5: ldb 0(%sr3,%r24), %r24
nop
nop
nop
nop
nop
/* 16bit load */
6: ldh 0(%sr3,%r25), %r25
b cas2_lock_start
7: ldh 0(%sr3,%r24), %r24
nop
nop
nop
nop
nop
/* 32bit load */
8: ldw 0(%sr3,%r25), %r25
b cas2_lock_start
9: ldw 0(%sr3,%r24), %r24
nop
nop
nop
nop
nop
/* 64bit load */
#ifdef CONFIG_64BIT
10: ldd 0(%sr3,%r25), %r25
11: ldd 0(%sr3,%r24), %r24
#else
/* Load old value into r22/r23 - high/low */
10: ldw 0(%sr3,%r25), %r22
11: ldw 4(%sr3,%r25), %r23
/* Load new value into fr4 for atomic store later */
12: flddx 0(%sr3,%r24), %fr4
#endif
cas2_lock_start:
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
/* Extract four bits from r26 and hash lock (Bits 4-7) */
extru %r26, 27, 4, %r20
/* Find the lock to use: the hash index is one of 0 to
15, multiplied by 16 (to keep each lock 16-byte aligned),
then added to the lock table base. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
rsm PSW_SM_I, %r0 /* Disable interrupts */
/* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
cas2_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
/*
prev = *addr;
if ( prev == old )
*addr = new;
return prev;
*/
/* NOTES:
This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page,
so this process is never scheduled off
or ever sent any signal of any sort,
thus it is wholly atomic from userspace's
perspective.
*/
cas2_action:
/* Jump to the correct function */
blr %r29, %r0
/* Set %r28 as non-zero for now */
ldo 1(%r0),%r28
/* 8bit CAS */
13: ldb,ma 0(%sr3,%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
14: stb,ma %r24, 0(%sr3,%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
15: ldh,ma 0(%sr3,%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
16: sth,ma %r24, 0(%sr3,%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
17: ldw,ma 0(%sr3,%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
18: stw,ma %r24, 0(%sr3,%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd,ma 0(%sr3,%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
20: std,ma %r24, 0(%sr3,%r26)
copy %r0, %r28
#else
/* Compare first word */
19: ldw,ma 0(%sr3,%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
20: ldw,ma 4(%sr3,%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
21: fstdx %fr4, 0(%sr3,%r26)
copy %r0, %r28
#endif
cas2_end:
/* Free lock */
stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
22:
/* Error occurred on load or store */
/* Free lock */
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
nop
nop
/* Exception table entries, for the load and store, return EFAULT.
Each of the entries must be relocated. */
ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
#ifndef CONFIG_64BIT
ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
#endif
/* Make sure nothing else is placed on this page */
.align PAGE_SIZE
END(linux_gateway_page)
ENTRY(end_linux_gateway_page)
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
.section .rodata,"a"
.align 8
/* Light-weight-syscall table */
/* Start of lws table. */
ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */
LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */
END(lws_table)
/* End of lws table */
.align 8
ENTRY(sys_call_table)
#include "syscall_table.S"
END(sys_call_table)
#ifdef CONFIG_64BIT
.align 8
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
END(sys_call_table64)
#endif
/*
All light-weight-syscall atomic operations
will use this set of locks
NOTE: The lws_lock_start symbol must be
at least 16-byte aligned for safe use
with ldcw.
*/
.section .data
.align L1_CACHE_BYTES
ENTRY(lws_lock_start)
/* lws locks */
.rept 16
/* Keep locks aligned at 16-bytes */
.word 1
.word 0
.word 0
.word 0
.endr
END(lws_lock_start)
.previous
.end

View file

@@ -0,0 +1,447 @@
/* System Call Table
*
* Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
* narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
* implementation is required on wide palinux. Use ENTRY_COMP where
* the compatibility layer has a useful 32-bit implementation.
*/
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys32_##_name_
#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
#define ENTRY_OURS(_name_) .dword parisc_##_name_
#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys_##_name_
#define ENTRY_UHOH(_name_) .dword sys_##_name_
#define ENTRY_OURS(_name_) .dword sys_##_name_
#define ENTRY_COMP(_name_) .dword sys_##_name_
#else
#define ENTRY_SAME(_name_) .word sys_##_name_
#define ENTRY_DIFF(_name_) .word sys_##_name_
#define ENTRY_UHOH(_name_) .word sys_##_name_
#define ENTRY_OURS(_name_) .word parisc_##_name_
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
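/* For example, ENTRY_COMP(open) assembles to:
 * .dword compat_sys_open -- 64-bit kernel, table used by 32-bit tasks
 * .dword sys_open -- 64-bit kernel, SYSCALL_TABLE_64BIT pass
 * .word sys_open -- 32-bit kernel
 * while ENTRY_OURS(pread64) picks up the parisc_pread64 wrapper from
 * sys_parisc.c everywhere except the 64-bit table, which uses
 * sys_pread64 directly.
 */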
ENTRY_SAME(restart_syscall) /* 0 */
ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
ENTRY_COMP(open) /* 5 */
ENTRY_SAME(close)
ENTRY_SAME(waitpid)
ENTRY_SAME(creat)
ENTRY_SAME(link)
ENTRY_SAME(unlink) /* 10 */
ENTRY_COMP(execve)
ENTRY_SAME(chdir)
/* See comments in kernel/time.c!!! Maybe we don't need this? */
ENTRY_COMP(time)
ENTRY_SAME(mknod)
ENTRY_SAME(chmod) /* 15 */
ENTRY_SAME(lchown)
ENTRY_SAME(socket)
/* struct stat is MAYBE identical wide and narrow ?? */
ENTRY_COMP(newstat)
ENTRY_COMP(lseek)
ENTRY_SAME(getpid) /* 20 */
/* the 'void * data' parameter may need re-packing in wide */
ENTRY_COMP(mount)
/* concerned about struct sockaddr in wide/narrow */
/* ---> I think sockaddr is OK unless the compiler packs the struct */
/* differently to align the char array */
ENTRY_SAME(bind)
ENTRY_SAME(setuid)
ENTRY_SAME(getuid)
ENTRY_COMP(stime) /* 25 */
ENTRY_COMP(ptrace)
ENTRY_SAME(alarm)
/* see stat comment */
ENTRY_COMP(newfstat)
ENTRY_SAME(pause)
/* struct utimbuf uses time_t which might vary */
ENTRY_COMP(utime) /* 30 */
/* struct sockaddr... */
ENTRY_SAME(connect)
ENTRY_SAME(listen)
ENTRY_SAME(access)
ENTRY_SAME(nice)
/* struct sockaddr... */
ENTRY_SAME(accept) /* 35 */
ENTRY_SAME(sync)
ENTRY_SAME(kill)
ENTRY_SAME(rename)
ENTRY_SAME(mkdir)
ENTRY_SAME(rmdir) /* 40 */
ENTRY_SAME(dup)
ENTRY_SAME(pipe)
ENTRY_COMP(times)
/* struct sockaddr... */
ENTRY_SAME(getsockname)
/* it seems possible brk() could return a >4G pointer... */
ENTRY_SAME(brk) /* 45 */
ENTRY_SAME(setgid)
ENTRY_SAME(getgid)
ENTRY_SAME(signal)
ENTRY_SAME(geteuid)
ENTRY_SAME(getegid) /* 50 */
ENTRY_SAME(acct)
ENTRY_SAME(umount)
/* struct sockaddr... */
ENTRY_SAME(getpeername)
ENTRY_COMP(ioctl)
ENTRY_COMP(fcntl) /* 55 */
ENTRY_SAME(socketpair)
ENTRY_SAME(setpgid)
ENTRY_SAME(send)
ENTRY_SAME(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
ENTRY_COMP(ustat)
ENTRY_SAME(dup2)
ENTRY_SAME(getppid)
ENTRY_SAME(getpgrp) /* 65 */
ENTRY_SAME(setsid)
ENTRY_SAME(pivot_root)
/* I don't like this */
ENTRY_UHOH(sgetmask)
ENTRY_UHOH(ssetmask)
ENTRY_SAME(setreuid) /* 70 */
ENTRY_SAME(setregid)
ENTRY_SAME(mincore)
ENTRY_COMP(sigpending)
ENTRY_SAME(sethostname)
/* Following 3 have linux-common-code structs containing longs -( */
ENTRY_COMP(setrlimit) /* 75 */
ENTRY_COMP(getrlimit)
ENTRY_COMP(getrusage)
/* struct timeval and timezone are maybe?? consistent wide and narrow */
ENTRY_COMP(gettimeofday)
ENTRY_COMP(settimeofday)
ENTRY_SAME(getgroups) /* 80 */
ENTRY_SAME(setgroups)
/* struct socketaddr... */
ENTRY_SAME(sendto)
ENTRY_SAME(symlink)
/* see stat comment */
ENTRY_COMP(newlstat)
ENTRY_SAME(readlink) /* 85 */
ENTRY_SAME(ni_syscall) /* was uselib */
ENTRY_SAME(swapon)
ENTRY_SAME(reboot)
ENTRY_SAME(mmap2)
ENTRY_SAME(mmap) /* 90 */
ENTRY_SAME(munmap)
ENTRY_COMP(truncate)
ENTRY_COMP(ftruncate)
ENTRY_SAME(fchmod)
ENTRY_SAME(fchown) /* 95 */
ENTRY_SAME(getpriority)
ENTRY_SAME(setpriority)
ENTRY_SAME(recv)
ENTRY_COMP(statfs)
ENTRY_COMP(fstatfs) /* 100 */
ENTRY_SAME(stat64)
ENTRY_SAME(ni_syscall) /* was socketcall */
ENTRY_SAME(syslog)
/* even though manpage says struct timeval contains longs, ours has
* time_t and suseconds_t -- both of which are safe wide/narrow */
ENTRY_COMP(setitimer)
ENTRY_COMP(getitimer) /* 105 */
ENTRY_SAME(capget)
ENTRY_SAME(capset)
ENTRY_OURS(pread64)
ENTRY_OURS(pwrite64)
ENTRY_SAME(getcwd) /* 110 */
ENTRY_SAME(vhangup)
ENTRY_SAME(fstat64)
ENTRY_SAME(vfork_wrapper)
/* struct rusage contains longs... */
ENTRY_COMP(wait4)
ENTRY_SAME(swapoff) /* 115 */
ENTRY_COMP(sysinfo)
ENTRY_SAME(shutdown)
ENTRY_SAME(fsync)
ENTRY_SAME(madvise)
ENTRY_SAME(clone_wrapper) /* 120 */
ENTRY_SAME(setdomainname)
ENTRY_COMP(sendfile)
/* struct sockaddr... */
ENTRY_SAME(recvfrom)
/* struct timex contains longs */
ENTRY_COMP(adjtimex)
ENTRY_SAME(mprotect) /* 125 */
/* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
ENTRY_COMP(sigprocmask)
ENTRY_SAME(ni_syscall) /* create_module */
ENTRY_SAME(init_module)
ENTRY_SAME(delete_module)
ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */
/* time_t inside struct dqblk */
ENTRY_SAME(quotactl)
ENTRY_SAME(getpgid)
ENTRY_SAME(fchdir)
ENTRY_SAME(bdflush)
ENTRY_SAME(sysfs) /* 135 */
ENTRY_OURS(personality)
ENTRY_SAME(ni_syscall) /* for afs_syscall */
ENTRY_SAME(setfsuid)
ENTRY_SAME(setfsgid)
/* I think this might work */
ENTRY_SAME(llseek) /* 140 */
ENTRY_COMP(getdents)
/* it is POSSIBLE that select will be OK because even though fd_set
* contains longs, the macros and sizes are clever. */
ENTRY_COMP(select)
ENTRY_SAME(flock)
ENTRY_SAME(msync)
/* struct iovec contains pointers */
ENTRY_COMP(readv) /* 145 */
ENTRY_COMP(writev)
ENTRY_SAME(getsid)
ENTRY_SAME(fdatasync)
/* struct __sysctl_args is a mess */
ENTRY_COMP(sysctl)
ENTRY_SAME(mlock) /* 150 */
ENTRY_SAME(munlock)
ENTRY_SAME(mlockall)
ENTRY_SAME(munlockall)
/* struct sched_param is ok for now */
ENTRY_SAME(sched_setparam)
ENTRY_SAME(sched_getparam) /* 155 */
ENTRY_SAME(sched_setscheduler)
ENTRY_SAME(sched_getscheduler)
ENTRY_SAME(sched_yield)
ENTRY_SAME(sched_get_priority_max)
ENTRY_SAME(sched_get_priority_min) /* 160 */
ENTRY_COMP(sched_rr_get_interval)
ENTRY_COMP(nanosleep)
ENTRY_SAME(mremap)
ENTRY_SAME(setresuid)
ENTRY_SAME(getresuid) /* 165 */
ENTRY_COMP(sigaltstack)
ENTRY_SAME(ni_syscall) /* query_module */
ENTRY_SAME(poll)
/* structs contain pointers and an in_addr... */
ENTRY_SAME(ni_syscall) /* was nfsservctl */
ENTRY_SAME(setresgid) /* 170 */
ENTRY_SAME(getresgid)
ENTRY_SAME(prctl)
/* signals need a careful review */
ENTRY_SAME(rt_sigreturn_wrapper)
ENTRY_COMP(rt_sigaction)
ENTRY_COMP(rt_sigprocmask) /* 175 */
ENTRY_COMP(rt_sigpending)
ENTRY_COMP(rt_sigtimedwait)
/* even though the struct siginfo_t is different, it appears like
* all the paths use values which should be same wide and narrow.
* Also the struct is padded to 128 bytes which means we don't have
* to worry about faulting trying to copy in a larger 64-bit
* struct from a 32-bit user-space app.
*/
ENTRY_COMP(rt_sigqueueinfo)
ENTRY_COMP(rt_sigsuspend)
ENTRY_SAME(chown) /* 180 */
/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
ENTRY_COMP(setsockopt)
ENTRY_COMP(getsockopt)
ENTRY_COMP(sendmsg)
ENTRY_COMP(recvmsg)
ENTRY_SAME(semop) /* 185 */
ENTRY_SAME(semget)
ENTRY_COMP(semctl)
ENTRY_COMP(msgsnd)
ENTRY_COMP(msgrcv)
ENTRY_SAME(msgget) /* 190 */
ENTRY_COMP(msgctl)
ENTRY_COMP(shmat)
ENTRY_SAME(shmdt)
ENTRY_SAME(shmget)
ENTRY_COMP(shmctl) /* 195 */
ENTRY_SAME(ni_syscall) /* streams1 */
ENTRY_SAME(ni_syscall) /* streams2 */
ENTRY_SAME(lstat64)
ENTRY_OURS(truncate64)
ENTRY_OURS(ftruncate64) /* 200 */
ENTRY_SAME(getdents64)
ENTRY_COMP(fcntl64)
ENTRY_SAME(ni_syscall) /* attrctl -- dead */
ENTRY_SAME(ni_syscall) /* acl_get -- dead */
ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */
ENTRY_SAME(gettid)
ENTRY_OURS(readahead)
ENTRY_SAME(tkill)
ENTRY_COMP(sendfile64)
ENTRY_COMP(futex) /* 210 */
ENTRY_COMP(sched_setaffinity)
ENTRY_COMP(sched_getaffinity)
ENTRY_SAME(ni_syscall) /* set_thread_area */
ENTRY_SAME(ni_syscall) /* get_thread_area */
ENTRY_COMP(io_setup) /* 215 */
ENTRY_SAME(io_destroy)
ENTRY_COMP(io_getevents)
ENTRY_COMP(io_submit)
ENTRY_SAME(io_cancel)
ENTRY_SAME(alloc_hugepages) /* 220 */
ENTRY_SAME(free_hugepages)
ENTRY_SAME(exit_group)
ENTRY_COMP(lookup_dcookie)
ENTRY_SAME(epoll_create)
ENTRY_SAME(epoll_ctl) /* 225 */
ENTRY_SAME(epoll_wait)
ENTRY_SAME(remap_file_pages)
ENTRY_COMP(semtimedop)
ENTRY_COMP(mq_open)
ENTRY_SAME(mq_unlink) /* 230 */
ENTRY_COMP(mq_timedsend)
ENTRY_COMP(mq_timedreceive)
ENTRY_COMP(mq_notify)
ENTRY_COMP(mq_getsetattr)
ENTRY_COMP(waitid) /* 235 */
ENTRY_OURS(fadvise64_64)
ENTRY_SAME(set_tid_address)
ENTRY_SAME(setxattr)
ENTRY_SAME(lsetxattr)
ENTRY_SAME(fsetxattr) /* 240 */
ENTRY_SAME(getxattr)
ENTRY_SAME(lgetxattr)
ENTRY_SAME(fgetxattr)
ENTRY_SAME(listxattr)
ENTRY_SAME(llistxattr) /* 245 */
ENTRY_SAME(flistxattr)
ENTRY_SAME(removexattr)
ENTRY_SAME(lremovexattr)
ENTRY_SAME(fremovexattr)
ENTRY_COMP(timer_create) /* 250 */
ENTRY_COMP(timer_settime)
ENTRY_COMP(timer_gettime)
ENTRY_SAME(timer_getoverrun)
ENTRY_SAME(timer_delete)
ENTRY_COMP(clock_settime) /* 255 */
ENTRY_COMP(clock_gettime)
ENTRY_COMP(clock_getres)
ENTRY_COMP(clock_nanosleep)
ENTRY_SAME(tgkill)
ENTRY_COMP(mbind) /* 260 */
ENTRY_COMP(get_mempolicy)
ENTRY_COMP(set_mempolicy)
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
ENTRY_SAME(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)
ENTRY_SAME(inotify_add_watch) /* 270 */
ENTRY_SAME(inotify_rm_watch)
ENTRY_SAME(migrate_pages)
ENTRY_COMP(pselect6)
ENTRY_COMP(ppoll)
ENTRY_COMP(openat) /* 275 */
ENTRY_SAME(mkdirat)
ENTRY_SAME(mknodat)
ENTRY_SAME(fchownat)
ENTRY_COMP(futimesat)
ENTRY_SAME(fstatat64) /* 280 */
ENTRY_SAME(unlinkat)
ENTRY_SAME(renameat)
ENTRY_SAME(linkat)
ENTRY_SAME(symlinkat)
ENTRY_SAME(readlinkat) /* 285 */
ENTRY_SAME(fchmodat)
ENTRY_SAME(faccessat)
ENTRY_SAME(unshare)
ENTRY_COMP(set_robust_list)
ENTRY_COMP(get_robust_list) /* 290 */
ENTRY_SAME(splice)
ENTRY_OURS(sync_file_range)
ENTRY_SAME(tee)
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
ENTRY_COMP(utimensat)
ENTRY_COMP(signalfd)
ENTRY_SAME(ni_syscall) /* was timerfd */
ENTRY_SAME(eventfd)
ENTRY_OURS(fallocate) /* 305 */
ENTRY_SAME(timerfd_create)
ENTRY_COMP(timerfd_settime)
ENTRY_COMP(timerfd_gettime)
ENTRY_COMP(signalfd4)
ENTRY_SAME(eventfd2) /* 310 */
ENTRY_SAME(epoll_create1)
ENTRY_SAME(dup3)
ENTRY_SAME(pipe2)
ENTRY_SAME(inotify_init1)
ENTRY_COMP(preadv) /* 315 */
ENTRY_COMP(pwritev)
ENTRY_COMP(rt_tgsigqueueinfo)
ENTRY_SAME(perf_event_open)
ENTRY_COMP(recvmmsg)
ENTRY_SAME(accept4) /* 320 */
ENTRY_SAME(prlimit64)
ENTRY_SAME(fanotify_init)
ENTRY_DIFF(fanotify_mark)
ENTRY_COMP(clock_adjtime)
ENTRY_SAME(name_to_handle_at) /* 325 */
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
ENTRY_SAME(setns)
ENTRY_COMP(sendmmsg)
ENTRY_COMP(process_vm_readv) /* 330 */
ENTRY_COMP(process_vm_writev)
ENTRY_SAME(kcmp)
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
ENTRY_COMP(utimes)
ENTRY_SAME(renameat2)
ENTRY_SAME(seccomp)
ENTRY_SAME(getrandom)
ENTRY_SAME(memfd_create) /* 340 */
ENTRY_SAME(bpf)
/* Nothing yet */
#undef ENTRY_SAME
#undef ENTRY_DIFF
#undef ENTRY_UHOH
#undef ENTRY_COMP
#undef ENTRY_OURS

arch/parisc/kernel/time.c
View file

@@ -0,0 +1,273 @@
/*
* linux/arch/parisc/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
* Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
*
* 1994-07-02 Alan Modra
* fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
* 1998-12-20 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <linux/timex.h>
static unsigned long clocktick __read_mostly; /* timer cycles per tick */
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
* a pair of registers; one is read-only and one is write-only; both
* accessed through CR16. The read-only register is 32 or 64 bits wide,
* and increments by 1 every CPU clock tick. The architecture only
* guarantees us a rate between 0.5 and 2, but all implementations use a
* rate of 1. The write-only register is 32-bits wide. When the lowest
* 32 bits of the read-only register compare equal to the write-only
* register, it raises a maskable external interrupt. Each processor has
* an Interval Timer of its own and they are not synchronised.
*
* We want to generate an interrupt every 1/HZ seconds. So we program
* CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
* is programmed with the intended time of the next tick. We can be
* held off for an arbitrarily long period of time by interrupts being
* disabled, so we may miss one or more ticks.
*/
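/*
 * Worked example (hypothetical 440 MHz CPU, HZ == 100): firmware
 * reports PAGE0->mem_10msec == 4,400,000 CR16 cycles per 10 ms, so
 * time_init() below computes clocktick = (100 * 4400000) / HZ ==
 * 4,400,000 cycles between interrupts - exactly one 10 ms tick.
 */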
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
unsigned long now, now2;
unsigned long next_tick;
unsigned long cycles_elapsed, ticks_elapsed = 1;
unsigned long cycles_remainder;
unsigned int cpu = smp_processor_id();
struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
profile_tick(CPU_PROFILING);
/* Initialize next_tick to the expected tick time. */
next_tick = cpuinfo->it_value;
/* Get current cycle counter (Control Register 16). */
now = mfctl(16);
cycles_elapsed = now - next_tick;
if ((cycles_elapsed >> 6) < cpt) {
/* use "cheap" math (add/subtract) instead
* of the more expensive div/mul method
*/
cycles_remainder = cycles_elapsed;
while (cycles_remainder > cpt) {
cycles_remainder -= cpt;
ticks_elapsed++;
}
} else {
/* TODO: Reduce this to one fdiv op */
cycles_remainder = cycles_elapsed % cpt;
ticks_elapsed += cycles_elapsed / cpt;
}
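/* The test above asks "did we miss fewer than 64 ticks?" - if so, a
 * short subtract loop is cheaper than the div/mod pair. */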
/* convert from "division remainder" to "remainder of clock tick" */
cycles_remainder = cpt - cycles_remainder;
/* Determine when (in CR16 cycles) next IT interrupt will fire.
* We want IT to fire modulo clocktick even if we miss/skip some.
* But those interrupts don't in fact get delivered that regularly.
*/
next_tick = now + cycles_remainder;
cpuinfo->it_value = next_tick;
/* Program when the IT should deliver the next interrupt.
* Only bottom 32-bits of next_tick are writable in CR16!
*/
mtctl(next_tick, 16);
/* Skip one clocktick on purpose if we missed next_tick.
* The new CR16 must be "later" than the current CR16, otherwise
* the itimer would not fire until CR16 wrapped - e.g. 4 seconds
* later on a 1 GHz processor. We'll account for the missed
* tick on the next timer interrupt.
*
* "next_tick - now" will always give the difference regardless
* of whether one or the other wrapped. If "now" is "bigger" we'll end up
* with a very large unsigned number.
*/
now2 = mfctl(16);
if (next_tick - now2 > cpt)
mtctl(next_tick+cpt, 16);
#if 1
/*
* GGG: DEBUG code for how many cycles programming CR16 used.
*/
if (unlikely(now2 - now > 0x3000)) /* 12K cycles */
printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
" cyc %lX rem %lX "
" next/now %lX/%lX\n",
cpu, now2 - now, cycles_elapsed, cycles_remainder,
next_tick, now );
#endif
/* Can we differentiate between "early CR16" (aka Scenario 1) and
* "long delay" (aka Scenario 3)? I don't think so.
*
* Timer_interrupt will be delivered at least a few hundred cycles
* after the IT fires. But it's arbitrary how much time passes
* before we call it "late". I've picked one second.
*
	 * It's important that NO printk's occur between reading CR16
	 * and setting up the next value; they may introduce huge variance.
*/
if (unlikely(ticks_elapsed > HZ)) {
/* Scenario 3: very long delay? bad in any case */
printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
" cycles %lX rem %lX "
" next/now %lX/%lX\n",
cpu,
cycles_elapsed, cycles_remainder,
next_tick, now );
}
/* Done mucking with unreliable delivery of interrupts.
	 * Go do system housekeeping.
*/
if (!--cpuinfo->prof_counter) {
cpuinfo->prof_counter = cpuinfo->prof_multiplier;
update_process_times(user_mode(get_irq_regs()));
}
if (cpu == 0)
xtime_update(ticks_elapsed);
return IRQ_HANDLED;
}
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (regs->gr[0] & PSW_N)
pc -= 4;
#ifdef CONFIG_SMP
if (in_lock_functions(pc))
pc = regs->gr[2];
#endif
return pc;
}
EXPORT_SYMBOL(profile_pc);
/* clock source code */
static cycle_t read_cr16(struct clocksource *cs)
{
return get_cycles();
}
static struct clocksource clocksource_cr16 = {
.name = "cr16",
.rating = 300,
.read = read_cr16,
.mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#ifdef CONFIG_SMP
int update_cr16_clocksource(void)
{
/* since the cr16 cycle counters are not synchronized across CPUs,
we'll check if we should switch to a safe clocksource: */
if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
clocksource_change_rating(&clocksource_cr16, 0);
return 1;
}
return 0;
}
#else
int update_cr16_clocksource(void)
{
return 0; /* no change */
}
#endif /*CONFIG_SMP*/
void __init start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
per_cpu(cpu_data, cpu).it_value = next_tick;
}
static struct platform_device rtc_generic_dev = {
.name = "rtc-generic",
.id = -1,
};
static int __init rtc_init(void)
{
if (platform_device_register(&rtc_generic_dev) < 0)
printk(KERN_ERR "unable to register rtc device...\n");
/* not necessarily an error */
return 0;
}
module_init(rtc_init);
void read_persistent_clock(struct timespec *ts)
{
static struct pdc_tod tod_data;
if (pdc_tod_read(&tod_data) == 0) {
ts->tv_sec = tod_data.tod_sec;
ts->tv_nsec = tod_data.tod_usec * 1000;
} else {
printk(KERN_ERR "Error reading tod clock\n");
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
}
void __init time_init(void)
{
unsigned long current_cr16_khz;
clocktick = (100 * PAGE0->mem_10msec) / HZ;
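	/*
	 * Worked example (hypothetical 250 MHz machine): firmware reports
	 * PAGE0->mem_10msec = 2,500,000 CR16 cycles per 10 ms.  With
	 * HZ == 100 that gives clocktick = (100 * 2500000) / 100 =
	 * 2,500,000 cycles between ticks, i.e. one interrupt per 10 ms,
	 * and current_cr16_khz below becomes 2500000 / 10 = 250,000 kHz.
	 */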
start_cpu_itimer(); /* get CPU 0 started */
/* register at clocksource framework */
current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}

View file

@ -0,0 +1,37 @@
/*
* arch/parisc/kernel/topology.c - Populate sysfs with topology information
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cache.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int num;
for_each_present_cpu(num) {
register_cpu(&per_cpu(cpu_devices, num), num);
}
return 0;
}
subsys_initcall(topology_init);

870
arch/parisc/kernel/traps.c Normal file
View file

@ -0,0 +1,870 @@
/*
* linux/arch/parisc/traps.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs);
static int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
while (mask != 0) {
*buf++ = (mask & x ? '1' : '0');
mask >>= 1;
}
*buf = '\0';
return nbits;
}
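/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	char buf[33];
 *	printbinary(buf, 0xa5, 8);	// buf now holds "10100101"
 *
 * print_gr() and print_fr() below use it to render the 32 PSW and
 * FPSR flag bits under their single-letter legends.
 */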
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx" /* fpregs are 64-bit always */
#define PRINTREGS(lvl,r,f,fmt,x) \
printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
(r)[(x)+2], (r)[(x)+3])
static void print_gr(char *level, struct pt_regs *regs)
{
int i;
char buf[64];
printk("%s\n", level);
printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
printbinary(buf, regs->gr[0], 32);
printk("%sPSW: %s %s\n", level, buf, print_tainted());
for (i = 0; i < 32; i += 4)
PRINTREGS(level, regs->gr, "r", RFMT, i);
}
static void print_fr(char *level, struct pt_regs *regs)
{
int i;
char buf[64];
struct { u32 sw[2]; } s;
/* FR are 64bit everywhere. Need to use asm to get the content
* of fpsr/fper1, and we assume that we won't have a FP Identify
* in our way, otherwise we're screwed.
* The fldd is used to restore the T-bit if there was one, as the
* store clears it anyway.
* PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
asm volatile ("fstd %%fr0,0(%1) \n\t"
"fldd 0(%1),%%fr0 \n\t"
: "=m" (s) : "r" (&s) : "r0");
printk("%s\n", level);
printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
printbinary(buf, s.sw[0], 32);
printk("%sFPSR: %s\n", level, buf);
printk("%sFPER1: %08x\n", level, s.sw[1]);
/* here we'll print fr0 again, tho it'll be meaningless */
for (i = 0; i < 32; i += 4)
PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
void show_regs(struct pt_regs *regs)
{
int i, user;
char *level;
unsigned long cr30, cr31;
user = user_mode(regs);
level = user ? KERN_DEBUG : KERN_CRIT;
show_regs_print_info(level);
print_gr(level, regs);
for (i = 0; i < 8; i += 4)
PRINTREGS(level, regs->sr, "sr", RFMT, i);
if (user)
print_fr(level, regs);
cr30 = mfctl(30);
cr31 = mfctl(31);
printk("%s\n", level);
printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
level, regs->iir, regs->isr, regs->ior);
printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
level, current_thread_info()->cpu, cr30, cr31);
printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
if (user) {
printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
} else {
printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
parisc_show_stack(current, NULL, regs);
}
}
static DEFINE_RATELIMIT_STATE(_hppa_rs,
DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
printk(fmt, ##__VA_ARGS__); \
show_regs(regs); \
} \
}
static void do_show_stack(struct unwind_frame_info *info)
{
int i = 1;
printk(KERN_CRIT "Backtrace:\n");
while (i <= 16) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
printk(KERN_CRIT " [<" RFMT ">] %pS\n",
info->ip, (void *) info->ip);
i++;
}
}
printk(KERN_CRIT "\n");
}
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs)
{
struct unwind_frame_info info;
struct task_struct *t;
t = task ? task : current;
if (regs) {
unwind_frame_init(&info, t, regs);
goto show_stack;
}
if (t == current) {
unsigned long sp;
HERE:
asm volatile ("copy %%r30, %0" : "=r"(sp));
{
struct pt_regs r;
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long)&&HERE;
r.gr[2] = (unsigned long)__builtin_return_address(0);
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
}
} else {
unwind_frame_init_from_blocked_task(&info, t);
}
show_stack:
do_show_stack(&info);
}
void show_stack(struct task_struct *t, unsigned long *sp)
{
return parisc_show_stack(t, sp, NULL);
}
int is_valid_bugaddr(unsigned long iaoq)
{
return 1;
}
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs)) {
if (err == 0)
return; /* STFU */
parisc_printk_ratelimited(1, regs,
KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
return;
}
oops_in_progress = 1;
oops_enter();
/* Amuse the user in a SPARC fashion */
if (err) printk(KERN_CRIT
" _______________________________ \n"
" < Your System ate a SPARC! Gah! >\n"
" ------------------------------- \n"
" \\ ^__^\n"
" (__)\\ )\\/\\\n"
" U ||----w |\n"
" || ||\n");
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* maybe the kernel hasn't booted very far yet and hasn't been able
* to initialize the serial or STI console. In that case we should
* re-enable the pdc console, so that the user will be able to
* identify the problem. */
if (!console_drivers)
pdc_console_restart();
if (err)
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, task_pid_nr(current), str, err);
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __func__);
local_irq_enable();
while (1);
}
current->thread.flags |= PARISC_KERNEL_DEATH;
show_regs(regs);
dump_stack();
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
oops_exit();
do_exit(SIGSEGV);
}
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
struct siginfo si;
si.si_signo = SIGTRAP;
si.si_errno = 0;
si.si_code = wot;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
force_sig_info(SIGTRAP, &si, current);
}
static void handle_break(struct pt_regs *regs)
{
unsigned iir = regs->iir;
if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
/* check if a BUG() or WARN() trapped here. */
enum bug_trap_type tt;
tt = report_bug(regs->iaoq[0] & ~3, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
regs->iaoq[0] += 4;
regs->iaoq[1] += 4;
return; /* return to next instruction when WARN_ON(). */
}
die_if_kernel("Unknown kernel breakpoint", regs,
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
if (unlikely(iir != GDB_BREAK_INSN))
parisc_printk_ratelimited(0, regs,
KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
iir & 31, (iir>>13) & ((1<<13)-1),
task_pid_nr(current), current->comm);
/* send standard GDB signal */
handle_gdb_break(regs, TRAP_BRKPT);
}
static void default_trap(int code, struct pt_regs *regs)
{
printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
show_regs(regs);
}
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
register int i;
extern unsigned int hpmc_pim_data[];
struct pdc_hpmc_pim_11 *pim_narrow;
struct pdc_hpmc_pim_20 *pim_wide;
if (boot_cpu_data.cpu_type >= pcxu) {
pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
/*
* Note: The following code will probably generate a
* bunch of truncation error warnings from the compiler.
* Could be handled with an ifdef, but perhaps there
* is a better way.
*/
regs->gr[0] = pim_wide->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_wide->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_wide->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_wide->sr[i];
regs->iasq[0] = pim_wide->cr[17];
regs->iasq[1] = pim_wide->iasq_back;
regs->iaoq[0] = pim_wide->cr[18];
regs->iaoq[1] = pim_wide->iaoq_back;
regs->sar = pim_wide->cr[11];
regs->iir = pim_wide->cr[19];
regs->isr = pim_wide->cr[20];
regs->ior = pim_wide->cr[21];
}
else {
pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
regs->gr[0] = pim_narrow->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_narrow->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_narrow->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_narrow->sr[i];
regs->iasq[0] = pim_narrow->cr[17];
regs->iasq[1] = pim_narrow->iasq_back;
regs->iaoq[0] = pim_narrow->cr[18];
regs->iaoq[1] = pim_narrow->iaoq_back;
regs->sar = pim_narrow->cr[11];
regs->iir = pim_narrow->cr[19];
regs->isr = pim_narrow->cr[20];
regs->ior = pim_narrow->cr[21];
}
/*
* The following fields only have meaning if we came through
* another path. So just zero them here.
*/
regs->ksp = 0;
regs->kpc = 0;
regs->orig_r28 = 0;
}
/*
* This routine is called as a last resort when everything else
* has gone clearly wrong. We get called for faults in kernel space,
* and HPMC's.
*/
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
static DEFINE_SPINLOCK(terminate_lock);
oops_in_progress = 1;
set_eiem(0);
local_irq_disable();
spin_lock(&terminate_lock);
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* restart pdc console if necessary */
if (!console_drivers)
pdc_console_restart();
/* Not all paths will gutter the processor... */
switch(code){
case 1:
transfer_pim_to_trap_frame(regs);
break;
default:
/* Fall through */
break;
}
{
/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
struct unwind_frame_info info;
unwind_frame_init(&info, current, regs);
do_show_stack(&info);
}
printk("\n");
printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
msg, code, regs, offset);
show_regs(regs);
spin_unlock(&terminate_lock);
/* put soft power button back under hardware control;
* if the user had pressed it once at any time, the
* system will shut down immediately right here. */
pdc_soft_power_button(0);
/* Call kernel panic() so reboot timeouts work properly
* FIXME: This function should be on the list of
* panic notifiers, and we should call panic
* directly from the location that we wish.
* e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
* This hack works, prints the panic message twice,
* and it enables reboot timers!
*/
panic(msg);
}
void notrace handle_interruption(int code, struct pt_regs *regs)
{
unsigned long fault_address = 0;
unsigned long fault_space = 0;
struct siginfo si;
if (code == 1)
pdc_console_restart(); /* switch back to pdc if HPMC */
else
local_irq_enable();
/* Security check:
* If the priority level is still user, and the
* faulting space is not equal to the active space
* then the user is attempting something in a space
* that does not belong to them. Kill the process.
*
* This is normally the situation when the user
* attempts to jump into the kernel space at the
* wrong offset, be it at the gateway page or a
* random location.
*
* We cannot normally signal the process because it
* could *be* on the gateway page, and processes
* executing on the gateway page can't have signals
* delivered.
*
	 * We merely readjust the address into the user's
* space, at a destination address of zero, and
* allow processing to continue.
*/
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
regs->iaoq[0] = 0 | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
return;
}
#if 0
printk(KERN_CRIT "Interruption # %d\n", code);
#endif
switch(code) {
case 1:
/* High-priority machine check (HPMC) */
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
parisc_terminate("High Priority Machine Check (HPMC)",
regs, code, 0);
/* NOT REACHED */
case 2:
/* Power failure interrupt */
printk(KERN_CRIT "Power failure interrupt !\n");
return;
case 3:
/* Recovery counter trap */
regs->gr[0] &= ~PSW_R;
if (user_space(regs))
handle_gdb_break(regs, TRAP_TRACE);
/* else this must be the start of a syscall - just let it run */
return;
case 5:
/* Low-priority machine check */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
flush_cache_all();
flush_tlb_all();
cpu_lpmc(5, regs);
return;
case 6:
/* Instruction TLB miss fault/Instruction page fault */
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
break;
case 8:
/* Illegal instruction trap */
die_if_kernel("Illegal instruction", regs, code);
si.si_code = ILL_ILLOPC;
goto give_sigill;
case 9:
/* Break instruction trap */
handle_break(regs);
return;
case 10:
/* Privileged operation trap */
die_if_kernel("Privileged operation", regs, code);
si.si_code = ILL_PRVOPC;
goto give_sigill;
case 11:
/* Privileged register trap */
if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
/* This is a MFCTL cr26/cr27 to gr instruction.
* PCXS traps on this, so we need to emulate it.
*/
if (regs->iir & 0x00200000)
regs->gr[regs->iir & 0x1f] = mfctl(27);
else
regs->gr[regs->iir & 0x1f] = mfctl(26);
regs->iaoq[0] = regs->iaoq[1];
regs->iaoq[1] += 4;
regs->iasq[0] = regs->iasq[1];
return;
}
die_if_kernel("Privileged register usage", regs, code);
si.si_code = ILL_PRVREG;
give_sigill:
si.si_signo = SIGILL;
si.si_errno = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGILL, &si, current);
return;
case 12:
/* Overflow Trap, let the userland signal handler do the cleanup */
si.si_signo = SIGFPE;
si.si_code = FPE_INTOVF;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
case 13:
/* Conditional Trap
The condition succeeds in an instruction which traps
on condition */
if(user_mode(regs)){
si.si_signo = SIGFPE;
/* Set to zero, and let the userspace app figure it out from
the insn pointed to by si_addr */
si.si_code = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
}
/* The kernel doesn't want to handle condition codes */
break;
case 14:
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
__inc_irq_stat(irq_fpassist_count);
handle_fpe(regs);
return;
case 15:
/* Data TLB miss fault/Data page fault */
/* Fall through */
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
/* Fall through */
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
Still need to add slow path emulation code here!
If the insn used a non-shadow register, then the tlb
handlers could not have their side-effect (e.g. probe
writing to a target register) emulated since rfir would
erase the changes to said register. Instead we have to
setup everything, call this function we are in, and emulate
by hand. Technically we need to emulate:
fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
*/
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 18:
/* PCXS only -- later cpu's split this into types 26,27 & 28 */
/* Check for unaligned access */
if (check_unaligned(regs)) {
handle_unaligned(regs);
return;
}
/* Fall Through */
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
/* fall thru */
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
return;
case 25:
/* Taken branch trap */
regs->gr[0] &= ~PSW_T;
if (user_space(regs))
handle_gdb_break(regs, TRAP_BRANCH);
/* else this must be the start of a syscall - just let it
* run.
*/
return;
case 7:
/* Instruction access rights */
/* PCXL: Instruction memory protection trap */
/*
* This could be caused by either: 1) a process attempting
* to execute within a vma that does not have execute
* permission, or 2) an access rights violation caused by a
* flush only translation set up by ptep_get_and_clear().
* So we check the vma permissions to differentiate the two.
* If the vma indicates we have execute permission, then
* the cause is the latter one. In this case, we need to
* call do_page_fault() to fix the problem.
*/
if (user_mode(regs)) {
struct vm_area_struct *vma;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
up_read(&current->mm->mmap_sem);
break; /* call do_page_fault() */
}
up_read(&current->mm->mmap_sem);
}
/* Fall Through */
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
fixup_exception(regs))
return;
die_if_kernel("Protection id trap", regs, code);
si.si_code = SEGV_MAPERR;
si.si_signo = SIGSEGV;
si.si_errno = 0;
if (code == 7)
si.si_addr = (void __user *) regs->iaoq[0];
else
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGSEGV, &si, current);
return;
case 28:
/* Unaligned data reference trap */
handle_unaligned(regs);
return;
default:
if (user_mode(regs)) {
parisc_printk_ratelimited(0, regs, KERN_DEBUG
"handle_interruption() pid=%d command='%s'\n",
task_pid_nr(current), current->comm);
/* SIGBUS, for lack of a better one. */
si.si_signo = SIGBUS;
si.si_code = BUS_OBJERR;
si.si_errno = 0;
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGBUS, &si, current);
return;
}
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Unexpected interruption", regs, code, 0);
/* NOT REACHED */
}
if (user_mode(regs)) {
if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
parisc_printk_ratelimited(0, regs, KERN_DEBUG
"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
code, fault_space,
task_pid_nr(current), current->comm);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *) regs->ior;
force_sig_info(SIGSEGV, &si, current);
return;
}
}
else {
/*
* The kernel should never fault on its own address space,
* unless pagefault_disable() was called before.
*/
if (fault_space == 0 && !in_atomic())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}
}
do_page_fault(regs, code, fault_address);
}
int __init check_ivt(void *iva)
{
extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
int i;
u32 check = 0;
u32 *ivap;
u32 *hpmcp;
u32 length;
if (strcmp((char *)iva, "cows can fly"))
return -1;
ivap = (u32 *)iva;
for (i = 0; i < 8; i++)
*ivap++ = 0;
/* Compute Checksum for HPMC handler */
length = os_hpmc_size;
ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
for (i=0; i<length/4; i++)
check += *hpmcp++;
for (i=0; i<8; i++)
check += ivap[i];
ivap[5] = -check;
return 0;
}
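/*
 * Explanatory note (not part of the original file): after the
 * "ivap[5] = -check" above, the os_hpmc handler words plus the eight
 * vector words sum to zero modulo 2^32.  That zero checksum is what
 * allows firmware to validate the HPMC entry of the interruption
 * vector table before branching to it.
 */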
#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;
void __init trap_init(void)
{
void *iva;
if (boot_cpu_data.cpu_type >= pcxu)
iva = (void *) &fault_vector_20;
else
#ifdef CONFIG_64BIT
panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
iva = (void *) &fault_vector_11;
#endif
if (check_ivt(iva))
panic("IVT invalid");
}

View file

@ -0,0 +1,757 @@
/*
* Unaligned memory access handler
*
* Copyright (C) 2001 Randolph Chung <tausq@debian.org>
* Significantly tweaked by LaMont Jones <lamont@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
/* #define DEBUG_UNALIGNED 1 */
#ifdef DEBUG_UNALIGNED
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FIXUP_BRANCH(lbl) \
"\tldil L%%" #lbl ", %%r1\n" \
"\tldo R%%" #lbl "(%%r1), %%r1\n" \
"\tbv,n %%r0(%%r1)\n"
/* If you use FIXUP_BRANCH, then you must list this clobber */
#define FIXUP_BRANCH_CLOBBER "r1"
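/*
 * Expansion sketch (illustrative): FIXUP_BRANCH(3b) emits
 *
 *	ldil	L%3b, %r1	// load the left/high part of the address
 *	ldo	R%3b(%r1), %r1	// add the right/low part
 *	bv,n	%r0(%r1)	// branch through r1, nullifying the slot
 *
 * which is why "r1" must appear in the clobber list whenever the
 * macro is used (see FIXUP_BRANCH_CLOBBER above).
 */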
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
#define OPCODE2(a,b) ((a)<<26|(b)<<1)
#define OPCODE3(a,b) ((a)<<26|(b)<<2)
#define OPCODE4(a) ((a)<<26)
#define OPCODE1_MASK OPCODE1(0x3f,1,0xf)
#define OPCODE2_MASK OPCODE2(0x3f,1)
#define OPCODE3_MASK OPCODE3(0x3f,1)
#define OPCODE4_MASK OPCODE4(0x3f)
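/*
 * Worked decode (illustrative): OPCODE_LDH_I below is
 * OPCODE1(0x03,0,0x1) = 0x03<<26 | 0<<12 | 0x1<<6 = 0x0c000040, and
 * OPCODE1_MASK is the bit pattern shown above (0xfc0013c0), so
 * (insn & OPCODE1_MASK) == OPCODE_LDH_I matches every indexed ldh no
 * matter which registers are encoded in the remaining bits.
 */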
/* skip LDB - never unaligned (index) */
#define OPCODE_LDH_I OPCODE1(0x03,0,0x1)
#define OPCODE_LDW_I OPCODE1(0x03,0,0x2)
#define OPCODE_LDD_I OPCODE1(0x03,0,0x3)
#define OPCODE_LDDA_I OPCODE1(0x03,0,0x4)
#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5)
#define OPCODE_LDWA_I OPCODE1(0x03,0,0x6)
#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7)
/* skip LDB - never unaligned (short) */
#define OPCODE_LDH_S OPCODE1(0x03,1,0x1)
#define OPCODE_LDW_S OPCODE1(0x03,1,0x2)
#define OPCODE_LDD_S OPCODE1(0x03,1,0x3)
#define OPCODE_LDDA_S OPCODE1(0x03,1,0x4)
#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5)
#define OPCODE_LDWA_S OPCODE1(0x03,1,0x6)
#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7)
/* skip STB - never unaligned */
#define OPCODE_STH OPCODE1(0x03,1,0x9)
#define OPCODE_STW OPCODE1(0x03,1,0xa)
#define OPCODE_STD OPCODE1(0x03,1,0xb)
/* skip STBY - never unaligned */
/* skip STDBY - never unaligned */
#define OPCODE_STWA OPCODE1(0x03,1,0xe)
#define OPCODE_STDA OPCODE1(0x03,1,0xf)
#define OPCODE_FLDWX OPCODE1(0x09,0,0x0)
#define OPCODE_FLDWXR OPCODE1(0x09,0,0x1)
#define OPCODE_FSTWX OPCODE1(0x09,0,0x8)
#define OPCODE_FSTWXR OPCODE1(0x09,0,0x9)
#define OPCODE_FLDWS OPCODE1(0x09,1,0x0)
#define OPCODE_FLDWSR OPCODE1(0x09,1,0x1)
#define OPCODE_FSTWS OPCODE1(0x09,1,0x8)
#define OPCODE_FSTWSR OPCODE1(0x09,1,0x9)
#define OPCODE_FLDDX OPCODE1(0x0b,0,0x0)
#define OPCODE_FSTDX OPCODE1(0x0b,0,0x8)
#define OPCODE_FLDDS OPCODE1(0x0b,1,0x0)
#define OPCODE_FSTDS OPCODE1(0x0b,1,0x8)
#define OPCODE_LDD_L OPCODE2(0x14,0)
#define OPCODE_FLDD_L OPCODE2(0x14,1)
#define OPCODE_STD_L OPCODE2(0x1c,0)
#define OPCODE_FSTD_L OPCODE2(0x1c,1)
#define OPCODE_LDW_M OPCODE3(0x17,1)
#define OPCODE_FLDW_L OPCODE3(0x17,0)
#define OPCODE_FSTW_L OPCODE3(0x1f,0)
#define OPCODE_STW_M OPCODE3(0x1f,1)
#define OPCODE_LDH_L OPCODE4(0x11)
#define OPCODE_LDW_L OPCODE4(0x12)
#define OPCODE_LDWM OPCODE4(0x13)
#define OPCODE_STH_L OPCODE4(0x19)
#define OPCODE_STW_L OPCODE4(0x1A)
#define OPCODE_STWM OPCODE4(0x1B)
#define MAJOR_OP(i) (((i)>>26)&0x3f)
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
#define IM14(i) IM((i),14)
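/*
 * Worked example (illustrative): PA-RISC immediates carry their sign
 * in the lowest bit, and IM() moves that bit back to the top of the
 * field: IM(0x0080,14) = 0x80>>1 = 64, while IM(0x3fff,14) =
 * 0x1fff | (-1L << 13) = -1.
 */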
#define ERR_NOTHANDLED -1
#define ERR_PAGEFAULT -2
int unaligned_enabled __read_mostly = 1;
void die_if_kernel (char *str, struct pt_regs *regs, long err);
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
unsigned long saddr = regs->ior;
unsigned long val = 0;
int ret;
DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
regs->isr, regs->ior, toreg);
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
"1: ldbs 0(%%sr1,%3), %%r20\n"
"2: ldbs 1(%%sr1,%3), %0\n"
" depw %%r20, 23, 24, %0\n"
" copy %%r0, %1\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r20", FIXUP_BRANCH_CLOBBER );
DPRINTF("val = 0x" RFMT "\n", val);
if (toreg)
regs->gr[toreg] = val;
return ret;
}
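/*
 * Rough C equivalent of the asm above (a sketch only; it ignores the
 * sr1 space register and the fault fixup, which plain C cannot
 * express):
 *
 *	unsigned char b0 = *(unsigned char *)saddr;
 *	unsigned char b1 = *(unsigned char *)(saddr + 1);
 *	val = ((unsigned long)b0 << 8) | b1;	// what the depw does
 */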
static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
unsigned long val = 0;
int ret;
DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
regs->isr, regs->ior, toreg);
__asm__ __volatile__ (
" zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */
" mtsp %4, %%sr1\n"
" depw %%r0,31,2,%3\n"
"1: ldw 0(%%sr1,%3),%0\n"
"2: ldw 4(%%sr1,%3),%%r20\n"
" subi 32,%%r19,%%r19\n"
" mtctl %%r19,11\n"
" vshd %0,%%r20,%0\n"
" copy %%r0, %1\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20", FIXUP_BRANCH_CLOBBER );
DPRINTF("val = 0x" RFMT "\n", val);
if (flop)
((__u32*)(regs->fr))[toreg] = val;
else if (toreg)
regs->gr[toreg] = val;
return ret;
}
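/*
 * Rough C equivalent of the shift-and-merge above (sketch; ignores
 * sr1 and the fault fixup):
 *
 *	unsigned int *p = (unsigned int *)(saddr & ~3UL);
 *	int sh = (saddr & 3) * 8;
 *	val = sh ? (p[0] << sh) | (p[1] >> (32 - sh)) : p[0];
 *
 * The vshd instruction performs this double-word funnel shift in one
 * step, and copes with sh == 0 where the C expression needs a guard
 * against an undefined 32-bit shift.
 */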
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
__u64 val = 0;
int ret;
DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
regs->isr, regs->ior, toreg);
#ifdef CONFIG_PA20
#ifndef CONFIG_64BIT
if (!flop)
return -1;
#endif
__asm__ __volatile__ (
" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
" mtsp %4, %%sr1\n"
" depd %%r0,63,3,%3\n"
"1: ldd 0(%%sr1,%3),%0\n"
"2: ldd 8(%%sr1,%3),%%r20\n"
" subi 64,%%r19,%%r19\n"
" mtsar %%r19\n"
" shrpd %0,%%r20,%%sar,%0\n"
" copy %%r0, %1\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20", FIXUP_BRANCH_CLOBBER );
#else
{
unsigned long valh=0,vall=0;
	__asm__ __volatile__ (
"	zdep	%5,28,2,%%r19\n"		/* r19=(ofs&3)*8 */
" mtsp %6, %%sr1\n"
" dep %%r0,31,2,%5\n"
"1: ldw 0(%%sr1,%5),%0\n"
"2: ldw 4(%%sr1,%5),%1\n"
"3: ldw 8(%%sr1,%5),%%r20\n"
" subi 32,%%r19,%%r19\n"
" mtsar %%r19\n"
" vshd %0,%1,%0\n"
" vshd %1,%%r20,%1\n"
" copy %%r0, %2\n"
"4: \n"
" .section .fixup,\"ax\"\n"
"5: ldi -2, %2\n"
FIXUP_BRANCH(4b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,5b)
ASM_EXCEPTIONTABLE_ENTRY(2b,5b)
ASM_EXCEPTIONTABLE_ENTRY(3b,5b)
: "=r" (valh), "=r" (vall), "=r" (ret)
: "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
: "r19", "r20", FIXUP_BRANCH_CLOBBER );
val=((__u64)valh<<32)|(__u64)vall;
}
#endif
DPRINTF("val = 0x%llx\n", val);
if (flop)
regs->fr[toreg] = val;
else if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_sth(struct pt_regs *regs, int frreg)
{
unsigned long val = regs->gr[frreg];
int ret;
if (!frreg)
val = 0;
DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
val, regs->isr, regs->ior);
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" extrw,u %1, 23, 8, %%r19\n"
"1: stb %1, 1(%%sr1, %2)\n"
"2: stb %%r19, 0(%%sr1, %2)\n"
" copy %%r0, %0\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %0\n"
FIXUP_BRANCH(3b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", FIXUP_BRANCH_CLOBBER );
return ret;
}
static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
{
unsigned long val;
int ret;
if (flop)
val = ((__u32*)(regs->fr))[frreg];
else if (frreg)
val = regs->gr[frreg];
else
val = 0;
DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
val, regs->isr, regs->ior);
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" zdep %2, 28, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" mtsar %%r19\n"
" depwi,z -2, %%sar, 32, %%r19\n"
"1: ldw 0(%%sr1,%2),%%r20\n"
"2: ldw 4(%%sr1,%2),%%r21\n"
" vshd %%r0, %1, %%r22\n"
" vshd %1, %%r0, %%r1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %%r22, %%r20, %%r20\n"
" or %%r1, %%r21, %%r21\n"
" stw %%r20,0(%%sr1,%2)\n"
" stw %%r21,4(%%sr1,%2)\n"
" copy %%r0, %0\n"
"3: \n"
" .section .fixup,\"ax\"\n"
"4: ldi -2, %0\n"
FIXUP_BRANCH(3b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
	/* propagate the fault code set by the fixup path */
	return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
__u64 val;
int ret;
if (flop)
val = regs->fr[frreg];
else if (frreg)
val = regs->gr[frreg];
else
val = 0;
DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
val, regs->isr, regs->ior);
#ifdef CONFIG_PA20
#ifndef CONFIG_64BIT
if (!flop)
return -1;
#endif
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" depd,z %2, 60, 3, %%r19\n"
" depd %%r0, 63, 3, %2\n"
" mtsar %%r19\n"
" depdi,z -2, %%sar, 64, %%r19\n"
"1: ldd 0(%%sr1,%2),%%r20\n"
"2: ldd 8(%%sr1,%2),%%r21\n"
" shrpd %%r0, %1, %%sar, %%r22\n"
" shrpd %1, %%r0, %%sar, %%r1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %%r22, %%r20, %%r20\n"
" or %%r1, %%r21, %%r21\n"
"3: std %%r20,0(%%sr1,%2)\n"
"4: std %%r21,8(%%sr1,%2)\n"
" copy %%r0, %0\n"
"5: \n"
" .section .fixup,\"ax\"\n"
"6: ldi -2, %0\n"
FIXUP_BRANCH(5b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,6b)
ASM_EXCEPTIONTABLE_ENTRY(2b,6b)
ASM_EXCEPTIONTABLE_ENTRY(3b,6b)
ASM_EXCEPTIONTABLE_ENTRY(4b,6b)
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
#else
{
unsigned long valh=(val>>32),vall=(val&0xffffffffl);
	__asm__ __volatile__ (
"	mtsp	%4, %%sr1\n"
"	zdep	%3, 28, 2, %%r19\n"	/* r19=(ofs&3)*8 */
"	dep	%%r0, 31, 2, %3\n"	/* word-align the address */
"	mtsar	%%r19\n"
"	zvdepi	-2, 32, %%r19\n"
"1:	ldw	0(%%sr1,%3),%%r20\n"
"2:	ldw	8(%%sr1,%3),%%r21\n"
"	vshd	%1, %2, %%r1\n"
"	vshd	%%r0, %1, %1\n"
"	vshd	%2, %%r0, %2\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%1, %%r20, %1\n"
"	or	%2, %%r21, %2\n"
"3:	stw	%1,0(%%sr1,%3)\n"
"4: stw %%r1,4(%%sr1,%3)\n"
"5: stw %2,8(%%sr1,%3)\n"
" copy %%r0, %0\n"
"6: \n"
" .section .fixup,\"ax\"\n"
"7: ldi -2, %0\n"
FIXUP_BRANCH(6b)
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,7b)
ASM_EXCEPTIONTABLE_ENTRY(2b,7b)
ASM_EXCEPTIONTABLE_ENTRY(3b,7b)
ASM_EXCEPTIONTABLE_ENTRY(4b,7b)
ASM_EXCEPTIONTABLE_ENTRY(5b,7b)
: "=r" (ret)
: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER );
}
#endif
return ret;
}
void handle_unaligned(struct pt_regs *regs)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
int modify = 0;
int ret = ERR_NOTHANDLED;
struct siginfo si;
register int flop=0; /* true if this is a flop */
__inc_irq_stat(irq_unaligned_count);
/* log a message with pacing */
if (user_mode(regs)) {
if (current->thread.flags & PARISC_UAC_SIGBUS) {
goto force_sigbus;
}
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
__ratelimit(&ratelimit)) {
char buf[256];
sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]);
printk(KERN_WARNING "%s", buf);
#ifdef DEBUG_UNALIGNED
show_regs(regs);
#endif
}
if (!unaligned_enabled)
goto force_sigbus;
}
/* handle modification - OK, it's ugly, see the instruction manual */
switch (MAJOR_OP(regs->iir))
{
case 0x03:
case 0x09:
case 0x0b:
if (regs->iir&0x20)
{
modify = 1;
if (regs->iir&0x1000) /* short loads */
if (regs->iir&0x200)
newbase += IM5_3(regs->iir);
else
newbase += IM5_2(regs->iir);
else if (regs->iir&0x2000) /* scaled indexed */
{
int shift=0;
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
shift= 1; break;
case OPCODE_LDW_I:
shift= 2; break;
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
shift= 3; break;
}
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
} else /* simple indexed */
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
}
break;
case 0x13:
case 0x1b:
modify = 1;
newbase += IM14(regs->iir);
break;
case 0x14:
case 0x1c:
if (regs->iir&8)
{
modify = 1;
newbase += IM14(regs->iir&~0xe);
}
break;
case 0x16:
case 0x1e:
modify = 1;
newbase += IM14(regs->iir&6);
break;
case 0x17:
case 0x1f:
if (regs->iir&4)
{
modify = 1;
newbase += IM14(regs->iir&~4);
}
break;
}
/* TODO: make this cleaner... */
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
case OPCODE_LDH_S:
ret = emulate_ldh(regs, R3(regs->iir));
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
ret = emulate_ldw(regs, R3(regs->iir),0);
break;
case OPCODE_STH:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW:
case OPCODE_STWA:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
#ifdef CONFIG_PA20
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
case OPCODE_LDD_S:
case OPCODE_LDDA_S:
ret = emulate_ldd(regs, R3(regs->iir),0);
break;
case OPCODE_STD:
case OPCODE_STDA:
ret = emulate_std(regs, R2(regs->iir),0);
break;
#endif
case OPCODE_FLDWX:
case OPCODE_FLDWS:
case OPCODE_FLDWXR:
case OPCODE_FLDWSR:
flop=1;
ret = emulate_ldw(regs,FR3(regs->iir),1);
break;
case OPCODE_FLDDX:
case OPCODE_FLDDS:
flop=1;
ret = emulate_ldd(regs,R3(regs->iir),1);
break;
case OPCODE_FSTWX:
case OPCODE_FSTWS:
case OPCODE_FSTWXR:
case OPCODE_FSTWSR:
flop=1;
ret = emulate_stw(regs,FR3(regs->iir),1);
break;
case OPCODE_FSTDX:
case OPCODE_FSTDS:
flop=1;
ret = emulate_std(regs,R3(regs->iir),1);
break;
case OPCODE_LDCD_I:
case OPCODE_LDCW_I:
case OPCODE_LDCD_S:
case OPCODE_LDCW_S:
		ret = ERR_NOTHANDLED;	/* "undefined", but let's kill them. */
break;
}
#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
flop=1;
ret = emulate_ldd(regs,R2(regs->iir),1);
break;
case OPCODE_FSTD_L:
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
}
#endif
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
ret = emulate_ldw(regs, R2(regs->iir),0);
break;
case OPCODE_LDW_M:
ret = emulate_ldw(regs, R2(regs->iir),1);
break;
case OPCODE_FSTW_L:
flop=1;
ret = emulate_stw(regs, R2(regs->iir),1);
break;
case OPCODE_STW_M:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
switch (regs->iir & OPCODE4_MASK)
{
case OPCODE_LDH_L:
ret = emulate_ldh(regs, R2(regs->iir));
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
ret = emulate_ldw(regs, R2(regs->iir),0);
break;
case OPCODE_STH_L:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW_L:
case OPCODE_STWM:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
if (modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
if (ret == ERR_NOTHANDLED)
printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
DPRINTF("ret = %d\n", ret);
if (ret)
{
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
if (ret == ERR_PAGEFAULT)
{
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *)regs->ior;
force_sig_info(SIGSEGV, &si, current);
}
else
{
force_sigbus:
/* couldn't handle it ... */
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void __user *)regs->ior;
force_sig_info(SIGBUS, &si, current);
}
return;
}
/* else we handled it, let life go on. */
regs->gr[0]|=PSW_N;
}
/*
* NB: check_unaligned() is only used for PCXS processors right
* now, so we only check for PA1.1 encodings at this point.
*/
int
check_unaligned(struct pt_regs *regs)
{
unsigned long align_mask;
/* Get alignment mask */
align_mask = 0UL;
switch (regs->iir & OPCODE1_MASK) {
case OPCODE_LDH_I:
case OPCODE_LDH_S:
case OPCODE_STH:
align_mask = 1UL;
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
case OPCODE_STW:
case OPCODE_STWA:
align_mask = 3UL;
break;
default:
switch (regs->iir & OPCODE4_MASK) {
case OPCODE_LDH_L:
case OPCODE_STH_L:
align_mask = 1UL;
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
case OPCODE_STW_L:
case OPCODE_STWM:
align_mask = 3UL;
break;
}
break;
}
return (int)(regs->ior & align_mask);
}

441
arch/parisc/kernel/unwind.c Normal file
View file

@ -0,0 +1,441 @@
/*
* Kernel unwinding support
*
* (c) 2002-2004 Randolph Chung <tausq@debian.org>
*
* Derived partially from the IA64 implementation. The PA-RISC
* Runtime Architecture Document is also a useful reference to
* understand what is happening here
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
static spinlock_t unwind_lock;
/*
* the kernel unwind block is not dynamically allocated so that
* we can call unwind_init as early in the bootup process as
* possible (before the slab allocator is initialized)
*/
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
const struct unwind_table_entry *e = NULL;
unsigned long lo, hi, mid;
lo = 0;
hi = table->length - 1;
while (lo <= hi) {
mid = (hi - lo) / 2 + lo;
e = &table->table[mid];
if (addr < e->region_start)
hi = mid - 1;
else if (addr > e->region_end)
lo = mid + 1;
else
return e;
}
return NULL;
}
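/*
 * Note: the binary search above relies on the entries being sorted by
 * region_start.  The kernel table is checked for ordering in
 * unwind_table_init(), and module tables are sorted by
 * unwind_table_add() before they are linked in.
 */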
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
struct unwind_table *table;
const struct unwind_table_entry *e = NULL;
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
else
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
if (e) {
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
}
}
return e;
}
static void
unwind_table_init(struct unwind_table *table, const char *name,
unsigned long base_addr, unsigned long gp,
void *table_start, void *table_end)
{
struct unwind_table_entry *start = table_start;
struct unwind_table_entry *end =
(struct unwind_table_entry *)table_end - 1;
table->name = name;
table->base_addr = base_addr;
table->gp = gp;
table->start = base_addr + start->region_start;
table->end = base_addr + end->region_end;
table->table = (struct unwind_table_entry *)table_start;
table->length = end - start + 1;
INIT_LIST_HEAD(&table->list);
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
}
start->region_start += base_addr;
start->region_end += base_addr;
}
}
static int cmp_unwind_table_entry(const void *a, const void *b)
{
return ((const struct unwind_table_entry *)a)->region_start
- ((const struct unwind_table_entry *)b)->region_start;
}
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
sort(start, finish - start, sizeof(struct unwind_table_entry),
cmp_unwind_table_entry, NULL);
}
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
void *start, void *end)
{
struct unwind_table *table;
unsigned long flags;
struct unwind_table_entry *s = (struct unwind_table_entry *)start;
struct unwind_table_entry *e = (struct unwind_table_entry *)end;
unwind_table_sort(s, e);
table = kmalloc(sizeof(struct unwind_table), GFP_USER);
if (table == NULL)
return NULL;
unwind_table_init(table, name, base_addr, gp, start, end);
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&table->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
return table;
}
void unwind_table_remove(struct unwind_table *table)
{
unsigned long flags;
spin_lock_irqsave(&unwind_lock, flags);
list_del(&table->list);
spin_unlock_irqrestore(&unwind_lock, flags);
kfree(table);
}
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
long start, stop;
register unsigned long gp __asm__ ("r27");
start = (long)&__start___unwind[0];
stop = (long)&__stop___unwind[0];
spin_lock_init(&unwind_lock);
printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
start, stop,
(stop - start) / sizeof(struct unwind_table_entry));
unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
gp,
&__start___unwind[0], &__stop___unwind[0]);
#if 0
{
int i;
for (i = 0; i < 10; i++)
{
printk("region 0x%x-0x%x\n",
__start___unwind[i].region_start,
__start___unwind[i].region_end);
}
}
#endif
return 0;
}
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
extern void handle_interruption(int, struct pt_regs *);
static unsigned long *hi = (unsigned long *)&handle_interruption;
if (pc == get_func_addr(hi)) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
return 1;
}
return 0;
}
static void unwind_frame_regs(struct unwind_frame_info *info)
{
const struct unwind_table_entry *e;
unsigned long npc;
unsigned int insn;
long frame_size = 0;
int looking_for_rp, rpoffset = 0;
e = find_unwind_entry(info->ip);
if (e == NULL) {
unsigned long sp;
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
#ifdef CONFIG_KALLSYMS
/* Handle some frequent special cases.... */
{
char symname[KSYM_NAME_LEN];
char *modname;
kallsyms_lookup(info->ip, NULL, NULL, &modname,
symname);
dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
if (strcmp(symname, "_switch_to_ret") == 0) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
dbg("_switch_to_ret @ %lx - setting "
"prev_sp=%lx prev_ip=%lx\n",
info->ip, info->prev_sp,
info->prev_ip);
return;
} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
strcmp(symname, "syscall_exit") == 0) {
info->prev_ip = info->prev_sp = 0;
return;
}
}
#endif
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
This is not quite correct, and will fail for loadable
modules. */
sp = info->sp & ~63;
do {
unsigned long tmp;
info->prev_sp = sp - 64;
info->prev_ip = 0;
if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
break;
info->prev_ip = tmp;
sp = info->prev_sp;
} while (!kernel_text_address(info->prev_ip));
info->rp = 0;
dbg("analyzing func @ %lx with no unwind info, setting "
"prev_sp=%lx prev_ip=%lx\n", info->ip,
info->prev_sp, info->prev_ip);
} else {
dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
"Save_RP = %d, Millicode = %d size = %u\n",
e->region_start, e->region_end, e->Save_SP, e->Save_RP,
e->Millicode, e->Total_frame_size);
looking_for_rp = e->Save_RP;
for (npc = e->region_start;
(frame_size < (e->Total_frame_size << 3) ||
looking_for_rp) &&
npc < info->ip;
npc += 4) {
insn = *(unsigned int *)npc;
if ((insn & 0xffffc000) == 0x37de0000 ||
(insn & 0xffe00000) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
((insn & 0x3fff) >> 1);
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if ((insn & 0xffe00008) == 0x73c00008) {
/* std,ma X,D(sp) */
frame_size += (insn & 0x1 ? -1 << 13 : 0) |
(((insn >> 4) & 0x3ff) << 3);
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if (insn == 0x6bc23fd9) {
/* stw rp,-20(sp) */
rpoffset = 20;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=stw rp,"
"-20(sp) @ %lx\n", info->ip, npc);
} else if (insn == 0x0fc212c1) {
/* std rp,-16(sr0,sp) */
rpoffset = 16;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=std rp,"
"-16(sp) @ %lx\n", info->ip, npc);
}
}
if (!unwind_special(info, e->region_start, frame_size)) {
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
info->rp = info->r31;
else if (rpoffset)
info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
info->prev_ip = info->rp;
info->rp = 0;
}
dbg("analyzing func @ %lx, setting prev_sp=%lx "
"prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
info->prev_ip, npc);
}
}
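/*
 * Worked decode (illustrative) for the prologue scan above: the word
 * 0x37de0080 is "ldo 64(sp),sp": opcode 0x0d with base and target
 * both r30 yields the 0x37de0000 pattern, and its im14 field 0x0080
 * (sign in the low bit) decodes to 64, so walking over it adds 64
 * bytes to frame_size.
 */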
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
struct pt_regs *regs)
{
memset(info, 0, sizeof(struct unwind_frame_info));
info->t = t;
info->sp = regs->gr[30];
info->ip = regs->iaoq[0];
info->rp = regs->gr[2];
info->r31 = regs->gr[31];
dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
t ? (int)t->pid : -1, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
if (!r2)
return;
*r2 = *r;
r2->gr[30] = r->ksp;
r2->iaoq[0] = r->kpc;
unwind_frame_init(info, t, r2);
kfree(r2);
}
void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
unwind_frame_init(info, current, regs);
}
int unwind_once(struct unwind_frame_info *next_frame)
{
unwind_frame_regs(next_frame);
if (next_frame->prev_sp == 0 ||
next_frame->prev_ip == 0)
return -1;
next_frame->sp = next_frame->prev_sp;
next_frame->ip = next_frame->prev_ip;
next_frame->prev_sp = 0;
next_frame->prev_ip = 0;
dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
next_frame->t ? (int)next_frame->t->pid : -1,
next_frame->sp, next_frame->ip);
return 0;
}
int unwind_to_user(struct unwind_frame_info *info)
{
int ret;
do {
ret = unwind_once(info);
} while (!ret && !(info->ip & 3));
return ret;
}
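/*
 * The low two bits of a PA-RISC address encode the privilege level;
 * kernel code runs at PL0, so the loop in unwind_to_user() above
 * stops at the first frame whose ip carries a non-zero (user)
 * privilege level.
 */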
unsigned long return_address(unsigned int level)
{
struct unwind_frame_info info;
struct pt_regs r;
unsigned long sp;
/* initialize unwind info */
asm volatile ("copy %%r30, %0" : "=r"(sp));
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long) current_text_addr();
r.gr[2] = (unsigned long) __builtin_return_address(0);
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
/* unwind stack */
++level;
do {
if (unwind_once(&info) < 0 || info.ip == 0)
return 0;
if (!kernel_text_address(info.ip))
return 0;
} while (info.ip && level--);
return info.ip;
}

View file

@ -0,0 +1,160 @@
/* Kernel link layout for various "sections"
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
* Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
*/
/*
* Put page table entries (swapper_pg_dir) as the first thing in .bss. This
* will ensure that it has .bss alignment (PAGE_SIZE).
*/
#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
*(.data..vm0.pgd) \
*(.data..vm0.pte)
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
ENTRY(parisc_kernel_start)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
__init_begin = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(8)
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(PAGE_SIZE)
/* we have to discard exit text and such at runtime, not link time */
.exit.text :
{
EXIT_TEXT
}
.exit.data :
{
EXIT_DATA
}
PERCPU_SECTION(8)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
_text = .; /* Text and read-only data */
_stext = .;
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
*(.text.do_softirq)
*(.text.sys_exit)
*(.text.do_sigaltstack)
*(.text.do_fork)
*(.text.*)
*(.fixup)
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
}
. = ALIGN(PAGE_SIZE);
_etext = .;
/* End of text section */
/* Start of data section */
_sdata = .;
RO_DATA_SECTION(8)
#ifdef CONFIG_64BIT
. = ALIGN(16);
/* Linkage tables */
.opd : {
*(.opd)
} PROVIDE (__gp = .);
.plt : {
*(.plt)
}
.dlt : {
*(.dlt)
}
#endif
/* unwind info */
.PARISC.unwind : {
__start___unwind = .;
*(.PARISC.unwind)
__stop___unwind = .;
}
/* writeable */
/* Make sure this is page aligned so
* that we can properly leave these
* as writable
*/
. = ALIGN(PAGE_SIZE);
data_start = .;
EXCEPTION_TABLE(8)
NOTES
/* Data */
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
	/* PA-RISC locks require 16-byte alignment */
. = ALIGN(16);
.data..lock_aligned : {
*(.data..lock_aligned)
}
/* End of data section */
_edata = .;
/* BSS */
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
_end = . ;
STABS_DEBUG
.note 0 : { *(.note) }
/* Sections to be discarded */
DISCARDS
/DISCARD/ : {
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
* for static binaries
*/
*(.interp)
*(.dynsym)
*(.dynstr)
*(.dynamic)
*(.hash)
*(.gnu.hash)
#endif
}
}