Fixed MTP to work with TWRP

commit f6dfaef42e
Author: awab228
Date:   2018-06-19 23:16:04 +02:00

50820 changed files with 20846062 additions and 0 deletions

arch/alpha/kernel/Makefile (new file, 111 lines)
@@ -0,0 +1,111 @@
#
# Makefile for the linux kernel.
#
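# Note: extra-y lists targets that must be built here but are not
# linked via obj-y; head.o is linked first by the architecture's
# top-level Makefile.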
extra-y := head.o vmlinux.lds
asflags-y := $(KBUILD_CFLAGS)
ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
alpha_ksyms.o systbls.o err_common.o io.o
obj-$(CONFIG_VGA_HOSE) += console.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o
obj-$(CONFIG_SRM_ENV) += srm_env.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_RTC_DRV_ALPHA) += rtc.o
obj-$(CONFIG_AUDIT) += audit.o
ifdef CONFIG_ALPHA_GENERIC
obj-y += core_apecs.o core_cia.o core_irongate.o core_lca.o \
core_mcpcia.o core_polaris.o core_t2.o \
core_tsunami.o
obj-y += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \
sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
sys_sable.o sys_sio.o sys_sx164.o sys_takara.o
ifndef CONFIG_ALPHA_LEGACY_START_ADDRESS
obj-y += core_marvel.o core_titan.o core_wildfire.o
obj-y += sys_marvel.o sys_titan.o sys_wildfire.o
obj-y += err_ev7.o err_titan.o err_marvel.o
endif
obj-y += irq_pyxis.o irq_i8259.o irq_srm.o
obj-y += err_ev6.o
obj-y += es1888.o smc37c669.o smc37c93x.o pc873xx.o gct.o
obj-y += srmcons.o
else
# Misc support
obj-$(CONFIG_ALPHA_SRM) += srmcons.o
ifdef CONFIG_BINFMT_AOUT
obj-y += binfmt_loader.o
endif
# Core logic support
obj-$(CONFIG_ALPHA_APECS) += core_apecs.o
obj-$(CONFIG_ALPHA_CIA) += core_cia.o
obj-$(CONFIG_ALPHA_IRONGATE) += core_irongate.o
obj-$(CONFIG_ALPHA_LCA) += core_lca.o
obj-$(CONFIG_ALPHA_MARVEL) += core_marvel.o gct.o
obj-$(CONFIG_ALPHA_MCPCIA) += core_mcpcia.o
obj-$(CONFIG_ALPHA_POLARIS) += core_polaris.o
obj-$(CONFIG_ALPHA_T2) += core_t2.o
obj-$(CONFIG_ALPHA_TSUNAMI) += core_tsunami.o
obj-$(CONFIG_ALPHA_TITAN) += core_titan.o
obj-$(CONFIG_ALPHA_WILDFIRE) += core_wildfire.o
# Board support
obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \
pc873xx.o
obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
pc873xx.o
obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \
pc873xx.o
obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
smc37c93x.o
obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
smc37c93x.o
obj-$(CONFIG_ALPHA_DP264) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_SHARK) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_TITAN) += sys_titan.o irq_i8259.o smc37c669.o
obj-$(CONFIG_ALPHA_EB64P) += sys_eb64p.o irq_i8259.o
obj-$(CONFIG_ALPHA_EB66) += sys_eb64p.o irq_i8259.o
obj-$(CONFIG_ALPHA_EIGER) += sys_eiger.o irq_i8259.o
obj-$(CONFIG_ALPHA_JENSEN) += sys_jensen.o pci-noop.o irq_i8259.o
obj-$(CONFIG_ALPHA_MARVEL) += sys_marvel.o
obj-$(CONFIG_ALPHA_MIATA) += sys_miata.o irq_pyxis.o irq_i8259.o \
es1888.o smc37c669.o
obj-$(CONFIG_ALPHA_MIKASA) += sys_mikasa.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_NAUTILUS) += sys_nautilus.o irq_i8259.o irq_srm.o
obj-$(CONFIG_ALPHA_NORITAKE) += sys_noritake.o irq_i8259.o
obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o
obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o
obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o
obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \
irq_srm.o smc37c669.o
obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o pc873xx.o
obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o irq_i8259.o
# Error support
obj-$(CONFIG_ALPHA_MARVEL) += err_ev7.o err_marvel.o
obj-$(CONFIG_ALPHA_NAUTILUS) += err_ev6.o
obj-$(CONFIG_ALPHA_TITAN) += err_ev6.o err_titan.o
endif # GENERIC

arch/alpha/kernel/alpha_ksyms.c (new file, 102 lines)
@@ -0,0 +1,102 @@
/*
* linux/arch/alpha/kernel/alpha_ksyms.c
*
* Export the alpha-specific functions that are needed for loadable
* modules.
*/
#include <linux/module.h>
#include <asm/console.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/fpu.h>
#include <asm/machvec.h>
#include <linux/syscalls.h>
/* these are C runtime functions with special calling conventions: */
extern void __divl (void);
extern void __reml (void);
extern void __divq (void);
extern void __remq (void);
extern void __divlu (void);
extern void __remlu (void);
extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
EXPORT_SYMBOL(callback_getenv);
EXPORT_SYMBOL(callback_setenv);
EXPORT_SYMBOL(callback_save_env);
/* platform dependent support */
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(___memset);
EXPORT_SYMBOL(__memsetw);
EXPORT_SYMBOL(__constant_c_memset);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(alpha_read_fp_reg);
EXPORT_SYMBOL(alpha_read_fp_reg_s);
EXPORT_SYMBOL(alpha_write_fp_reg);
EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(ip_compute_csum);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_ipv6_magic);
#ifdef CONFIG_MATHEMU_MODULE
extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
extern long (*alpha_fp_emul) (unsigned long pc);
EXPORT_SYMBOL(alpha_fp_emul_imprecise);
EXPORT_SYMBOL(alpha_fp_emul);
#endif
/*
* The following are specially called from the uaccess assembly stubs.
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
/*
* SMP-specific symbols.
*/
#ifdef CONFIG_SMP
EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif /* CONFIG_SMP */
/*
 * The following are special because they're not called
 * explicitly (the C compiler or assembler generates them in
 * response to division operations). Fortunately, their
 * interface isn't going to change any time soon, so it's OK
 * to leave them out of module symbol versioning.
 */
# undef memcpy
# undef memset
EXPORT_SYMBOL(__divl);
EXPORT_SYMBOL(__divlu);
EXPORT_SYMBOL(__divq);
EXPORT_SYMBOL(__divqu);
EXPORT_SYMBOL(__reml);
EXPORT_SYMBOL(__remlu);
EXPORT_SYMBOL(__remq);
EXPORT_SYMBOL(__remqu);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);

arch/alpha/kernel/asm-offsets.c (new file, 43 lines)
@@ -0,0 +1,43 @@
/*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/kbuild.h>
#include <asm/io.h>
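/*
 * Each DEFINE() below emits an asm marker that kbuild post-processes
 * into a #define in the generated asm-offsets header, giving assembly
 * code access to these structure offsets and constants.
 */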
void foo(void)
{
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
BLANK();
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_CRED, offsetof(struct task_struct, cred));
DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader));
DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
BLANK();
DEFINE(CRED_UID, offsetof(struct cred, uid));
DEFINE(CRED_EUID, offsetof(struct cred, euid));
DEFINE(CRED_GID, offsetof(struct cred, gid));
DEFINE(CRED_EGID, offsetof(struct cred, egid));
BLANK();
DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
DEFINE(PT_PTRACED, PT_PTRACED);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(SIGCHLD, SIGCHLD);
BLANK();
DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}

arch/alpha/kernel/audit.c (new file, 60 lines)
@@ -0,0 +1,60 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
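/*
 * Nonzero return values select the generic audit syscall classes:
 * 2 = open, 3 = openat, 5 = execve; 0 means the syscall needs no
 * special classification.
 */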
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_execve:
return 5;
default:
return 0;
}
}
static int __init audit_classes_init(void)
{
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);

arch/alpha/kernel/binfmt_loader.c (new file, 52 lines)
@@ -0,0 +1,52 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm_types.h>
#include <linux/binfmts.h>
#include <linux/a.out.h>
static int load_binary(struct linux_binprm *bprm)
{
struct exec *eh = (struct exec *)bprm->buf;
unsigned long loader;
struct file *file;
int retval;
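/*
 * Accept only Alpha ECOFF images (f_magic 0x183) with the expected
 * flag bits; anything else is left to the other binfmt handlers
 * via -ENOEXEC.
 */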
if (eh->fh.f_magic != 0x183 || (eh->fh.f_flags & 0x3000) != 0x3000)
return -ENOEXEC;
if (bprm->loader)
return -ENOEXEC;
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
loader = bprm->vma->vm_end - sizeof(void *);
file = open_exec("/sbin/loader");
retval = PTR_ERR(file);
if (IS_ERR(file))
return retval;
/* Remember if the application is TASO. */
bprm->taso = eh->ah.entry < 0x100000000UL;
bprm->file = file;
bprm->loader = loader;
retval = prepare_binprm(bprm);
if (retval < 0)
return retval;
return search_binary_handler(bprm);
}
static struct linux_binfmt loader_format = {
.load_binary = load_binary,
};
static int __init init_loader_binfmt(void)
{
insert_binfmt(&loader_format);
return 0;
}
arch_initcall(init_loader_binfmt);

arch/alpha/kernel/console.c (new file, 93 lines)
@@ -0,0 +1,93 @@
/*
* linux/arch/alpha/kernel/console.c
*
* Architecture-specific support for the VGA device on
* a non-zero I/O hose
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <asm/vga.h>
#include <asm/machvec.h>
#include "pci_impl.h"
#ifdef CONFIG_VGA_HOSE
struct pci_controller *pci_vga_hose;
static struct resource alpha_vga = {
.name = "alpha-vga+",
.start = 0x3C0,
.end = 0x3DF
};
static struct pci_controller * __init
default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2)
{
if (h2->index < h1->index)
return h2;
return h1;
}
void __init
locate_and_init_vga(void *(*sel_func)(void *, void *))
{
struct pci_controller *hose = NULL;
struct pci_dev *dev = NULL;
/* Default the select function */
if (!sel_func) sel_func = (void *)default_vga_hose_select;
/* Find the console VGA device */
for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) {
if (!hose)
hose = dev->sysdata;
else
hose = sel_func(hose, dev->sysdata);
}
/* Did we already initialize the correct one? Is there one? */
if (!hose || (conswitchp == &vga_con && pci_vga_hose == hose))
return;
/* Create a new VGA ioport resource WRT the hose it is on. */
alpha_vga.start += hose->io_space->start;
alpha_vga.end += hose->io_space->start;
request_resource(hose->io_space, &alpha_vga);
/* Set the VGA hose and init the new console. */
pci_vga_hose = hose;
console_lock();
do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
console_unlock();
}
void __init
find_console_vga_hose(void)
{
u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
if (pu64[7] == 3) { /* TERM_TYPE == graphics */
struct pci_controller *hose;
int h = (pu64[30] >> 24) & 0xff; /* console hose # */
/*
* Our hose numbering DOES match the console's, so find
* the right one...
*/
for (hose = hose_head; hose; hose = hose->next) {
if (hose->index == h) break;
}
if (hose) {
printk("Console graphics on hose %d\n", h);
pci_vga_hose = hose;
}
}
}
#endif

arch/alpha/kernel/core_apecs.c (new file, 418 lines)
@@ -0,0 +1,418 @@
/*
* linux/arch/alpha/kernel/core_apecs.c
*
* Rewritten for Apecs from the lca.c from:
*
* Written by David Mosberger (davidm@cs.arizona.edu) with some code
* taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
* bios code.
*
* Code common to all APECS core logic chips.
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_apecs.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include "proto.h"
#include "pci_impl.h"
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the i/o controller does not properly
* handle the system transaction. Another involves timing. Ho hum.
*/
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args) printk args
#else
# define DBGC(args)
#endif
#define vuip volatile unsigned int *
/*
* Given a bus, device, and function number, compute resulting
* configuration space address and setup the APECS_HAXR2 register
* accordingly. It is therefore not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
*
* Type 0:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:11 Device select bit.
* 10:8 Function number
* 7:2 Register number
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
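/*
 * Worked example (type 1): bus 1, device 2, function 0, register 0x10
 * gives device_fn 0x10, so addr = (1 << 16) | (0x10 << 8) | 0x10
 * = 0x11010.
 */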
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, unsigned char *type1)
{
unsigned long addr;
u8 bus = pbus->number;
DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
" pci_addr=0x%p, type1=0x%p)\n",
bus, device_fn, where, pci_addr, type1));
if (bus == 0) {
int device = device_fn >> 3;
/* type 0 configuration cycle: */
if (device > 20) {
DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
device));
return -1;
}
*type1 = 0;
addr = (device_fn << 8) | (where);
} else {
/* type 1 configuration cycle: */
*type1 = 1;
addr = (bus << 16) | (device_fn << 8) | (where);
}
*pci_addr = addr;
DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
unsigned long flags;
unsigned int stat0, value;
unsigned int haxr2 = 0;
local_irq_save(flags); /* avoid getting hit by machine check */
DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
/* Reset status register to avoid losing errors. */
stat0 = *(vuip)APECS_IOC_DCSR;
*(vuip)APECS_IOC_DCSR = stat0;
mb();
DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));
/* If Type1 access, must set HAE #2. */
if (type1) {
haxr2 = *(vuip)APECS_IOC_HAXR2;
mb();
*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
DBGC(("conf_read: TYPE1 access\n"));
}
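/*
 * Arm the machine-check logic: probing a non-existent device
 * master-aborts, and the expected mcheck is swallowed below and
 * turned into an all-ones read.
 */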
draina();
mcheck_expected(0) = 1;
mcheck_taken(0) = 0;
mb();
/* Access configuration space. */
/* Some SRMs step on these registers during a machine check. */
asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
: "$9", "$10", "$11", "$12", "$13", "$14", "memory");
if (mcheck_taken(0)) {
mcheck_taken(0) = 0;
value = 0xffffffffU;
mb();
}
mcheck_expected(0) = 0;
mb();
#if 1
/*
* david.rusling@reo.mts.dec.com. This code is needed for the
* EB64+ as it does not generate a machine check (why I don't
* know). When we build kernels for one particular platform
* then we can make this conditional on the type.
*/
draina();
/* Now look for any errors. */
stat0 = *(vuip)APECS_IOC_DCSR;
DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));
/* Is any error bit set? */
if (stat0 & 0xffe0U) {
/* If not NDEV, print status. */
if (!(stat0 & 0x0800)) {
printk("apecs.c:conf_read: got stat0=%x\n", stat0);
}
/* Reset error status. */
*(vuip)APECS_IOC_DCSR = stat0;
mb();
wrmces(0x7); /* reset machine check */
value = 0xffffffff;
}
#endif
/* If Type1 access, must reset HAE #2 so normal IO space ops work. */
if (type1) {
*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
mb();
}
local_irq_restore(flags);
return value;
}
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
unsigned long flags;
unsigned int stat0;
unsigned int haxr2 = 0;
local_irq_save(flags); /* avoid getting hit by machine check */
/* Reset status register to avoid losing errors. */
stat0 = *(vuip)APECS_IOC_DCSR;
*(vuip)APECS_IOC_DCSR = stat0;
mb();
/* If Type1 access, must set HAE #2. */
if (type1) {
haxr2 = *(vuip)APECS_IOC_HAXR2;
mb();
*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
}
draina();
mcheck_expected(0) = 1;
mb();
/* Access configuration space. */
*(vuip)addr = value;
mb();
mb(); /* magic */
mcheck_expected(0) = 0;
mb();
#if 1
/*
* david.rusling@reo.mts.dec.com. This code is needed for the
* EB64+ as it does not generate a machine check (why I don't
* know). When we build kernels for one particular platform
* then we can make this conditional on the type.
*/
draina();
/* Now look for any errors. */
stat0 = *(vuip)APECS_IOC_DCSR;
/* Is any error bit set? */
if (stat0 & 0xffe0U) {
/* If not NDEV, print status. */
if (!(stat0 & 0x0800)) {
printk("apecs.c:conf_write: got stat0=%x\n", stat0);
}
/* Reset error status. */
*(vuip)APECS_IOC_DCSR = stat0;
mb();
wrmces(0x7); /* reset machine check */
}
#endif
/* If Type1 access, must reset HAE #2 so normal IO space ops work. */
if (type1) {
*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
mb();
}
local_irq_restore(flags);
}
static int
apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr, pci_addr;
unsigned char type1;
long mask;
int shift;
if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
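/*
 * Sparse-space encoding: (size - 1) * 8 places the transfer length
 * in the low address bits (0/8/24 for 1/2/4 bytes), the config
 * address is shifted up 5 bits, and the byte lane picked by
 * (where & 3) is shifted out of the returned longword.
 */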
mask = (size - 1) * 8;
shift = (where & 3) * 8;
addr = (pci_addr << 5) + mask + APECS_CONF;
*value = conf_read(addr, type1) >> (shift);
return PCIBIOS_SUCCESSFUL;
}
static int
apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr, pci_addr;
unsigned char type1;
long mask;
if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
mask = (size - 1) * 8;
addr = (pci_addr << 5) + mask + APECS_CONF;
conf_write(addr, value << ((where & 3) * 8), type1);
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops apecs_pci_ops =
{
.read = apecs_read_config,
.write = apecs_write_config,
};
void
apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
wmb();
*(vip)APECS_IOC_TBIA = 0;
mb();
}
void __init
apecs_init_arch(void)
{
struct pci_controller *hose;
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
hose->index = 0;
hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
hose->sparse_io_base = APECS_IO - IDENT_ADDR;
hose->dense_io_base = 0;
/*
* Set up the PCI to main memory translation windows.
*
* Window 1 is direct access 1GB at 1GB
* Window 2 is scatter-gather 8MB at 8MB (for isa)
*/
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_pci = NULL;
__direct_map_base = 0x40000000;
__direct_map_size = 0x40000000;
*(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
*(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
*(vuip)APECS_IOC_TB1R = 0;
*(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
*(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
*(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;
apecs_pci_tbi(hose, 0, -1);
/*
* Finally, clear the HAXR2 register, which gets used
* for PCI Config Space accesses. That is the way
* we want to use it, and we do not want to depend on
* what ARC or SRM might have left behind...
*/
*(vuip)APECS_IOC_HAXR2 = 0;
mb();
}
void
apecs_pci_clr_err(void)
{
unsigned int jd;
jd = *(vuip)APECS_IOC_DCSR;
if (jd & 0xffe0L) {
*(vuip)APECS_IOC_SEAR;
*(vuip)APECS_IOC_DCSR = jd | 0xffe1L;
mb();
*(vuip)APECS_IOC_DCSR;
}
*(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
mb();
*(vuip)APECS_IOC_TBIA;
}
void
apecs_machine_check(unsigned long vector, unsigned long la_ptr)
{
struct el_common *mchk_header;
struct el_apecs_procdata *mchk_procdata;
struct el_apecs_sysdata_mcheck *mchk_sysdata;
mchk_header = (struct el_common *)la_ptr;
mchk_procdata = (struct el_apecs_procdata *)
(la_ptr + mchk_header->proc_offset
- sizeof(mchk_procdata->paltemp));
mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
(la_ptr + mchk_header->sys_offset);
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
draina();
apecs_pci_clr_err();
wrmces(0x7); /* reset machine check pending flag */
mb();
process_mcheck_info(vector, la_ptr, "APECS",
(mcheck_expected(0)
&& (mchk_sysdata->epic_dcsr & 0x0c00UL)));
}

arch/alpha/kernel/core_cia.c (new file, 1212 lines)
(diff suppressed: file too large)

arch/alpha/kernel/core_irongate.c (new file, 420 lines)
@@ -0,0 +1,420 @@
/*
* linux/arch/alpha/kernel/core_irongate.c
*
* Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
*
* Copyright (C) 1999 Alpha Processor, Inc.,
* (David Daniel, Stig Telfer, Soohoon Lee)
*
* Code common to all IRONGATE core logic chips.
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_irongate.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/pci.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "pci_impl.h"
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
igcsr32 *IronECC;
/*
* Given a bus, device, and function number, compute resulting
* configuration space address accordingly. It is therefore not safe
* to have concurrent invocations to configuration space access
* routines, but there really shouldn't be any need for this.
*
* addr[31:24] reserved
* addr[23:16] bus number (8 bits = 256 possible buses)
* addr[15:11] Device number (5 bits)
* addr[10: 8] function number
* addr[ 7: 2] register number
*
* For IRONGATE:
* if (bus = addr[23:16]) == 0
* then
* type 0 config cycle:
* addr_on_pci[31:11] = id selection for device = addr[15:11]
* addr_on_pci[10: 2] = addr[10: 2] ???
* addr_on_pci[ 1: 0] = 00
* else
* type 1 config cycle (pass on with no decoding):
* addr_on_pci[31:24] = 0
* addr_on_pci[23: 2] = addr[23: 2]
* addr_on_pci[ 1: 0] = 01
* fi
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, unsigned char *type1)
{
unsigned long addr;
u8 bus = pbus->number;
DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
"pci_addr=0x%p, type1=0x%p)\n",
bus, device_fn, where, pci_addr, type1));
*type1 = (bus != 0);
addr = (bus << 16) | (device_fn << 8) | where;
addr |= IRONGATE_CONF;
*pci_addr = addr;
DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
static int
irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
*value = __kernel_ldbu(*(vucp)addr);
break;
case 2:
*value = __kernel_ldwu(*(vusp)addr);
break;
case 4:
*value = *(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
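/* Each store below is followed by a read-back of the same register
   to flush the posted write through the chipset. */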
switch (size) {
case 1:
__kernel_stb(value, *(vucp)addr);
mb();
__kernel_ldbu(*(vucp)addr);
break;
case 2:
__kernel_stw(value, *(vusp)addr);
mb();
__kernel_ldwu(*(vusp)addr);
break;
case 4:
*(vuip)addr = value;
mb();
*(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops irongate_pci_ops =
{
.read = irongate_read_config,
.write = irongate_write_config,
};
int
irongate_pci_clr_err(void)
{
unsigned int nmi_ctl=0;
unsigned int IRONGATE_jd;
again:
IRONGATE_jd = IRONGATE0->stat_cmd;
printk("Iron stat_cmd %x\n", IRONGATE_jd);
IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
mb();
IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */
IRONGATE_jd = *IronECC;
printk("Iron ECC %x\n", IRONGATE_jd);
*IronECC = IRONGATE_jd; /* write again clears error bits */
mb();
IRONGATE_jd = *IronECC; /* re-read to force write */
/* Clear ALI NMI */
nmi_ctl = inb(0x61);
nmi_ctl |= 0x0c;
outb(nmi_ctl, 0x61);
nmi_ctl &= ~0x0c;
outb(nmi_ctl, 0x61);
IRONGATE_jd = *IronECC;
if (IRONGATE_jd & 0x300) goto again;
return 0;
}
#define IRONGATE_3GB 0xc0000000UL
/* On Albacore (aka UP1500) with 4GB of RAM we have to reserve some
memory for PCI. At this point we just reserve the memory above 3GB;
most of it will be freed after PCI setup is done. */
static void __init
albacore_init_arch(void)
{
unsigned long memtop = max_low_pfn << PAGE_SHIFT;
unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
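/* pci_mem: top of RAM rounded up to the next 16MB boundary; PCI
   space is carved out from there (capped at 3GB below). */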
struct percpu_struct *cpu;
int pal_rev, pal_var;
cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
pal_rev = cpu->pal_revision & 0xffff;
pal_var = (cpu->pal_revision >> 16) & 0xff;
/* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
the CPU incorrectly (leaving speculative stores enabled),
which causes memory corruption under certain conditions.
Issue a warning for such consoles. */
if (alpha_using_srm &&
(pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
"or later\n");
if (pci_mem > IRONGATE_3GB)
pci_mem = IRONGATE_3GB;
IRONGATE0->pci_mem = pci_mem;
alpha_mv.min_mem_address = pci_mem;
if (memtop > pci_mem) {
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start, initrd_end;
extern void *move_initrd(unsigned long);
/* Move the initrd out of the way. */
if (initrd_end && __pa(initrd_end) > pci_mem) {
unsigned long size;
size = initrd_end - initrd_start;
free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
PAGE_ALIGN(size));
if (!move_initrd(pci_mem))
printk("irongate_init_arch: initrd too big "
"(%ldK)\ndisabling initrd\n",
size / 1024);
}
#endif
reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop -
pci_mem, BOOTMEM_DEFAULT);
printk("irongate_init_arch: temporarily reserving "
"region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
}
}
static void __init
irongate_setup_agp(void)
{
/* Disable the GART window. AGPGART doesn't work due to
as-yet-unresolved memory coherency issues... */
IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
alpha_agpgart_size = 0;
}
void __init
irongate_init_arch(void)
{
struct pci_controller *hose;
int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */
IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;
irongate_pci_clr_err();
if (amd761)
albacore_init_arch();
irongate_setup_agp();
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
hose->index = 0;
/* This is for userland consumption. For some reason, the 40-bit
PIO bias that we use in the kernel through KSEG didn't work for
the page table based user mappings. So make sure we get the
43-bit PIO bias. */
hose->sparse_mem_base = 0;
hose->sparse_io_base = 0;
hose->dense_mem_base
= (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
hose->dense_io_base
= (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;
hose->sg_isa = hose->sg_pci = NULL;
__direct_map_base = 0;
__direct_map_size = 0xffffffff;
}
/*
* IO map and AGP support
*/
#include <linux/vmalloc.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/export.h>
#include <asm/pgalloc.h>
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
void __iomem *
irongate_ioremap(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
unsigned long vaddr;
unsigned long baddr, last;
u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
unsigned long gart_bus_addr;
if (!alpha_agpgart_size)
return (void __iomem *)(addr + IRONGATE_MEM);
gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
PCI_BASE_ADDRESS_MEM_MASK;
/*
* Check for within the AGP aperture...
*/
do {
/*
* Check the AGP area
*/
if (addr >= gart_bus_addr && addr + size - 1 <
gart_bus_addr + alpha_agpgart_size)
break;
/*
* Not found - assume legacy ioremap
*/
return (void __iomem *)(addr + IRONGATE_MEM);
} while(0);
mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);
gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */
/*
* Adjust the limits (mappings must be page aligned)
*/
if (addr & ~PAGE_MASK) {
printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
addr);
return (void __iomem *)(addr + IRONGATE_MEM);
}
last = addr + size - 1;
size = PAGE_ALIGN(last) - addr;
#if 0
printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
printk("irongate_ioremap: gart_bus_addr 0x%lx\n", gart_bus_addr);
printk("irongate_ioremap: gart_aper_size 0x%lx\n", gart_aper_size);
printk("irongate_ioremap: mmio_regs %p\n", mmio_regs);
printk("irongate_ioremap: gatt_pages %p\n", gatt_pages);
for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
{
cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
printk("irongate_ioremap: cur_gatt %p pte 0x%x\n",
cur_gatt, pte);
}
#endif
/*
* Map it
*/
area = get_vm_area(size, VM_IOREMAP);
if (!area) return NULL;
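/* Walk the GATT one bus page at a time: each pte, minus its valid
   bit, names the physical page backing that AGP page. */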
for(baddr = addr, vaddr = (unsigned long)area->addr;
baddr <= last;
baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
{
cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
if (__alpha_remap_area_pages(vaddr,
pte, PAGE_SIZE, 0)) {
printk("AGP ioremap: FAILED to map...\n");
vfree(area->addr);
return NULL;
}
}
flush_tlb_all();
vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
#if 0
printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
addr, size, vaddr);
#endif
return (void __iomem *)vaddr;
}
EXPORT_SYMBOL(irongate_ioremap);
void
irongate_iounmap(volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
if (((long)addr >> 41) == -2)
return; /* kseg map, nothing to do */
if (addr)
return vfree((void *)(PAGE_MASK & addr));
}
EXPORT_SYMBOL(irongate_iounmap);

arch/alpha/kernel/core_lca.c (new file, 515 lines)
@@ -0,0 +1,515 @@
/*
* linux/arch/alpha/kernel/core_lca.c
*
* Written by David Mosberger (davidm@cs.arizona.edu) with some code
* taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
* bios code.
*
* Code common to all LCA core logic chips.
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_lca.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include "proto.h"
#include "pci_impl.h"
/*
* BIOS32-style PCI interface:
*/
/*
* Machine check reasons. Defined according to PALcode sources
* (osf.h and platform.h).
*/
#define MCHK_K_TPERR 0x0080
#define MCHK_K_TCPERR 0x0082
#define MCHK_K_HERR 0x0084
#define MCHK_K_ECC_C 0x0086
#define MCHK_K_ECC_NC 0x0088
#define MCHK_K_UNKNOWN 0x008A
#define MCHK_K_CACKSOFT 0x008C
#define MCHK_K_BUGCHECK 0x008E
#define MCHK_K_OS_BUGCHECK 0x0090
#define MCHK_K_DCPERR 0x0092
#define MCHK_K_ICPERR 0x0094
/*
* Platform-specific machine-check reasons:
*/
#define MCHK_K_SIO_SERR 0x204 /* all platforms so far */
#define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */
#define MCHK_K_DCSR 0x208 /* all but Noname */
/*
* Given a bus, device, and function number, compute resulting
* configuration space address and setup the LCA_IOC_CONF register
* accordingly. It is therefore not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
*
* Type 0:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:11 Device select bit.
* 10:8 Function number
* 7:2 Register number
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr)
{
unsigned long addr;
u8 bus = pbus->number;
if (bus == 0) {
int device = device_fn >> 3;
int func = device_fn & 0x7;
/* Type 0 configuration cycle. */
if (device > 12) {
return -1;
}
*(vulp)LCA_IOC_CONF = 0;
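/* IDSEL: one-hot device select in address bit (11 + device),
   matching the Type 0 layout described above. */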
addr = (1 << (11 + device)) | (func << 8) | where;
} else {
/* Type 1 configuration cycle. */
*(vulp)LCA_IOC_CONF = 1;
addr = (bus << 16) | (device_fn << 8) | where;
}
*pci_addr = addr;
return 0;
}
static unsigned int
conf_read(unsigned long addr)
{
unsigned long flags, code, stat0;
unsigned int value;
local_irq_save(flags);
/* Reset status register to avoid losing errors. */
stat0 = *(vulp)LCA_IOC_STAT0;
*(vulp)LCA_IOC_STAT0 = stat0;
mb();
/* Access configuration space. */
value = *(vuip)addr;
draina();
stat0 = *(vulp)LCA_IOC_STAT0;
if (stat0 & LCA_IOC_STAT0_ERR) {
code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
& LCA_IOC_STAT0_CODE_MASK);
if (code != 1) {
printk("lca.c:conf_read: got stat0=%lx\n", stat0);
}
/* Reset error status. */
*(vulp)LCA_IOC_STAT0 = stat0;
mb();
/* Reset machine check. */
wrmces(0x7);
value = 0xffffffff;
}
local_irq_restore(flags);
return value;
}
static void
conf_write(unsigned long addr, unsigned int value)
{
unsigned long flags, code, stat0;
local_irq_save(flags); /* avoid getting hit by machine check */
/* Reset status register to avoid losing errors. */
stat0 = *(vulp)LCA_IOC_STAT0;
*(vulp)LCA_IOC_STAT0 = stat0;
mb();
/* Access configuration space. */
*(vuip)addr = value;
draina();
stat0 = *(vulp)LCA_IOC_STAT0;
if (stat0 & LCA_IOC_STAT0_ERR) {
code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
& LCA_IOC_STAT0_CODE_MASK);
if (code != 1) {
printk("lca.c:conf_write: got stat0=%lx\n", stat0);
}
/* Reset error status. */
*(vulp)LCA_IOC_STAT0 = stat0;
mb();
/* Reset machine check. */
wrmces(0x7);
}
local_irq_restore(flags);
}
static int
lca_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr, pci_addr;
long mask;
int shift;
if (mk_conf_addr(bus, devfn, where, &pci_addr))
return PCIBIOS_DEVICE_NOT_FOUND;
shift = (where & 3) * 8;
mask = (size - 1) * 8;
addr = (pci_addr << 5) + mask + LCA_CONF;
*value = conf_read(addr) >> (shift);
return PCIBIOS_SUCCESSFUL;
}
static int
lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
u32 value)
{
unsigned long addr, pci_addr;
long mask;
if (mk_conf_addr(bus, devfn, where, &pci_addr))
return PCIBIOS_DEVICE_NOT_FOUND;
mask = (size - 1) * 8;
addr = (pci_addr << 5) + mask + LCA_CONF;
conf_write(addr, value << ((where & 3) * 8));
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops lca_pci_ops =
{
.read = lca_read_config,
.write = lca_write_config,
};
void
lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
wmb();
*(vulp)LCA_IOC_TBIA = 0;
mb();
}
void __init
lca_init_arch(void)
{
struct pci_controller *hose;
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
hose->index = 0;
hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR;
hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR;
hose->sparse_io_base = LCA_IO - IDENT_ADDR;
hose->dense_io_base = 0;
/*
* Set up the PCI to main memory translation windows.
*
* Mimic the SRM settings for the direct-map window.
* Window 0 is scatter-gather 8MB at 8MB (for isa).
* Window 1 is direct access 1GB at 1GB.
*
* Note that we do not try to save any of the DMA window CSRs
* before setting them, since we cannot read those CSRs on LCA.
*/
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_pci = NULL;
__direct_map_base = 0x40000000;
__direct_map_size = 0x40000000;
*(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32);
*(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000;
*(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes);
*(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32);
*(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000;
*(vulp)LCA_IOC_T_BASE1 = 0;
*(vulp)LCA_IOC_TB_ENA = 0x80;
lca_pci_tbi(hose, 0, -1);
/*
* Disable PCI parity for now. The NCR53c810 chip has
* troubles meeting the PCI spec which results in
* data parity errors.
*/
*(vulp)LCA_IOC_PAR_DIS = 1UL<<5;
/*
* Finally, set up for restoring the correct HAE if using SRM.
* Again, since we cannot read many of the CSRs on the LCA,
* one of which happens to be the HAE, we save the value that
* the SRM will expect...
*/
if (alpha_using_srm)
srm_hae = 0x80000000UL;
}
/*
* Constants used during machine-check handling. I suppose these
* could be moved into lca.h but I don't see much reason why anybody
* else would want to use them.
*/
#define ESR_EAV (1UL<< 0) /* error address valid */
#define ESR_CEE (1UL<< 1) /* correctable error */
#define ESR_UEE (1UL<< 2) /* uncorrectable error */
#define ESR_WRE (1UL<< 3) /* write-error */
#define ESR_SOR (1UL<< 4) /* error source */
#define ESR_CTE (1UL<< 7) /* cache-tag error */
#define ESR_MSE (1UL<< 9) /* multiple soft errors */
#define ESR_MHE (1UL<<10) /* multiple hard errors */
#define ESR_NXM (1UL<<12) /* non-existent memory */
#define IOC_ERR ( 1<<4) /* ioc logs an error */
#define IOC_CMD_SHIFT 0
#define IOC_CMD (0xf<<IOC_CMD_SHIFT)
#define IOC_CODE_SHIFT 8
#define IOC_CODE (0xf<<IOC_CODE_SHIFT)
#define IOC_LOST ( 1<<5)
#define IOC_P_NBR ((__u32) ~((1<<13) - 1))
static void
mem_error(unsigned long esr, unsigned long ear)
{
printk(" %s %s error to %s occurred at address %x\n",
((esr & ESR_CEE) ? "Correctable" :
(esr & ESR_UEE) ? "Uncorrectable" : "A"),
(esr & ESR_WRE) ? "write" : "read",
(esr & ESR_SOR) ? "memory" : "b-cache",
(unsigned) (ear & 0x1ffffff8));
if (esr & ESR_CTE) {
printk(" A b-cache tag parity error was detected.\n");
}
if (esr & ESR_MSE) {
printk(" Several other correctable errors occurred.\n");
}
if (esr & ESR_MHE) {
printk(" Several other uncorrectable errors occurred.\n");
}
if (esr & ESR_NXM) {
printk(" Attempted to access non-existent memory.\n");
}
}
static void
ioc_error(__u32 stat0, __u32 stat1)
{
static const char * const pci_cmd[] = {
"Interrupt Acknowledge", "Special", "I/O Read", "I/O Write",
"Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3",
"Rsvd4", "Configuration Read", "Configuration Write",
"Memory Read Multiple", "Dual Address", "Memory Read Line",
"Memory Write and Invalidate"
};
static const char * const err_name[] = {
"exceeded retry limit", "no device", "bad data parity",
"target abort", "bad address parity", "page table read error",
"invalid page", "data error"
};
unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT;
unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT;
printk(" %s initiated PCI %s cycle to address %x"
" failed due to %s.\n",
code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]);
if (code == 5 || code == 6) {
printk(" (Error occurred at PCI memory address %x.)\n",
(stat0 & ~IOC_P_NBR));
}
if (stat0 & IOC_LOST) {
printk(" Other PCI errors occurred simultaneously.\n");
}
}
void
lca_machine_check(unsigned long vector, unsigned long la_ptr)
{
const char * reason;
union el_lca el;
el.c = (struct el_common *) la_ptr;
wrmces(rdmces()); /* reset machine check pending flag */
printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n",
vector, get_irq_regs()->pc, (unsigned int) el.c->code);
/*
* The first quadword after the common header always seems to
* be the machine check reason---don't know why this isn't
* part of the common header instead. In the case of a long
* logout frame, the upper 32 bits is the machine check
* revision level, which we ignore for now.
*/
switch ((unsigned int) el.c->code) {
case MCHK_K_TPERR: reason = "tag parity error"; break;
case MCHK_K_TCPERR: reason = "tag control parity error"; break;
case MCHK_K_HERR: reason = "access to non-existent memory"; break;
case MCHK_K_ECC_C: reason = "correctable ECC error"; break;
case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break;
case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break;
case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break;
case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break;
case MCHK_K_DCPERR: reason = "d-cache parity error"; break;
case MCHK_K_ICPERR: reason = "i-cache parity error"; break;
case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break;
case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break;
case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break;
case MCHK_K_UNKNOWN:
default: reason = "unknown"; break;
}
switch (el.c->size) {
case sizeof(struct el_lca_mcheck_short):
printk(KERN_CRIT
" Reason: %s (short frame%s, dc_stat=%#lx):\n",
reason, el.c->retry ? ", retryable" : "",
el.s->dc_stat);
if (el.s->esr & ESR_EAV) {
mem_error(el.s->esr, el.s->ear);
}
if (el.s->ioc_stat0 & IOC_ERR) {
ioc_error(el.s->ioc_stat0, el.s->ioc_stat1);
}
break;
case sizeof(struct el_lca_mcheck_long):
printk(KERN_CRIT " Reason: %s (long frame%s):\n",
reason, el.c->retry ? ", retryable" : "");
printk(KERN_CRIT
" reason: %#lx exc_addr: %#lx dc_stat: %#lx\n",
el.l->pt[0], el.l->exc_addr, el.l->dc_stat);
printk(KERN_CRIT " car: %#lx\n", el.l->car);
if (el.l->esr & ESR_EAV) {
mem_error(el.l->esr, el.l->ear);
}
if (el.l->ioc_stat0 & IOC_ERR) {
ioc_error(el.l->ioc_stat0, el.l->ioc_stat1);
}
break;
default:
printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size);
}
/* Dump the logout area to give all info. */
#ifdef CONFIG_VERBOSE_MCHECK
if (alpha_verbose_mcheck > 1) {
unsigned long * ptr = (unsigned long *) la_ptr;
long i;
for (i = 0; i < el.c->size / sizeof(long); i += 2) {
printk(KERN_CRIT " +%8lx %016lx %016lx\n",
i*sizeof(long), ptr[i], ptr[i+1]);
}
}
#endif /* CONFIG_VERBOSE_MCHECK */
}
/*
 * The following routines support the clock-speed changing necessary
 * to manage the thermal problem on the AlphaBook1.
 */
void
lca_clock_print(void)
{
long pmr_reg;
pmr_reg = LCA_READ_PMR;
printk("Status of clock control:\n");
printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg));
printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg));
printk("\tInterrupt override is %s\n",
(pmr_reg & LCA_PMR_INTO) ? "on" : "off");
printk("\tDMA override is %s\n",
(pmr_reg & LCA_PMR_DMAO) ? "on" : "off");
}
int
lca_get_clock(void)
{
long pmr_reg;
pmr_reg = LCA_READ_PMR;
return(LCA_GET_PRIMARY(pmr_reg));
}
void
lca_clock_fiddle(int divisor)
{
long pmr_reg;
pmr_reg = LCA_READ_PMR;
LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor);
/* lca_norm_clock = divisor; */
LCA_WRITE_PMR(pmr_reg);
mb();
}

(diff suppressed: file too large)

arch/alpha/kernel/core_mcpcia.c (new file, 613 lines)
@@ -0,0 +1,613 @@
/*
* linux/arch/alpha/kernel/core_mcpcia.c
*
* Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
*
* Code common to all MCbus-PCI Adaptor core logic chipsets
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_mcpcia.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/ptrace.h>
#include "proto.h"
#include "pci_impl.h"
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the i/o controller does not properly
* handle the system transaction. Another involves timing. Ho hum.
*/
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CFG 0
#if DEBUG_CFG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
/*
* Given a bus, device, and function number, compute resulting
* configuration space address and setup the MCPCIA_HAXR2 register
* accordingly. It is therefore not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
*
* Type 0:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:11 Device select bit.
* 10:8 Function number
* 7:2 Register number
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
static unsigned int
conf_read(unsigned long addr, unsigned char type1,
struct pci_controller *hose)
{
unsigned long flags;
unsigned long mid = MCPCIA_HOSE2MID(hose->index);
unsigned int stat0, value, cpu;
cpu = smp_processor_id();
local_irq_save(flags);
DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n",
addr, type1, mid));
/* Reset status register to avoid losing errors. */
stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
*(vuip)MCPCIA_CAP_ERR(mid) = stat0;
mb();
*(vuip)MCPCIA_CAP_ERR(mid);
DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
mcheck_extra(cpu) = mid;
mb();
/* Access configuration space. */
value = *((vuip)addr);
mb();
mb(); /* magic */
if (mcheck_taken(cpu)) {
mcheck_taken(cpu) = 0;
value = 0xffffffffU;
mb();
}
mcheck_expected(cpu) = 0;
mb();
DBG_CFG(("conf_read(): finished\n"));
local_irq_restore(flags);
return value;
}
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1,
struct pci_controller *hose)
{
unsigned long flags;
unsigned long mid = MCPCIA_HOSE2MID(hose->index);
unsigned int stat0, cpu;
cpu = smp_processor_id();
local_irq_save(flags); /* avoid getting hit by machine check */
/* Reset status register to avoid losing errors. */
stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
*(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
*(vuip)MCPCIA_CAP_ERR(mid);
DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
draina();
mcheck_expected(cpu) = 1;
mcheck_extra(cpu) = mid;
mb();
/* Access configuration space. */
*((vuip)addr) = value;
mb();
mb(); /* magic */
*(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
mcheck_expected(cpu) = 0;
mb();
DBG_CFG(("conf_write(): finished\n"));
local_irq_restore(flags);
}
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where,
struct pci_controller *hose, unsigned long *pci_addr,
unsigned char *type1)
{
u8 bus = pbus->number;
unsigned long addr;
DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x,"
" pci_addr=0x%p, type1=0x%p)\n",
bus, devfn, hose->index, where, pci_addr, type1));
/* Type 1 configuration cycle for *ALL* busses. */
*type1 = 1;
if (!pbus->parent) /* No parent means peer PCI bus. */
bus = 0;
addr = (bus << 16) | (devfn << 8) | (where);
addr <<= 5; /* swizzle for SPARSE */
addr |= hose->config_space_base;
*pci_addr = addr;
DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
static int
mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
struct pci_controller *hose = bus->sysdata;
unsigned long addr, w;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
addr |= (size - 1) * 8;
w = conf_read(addr, type1, hose);
switch (size) {
case 1:
*value = __kernel_extbl(w, where & 3);
break;
case 2:
*value = __kernel_extwl(w, where & 3);
break;
case 4:
*value = w;
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
struct pci_controller *hose = bus->sysdata;
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
addr |= (size - 1) * 8;
value = __kernel_insql(value, where & 3);
conf_write(addr, value, type1, hose);
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops mcpcia_pci_ops =
{
.read = mcpcia_read_config,
.write = mcpcia_write_config,
};
void
mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
wmb();
*(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0;
mb();
}
static int __init
mcpcia_probe_hose(int h)
{
int cpu = smp_processor_id();
int mid = MCPCIA_HOSE2MID(h);
unsigned int pci_rev;
/* Gotta be REAL careful. If hose is absent, we get an mcheck. */
mb();
mb();
draina();
wrmces(7);
mcheck_expected(cpu) = 2; /* indicates probing */
mcheck_taken(cpu) = 0;
mcheck_extra(cpu) = mid;
mb();
/* Access the bus revision word. */
pci_rev = *(vuip)MCPCIA_REV(mid);
mb();
mb(); /* magic */
if (mcheck_taken(cpu)) {
mcheck_taken(cpu) = 0;
pci_rev = 0xffffffff;
mb();
}
mcheck_expected(cpu) = 0;
mb();
return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST;
}
static void __init
mcpcia_new_hose(int h)
{
struct pci_controller *hose;
struct resource *io, *mem, *hae_mem;
int mid = MCPCIA_HOSE2MID(h);
hose = alloc_pci_controller();
if (h == 0)
pci_isa_hose = hose;
io = alloc_resource();
mem = alloc_resource();
hae_mem = alloc_resource();
hose->io_space = io;
hose->mem_space = hae_mem;
hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR;
hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR;
hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR;
hose->dense_io_base = 0;
hose->config_space_base = MCPCIA_CONF(mid);
hose->index = h;
io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS;
io->end = io->start + 0xffff;
io->name = pci_io_names[h];
io->flags = IORESOURCE_IO;
mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS;
mem->end = mem->start + 0xffffffff;
mem->name = pci_mem_names[h];
mem->flags = IORESOURCE_MEM;
hae_mem->start = mem->start;
hae_mem->end = mem->start + MCPCIA_MEM_MASK;
hae_mem->name = pci_hae0_name;
hae_mem->flags = IORESOURCE_MEM;
if (request_resource(&ioport_resource, io) < 0)
printk(KERN_ERR "Failed to request IO on hose %d\n", h);
if (request_resource(&iomem_resource, mem) < 0)
printk(KERN_ERR "Failed to request MEM on hose %d\n", h);
if (request_resource(mem, hae_mem) < 0)
printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h);
}
static void
mcpcia_pci_clr_err(int mid)
{
*(vuip)MCPCIA_CAP_ERR(mid);
*(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */
mb();
*(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */
}
static void __init
mcpcia_startup_hose(struct pci_controller *hose)
{
int mid = MCPCIA_HOSE2MID(hose->index);
unsigned int tmp;
mcpcia_pci_clr_err(mid);
/*
* Set up error reporting.
*/
tmp = *(vuip)MCPCIA_CAP_ERR(mid);
tmp |= 0x0006; /* master/target abort */
*(vuip)MCPCIA_CAP_ERR(mid) = tmp;
mb();
tmp = *(vuip)MCPCIA_CAP_ERR(mid);
/*
* Set up the PCI->physical memory translation windows.
*
* Window 0 is scatter-gather 8MB at 8MB (for isa)
* Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
* Window 2 is direct access 2GB at 2GB
*/
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_pci = iommu_arena_new(hose, 0x40000000,
size_for_memory(0x40000000), 0);
__direct_map_base = 0x80000000;
__direct_map_size = 0x80000000;
*(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3;
*(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000;
*(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8;
*(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3;
*(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000;
*(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8;
*(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1;
*(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000;
*(vuip)MCPCIA_T2_BASE(mid) = 0;
*(vuip)MCPCIA_W3_BASE(mid) = 0x0;
mcpcia_pci_tbi(hose, 0, -1);
*(vuip)MCPCIA_HBASE(mid) = 0x0;
mb();
*(vuip)MCPCIA_HAE_MEM(mid) = 0U;
mb();
*(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */
*(vuip)MCPCIA_HAE_IO(mid) = 0;
mb();
*(vuip)MCPCIA_HAE_IO(mid); /* read it back. */
}
void __init
mcpcia_init_arch(void)
{
/* With multiple PCI busses, we play with I/O as physical addrs. */
ioport_resource.end = ~0UL;
/* Allocate hose 0. That's the one that all the ISA junk hangs
off of, from which we'll be registering stuff here in a bit.
Other hose detection is done in mcpcia_init_hoses, which is
called from init_IRQ. */
mcpcia_new_hose(0);
}
/* This is called from init_IRQ, since we cannot take interrupts
before then. Which means we cannot do this in init_arch. */
void __init
mcpcia_init_hoses(void)
{
struct pci_controller *hose;
int hose_count;
int h;
/* First, find how many hoses we have. */
hose_count = 0;
for (h = 0; h < MCPCIA_MAX_HOSES; ++h) {
if (mcpcia_probe_hose(h)) {
if (h != 0)
mcpcia_new_hose(h);
hose_count++;
}
}
printk("mcpcia_init_hoses: found %d hoses\n", hose_count);
/* Now do init for each hose. */
for (hose = hose_head; hose; hose = hose->next)
mcpcia_startup_hose(hose);
}
static void
mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout)
{
struct el_common_EV5_uncorrectable_mcheck *frame;
int i;
frame = &logout->procdata;
/* Print PAL fields */
for (i = 0; i < 24; i += 2) {
printk(" paltmp[%d-%d] = %16lx %16lx\n",
i, i+1, frame->paltemp[i], frame->paltemp[i+1]);
}
for (i = 0; i < 8; i += 2) {
printk(" shadow[%d-%d] = %16lx %16lx\n",
i, i+1, frame->shadow[i],
frame->shadow[i+1]);
}
printk(" Addr of excepting instruction = %16lx\n",
frame->exc_addr);
printk(" Summary of arithmetic traps = %16lx\n",
frame->exc_sum);
printk(" Exception mask = %16lx\n",
frame->exc_mask);
printk(" Base address for PALcode = %16lx\n",
frame->pal_base);
printk(" Interrupt Status Reg = %16lx\n",
frame->isr);
printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n",
frame->icsr);
printk(" I-CACHE Reg %s parity error = %16lx\n",
(frame->ic_perr_stat & 0x800L) ?
"Data" : "Tag",
frame->ic_perr_stat);
printk(" D-CACHE error Reg = %16lx\n",
frame->dc_perr_stat);
if (frame->dc_perr_stat & 0x2) {
switch (frame->dc_perr_stat & 0x03c) {
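/* NB: after masking with 0x03c, a value of 10 (0x0a) can never match,
   since bit 1 is cleared; "case 10" presumably should be 0x10. */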
case 8:
printk(" Data error in bank 1\n");
break;
case 4:
printk(" Data error in bank 0\n");
break;
case 20:
printk(" Tag error in bank 1\n");
break;
case 10:
printk(" Tag error in bank 0\n");
break;
}
}
printk(" Effective VA = %16lx\n",
frame->va);
printk(" Reason for D-stream = %16lx\n",
frame->mm_stat);
printk(" EV5 SCache address = %16lx\n",
frame->sc_addr);
printk(" EV5 SCache TAG/Data parity = %16lx\n",
frame->sc_stat);
printk(" EV5 BC_TAG_ADDR = %16lx\n",
frame->bc_tag_addr);
printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n",
frame->ei_addr);
printk(" Fill Syndrome = %16lx\n",
frame->fill_syndrome);
printk(" EI_STAT reg = %16lx\n",
frame->ei_stat);
printk(" LD_LOCK = %16lx\n",
frame->ld_lock);
}
static void
mcpcia_print_system_area(unsigned long la_ptr)
{
struct el_common *frame;
struct pci_controller *hose;
struct IOD_subpacket {
unsigned long base;
unsigned int whoami;
unsigned int rsvd1;
unsigned int pci_rev;
unsigned int cap_ctrl;
unsigned int hae_mem;
unsigned int hae_io;
unsigned int int_ctl;
unsigned int int_reg;
unsigned int int_mask0;
unsigned int int_mask1;
unsigned int mc_err0;
unsigned int mc_err1;
unsigned int cap_err;
unsigned int rsvd2;
unsigned int pci_err1;
unsigned int mdpa_stat;
unsigned int mdpa_syn;
unsigned int mdpb_stat;
unsigned int mdpb_syn;
unsigned int rsvd3;
unsigned int rsvd4;
unsigned int rsvd5;
} *iodpp;
frame = (struct el_common *)la_ptr;
iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset);
for (hose = hose_head; hose; hose = hose->next, iodpp++) {
printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n",
hose->index, iodpp->base);
printk(" WHOAMI = %8x\n", iodpp->whoami);
printk(" PCI_REV = %8x\n", iodpp->pci_rev);
printk(" CAP_CTRL = %8x\n", iodpp->cap_ctrl);
printk(" HAE_MEM = %8x\n", iodpp->hae_mem);
printk(" HAE_IO = %8x\n", iodpp->hae_io);
printk(" INT_CTL = %8x\n", iodpp->int_ctl);
printk(" INT_REG = %8x\n", iodpp->int_reg);
printk(" INT_MASK0 = %8x\n", iodpp->int_mask0);
printk(" INT_MASK1 = %8x\n", iodpp->int_mask1);
printk(" MC_ERR0 = %8x\n", iodpp->mc_err0);
printk(" MC_ERR1 = %8x\n", iodpp->mc_err1);
printk(" CAP_ERR = %8x\n", iodpp->cap_err);
printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1);
printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat);
printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn);
printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat);
printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn);
}
}
void
mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
{
struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
unsigned int cpu = smp_processor_id();
int expected;
mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
expected = mcheck_expected(cpu);
mb();
mb(); /* magic */
draina();
switch (expected) {
case 0:
{
/* FIXME: how do we figure out which hose the
error was on? */
struct pci_controller *hose;
for (hose = hose_head; hose; hose = hose->next)
mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index));
break;
}
case 1:
mcpcia_pci_clr_err(mcheck_extra(cpu));
break;
default:
/* Otherwise, we're being called from mcpcia_probe_hose
and there's no hose to clear an error from. */
break;
}
wrmces(0x7);
mb();
process_mcheck_info(vector, la_ptr, "MCPCIA", expected != 0);
if (!expected && vector != 0x620 && vector != 0x630) {
mcpcia_print_uncorrectable(mchk_logout);
mcpcia_print_system_area(la_ptr);
}
}

202
arch/alpha/kernel/core_polaris.c Normal file
View file

@ -0,0 +1,202 @@
/*
* linux/arch/alpha/kernel/core_polaris.c
*
* POLARIS chip-specific code
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_polaris.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include "proto.h"
#include "pci_impl.h"
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
/*
* Given a bus, device, and function number, compute resulting
* configuration space address. This is fairly straightforward
* on POLARIS, since the chip itself generates Type 0 or Type 1
* cycles automatically depending on the bus number (Bus 0 is
* hardwired to Type 0, all others are Type 1. Peer bridges
* are not supported).
*
* All types:
*
* 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., scsi and ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
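/*
 * Illustrative example (hypothetical device): bus 0, device 7,
 * function 0 gives device_fn = 0x38, so register 0x10 encodes as
 * (0 << 16) | (0x38 << 8) | 0x10 = 0x3810, which is OR'd with
 * POLARIS_DENSE_CONFIG_BASE to form the final dense-space address.
 */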
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, u8 *type1)
{
u8 bus = pbus->number;
*type1 = (bus == 0) ? 0 : 1;
*pci_addr = (bus << 16) | (device_fn << 8) | (where) |
POLARIS_DENSE_CONFIG_BASE;
DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
" returning address 0x%lx\n",
bus, device_fn, where, *pci_addr));
return 0;
}
static int
polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
*value = __kernel_ldbu(*(vucp)addr);
break;
case 2:
*value = __kernel_ldwu(*(vusp)addr);
break;
case 4:
*value = *(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
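/*
 * Note the store / mb() / dummy-load pattern in each case below: the
 * read-back after the barrier forces the posted write out to the chip
 * before the routine returns (the same "re-read to force write" idiom
 * used elsewhere in these files).
 */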
static int
polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
__kernel_stb(value, *(vucp)addr);
mb();
__kernel_ldbu(*(vucp)addr);
break;
case 2:
__kernel_stw(value, *(vusp)addr);
mb();
__kernel_ldwu(*(vusp)addr);
break;
case 4:
*(vuip)addr = value;
mb();
*(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops polaris_pci_ops =
{
.read = polaris_read_config,
.write = polaris_write_config,
};
void __init
polaris_init_arch(void)
{
struct pci_controller *hose;
/* May need to initialize error reporting (see PCICTL0/1), but
* for now assume that the firmware has done the right thing
* already.
*/
#if 0
printk("polaris_init_arch(): trusting firmware for setup\n");
#endif
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
hose->index = 0;
hose->sparse_mem_base = 0;
hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR;
hose->sparse_io_base = 0;
hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR;
hose->sg_isa = hose->sg_pci = NULL;
/* The I/O window is fixed at 2G @ 2G. */
__direct_map_base = 0x80000000;
__direct_map_size = 0x80000000;
}
static inline void
polaris_pci_clr_err(void)
{
*(vusp)POLARIS_W_STATUS;
/* Write 1's to settable bits to clear errors */
*(vusp)POLARIS_W_STATUS = 0x7800;
mb();
*(vusp)POLARIS_W_STATUS;
}
void
polaris_machine_check(unsigned long vector, unsigned long la_ptr)
{
/* Clear the error before any reporting. */
mb();
mb();
draina();
polaris_pci_clr_err();
wrmces(0x7);
mb();
process_mcheck_info(vector, la_ptr, "POLARIS",
mcheck_expected(0));
}

623
arch/alpha/kernel/core_t2.c Normal file
View file

@ -0,0 +1,623 @@
/*
* linux/arch/alpha/kernel/core_t2.c
*
* Written by Jay A Estabrook (jestabro@amt.tay1.dec.com).
* December 1996.
*
* based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com)
*
* Code common to all T2 core logic chips.
*/
#define __EXTERN_INLINE
#include <asm/io.h>
#include <asm/core_t2.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/delay.h>
#include <asm/mce.h>
#include "proto.h"
#include "pci_impl.h"
/* For dumping initial DMA window settings. */
#define DEBUG_PRINT_INITIAL_SETTINGS 0
/* For dumping final DMA window settings. */
#define DEBUG_PRINT_FINAL_SETTINGS 0
/*
* By default, we direct-map starting at 2GB, in order to allow the
* maximum size direct-map window (2GB) to match the maximum amount of
* memory (2GB) that can be present on SABLEs. But that limits the
* floppy to DMA only via the scatter/gather window set up for 8MB
* ISA DMA, since the maximum ISA DMA address is 2GB-1.
*
* For now, this seems a reasonable trade-off: even though most SABLEs
* have less than 1GB of memory, floppy usage/performance will not
* really be affected by forcing it to go via scatter/gather...
*/
#define T2_DIRECTMAP_2G 1
#if T2_DIRECTMAP_2G
# define T2_DIRECTMAP_START 0x80000000UL
# define T2_DIRECTMAP_LENGTH 0x80000000UL
#else
# define T2_DIRECTMAP_START 0x40000000UL
# define T2_DIRECTMAP_LENGTH 0x40000000UL
#endif
/* The ISA scatter/gather window settings. */
#define T2_ISA_SG_START 0x00800000UL
#define T2_ISA_SG_LENGTH 0x00800000UL
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the i/o controller does not properly
* handle the system transaction. Another involves timing. Ho hum.
*/
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBG(args) printk args
#else
# define DBG(args)
#endif
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
/* Place to save the DMA Window registers as set up by SRM
for restoration during shutdown. */
static struct
{
struct {
unsigned long wbase;
unsigned long wmask;
unsigned long tbase;
} window[2];
unsigned long hae_1;
unsigned long hae_2;
unsigned long hae_3;
unsigned long hae_4;
unsigned long hbase;
} t2_saved_config __attribute__((common));
/*
* Given a bus, device, and function number, compute resulting
* configuration space address and set up the T2_HAE_3 register
* accordingly. It is therefore not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
*
* Type 0:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:11 Device select bit.
* 10:8 Function number
* 7:2 Register number
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
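/*
 * Illustrative Type 0 example (hypothetical device): on bus 0,
 * device_fn 0x19 (device 3, function 1), where 0x04 encodes as
 * (0x0800 << 3) | (1 << 8) | 0x04 = 0x4104; the config routines below
 * later shift this left by 5 and add T2_CONF.
 */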
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, unsigned char *type1)
{
unsigned long addr;
u8 bus = pbus->number;
DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x,"
" addr=0x%lx, type1=0x%x)\n",
bus, device_fn, where, pci_addr, type1));
if (bus == 0) {
int device = device_fn >> 3;
/* Type 0 configuration cycle. */
if (device > 8) {
DBG(("mk_conf_addr: device (%d) > 8, returning -1\n",
device));
return -1;
}
*type1 = 0;
addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where);
} else {
/* Type 1 configuration cycle. */
*type1 = 1;
addr = (bus << 16) | (device_fn << 8) | (where);
}
*pci_addr = addr;
DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
/*
* NOTE: both conf_read() and conf_write() may set HAE_3 when needing
* to do type1 access. This is protected by the use of spinlock IRQ
* primitives in the wrapper functions pci_{read,write}_config_*()
* defined in drivers/pci/pci.c.
*/
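/*
 * Probe protocol used below: flag the mcheck as expected, perform the
 * access, then wait ~100us for a possible machine check. If one was
 * taken, the target device does not exist and conf_read() returns
 * all-ones, mimicking a master abort.
 */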
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
unsigned int value, cpu, taken;
unsigned long t2_cfg = 0;
cpu = smp_processor_id();
DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
/* If Type1 access, must set T2 CFG. */
if (type1) {
t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
*(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
mb();
}
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
t2_mcheck_any_expected |= (1 << cpu);
mb();
/* Access configuration space. */
value = *(vuip)addr;
mb();
mb(); /* magic */
/* Wait for possible mcheck. Also, this lets other CPUs clear
their mchecks as well, as they can reliably tell when
another CPU is in the midst of handling a real mcheck via
the "taken" function. */
udelay(100);
if ((taken = mcheck_taken(cpu))) {
mcheck_taken(cpu) = 0;
t2_mcheck_last_taken |= (1 << cpu);
value = 0xffffffffU;
mb();
}
mcheck_expected(cpu) = 0;
t2_mcheck_any_expected = 0;
mb();
/* If Type1 access, must reset T2 CFG so normal IO space ops work. */
if (type1) {
*(vulp)T2_HAE_3 = t2_cfg;
mb();
}
return value;
}
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
unsigned int cpu, taken;
unsigned long t2_cfg = 0;
cpu = smp_processor_id();
/* If Type1 access, must set T2 CFG. */
if (type1) {
t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
*(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
mb();
}
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
t2_mcheck_any_expected |= (1 << cpu);
mb();
/* Access configuration space. */
*(vuip)addr = value;
mb();
mb(); /* magic */
/* Wait for possible mcheck. Also, this lets other CPUs clear
their mchecks as well, as they can reliably tell when
this CPU is in the midst of handling a real mcheck via
the "taken" function. */
udelay(100);
if ((taken = mcheck_taken(cpu))) {
mcheck_taken(cpu) = 0;
t2_mcheck_last_taken |= (1 << cpu);
mb();
}
mcheck_expected(cpu) = 0;
t2_mcheck_any_expected = 0;
mb();
/* If Type1 access, must reset T2 CFG so normal IO space ops work. */
if (type1) {
*(vulp)T2_HAE_3 = t2_cfg;
mb();
}
}
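/*
 * Sparse-space encoding in the routines below: mask = (size - 1) * 8
 * selects the transfer length in the low address bits, and
 * shift = (where & 3) * 8 moves the addressed bytes down to bit 0.
 * E.g. a 2-byte read at where = 2 uses mask = 8 and shift = 16.
 */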
static int
t2_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr, pci_addr;
unsigned char type1;
int shift;
long mask;
if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
mask = (size - 1) * 8;
shift = (where & 3) * 8;
addr = (pci_addr << 5) + mask + T2_CONF;
*value = conf_read(addr, type1) >> (shift);
return PCIBIOS_SUCCESSFUL;
}
static int
t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
u32 value)
{
unsigned long addr, pci_addr;
unsigned char type1;
long mask;
if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
mask = (size - 1) * 8;
addr = (pci_addr << 5) + mask + T2_CONF;
conf_write(addr, value << ((where & 3) * 8), type1);
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops t2_pci_ops =
{
.read = t2_read_config,
.write = t2_write_config,
};
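/*
 * T2 window registers pack base<31:20> and end<31:20> into a single
 * word. Worked example for the default 2GB-at-2GB direct map:
 * temp = 0x80000000 | (0xffffffff >> 20) = 0x80000fff, so
 * WBASE1 = 0x80080fff once the ENABLE bit (0x80000) is OR'd in, and
 * WMASK1 = (0x80000000 - 1) & 0xfff00000 = 0x7ff00000.
 */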
static void __init
t2_direct_map_window1(unsigned long base, unsigned long length)
{
unsigned long temp;
__direct_map_base = base;
__direct_map_size = length;
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
*(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
temp = (length - 1) & 0xfff00000UL;
*(vulp)T2_WMASK1 = temp;
*(vulp)T2_TBASE1 = 0;
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
__func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
#endif
}
static void __init
t2_sg_map_window2(struct pci_controller *hose,
unsigned long base,
unsigned long length)
{
unsigned long temp;
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
hose->sg_isa = iommu_arena_new(hose, base, length, 0);
hose->sg_pci = NULL;
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
*(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
temp = (length - 1) & 0xfff00000UL;
*(vulp)T2_WMASK2 = temp;
*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
mb();
t2_pci_tbi(hose, 0, -1); /* flush TLB all */
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
__func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
}
static void __init
t2_save_configuration(void)
{
#if DEBUG_PRINT_INITIAL_SETTINGS
printk("%s: HAE_1 was 0x%lx\n", __func__, srm_hae); /* HW is 0 */
printk("%s: HAE_2 was 0x%lx\n", __func__, *(vulp)T2_HAE_2);
printk("%s: HAE_3 was 0x%lx\n", __func__, *(vulp)T2_HAE_3);
printk("%s: HAE_4 was 0x%lx\n", __func__, *(vulp)T2_HAE_4);
printk("%s: HBASE was 0x%lx\n", __func__, *(vulp)T2_HBASE);
printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__,
*(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__,
*(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
/*
* Save the DMA Window registers.
*/
t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;
t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
t2_saved_config.hbase = *(vulp)T2_HBASE;
}
void __init
t2_init_arch(void)
{
struct pci_controller *hose;
struct resource *hae_mem;
unsigned long temp;
unsigned int i;
for (i = 0; i < NR_CPUS; i++) {
mcheck_expected(i) = 0;
mcheck_taken(i) = 0;
}
t2_mcheck_any_expected = 0;
t2_mcheck_last_taken = 0;
/* Enable scatter/gather TLB use. */
temp = *(vulp)T2_IOCSR;
if (!(temp & (0x1UL << 26))) {
printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
temp);
*(vulp)T2_IOCSR = temp | (0x1UL << 26);
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
}
t2_save_configuration();
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hae_mem = alloc_resource();
hae_mem->start = 0;
hae_mem->end = T2_MEM_R1_MASK;
hae_mem->name = pci_hae0_name;
if (request_resource(&iomem_resource, hae_mem) < 0)
printk(KERN_ERR "Failed to request HAE_MEM\n");
hose->mem_space = hae_mem;
hose->index = 0;
hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR;
hose->sparse_io_base = T2_IO - IDENT_ADDR;
hose->dense_io_base = 0;
/*
* Set up the PCI->physical memory translation windows.
*
* Window 1 is direct mapped.
* Window 2 is scatter/gather (for ISA).
*/
t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);
/* Always make an ISA DMA window. */
t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);
*(vulp)T2_HBASE = 0x0; /* Disable HOLES. */
/* Zero HAE. */
*(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
*(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
*(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */
/*
* We also now zero out HAE_4, the dense memory HAE, so that
* we need not account for its "offset" when accessing dense
* memory resources which we allocated in our normal way. This
* HAE would need to stay untouched were we to keep the SRM
* resource settings.
*
* Thus we can now run standard X servers on SABLE/LYNX. :-)
*/
*(vulp)T2_HAE_4 = 0; mb();
}
void
t2_kill_arch(int mode)
{
/*
* Restore the DMA Window registers.
*/
*(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
*(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
*(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
*(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
*(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
*(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
mb();
*(vulp)T2_HAE_1 = srm_hae;
*(vulp)T2_HAE_2 = t2_saved_config.hae_2;
*(vulp)T2_HAE_3 = t2_saved_config.hae_3;
*(vulp)T2_HAE_4 = t2_saved_config.hae_4;
*(vulp)T2_HBASE = t2_saved_config.hbase;
mb();
*(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
}
void
t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
unsigned long t2_iocsr;
t2_iocsr = *(vulp)T2_IOCSR;
/* set the TLB Clear bit */
*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28);
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
/* clear the TLB Clear bit */
*(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28);
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
}
#define SIC_SEIC (1UL << 33) /* System Event Clear */
static void
t2_clear_errors(int cpu)
{
struct sable_cpu_csr *cpu_regs;
cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
cpu_regs->sic &= ~SIC_SEIC;
/* Clear CPU errors. */
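/* These are write-one-to-clear registers: OR-ing each register with
   itself writes back exactly the bits that are currently set. */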
cpu_regs->bcce |= cpu_regs->bcce;
cpu_regs->cbe |= cpu_regs->cbe;
cpu_regs->bcue |= cpu_regs->bcue;
cpu_regs->dter |= cpu_regs->dter;
*(vulp)T2_CERR1 |= *(vulp)T2_CERR1;
*(vulp)T2_PERR1 |= *(vulp)T2_PERR1;
mb();
mb(); /* magic */
}
/*
* SABLE seems to have a "broadcast" style machine check, in that all
* CPUs receive it. And, the issuing CPU, in the case of PCI Config
* space read/write faults, will also receive a second mcheck, upon
* lowering IPL during completion processing in pci_read_config_byte()
* et al.
*
* Hence all the taken/expected/any_expected/last_taken stuff...
*/
void
t2_machine_check(unsigned long vector, unsigned long la_ptr)
{
int cpu = smp_processor_id();
#ifdef CONFIG_VERBOSE_MCHECK
struct el_common *mchk_header = (struct el_common *)la_ptr;
#endif
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
draina();
t2_clear_errors(cpu);
/* This should not actually be done until the logout frame is
examined, but, since we don't do that, go on and do this... */
wrmces(0x7);
mb();
/* Now, do testing for the anomalous conditions. */
if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
/*
* FUNKY: Received mcheck on a CPU and not
* expecting it, but another CPU is expecting one.
*
* Just dismiss it for now on this CPU...
*/
#ifdef CONFIG_VERBOSE_MCHECK
if (alpha_verbose_mcheck > 1) {
printk("t2_machine_check(cpu%d): any_expected 0x%x -"
" (assumed) spurious -"
" code 0x%x\n", cpu, t2_mcheck_any_expected,
(unsigned int)mchk_header->code);
}
#endif
return;
}
if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
if (t2_mcheck_last_taken & (1 << cpu)) {
#ifdef CONFIG_VERBOSE_MCHECK
if (alpha_verbose_mcheck > 1) {
printk("t2_machine_check(cpu%d): last_taken 0x%x - "
"unexpected mcheck - code 0x%x\n",
cpu, t2_mcheck_last_taken,
(unsigned int)mchk_header->code);
}
#endif
t2_mcheck_last_taken = 0;
mb();
return;
} else {
t2_mcheck_last_taken = 0;
mb();
}
}
#ifdef CONFIG_VERBOSE_MCHECK
if (alpha_verbose_mcheck > 1) {
printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
"any_expected 0x%x - code 0x%x\n",
(mcheck_expected(cpu) ? "EX" : "UN"), cpu,
t2_mcheck_last_taken, t2_mcheck_any_expected,
(unsigned int)mchk_header->code);
}
#endif
process_mcheck_info(vector, la_ptr, "T2", mcheck_expected(cpu));
}

798
arch/alpha/kernel/core_titan.c Normal file
View file

@ -0,0 +1,798 @@
/*
* linux/arch/alpha/kernel/core_titan.c
*
* Code common to all TITAN core logic chips.
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_titan.h>
#undef __EXTERN_INLINE
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>
#include "proto.h"
#include "pci_impl.h"
/* Save Titan configuration data as the console had it set up. */
struct
{
unsigned long wsba[4];
unsigned long wsm[4];
unsigned long tba[4];
} saved_config[4] __attribute__((common));
/*
* Is PChip 1 present? No need to query it more than once.
*/
static int titan_pchip1_present;
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
/*
* Routines to access TIG registers.
*/
static inline volatile unsigned long *
mk_tig_addr(int offset)
{
return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
}
static inline u8
titan_read_tig(int offset, u8 value)
{
volatile unsigned long *tig_addr = mk_tig_addr(offset);
return (u8)(*tig_addr & 0xff);
}
static inline void
titan_write_tig(int offset, u8 value)
{
volatile unsigned long *tig_addr = mk_tig_addr(offset);
*tig_addr = (unsigned long)value;
}
/*
* Given a bus, device, and function number, compute the resulting
* configuration space address.  It is not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
*
* Note that all config space accesses use Type 1 address format.
*
* Note also that type 1 is determined by non-zero bus number.
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, unsigned char *type1)
{
struct pci_controller *hose = pbus->sysdata;
unsigned long addr;
u8 bus = pbus->number;
DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
"pci_addr=0x%p, type1=0x%p)\n",
bus, device_fn, where, pci_addr, type1));
if (!pbus->parent) /* No parent means peer PCI bus. */
bus = 0;
*type1 = (bus != 0);
addr = (bus << 16) | (device_fn << 8) | where;
addr |= hose->config_space_base;
*pci_addr = addr;
DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
*value = __kernel_ldbu(*(vucp)addr);
break;
case 2:
*value = __kernel_ldwu(*(vusp)addr);
break;
case 4:
*value = *(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
__kernel_stb(value, *(vucp)addr);
mb();
__kernel_ldbu(*(vucp)addr);
break;
case 2:
__kernel_stw(value, *(vusp)addr);
mb();
__kernel_ldwu(*(vusp)addr);
break;
case 4:
*(vuip)addr = value;
mb();
*(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops titan_pci_ops =
{
.read = titan_read_config,
.write = titan_write_config,
};
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
titan_pachip *pachip =
(hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
titan_pachip_port *port;
volatile unsigned long *csr;
unsigned long value;
/* Get the right hose. */
port = &pachip->g_port;
if (hose->index & 2)
port = &pachip->a_port;
/* We can invalidate up to 8 tlb entries in a go. The flush
matches against <31:16> in the pci address.
Note that gtlbi* and atlbi* are in the same place in the g_port
and a_port, respectively, so the g_port offset can be used
even if hose is an a_port */
csr = &port->port_specific.g.gtlbia.csr;
if (((start ^ end) & 0xffff0000) == 0)
csr = &port->port_specific.g.gtlbiv.csr;
/* For TBIA, it doesn't matter what value we write. For TBI,
it's the shifted tag bits. */
value = (start & 0xffff0000) >> 12;
wmb();
*csr = value;
mb();
*csr;
}
static int
titan_query_agp(titan_pachip_port *port)
{
union TPAchipPCTL pctl;
/* set up APCTL */
pctl.pctl_q_whole = port->pctl.csr;
return pctl.pctl_r_bits.apctl_v_agp_present;
}
static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
struct pci_controller *hose;
hose = alloc_pci_controller();
if (index == 0)
pci_isa_hose = hose;
hose->io_space = alloc_resource();
hose->mem_space = alloc_resource();
/*
* This is for userland consumption. The 40-bit PIO bias that we
* use in the kernel through KSEG doesn't work in the page table
* based user mappings. (43-bit KSEG sign extends the physical
* address from bit 40 to hit the I/O bit - mapped addresses don't).
* So make sure we get the 43-bit PIO bias.
*/
hose->sparse_mem_base = 0;
hose->sparse_io_base = 0;
hose->dense_mem_base
= (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
hose->dense_io_base
= (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;
hose->config_space_base = TITAN_CONF(index);
hose->index = index;
hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
hose->io_space->name = pci_io_names[index];
hose->io_space->flags = IORESOURCE_IO;
hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
hose->mem_space->end = hose->mem_space->start + 0xffffffff;
hose->mem_space->name = pci_mem_names[index];
hose->mem_space->flags = IORESOURCE_MEM;
if (request_resource(&ioport_resource, hose->io_space) < 0)
printk(KERN_ERR "Failed to request IO on hose %d\n", index);
if (request_resource(&iomem_resource, hose->mem_space) < 0)
printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
/*
* Save the existing PCI window translations. SRM will
* need them when we go to reboot.
*/
saved_config[index].wsba[0] = port->wsba[0].csr;
saved_config[index].wsm[0] = port->wsm[0].csr;
saved_config[index].tba[0] = port->tba[0].csr;
saved_config[index].wsba[1] = port->wsba[1].csr;
saved_config[index].wsm[1] = port->wsm[1].csr;
saved_config[index].tba[1] = port->tba[1].csr;
saved_config[index].wsba[2] = port->wsba[2].csr;
saved_config[index].wsm[2] = port->wsm[2].csr;
saved_config[index].tba[2] = port->tba[2].csr;
saved_config[index].wsba[3] = port->wsba[3].csr;
saved_config[index].wsm[3] = port->wsm[3].csr;
saved_config[index].tba[3] = port->tba[3].csr;
/*
* Set up the PCI to main memory translation windows.
*
* Note: Window 3 on Titan is Scatter-Gather ONLY.
*
* Window 0 is scatter-gather 8MB at 8MB (for isa)
* Window 1 is direct access 1GB at 2GB
* Window 2 is scatter-gather 1GB at 3GB
*/
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_isa->align_entry = 8; /* 64KB for ISA */
hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */
port->wsba[0].csr = hose->sg_isa->dma_base | 3;
port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
port->wsba[1].csr = __direct_map_base | 1;
port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000;
port->tba[1].csr = 0;
port->wsba[2].csr = hose->sg_pci->dma_base | 3;
port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000;
port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);
port->wsba[3].csr = 0;
/* Enable the Monster Window to make DAC pci64 possible. */
port->pctl.csr |= pctl_m_mwin;
/*
* If it's an AGP port, initialize agplastwr.
*/
if (titan_query_agp(port))
port->port_specific.a.agplastwr.csr = __direct_map_base;
titan_pci_tbi(hose, 0, -1);
}
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
/* Init the ports in hose order... */
titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */
if (titan_pchip1_present)
titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */
if (titan_pchip1_present)
titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}
void __init
titan_init_arch(void)
{
#if 0
printk("%s: titan_init_arch()\n", __func__);
printk("%s: CChip registers:\n", __func__);
printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);
printk("%s: DChip registers:\n", __func__);
printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
#endif
boot_cpuid = __hard_smp_processor_id();
/* With multiple PCI busses, we play with I/O as physical addrs. */
ioport_resource.end = ~0UL;
iomem_resource.end = ~0UL;
/* PCI DMA Direct Mapping is 1GB at 2GB. */
__direct_map_base = 0x80000000;
__direct_map_size = 0x40000000;
/* Init the PA chip(s). */
titan_init_pachips(TITAN_pachip0, TITAN_pachip1);
/* Check for graphic console location (if any). */
find_console_vga_hose();
}
static void
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
{
port->wsba[0].csr = saved_config[index].wsba[0];
port->wsm[0].csr = saved_config[index].wsm[0];
port->tba[0].csr = saved_config[index].tba[0];
port->wsba[1].csr = saved_config[index].wsba[1];
port->wsm[1].csr = saved_config[index].wsm[1];
port->tba[1].csr = saved_config[index].tba[1];
port->wsba[2].csr = saved_config[index].wsba[2];
port->wsm[2].csr = saved_config[index].wsm[2];
port->tba[2].csr = saved_config[index].tba[2];
port->wsba[3].csr = saved_config[index].wsba[3];
port->wsm[3].csr = saved_config[index].wsm[3];
port->tba[3].csr = saved_config[index].tba[3];
}
static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
if (titan_pchip1_present) {
titan_kill_one_pachip_port(&pachip1->g_port, 1);
titan_kill_one_pachip_port(&pachip1->a_port, 3);
}
titan_kill_one_pachip_port(&pachip0->g_port, 0);
titan_kill_one_pachip_port(&pachip0->a_port, 2);
}
void
titan_kill_arch(int mode)
{
titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}
/*
* IO map support.
*/
void __iomem *
titan_ioportmap(unsigned long addr)
{
FIXUP_IOADDR_VGA(addr);
return (void __iomem *)(addr + TITAN_IO_BIAS);
}
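/*
 * A Titan MMIO cookie encodes the hose in its high bits: the hose index
 * is (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT (constants from
 * core_titan.h), and baddr is the offset within that hose's space.
 */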
void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
unsigned long baddr = addr & ~TITAN_HOSE_MASK;
unsigned long last = baddr + size - 1;
struct pci_controller *hose;
struct vm_struct *area;
unsigned long vaddr;
unsigned long *ptes;
unsigned long pfn;
/*
* Adjust the address and hose, if necessary.
*/
if (pci_vga_hose && __is_mem_vga(addr)) {
h = pci_vga_hose->index;
addr += pci_vga_hose->mem_space->start;
}
/*
* Find the hose.
*/
for (hose = hose_head; hose; hose = hose->next)
if (hose->index == h)
break;
if (!hose)
return NULL;
/*
* Is it direct-mapped?
*/
if ((baddr >= __direct_map_base) &&
((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
return (void __iomem *) vaddr;
}
/*
* Check the scatter-gather arena.
*/
if (hose->sg_pci &&
baddr >= (unsigned long)hose->sg_pci->dma_base &&
last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){
/*
* Adjust the limits (mappings must be page aligned)
*/
baddr -= hose->sg_pci->dma_base;
last -= hose->sg_pci->dma_base;
baddr &= PAGE_MASK;
size = PAGE_ALIGN(last) - baddr;
/*
* Map it
*/
area = get_vm_area(size, VM_IOREMAP);
if (!area) {
printk("ioremap failed... no vm_area...\n");
return NULL;
}
ptes = hose->sg_pci->ptes;
for (vaddr = (unsigned long)area->addr;
baddr <= last;
baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
pfn = ptes[baddr >> PAGE_SHIFT];
if (!(pfn & 1)) {
printk("ioremap failed... pte not valid...\n");
vfree(area->addr);
return NULL;
}
pfn >>= 1; /* make it a true pfn */
if (__alpha_remap_area_pages(vaddr,
pfn << PAGE_SHIFT,
PAGE_SIZE, 0)) {
printk("FAILED to remap_area_pages...\n");
vfree(area->addr);
return NULL;
}
}
flush_tlb_all();
vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
return (void __iomem *) vaddr;
}
/* Assume a legacy (read: VGA) address, and return appropriately. */
return (void __iomem *)(addr + TITAN_MEM_BIAS);
}
void
titan_iounmap(volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
if (addr >= VMALLOC_START)
vfree((void *)(PAGE_MASK & addr));
}
int
titan_is_mmio(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
if (addr >= VMALLOC_START)
return 1;
else
return (addr & 0x100000000UL) == 0;
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioportmap);
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif
/*
* AGP GART Support.
*/
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>
struct titan_agp_aperture {
struct pci_iommu_arena *arena;
long pg_start;
long pg_count;
};
static int
titan_agp_setup(alpha_agp_info *agp)
{
struct titan_agp_aperture *aper;
if (!alpha_agpgart_size)
return -ENOMEM;
aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
if (aper == NULL)
return -ENOMEM;
aper->arena = agp->hose->sg_pci;
aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
aper->pg_count - 1);
if (aper->pg_start < 0) {
printk(KERN_ERR "Failed to reserve AGP memory\n");
kfree(aper);
return -ENOMEM;
}
agp->aperture.bus_base =
aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
agp->aperture.size = aper->pg_count * PAGE_SIZE;
agp->aperture.sysdata = aper;
return 0;
}
static void
titan_agp_cleanup(alpha_agp_info *agp)
{
struct titan_agp_aperture *aper = agp->aperture.sysdata;
int status;
status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
if (status == -EBUSY) {
printk(KERN_WARNING
"Attempted to release bound AGP memory - unbinding\n");
iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
status = iommu_release(aper->arena, aper->pg_start,
aper->pg_count);
}
if (status < 0)
printk(KERN_ERR "Failed to release AGP memory\n");
kfree(aper);
kfree(agp);
}
static int
titan_agp_configure(alpha_agp_info *agp)
{
union TPAchipPCTL pctl;
titan_pachip_port *port = agp->private;
pctl.pctl_q_whole = port->pctl.csr;
/* Side-Band Addressing? */
pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;
/* AGP Rate? */
pctl.pctl_r_bits.apctl_v_agp_rate = 0; /* 1x */
if (agp->mode.bits.rate & 2)
pctl.pctl_r_bits.apctl_v_agp_rate = 1; /* 2x */
#if 0
if (agp->mode.bits.rate & 4)
pctl.pctl_r_bits.apctl_v_agp_rate = 2; /* 4x */
#endif
/* RQ Depth? */
pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;
/*
* AGP Enable.
*/
pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;
/* Tell the user. */
printk("Enabling AGP: %dX%s\n",
1 << pctl.pctl_r_bits.apctl_v_agp_rate,
pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");
/* Write it. */
port->pctl.csr = pctl.pctl_q_whole;
/* And wait at least 5000 66MHz cycles (per Titan spec). */
udelay(100);
return 0;
}
static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
struct titan_agp_aperture *aper = agp->aperture.sysdata;
return iommu_bind(aper->arena, aper->pg_start + pg_start,
mem->page_count, mem->pages);
}
static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
struct titan_agp_aperture *aper = agp->aperture.sysdata;
return iommu_unbind(aper->arena, aper->pg_start + pg_start,
mem->page_count);
}
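/*
 * Arena PTEs keep the pfn shifted left one bit with bit 0 as the valid
 * flag, so a valid entry translates to physical page
 * (pte >> 1) << PAGE_SHIFT; titan_agp_translate() returns that page
 * base address.
 */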
static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
struct titan_agp_aperture *aper = agp->aperture.sysdata;
unsigned long baddr = addr - aper->arena->dma_base;
unsigned long pte;
if (addr < agp->aperture.bus_base ||
addr >= agp->aperture.bus_base + agp->aperture.size) {
printk("%s: addr out of range\n", __func__);
return -EINVAL;
}
pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
if (!(pte & 1)) {
printk("%s: pte not valid\n", __func__);
return -EINVAL;
}
return (pte >> 1) << PAGE_SHIFT;
}
struct alpha_agp_ops titan_agp_ops =
{
.setup = titan_agp_setup,
.cleanup = titan_agp_cleanup,
.configure = titan_agp_configure,
.bind = titan_agp_bind_memory,
.unbind = titan_agp_unbind_memory,
.translate = titan_agp_translate
};
alpha_agp_info *
titan_agp_info(void)
{
alpha_agp_info *agp;
struct pci_controller *hose;
titan_pachip_port *port;
int hosenum = -1;
union TPAchipPCTL pctl;
/*
* Find the AGP port.
*/
port = &TITAN_pachip0->a_port;
if (titan_query_agp(port))
hosenum = 2;
if (hosenum < 0 &&
titan_pchip1_present &&
titan_query_agp(port = &TITAN_pachip1->a_port))
hosenum = 3;
/*
* Find the hose the port is on.
*/
for (hose = hose_head; hose; hose = hose->next)
if (hose->index == hosenum)
break;
if (!hose || !hose->sg_pci)
return NULL;
/*
* Allocate the info structure.
*/
agp = kmalloc(sizeof(*agp), GFP_KERNEL);
if (!agp)
return NULL;
/*
* Fill it in.
*/
agp->hose = hose;
agp->private = port;
agp->ops = &titan_agp_ops;
/*
* Aperture - not configured until ops.setup().
*
* FIXME - should we go ahead and allocate it here?
*/
agp->aperture.bus_base = 0;
agp->aperture.size = 0;
agp->aperture.sysdata = NULL;
/*
* Capabilities.
*/
agp->capability.lw = 0;
agp->capability.bits.rate = 3; /* 2x, 1x */
agp->capability.bits.sba = 1;
agp->capability.bits.rq = 7; /* 8 - 1 */
/*
* Mode.
*/
pctl.pctl_q_whole = port->pctl.csr;
agp->mode.lw = 0;
agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
agp->mode.bits.rq = 7; /* RQ Depth? */
agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;
return agp;
}

481
arch/alpha/kernel/core_tsunami.c Normal file
View file

@ -0,0 +1,481 @@
/*
* linux/arch/alpha/kernel/core_tsunami.c
*
* Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
*
* Code common to all TSUNAMI core logic chips.
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_tsunami.h>
#undef __EXTERN_INLINE
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/vga.h>
#include "proto.h"
#include "pci_impl.h"
/* Save Tsunami configuration data as the console had it set up. */
struct
{
unsigned long wsba[4];
unsigned long wsm[4];
unsigned long tba[4];
} saved_config[2] __attribute__((common));
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the I/O controller does not properly
* handle the system transaction. Another involves timing. Ho hum.
*/
/*
* BIOS32-style PCI interface:
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
/*
* Given a bus, device, and function number, compute the resulting
* configuration space address.  It is not safe to have concurrent
* invocations to configuration space access routines, but there
* really shouldn't be any need for this.
*
* Note that all config space accesses use Type 1 address format.
*
* Note also that type 1 is determined by non-zero bus number.
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 256 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
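/*
 * Illustrative Type 1 example (hypothetical device): bus 1, devfn 0x20
 * (device 4, function 0), where 0x00 encodes as
 * (1 << 16) | (0x20 << 8) | 0 = 0x12000, which is then OR'd into the
 * hose's config_space_base.
 */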
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, unsigned char *type1)
{
struct pci_controller *hose = pbus->sysdata;
unsigned long addr;
u8 bus = pbus->number;
DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
"pci_addr=0x%p, type1=0x%p)\n",
bus, device_fn, where, pci_addr, type1));
if (!pbus->parent) /* No parent means peer PCI bus. */
bus = 0;
*type1 = (bus != 0);
addr = (bus << 16) | (device_fn << 8) | where;
addr |= hose->config_space_base;
*pci_addr = addr;
DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
static int
tsunami_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
*value = __kernel_ldbu(*(vucp)addr);
break;
case 2:
*value = __kernel_ldwu(*(vusp)addr);
break;
case 4:
*value = *(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
tsunami_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
__kernel_stb(value, *(vucp)addr);
mb();
__kernel_ldbu(*(vucp)addr);
break;
case 2:
__kernel_stw(value, *(vusp)addr);
mb();
__kernel_ldwu(*(vusp)addr);
break;
case 4:
*(vuip)addr = value;
mb();
*(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops tsunami_pci_ops =
{
.read = tsunami_read_config,
.write = tsunami_write_config,
};
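/*
 * The targeted invalidate (tlbiv) below matches PCI address bits
 * <31:16>, so it is only usable when start and end lie in the same
 * naturally aligned 64KB region; otherwise we fall back to the
 * invalidate-all register (tlbia).
 */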
void
tsunami_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0;
volatile unsigned long *csr;
unsigned long value;
/* We can invalidate up to 8 tlb entries in a go. The flush
matches against <31:16> in the pci address. */
csr = &pchip->tlbia.csr;
if (((start ^ end) & 0xffff0000) == 0)
csr = &pchip->tlbiv.csr;
/* For TBIA, it doesn't matter what value we write. For TBI,
it's the shifted tag bits. */
value = (start & 0xffff0000) >> 12;
*csr = value;
mb();
*csr;
}
#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
static long __init
tsunami_probe_read(volatile unsigned long *vaddr)
{
long dont_care, probe_result;
int cpu = smp_processor_id();
int s = swpipl(IPL_MCHECK - 1);
mcheck_taken(cpu) = 0;
mcheck_expected(cpu) = 1;
mb();
dont_care = *vaddr;
draina();
mcheck_expected(cpu) = 0;
probe_result = !mcheck_taken(cpu);
mcheck_taken(cpu) = 0;
setipl(s);
printk("dont_care == 0x%lx\n", dont_care);
return probe_result;
}
static long __init
tsunami_probe_write(volatile unsigned long *vaddr)
{
long true_contents, probe_result = 1;
TSUNAMI_cchip->misc.csr |= (1L << 28); /* clear NXM... */
true_contents = *vaddr;
*vaddr = 0;
draina();
if (TSUNAMI_cchip->misc.csr & (1L << 28)) {
int source = (TSUNAMI_cchip->misc.csr >> 29) & 7;
TSUNAMI_cchip->misc.csr |= (1L << 28); /* ...and unlock NXS. */
probe_result = 0;
printk("tsunami_probe_write: unit %d at 0x%016lx\n", source,
(unsigned long)vaddr);
}
if (probe_result)
*vaddr = true_contents;
return probe_result;
}
#else
#define tsunami_probe_read(ADDR) 1
#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
static void __init
tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
{
struct pci_controller *hose;
if (tsunami_probe_read(&pchip->pctl.csr) == 0)
return;
hose = alloc_pci_controller();
if (index == 0)
pci_isa_hose = hose;
hose->io_space = alloc_resource();
hose->mem_space = alloc_resource();
/* This is for userland consumption. For some reason, the 40-bit
PIO bias that we use in the kernel through KSEG didn't work for
the page table based user mappings. So make sure we get the
43-bit PIO bias. */
hose->sparse_mem_base = 0;
hose->sparse_io_base = 0;
hose->dense_mem_base
= (TSUNAMI_MEM(index) & 0xffffffffffL) | 0x80000000000L;
hose->dense_io_base
= (TSUNAMI_IO(index) & 0xffffffffffL) | 0x80000000000L;
hose->config_space_base = TSUNAMI_CONF(index);
hose->index = index;
hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS;
hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE - 1;
hose->io_space->name = pci_io_names[index];
hose->io_space->flags = IORESOURCE_IO;
hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS;
hose->mem_space->end = hose->mem_space->start + 0xffffffff;
hose->mem_space->name = pci_mem_names[index];
hose->mem_space->flags = IORESOURCE_MEM;
if (request_resource(&ioport_resource, hose->io_space) < 0)
printk(KERN_ERR "Failed to request IO on hose %d\n", index);
if (request_resource(&iomem_resource, hose->mem_space) < 0)
printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
/*
* Save the existing PCI window translations. SRM will
* need them when we go to reboot.
*/
saved_config[index].wsba[0] = pchip->wsba[0].csr;
saved_config[index].wsm[0] = pchip->wsm[0].csr;
saved_config[index].tba[0] = pchip->tba[0].csr;
saved_config[index].wsba[1] = pchip->wsba[1].csr;
saved_config[index].wsm[1] = pchip->wsm[1].csr;
saved_config[index].tba[1] = pchip->tba[1].csr;
saved_config[index].wsba[2] = pchip->wsba[2].csr;
saved_config[index].wsm[2] = pchip->wsm[2].csr;
saved_config[index].tba[2] = pchip->tba[2].csr;
saved_config[index].wsba[3] = pchip->wsba[3].csr;
saved_config[index].wsm[3] = pchip->wsm[3].csr;
saved_config[index].tba[3] = pchip->tba[3].csr;
/*
* Set up the PCI to main memory translation windows.
*
* Note: Window 3 is scatter-gather only
*
* Window 0 is scatter-gather 8MB at 8MB (for isa)
* Window 1 is scatter-gather (up to) 1GB at 1GB
* Window 2 is direct access 2GB at 2GB
*
* NOTE: we need the align_entry settings for Acer devices on ES40,
* specifically floppy and IDE when memory is larger than 2GB.
*/
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
/* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */
hose->sg_isa->align_entry = 4;
hose->sg_pci = iommu_arena_new(hose, 0x40000000,
size_for_memory(0x40000000), 0);
hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */
__direct_map_base = 0x80000000;
__direct_map_size = 0x80000000;
pchip->wsba[0].csr = hose->sg_isa->dma_base | 3;
pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
pchip->wsba[1].csr = hose->sg_pci->dma_base | 3;
pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000;
pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);
pchip->wsba[2].csr = 0x80000000 | 1;
pchip->wsm[2].csr = (0x80000000 - 1) & 0xfff00000;
pchip->tba[2].csr = 0;
pchip->wsba[3].csr = 0;
/* Enable the Monster Window to make DAC pci64 possible. */
pchip->pctl.csr |= pctl_m_mwin;
tsunami_pci_tbi(hose, 0, -1);
}
void __iomem *
tsunami_ioportmap(unsigned long addr)
{
FIXUP_IOADDR_VGA(addr);
return (void __iomem *)(addr + TSUNAMI_IO_BIAS);
}
void __iomem *
tsunami_ioremap(unsigned long addr, unsigned long size)
{
FIXUP_MEMADDR_VGA(addr);
return (void __iomem *)(addr + TSUNAMI_MEM_BIAS);
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(tsunami_ioportmap);
EXPORT_SYMBOL(tsunami_ioremap);
#endif
void __init
tsunami_init_arch(void)
{
#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
unsigned long tmp;
/* Ho hum.. init_arch is called before init_IRQ, but we need to be
able to handle machine checks. So install the handler now. */
wrent(entInt, 0);
/* NXMs just don't matter to Tsunami--unless they make it
choke completely. */
tmp = (unsigned long)(TSUNAMI_cchip - 1);
printk("%s: probing bogus address: 0x%016lx\n", __func__, bogus_addr);
printk("\tprobe %s\n",
tsunami_probe_write((unsigned long *)bogus_addr)
? "succeeded" : "failed");
#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
#if 0
printk("%s: CChip registers:\n", __func__);
printk("%s: CSR_CSC 0x%lx\n", __func__, TSUNAMI_cchip->csc.csr);
printk("%s: CSR_MTR 0x%lx\n", __func__, TSUNAMI_cchip.mtr.csr);
printk("%s: CSR_MISC 0x%lx\n", __func__, TSUNAMI_cchip->misc.csr);
printk("%s: CSR_DIM0 0x%lx\n", __func__, TSUNAMI_cchip->dim0.csr);
printk("%s: CSR_DIM1 0x%lx\n", __func__, TSUNAMI_cchip->dim1.csr);
printk("%s: CSR_DIR0 0x%lx\n", __func__, TSUNAMI_cchip->dir0.csr);
printk("%s: CSR_DIR1 0x%lx\n", __func__, TSUNAMI_cchip->dir1.csr);
printk("%s: CSR_DRIR 0x%lx\n", __func__, TSUNAMI_cchip->drir.csr);
printk("%s: DChip registers:\n");
printk("%s: CSR_DSC 0x%lx\n", __func__, TSUNAMI_dchip->dsc.csr);
printk("%s: CSR_STR 0x%lx\n", __func__, TSUNAMI_dchip->str.csr);
printk("%s: CSR_DREV 0x%lx\n", __func__, TSUNAMI_dchip->drev.csr);
#endif
/* With multiple PCI busses, we play with I/O as physical addrs. */
ioport_resource.end = ~0UL;
/* Find how many hoses we have, and initialize them. TSUNAMI
and TYPHOON can have 2, but might only have 1 (DS10). */
tsunami_init_one_pchip(TSUNAMI_pchip0, 0);
if (TSUNAMI_cchip->csc.csr & 1L<<14)
tsunami_init_one_pchip(TSUNAMI_pchip1, 1);
/* Check for graphic console location (if any). */
find_console_vga_hose();
}
static void
tsunami_kill_one_pchip(tsunami_pchip *pchip, int index)
{
pchip->wsba[0].csr = saved_config[index].wsba[0];
pchip->wsm[0].csr = saved_config[index].wsm[0];
pchip->tba[0].csr = saved_config[index].tba[0];
pchip->wsba[1].csr = saved_config[index].wsba[1];
pchip->wsm[1].csr = saved_config[index].wsm[1];
pchip->tba[1].csr = saved_config[index].tba[1];
pchip->wsba[2].csr = saved_config[index].wsba[2];
pchip->wsm[2].csr = saved_config[index].wsm[2];
pchip->tba[2].csr = saved_config[index].tba[2];
pchip->wsba[3].csr = saved_config[index].wsba[3];
pchip->wsm[3].csr = saved_config[index].wsm[3];
pchip->tba[3].csr = saved_config[index].tba[3];
}
void
tsunami_kill_arch(int mode)
{
tsunami_kill_one_pchip(TSUNAMI_pchip0, 0);
if (TSUNAMI_cchip->csc.csr & 1L<<14)
tsunami_kill_one_pchip(TSUNAMI_pchip1, 1);
}
static inline void
tsunami_pci_clr_err_1(tsunami_pchip *pchip)
{
pchip->perror.csr;
pchip->perror.csr = 0x040;
mb();
pchip->perror.csr;
}
static inline void
tsunami_pci_clr_err(void)
{
tsunami_pci_clr_err_1(TSUNAMI_pchip0);
/* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */
if (TSUNAMI_cchip->csc.csr & 1L<<14)
tsunami_pci_clr_err_1(TSUNAMI_pchip1);
}
void
tsunami_machine_check(unsigned long vector, unsigned long la_ptr)
{
/* Clear error before any reporting. */
mb();
mb(); /* magic */
draina();
tsunami_pci_clr_err();
wrmces(0x7);
mb();
process_mcheck_info(vector, la_ptr, "TSUNAMI",
mcheck_expected(smp_processor_id()));
}

657
arch/alpha/kernel/core_wildfire.c Normal file
View file

@ -0,0 +1,657 @@
/*
* linux/arch/alpha/kernel/core_wildfire.c
*
* Wildfire support.
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include "proto.h"
#include "pci_impl.h"
#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1
#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif
#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif
#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff
unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;
void __init
wildfire_init_hose(int qbbno, int hoseno)
{
struct pci_controller *hose;
wildfire_pci *pci;
hose = alloc_pci_controller();
hose->io_space = alloc_resource();
hose->mem_space = alloc_resource();
/* This is for userland consumption. */
hose->sparse_mem_base = 0;
hose->sparse_io_base = 0;
hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);
hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
hose->index = (qbbno << 3) + hoseno;
hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
hose->io_space->name = pci_io_names[hoseno];
hose->io_space->flags = IORESOURCE_IO;
hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS;
hose->mem_space->end = hose->mem_space->start + 0xffffffff;
hose->mem_space->name = pci_mem_names[hoseno];
hose->mem_space->flags = IORESOURCE_MEM;
if (request_resource(&ioport_resource, hose->io_space) < 0)
printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
qbbno, hoseno);
if (request_resource(&iomem_resource, hose->mem_space) < 0)
printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
qbbno, hoseno);
#if DEBUG_DUMP_REGS
wildfire_dump_pci_regs(qbbno, hoseno);
#endif
/*
* Set up the PCI to main memory translation windows.
*
* Note: Window 3 is scatter-gather only
*
* Window 0 is scatter-gather 8MB at 8MB (for isa)
* Window 1 is direct access 1GB at 1GB
* Window 2 is direct access 1GB at 2GB
* Window 3 is scatter-gather 128MB at 3GB
* ??? We ought to scale window 3 memory.
*
*/
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);
pci = WILDFIRE_pci(qbbno, hoseno);
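/*
 * Window register encoding, judging by the setup below: wbase bit 0
 * enables the window and bit 1 selects scatter-gather, so "| 1" is an
 * enabled direct-mapped window and "| 3" an enabled SG window.  wmask
 * encodes the size, e.g. (0x40000000 - 1) & 0xfff00000 = 0x3ff00000
 * for the two 1GB direct windows.
 */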
pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);
pci->pci_window[1].wbase.csr = 0x40000000 | 1;
pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
pci->pci_window[1].tbase.csr = 0;
pci->pci_window[2].wbase.csr = 0x80000000 | 1;
pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
pci->pci_window[2].tbase.csr = 0x40000000;
pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);
wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}
void __init
wildfire_init_pca(int qbbno, int pcano)
{
/* Test for PCA existence first. */
if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
return;
#if DEBUG_DUMP_REGS
wildfire_dump_pca_regs(qbbno, pcano);
#endif
/* Do both hoses of the PCA. */
wildfire_init_hose(qbbno, (pcano << 1) + 0);
wildfire_init_hose(qbbno, (pcano << 1) + 1);
}
void __init
wildfire_init_qbb(int qbbno)
{
int pcano;
/* Test for QBB existence first. */
if (!WILDFIRE_QBB_EXISTS(qbbno))
return;
#if DEBUG_DUMP_REGS
wildfire_dump_qsa_regs(qbbno);
wildfire_dump_qsd_regs(qbbno);
wildfire_dump_iop_regs(qbbno);
wildfire_dump_gp_regs(qbbno);
#endif
/* Init all PCAs here. */
for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
wildfire_init_pca(qbbno, pcano);
}
}
void __init
wildfire_hardware_probe(void)
{
unsigned long temp;
unsigned int hard_qbb, soft_qbb;
wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
wildfire_qsd *qsd;
wildfire_qsa *qsa;
wildfire_iop *iop;
wildfire_gp *gp;
wildfire_ne *ne;
wildfire_fe *fe;
int i;
temp = fast->qsd_whami.csr;
#if 0
printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif
hard_qbb = (temp >> 8) & 7;
soft_qbb = (temp >> 4) & 7;
/* Init the HW configuration variables. */
wildfire_hard_qbb_mask = (1 << hard_qbb);
wildfire_soft_qbb_mask = (1 << soft_qbb);
wildfire_gp_mask = 0;
wildfire_hs_mask = 0;
wildfire_iop_mask = 0;
wildfire_ior_mask = 0;
wildfire_pca_mask = 0;
wildfire_cpu_mask = 0;
wildfire_mem_mask = 0;
memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
/* First, determine which QBBs are present. */
qsa = WILDFIRE_qsa(soft_qbb);
temp = qsa->qsa_qbb_id.csr;
#if 0
printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif
if (temp & 0x40) /* Is there an HS? */
wildfire_hs_mask = 1;
if (temp & 0x20) { /* Is there a GP? */
gp = WILDFIRE_gp(soft_qbb);
temp = 0;
for (i = 0; i < 4; i++) {
temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
i, gp, temp);
#endif
}
for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
if (temp & 8) { /* Is there a QBB? */
soft_qbb = temp & 7;
wildfire_hard_qbb_mask |= (1 << hard_qbb);
wildfire_soft_qbb_mask |= (1 << soft_qbb);
}
temp >>= 4;
}
wildfire_gp_mask = wildfire_soft_qbb_mask;
}
/* Next determine each QBBs resources. */
for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
qsd = WILDFIRE_qsd(soft_qbb);
temp = qsd->qsd_whami.csr;
#if 0
printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
hard_qbb = (temp >> 8) & 7;
wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
wildfire_soft_qbb_map[soft_qbb] = hard_qbb;
qsa = WILDFIRE_qsa(soft_qbb);
temp = qsa->qsa_qbb_pop[0].csr;
#if 0
printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
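/* QSA_QBB_POP_0 packs the CPU and memory population counts as two
   4-bit fields, and each QBB then occupies one nibble of the global
   masks: e.g. a soft QBB 2 with all four CPUs present contributes
   0xf << 8 to wildfire_cpu_mask. */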
wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);
temp = qsa->qsa_qbb_pop[1].csr;
#if 0
printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
wildfire_iop_mask |= (1 << soft_qbb);
wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);
temp = qsa->qsa_qbb_id.csr;
#if 0
printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
if (temp & 0x20)
wildfire_gp_mask |= (1 << soft_qbb);
/* Probe for PCA existence here. */
for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
iop = WILDFIRE_iop(soft_qbb);
ne = WILDFIRE_ne(soft_qbb, i);
fe = WILDFIRE_fe(soft_qbb, i);
if ((iop->iop_hose[i].init.csr & 1) == 1 &&
((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
{
wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
}
}
}
}
#if DEBUG_DUMP_CONFIG
wildfire_dump_hardware_config();
#endif
}
void __init
wildfire_init_arch(void)
{
int qbbno;
/* With multiple PCI buses, we play with I/O as physical addrs. */
ioport_resource.end = ~0UL;
/* Probe the hardware for info about configuration. */
wildfire_hardware_probe();
/* Now init all the found QBBs. */
for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
wildfire_init_qbb(qbbno);
}
/* Normal direct PCI DMA mapping. */
__direct_map_base = 0x40000000UL;
__direct_map_size = 0x80000000UL;
}
void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr)
{
mb();
mb(); /* magic */
draina();
/* FIXME: clear pci errors */
wrmces(0x7);
mb();
process_mcheck_info(vector, la_ptr, "WILDFIRE",
mcheck_expected(smp_processor_id()));
}
void
wildfire_kill_arch(int mode)
{
}
void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
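/* hose->index was packed as (qbbno << 3) + hoseno in
   wildfire_init_hose(), so unpack it the same way. */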
int qbbno = hose->index >> 3;
int hoseno = hose->index & 7;
wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
mb();
pci->pci_flush_tlb.csr; /* reading does the trick */
}
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
unsigned long *pci_addr, unsigned char *type1)
{
struct pci_controller *hose = pbus->sysdata;
unsigned long addr;
u8 bus = pbus->number;
DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
"pci_addr=0x%p, type1=0x%p)\n",
bus, device_fn, where, pci_addr, type1));
if (!pbus->parent) /* No parent means peer PCI bus. */
bus = 0;
*type1 = (bus != 0);
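/* Dense-mapped config cycle address: base | bus<<16 | devfn<<8 | reg.
   E.g. bus 1, devfn 0x20, reg 0x04 lands at offset 0x12004. */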
addr = (bus << 16) | (device_fn << 8) | where;
addr |= hose->config_space_base;
*pci_addr = addr;
DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
return 0;
}
static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
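/* Wildfire config space is dense-mapped, so an ordinary byte, word
   or longword load at the computed address performs the config read;
   vucp/vusp/vuip are the usual volatile pointer typedefs. */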
switch (size) {
case 1:
*value = __kernel_ldbu(*(vucp)addr);
break;
case 2:
*value = __kernel_ldwu(*(vusp)addr);
break;
case 4:
*value = *(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr;
unsigned char type1;
if (mk_conf_addr(bus, devfn, where, &addr, &type1))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
__kernel_stb(value, *(vucp)addr);
mb();
__kernel_ldbu(*(vucp)addr);
break;
case 2:
__kernel_stw(value, *(vusp)addr);
mb();
__kernel_ldwu(*(vusp)addr);
break;
case 4:
*(vuip)addr = value;
mb();
*(vuip)addr;
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops wildfire_pci_ops =
{
.read = wildfire_read_config,
.write = wildfire_write_config,
};
/*
* NUMA Support
*/
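/*
 * Geometry assumed below: 64GB (1 << 36 bytes) of physical address
 * space and at most 4 CPUs per QBB, hence the pa >> 36 and
 * cpuid >> 2 conversions.
 */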
int wildfire_pa_to_nid(unsigned long pa)
{
return pa >> 36;
}
int wildfire_cpuid_to_nid(int cpuid)
{
/* assume 4 CPUs per node */
return cpuid >> 2;
}
unsigned long wildfire_node_mem_start(int nid)
{
/* 64GB per node */
return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}
unsigned long wildfire_node_mem_size(int nid)
{
/* 64GB per node */
return 64UL * 1024 * 1024 * 1024;
}
#if DEBUG_DUMP_REGS
static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
int i;
printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
qbbno, hoseno, pci);
printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
pci->pci_io_addr_ext.csr);
printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);
printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
qbbno, hoseno, pci);
for (i = 0; i < 4; i++) {
printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
pci->pci_window[i].wbase.csr,
pci->pci_window[i].wmask.csr,
pci->pci_window[i].tbase.csr);
}
printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
int i;
printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
qbbno, pcano, pca);
printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
pca->pca_stdio_edge_level.csr);
printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
qbbno, pcano, pca);
for (i = 0; i < 4; i++) {
printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
pca->pca_int[i].target.csr,
pca->pca_int[i].enable.csr);
}
printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_qsa_regs(int qbbno)
{
wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
int i;
printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);
printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);
for (i = 0; i < 5; i++)
printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
i, qsa->qsa_config[i].csr);
for (i = 0; i < 2; i++)
printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
i, qsa->qsa_qbb_pop[i].csr);
printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_qsd_regs(int qbbno)
{
wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);
printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);
printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
qsd->qsd_port_present.csr);
printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n",
qsd->qsd_port_active.csr);
printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
qsd->qsd_fault_ena.csr);
printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
qsd->qsd_cpu_int_ena.csr);
printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
qsd->qsd_mem_config.csr);
printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
qsd->qsd_err_sum.csr);
printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_iop_regs(int qbbno)
{
wildfire_iop *iop = WILDFIRE_iop(qbbno);
int i;
printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);
printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
iop->iop_switch_credits.csr);
printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
iop->iop_hose_credits.csr);
for (i = 0; i < 4; i++)
printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
i, iop->iop_hose[i].init.csr);
for (i = 0; i < 4; i++)
printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
i, iop->iop_dev_int[i].target.csr);
printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_gp_regs(int qbbno)
{
wildfire_gp *gp = WILDFIRE_gp(qbbno);
int i;
printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
for (i = 0; i < 4; i++)
printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
i, gp->gpa_qbb_map[i].csr);
printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
gp->gpa_mem_pop_map.csr);
printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);
printk(KERN_ERR "\n");
}
#endif /* DUMP_REGS */
#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
int i;
printk(KERN_ERR "Probed Hardware Configuration\n");
printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);
printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);
printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);
printk(" hard_qbb_map: ");
for (i = 0; i < WILDFIRE_MAX_QBB; i++)
if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
printk("--- ");
else
printk("%3d ", wildfire_hard_qbb_map[i]);
printk("\n");
printk(" soft_qbb_map: ");
for (i = 0; i < WILDFIRE_MAX_QBB; i++)
if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
printk("--- ");
else
printk("%3d ", wildfire_soft_qbb_map[i]);
printk("\n");
}
#endif /* DUMP_CONFIG */

857
arch/alpha/kernel/entry.S Normal file

@ -0,0 +1,857 @@
/*
* arch/alpha/kernel/entry.S
*
* Kernel entry-points.
*/
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>
.text
.set noat
.cfi_sections .debug_frame
/* Stack offsets. */
#define SP_OFF 184
#define SWITCH_STACK_SIZE 320
.macro CFI_START_OSF_FRAME func
.align 4
.globl \func
.type \func,@function
\func:
.cfi_startproc simple
.cfi_return_column 64
.cfi_def_cfa $sp, 48
.cfi_rel_offset 64, 8
.cfi_rel_offset $gp, 16
.cfi_rel_offset $16, 24
.cfi_rel_offset $17, 32
.cfi_rel_offset $18, 40
.endm
.macro CFI_END_OSF_FRAME func
.cfi_endproc
.size \func, . - \func
.endm
/*
* This defines the normal kernel pt-regs layout.
*
* regs 9-15 preserved by C code
* regs 16-18 saved by PAL-code
* regs 29-30 saved and set up by PAL-code
* JRP - Save regs 16-18 in a special area of the stack, so that
* the palcode-provided values are available to the signal handler.
*/
.macro SAVE_ALL
subq $sp, SP_OFF, $sp
.cfi_adjust_cfa_offset SP_OFF
stq $0, 0($sp)
stq $1, 8($sp)
stq $2, 16($sp)
stq $3, 24($sp)
stq $4, 32($sp)
stq $28, 144($sp)
.cfi_rel_offset $0, 0
.cfi_rel_offset $1, 8
.cfi_rel_offset $2, 16
.cfi_rel_offset $3, 24
.cfi_rel_offset $4, 32
.cfi_rel_offset $28, 144
lda $2, alpha_mv
stq $5, 40($sp)
stq $6, 48($sp)
stq $7, 56($sp)
stq $8, 64($sp)
stq $19, 72($sp)
stq $20, 80($sp)
stq $21, 88($sp)
ldq $2, HAE_CACHE($2)
stq $22, 96($sp)
stq $23, 104($sp)
stq $24, 112($sp)
stq $25, 120($sp)
stq $26, 128($sp)
stq $27, 136($sp)
stq $2, 152($sp)
stq $16, 160($sp)
stq $17, 168($sp)
stq $18, 176($sp)
.cfi_rel_offset $5, 40
.cfi_rel_offset $6, 48
.cfi_rel_offset $7, 56
.cfi_rel_offset $8, 64
.cfi_rel_offset $19, 72
.cfi_rel_offset $20, 80
.cfi_rel_offset $21, 88
.cfi_rel_offset $22, 96
.cfi_rel_offset $23, 104
.cfi_rel_offset $24, 112
.cfi_rel_offset $25, 120
.cfi_rel_offset $26, 128
.cfi_rel_offset $27, 136
.endm
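/*
 * Note that RESTORE_ALL also resynchronizes the HAE: the value saved
 * at 152($sp) by SAVE_ALL is compared against HAE_CACHE and, when
 * they differ, written back to both the cache and the HAE register
 * before returning.
 */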
.macro RESTORE_ALL
lda $19, alpha_mv
ldq $0, 0($sp)
ldq $1, 8($sp)
ldq $2, 16($sp)
ldq $3, 24($sp)
ldq $21, 152($sp)
ldq $20, HAE_CACHE($19)
ldq $4, 32($sp)
ldq $5, 40($sp)
ldq $6, 48($sp)
ldq $7, 56($sp)
subq $20, $21, $20
ldq $8, 64($sp)
beq $20, 99f
ldq $20, HAE_REG($19)
stq $21, HAE_CACHE($19)
stq $21, 0($20)
99: ldq $19, 72($sp)
ldq $20, 80($sp)
ldq $21, 88($sp)
ldq $22, 96($sp)
ldq $23, 104($sp)
ldq $24, 112($sp)
ldq $25, 120($sp)
ldq $26, 128($sp)
ldq $27, 136($sp)
ldq $28, 144($sp)
addq $sp, SP_OFF, $sp
.cfi_restore $0
.cfi_restore $1
.cfi_restore $2
.cfi_restore $3
.cfi_restore $4
.cfi_restore $5
.cfi_restore $6
.cfi_restore $7
.cfi_restore $8
.cfi_restore $19
.cfi_restore $20
.cfi_restore $21
.cfi_restore $22
.cfi_restore $23
.cfi_restore $24
.cfi_restore $25
.cfi_restore $26
.cfi_restore $27
.cfi_restore $28
.cfi_adjust_cfa_offset -SP_OFF
.endm
.macro DO_SWITCH_STACK
bsr $1, do_switch_stack
.cfi_adjust_cfa_offset SWITCH_STACK_SIZE
.cfi_rel_offset $9, 0
.cfi_rel_offset $10, 8
.cfi_rel_offset $11, 16
.cfi_rel_offset $12, 24
.cfi_rel_offset $13, 32
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
/* We don't really care about the FP registers for debugging. */
.endm
.macro UNDO_SWITCH_STACK
bsr $1, undo_switch_stack
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
.cfi_restore $12
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
.endm
/*
* Non-syscall kernel entry points.
*/
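/*
 * Each entry point below derives the current thread_info pointer by
 * masking $sp down to the base of the 16KB kernel stack:
 *	lda $8, 0x3fff; bic $sp, $8, $8
 * e.g. an $sp ending in ...120ba8 masks down to ...120000, and $8
 * then serves as "current" for the C handlers.
 */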
CFI_START_OSF_FRAME entInt
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $19
jsr $31, do_entInt
CFI_END_OSF_FRAME entInt
CFI_START_OSF_FRAME entArith
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $18
jsr $31, do_entArith
CFI_END_OSF_FRAME entArith
CFI_START_OSF_FRAME entMM
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subq $sp, 56, $sp
.cfi_adjust_cfa_offset 56
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
stq $12, 24($sp)
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
.cfi_rel_offset $9, 0
.cfi_rel_offset $10, 8
.cfi_rel_offset $11, 16
.cfi_rel_offset $12, 24
.cfi_rel_offset $13, 32
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
addq $sp, 56, $19
/* handle the fault */
lda $8, 0x3fff
bic $sp, $8, $8
jsr $26, do_page_fault
/* reload the registers after the exception code played. */
ldq $9, 0($sp)
ldq $10, 8($sp)
ldq $11, 16($sp)
ldq $12, 24($sp)
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
addq $sp, 56, $sp
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
.cfi_restore $12
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
/* finish up the syscall as normal. */
br ret_from_sys_call
CFI_END_OSF_FRAME entMM
CFI_START_OSF_FRAME entIF
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $17
jsr $31, do_entIF
CFI_END_OSF_FRAME entIF
CFI_START_OSF_FRAME entUna
lda $sp, -256($sp)
.cfi_adjust_cfa_offset 256
stq $0, 0($sp)
.cfi_rel_offset $0, 0
.cfi_remember_state
ldq $0, 256($sp) /* get PS */
stq $1, 8($sp)
stq $2, 16($sp)
stq $3, 24($sp)
and $0, 8, $0 /* user mode? */
stq $4, 32($sp)
bne $0, entUnaUser /* yup -> do user-level unaligned fault */
stq $5, 40($sp)
stq $6, 48($sp)
stq $7, 56($sp)
stq $8, 64($sp)
stq $9, 72($sp)
stq $10, 80($sp)
stq $11, 88($sp)
stq $12, 96($sp)
stq $13, 104($sp)
stq $14, 112($sp)
stq $15, 120($sp)
/* 16-18 PAL-saved */
stq $19, 152($sp)
stq $20, 160($sp)
stq $21, 168($sp)
stq $22, 176($sp)
stq $23, 184($sp)
stq $24, 192($sp)
stq $25, 200($sp)
stq $26, 208($sp)
stq $27, 216($sp)
stq $28, 224($sp)
mov $sp, $19
stq $gp, 232($sp)
.cfi_rel_offset $1, 1*8
.cfi_rel_offset $2, 2*8
.cfi_rel_offset $3, 3*8
.cfi_rel_offset $4, 4*8
.cfi_rel_offset $5, 5*8
.cfi_rel_offset $6, 6*8
.cfi_rel_offset $7, 7*8
.cfi_rel_offset $8, 8*8
.cfi_rel_offset $9, 9*8
.cfi_rel_offset $10, 10*8
.cfi_rel_offset $11, 11*8
.cfi_rel_offset $12, 12*8
.cfi_rel_offset $13, 13*8
.cfi_rel_offset $14, 14*8
.cfi_rel_offset $15, 15*8
.cfi_rel_offset $19, 19*8
.cfi_rel_offset $20, 20*8
.cfi_rel_offset $21, 21*8
.cfi_rel_offset $22, 22*8
.cfi_rel_offset $23, 23*8
.cfi_rel_offset $24, 24*8
.cfi_rel_offset $25, 25*8
.cfi_rel_offset $26, 26*8
.cfi_rel_offset $27, 27*8
.cfi_rel_offset $28, 28*8
.cfi_rel_offset $29, 29*8
lda $8, 0x3fff
stq $31, 248($sp)
bic $sp, $8, $8
jsr $26, do_entUna
ldq $0, 0($sp)
ldq $1, 8($sp)
ldq $2, 16($sp)
ldq $3, 24($sp)
ldq $4, 32($sp)
ldq $5, 40($sp)
ldq $6, 48($sp)
ldq $7, 56($sp)
ldq $8, 64($sp)
ldq $9, 72($sp)
ldq $10, 80($sp)
ldq $11, 88($sp)
ldq $12, 96($sp)
ldq $13, 104($sp)
ldq $14, 112($sp)
ldq $15, 120($sp)
/* 16-18 PAL-saved */
ldq $19, 152($sp)
ldq $20, 160($sp)
ldq $21, 168($sp)
ldq $22, 176($sp)
ldq $23, 184($sp)
ldq $24, 192($sp)
ldq $25, 200($sp)
ldq $26, 208($sp)
ldq $27, 216($sp)
ldq $28, 224($sp)
ldq $gp, 232($sp)
lda $sp, 256($sp)
.cfi_restore $1
.cfi_restore $2
.cfi_restore $3
.cfi_restore $4
.cfi_restore $5
.cfi_restore $6
.cfi_restore $7
.cfi_restore $8
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
.cfi_restore $12
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_restore $19
.cfi_restore $20
.cfi_restore $21
.cfi_restore $22
.cfi_restore $23
.cfi_restore $24
.cfi_restore $25
.cfi_restore $26
.cfi_restore $27
.cfi_restore $28
.cfi_restore $29
.cfi_adjust_cfa_offset -256
call_pal PAL_rti
.align 4
entUnaUser:
.cfi_restore_state
ldq $0, 0($sp) /* restore original $0 */
lda $sp, 256($sp) /* pop entUna's stack frame */
.cfi_restore $0
.cfi_adjust_cfa_offset -256
SAVE_ALL /* setup normal kernel stack */
lda $sp, -56($sp)
.cfi_adjust_cfa_offset 56
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
stq $12, 24($sp)
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
.cfi_rel_offset $9, 0
.cfi_rel_offset $10, 8
.cfi_rel_offset $11, 16
.cfi_rel_offset $12, 24
.cfi_rel_offset $13, 32
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
lda $8, 0x3fff
addq $sp, 56, $19
bic $sp, $8, $8
jsr $26, do_entUnaUser
ldq $9, 0($sp)
ldq $10, 8($sp)
ldq $11, 16($sp)
ldq $12, 24($sp)
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
lda $sp, 56($sp)
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
.cfi_restore $12
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
br ret_from_sys_call
CFI_END_OSF_FRAME entUna
CFI_START_OSF_FRAME entDbg
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $16
jsr $31, do_entDbg
CFI_END_OSF_FRAME entDbg
/*
* The system call entry point is special. Most importantly, it looks
* like a function call to userspace as far as clobbered registers are
* concerned. We
* do preserve the argument registers (for syscall restarts) and $26
* (for leaf syscall functions).
*
* So much for theory. We don't take advantage of this yet.
*
* Note that a0-a2 are not saved by PALcode as with the other entry points.
*/
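/*
 * Dispatch sketch: v0 carries the syscall number, which is
 * bounds-checked against NR_SYSCALLS with cmpult and scaled by 8
 * with s8addq to index sys_call_table; out-of-range numbers fall
 * through to the not-implemented stub, and a traced task takes the
 * strace path instead.
 */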
.align 4
.globl entSys
.type entSys, @function
.cfi_startproc simple
.cfi_return_column 64
.cfi_def_cfa $sp, 48
.cfi_rel_offset 64, 8
.cfi_rel_offset $gp, 16
entSys:
SAVE_ALL
lda $8, 0x3fff
bic $sp, $8, $8
lda $4, NR_SYSCALLS($31)
stq $16, SP_OFF+24($sp)
lda $5, sys_call_table
lda $27, sys_ni_syscall
cmpult $0, $4, $4
ldl $3, TI_FLAGS($8)
stq $17, SP_OFF+32($sp)
s8addq $0, $5, $5
stq $18, SP_OFF+40($sp)
.cfi_rel_offset $16, SP_OFF+24
.cfi_rel_offset $17, SP_OFF+32
.cfi_rel_offset $18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
and $3, $6, $3
#endif
bne $3, strace
beq $4, 1f
ldq $27, 0($5)
1: jsr $26, ($27), alpha_ni_syscall
ldgp $gp, 0($26)
blt $0, $syscall_error /* the call failed */
stq $0, 0($sp)
stq $31, 72($sp) /* a3=0 => no error */
.align 4
.globl ret_from_sys_call
ret_from_sys_call:
cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
beq $0, ret_to_kernel
ret_to_user:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16, 7
call_pal PAL_swpipl
ldl $17, TI_FLAGS($8)
and $17, _TIF_WORK_MASK, $2
bne $2, work_pending
restore_all:
.cfi_remember_state
RESTORE_ALL
call_pal PAL_rti
ret_to_kernel:
.cfi_restore_state
lda $16, 7
call_pal PAL_swpipl
br restore_all
.align 3
$syscall_error:
/*
* Some system calls (e.g., ptrace) can return arbitrary
* values which might normally be mistaken as error numbers.
* Those functions must zero $0 (v0) directly in the stack
* frame to indicate that a negative return value wasn't an
* error number.
*/
ldq $18, 0($sp) /* old syscall nr (zero if success) */
beq $18, $ret_success
ldq $19, 72($sp) /* .. and this a3 */
subq $31, $0, $0 /* with error in v0 */
addq $31, 1, $1 /* set a3 for errno return */
stq $0, 0($sp)
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
stq $1, 72($sp) /* a3 for return */
br ret_from_sys_call
$ret_success:
stq $0, 0($sp)
stq $31, 72($sp) /* a3=0 => no error */
br ret_from_sys_call
/*
* Do all cleanup when returning from all interrupts and system calls.
*
* Arguments:
* $8: current.
* $17: TI_FLAGS.
* $18: The old syscall number, or zero if this is not a return
* from a syscall that errored and is possibly restartable.
* $19: The old a3 value
*/
.align 4
.type work_pending, @function
work_pending:
and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
bne $2, $work_notifysig
$work_resched:
/*
* We can get here only if we returned from syscall without SIGPENDING
* or got through work_notifysig already. Either case means no syscall
* restarts for us, so let $18 and $19 burn.
*/
jsr $26, schedule
mov 0, $18
br ret_to_user
$work_notifysig:
mov $sp, $16
DO_SWITCH_STACK
jsr $26, do_work_pending
UNDO_SWITCH_STACK
br restore_all
/*
* PTRACE syscall handler
*/
.align 4
.type strace, @function
strace:
/* set up signal stack, call syscall_trace */
DO_SWITCH_STACK
jsr $26, syscall_trace_enter /* returns the syscall number */
UNDO_SWITCH_STACK
/* get the arguments back.. */
ldq $16, SP_OFF+24($sp)
ldq $17, SP_OFF+32($sp)
ldq $18, SP_OFF+40($sp)
ldq $19, 72($sp)
ldq $20, 80($sp)
ldq $21, 88($sp)
/* get the system call pointer.. */
lda $1, NR_SYSCALLS($31)
lda $2, sys_call_table
lda $27, alpha_ni_syscall
cmpult $0, $1, $1
s8addq $0, $2, $2
beq $1, 1f
ldq $27, 0($2)
1: jsr $26, ($27), sys_gettimeofday
ret_from_straced:
ldgp $gp, 0($26)
/* check return.. */
blt $0, $strace_error /* the call failed */
stq $31, 72($sp) /* a3=0 => no error */
$strace_success:
stq $0, 0($sp) /* save return value */
DO_SWITCH_STACK
jsr $26, syscall_trace_leave
UNDO_SWITCH_STACK
br $31, ret_from_sys_call
.align 3
$strace_error:
ldq $18, 0($sp) /* old syscall nr (zero if success) */
beq $18, $strace_success
ldq $19, 72($sp) /* .. and this a3 */
subq $31, $0, $0 /* with error in v0 */
addq $31, 1, $1 /* set a3 for errno return */
stq $0, 0($sp)
stq $1, 72($sp) /* a3 for return */
DO_SWITCH_STACK
mov $18, $9 /* save old syscall number */
mov $19, $10 /* save old a3 */
jsr $26, syscall_trace_leave
mov $9, $18
mov $10, $19
UNDO_SWITCH_STACK
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
br ret_from_sys_call
CFI_END_OSF_FRAME entSys
/*
* Save and restore the switch stack -- aka the balance of the user context.
*/
.align 4
.type do_switch_stack, @function
.cfi_startproc simple
.cfi_return_column 64
.cfi_def_cfa $sp, 0
.cfi_register 64, $1
do_switch_stack:
lda $sp, -SWITCH_STACK_SIZE($sp)
.cfi_adjust_cfa_offset SWITCH_STACK_SIZE
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
stq $12, 24($sp)
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
stq $26, 56($sp)
stt $f0, 64($sp)
stt $f1, 72($sp)
stt $f2, 80($sp)
stt $f3, 88($sp)
stt $f4, 96($sp)
stt $f5, 104($sp)
stt $f6, 112($sp)
stt $f7, 120($sp)
stt $f8, 128($sp)
stt $f9, 136($sp)
stt $f10, 144($sp)
stt $f11, 152($sp)
stt $f12, 160($sp)
stt $f13, 168($sp)
stt $f14, 176($sp)
stt $f15, 184($sp)
stt $f16, 192($sp)
stt $f17, 200($sp)
stt $f18, 208($sp)
stt $f19, 216($sp)
stt $f20, 224($sp)
stt $f21, 232($sp)
stt $f22, 240($sp)
stt $f23, 248($sp)
stt $f24, 256($sp)
stt $f25, 264($sp)
stt $f26, 272($sp)
stt $f27, 280($sp)
mf_fpcr $f0 # get fpcr
stt $f28, 288($sp)
stt $f29, 296($sp)
stt $f30, 304($sp)
stt $f0, 312($sp) # save fpcr in slot of $f31
ldt $f0, 64($sp) # don't let "do_switch_stack" change fp state.
ret $31, ($1), 1
.cfi_endproc
.size do_switch_stack, .-do_switch_stack
.align 4
.type undo_switch_stack, @function
.cfi_startproc simple
.cfi_def_cfa $sp, 0
.cfi_register 64, $1
undo_switch_stack:
ldq $9, 0($sp)
ldq $10, 8($sp)
ldq $11, 16($sp)
ldq $12, 24($sp)
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
ldq $26, 56($sp)
ldt $f30, 312($sp) # get saved fpcr
ldt $f0, 64($sp)
ldt $f1, 72($sp)
ldt $f2, 80($sp)
ldt $f3, 88($sp)
mt_fpcr $f30 # install saved fpcr
ldt $f4, 96($sp)
ldt $f5, 104($sp)
ldt $f6, 112($sp)
ldt $f7, 120($sp)
ldt $f8, 128($sp)
ldt $f9, 136($sp)
ldt $f10, 144($sp)
ldt $f11, 152($sp)
ldt $f12, 160($sp)
ldt $f13, 168($sp)
ldt $f14, 176($sp)
ldt $f15, 184($sp)
ldt $f16, 192($sp)
ldt $f17, 200($sp)
ldt $f18, 208($sp)
ldt $f19, 216($sp)
ldt $f20, 224($sp)
ldt $f21, 232($sp)
ldt $f22, 240($sp)
ldt $f23, 248($sp)
ldt $f24, 256($sp)
ldt $f25, 264($sp)
ldt $f26, 272($sp)
ldt $f27, 280($sp)
ldt $f28, 288($sp)
ldt $f29, 296($sp)
ldt $f30, 304($sp)
lda $sp, SWITCH_STACK_SIZE($sp)
ret $31, ($1), 1
.cfi_endproc
.size undo_switch_stack, .-undo_switch_stack
/*
* The meat of the context switch code.
*/
.align 4
.globl alpha_switch_to
.type alpha_switch_to, @function
.cfi_startproc
alpha_switch_to:
DO_SWITCH_STACK
call_pal PAL_swpctx
lda $8, 0x3fff
UNDO_SWITCH_STACK
bic $sp, $8, $8
mov $17, $0
ret
.cfi_endproc
.size alpha_switch_to, .-alpha_switch_to
/*
* New processes begin life here.
*/
.globl ret_from_fork
.align 4
.ent ret_from_fork
ret_from_fork:
lda $26, ret_from_sys_call
mov $17, $16
jmp $31, schedule_tail
.end ret_from_fork
/*
* ... and new kernel threads - here
*/
.align 4
.globl ret_from_kernel_thread
.ent ret_from_kernel_thread
ret_from_kernel_thread:
mov $17, $16
jsr $26, schedule_tail
mov $9, $27
mov $10, $16
jsr $26, ($9)
mov $31, $19 /* to disable syscall restarts */
br $31, ret_to_user
.end ret_from_kernel_thread
/*
* Special system calls. Most of these are special in that they either
* have to play switch_stack games or in some way use the pt_regs struct.
*/
.macro fork_like name
.align 4
.globl alpha_\name
.ent alpha_\name
alpha_\name:
.prologue 0
bsr $1, do_switch_stack
jsr $26, sys_\name
ldq $26, 56($sp)
lda $sp, SWITCH_STACK_SIZE($sp)
ret
.end alpha_\name
.endm
fork_like fork
fork_like vfork
fork_like clone
.align 4
.globl sys_sigreturn
.ent sys_sigreturn
sys_sigreturn:
.prologue 0
lda $9, ret_from_straced
cmpult $26, $9, $9
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_sigreturn
bne $9, 1f
jsr $26, syscall_trace_leave
1: br $1, undo_switch_stack
br ret_from_sys_call
.end sys_sigreturn
.align 4
.globl sys_rt_sigreturn
.ent sys_rt_sigreturn
sys_rt_sigreturn:
.prologue 0
lda $9, ret_from_straced
cmpult $26, $9, $9
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_rt_sigreturn
bne $9, 1f
jsr $26, syscall_trace_leave
1: br $1, undo_switch_stack
br ret_from_sys_call
.end sys_rt_sigreturn
.align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
alpha_ni_syscall:
.prologue 0
/* Special because it also implements overflow handling via
syscall number 0. And if you recall, zero is a special
trigger for "not an error". Store large non-zero there. */
lda $0, -ENOSYS
unop
stq $0, 0($sp)
ret
.end alpha_ni_syscall

320
arch/alpha/kernel/err_common.c Normal file

@ -0,0 +1,320 @@
/*
* linux/arch/alpha/kernel/err_common.c
*
* Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
*
* Error handling code supporting Alpha systems
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include "err_impl.h"
#include "proto.h"
/*
* err_print_prefix -- error handling print routines should prefix
* all prints with this
*/
char *err_print_prefix = KERN_NOTICE;
/*
* Generic
*/
void
mchk_dump_mem(void *data, size_t length, char **annotation)
{
unsigned long *ldata = data;
size_t i;
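/* Dump one quadword per line.  The optional annotation array is
   NULL-terminated, so annotating quietly stops once it runs out. */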
for (i = 0; (i * sizeof(*ldata)) < length; i++) {
if (annotation && !annotation[i])
annotation = NULL;
printk("%s %08x: %016lx %s\n",
err_print_prefix,
(unsigned)(i * sizeof(*ldata)), ldata[i],
annotation ? annotation[i] : "");
}
}
void
mchk_dump_logout_frame(struct el_common *mchk_header)
{
printk("%s -- Frame Header --\n"
" Frame Size: %d (0x%x) bytes\n"
" Flags: %s%s\n"
" MCHK Code: 0x%x\n"
" Frame Rev: %d\n"
" Proc Offset: 0x%08x\n"
" Sys Offset: 0x%08x\n"
" -- Processor Region --\n",
err_print_prefix,
mchk_header->size, mchk_header->size,
mchk_header->retry ? "RETRY " : "",
mchk_header->err2 ? "SECOND_ERR " : "",
mchk_header->code,
mchk_header->frame_rev,
mchk_header->proc_offset,
mchk_header->sys_offset);
mchk_dump_mem((void *)
((unsigned long)mchk_header + mchk_header->proc_offset),
mchk_header->sys_offset - mchk_header->proc_offset,
NULL);
printk("%s -- System Region --\n", err_print_prefix);
mchk_dump_mem((void *)
((unsigned long)mchk_header + mchk_header->sys_offset),
mchk_header->size - mchk_header->sys_offset,
NULL);
printk("%s -- End of Frame --\n", err_print_prefix);
}
/*
* Console Data Log
*/
/* Data */
static struct el_subpacket_handler *subpacket_handler_list = NULL;
static struct el_subpacket_annotation *subpacket_annotation_list = NULL;
static struct el_subpacket *
el_process_header_subpacket(struct el_subpacket *header)
{
union el_timestamp timestamp;
char *name = "UNKNOWN EVENT";
int packet_count = 0;
int length = 0;
if (header->class != EL_CLASS__HEADER) {
printk("%s** Unexpected header CLASS %d TYPE %d, aborting\n",
err_print_prefix,
header->class, header->type);
return NULL;
}
switch(header->type) {
case EL_TYPE__HEADER__SYSTEM_ERROR_FRAME:
name = "SYSTEM ERROR";
length = header->by_type.sys_err.frame_length;
packet_count =
header->by_type.sys_err.frame_packet_count;
timestamp.as_int = 0;
break;
case EL_TYPE__HEADER__SYSTEM_EVENT_FRAME:
name = "SYSTEM EVENT";
length = header->by_type.sys_event.frame_length;
packet_count =
header->by_type.sys_event.frame_packet_count;
timestamp = header->by_type.sys_event.timestamp;
break;
case EL_TYPE__HEADER__HALT_FRAME:
name = "ERROR HALT";
length = header->by_type.err_halt.frame_length;
packet_count =
header->by_type.err_halt.frame_packet_count;
timestamp = header->by_type.err_halt.timestamp;
break;
case EL_TYPE__HEADER__LOGOUT_FRAME:
name = "LOGOUT FRAME";
length = header->by_type.logout_header.frame_length;
packet_count = 1;
timestamp.as_int = 0;
break;
default: /* Unknown */
printk("%s** Unknown header - CLASS %d TYPE %d, aborting\n",
err_print_prefix,
header->class, header->type);
return NULL;
}
printk("%s*** %s:\n"
" CLASS %d, TYPE %d\n",
err_print_prefix,
name,
header->class, header->type);
el_print_timestamp(&timestamp);
/*
* Process the subpackets
*/
el_process_subpackets(header, packet_count);
/* return the next header */
header = (struct el_subpacket *)
((unsigned long)header + header->length + length);
return header;
}
static struct el_subpacket *
el_process_subpacket_reg(struct el_subpacket *header)
{
struct el_subpacket *next = NULL;
struct el_subpacket_handler *h = subpacket_handler_list;
for (; h && h->class != header->class; h = h->next);
if (h) next = h->handler(header);
return next;
}
void
el_print_timestamp(union el_timestamp *timestamp)
{
if (timestamp->as_int)
printk("%s TIMESTAMP: %d/%d/%02d %d:%02d:%0d\n",
err_print_prefix,
timestamp->b.month, timestamp->b.day,
timestamp->b.year, timestamp->b.hour,
timestamp->b.minute, timestamp->b.second);
}
void
el_process_subpackets(struct el_subpacket *header, int packet_count)
{
struct el_subpacket *subpacket;
int i;
subpacket = (struct el_subpacket *)
((unsigned long)header + header->length);
for (i = 0; subpacket && i < packet_count; i++) {
printk("%sPROCESSING SUBPACKET %d\n", err_print_prefix, i);
subpacket = el_process_subpacket(subpacket);
}
}
struct el_subpacket *
el_process_subpacket(struct el_subpacket *header)
{
struct el_subpacket *next = NULL;
switch(header->class) {
case EL_CLASS__TERMINATION:
/* Termination packet, there are no more */
break;
case EL_CLASS__HEADER:
next = el_process_header_subpacket(header);
break;
default:
if (NULL == (next = el_process_subpacket_reg(header))) {
printk("%s** Unexpected header CLASS %d TYPE %d"
" -- aborting.\n",
err_print_prefix,
header->class, header->type);
}
break;
}
return next;
}
void
el_annotate_subpacket(struct el_subpacket *header)
{
struct el_subpacket_annotation *a;
char **annotation = NULL;
for (a = subpacket_annotation_list; a; a = a->next) {
if (a->class == header->class &&
a->type == header->type &&
a->revision == header->revision) {
/*
* We found the annotation
*/
annotation = a->annotation;
printk("%s %s\n", err_print_prefix, a->description);
break;
}
}
mchk_dump_mem(header, header->length, annotation);
}
static void __init
cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu)
{
struct el_subpacket *header = (struct el_subpacket *)
(IDENT_ADDR | pcpu->console_data_log_pa);
int err;
printk("%s******* CONSOLE DATA LOG FOR CPU %d. *******\n"
"*** Error(s) were logged on a previous boot\n",
err_print_prefix, cpu);
for (err = 0; header && (header->class != EL_CLASS__TERMINATION); err++)
header = el_process_subpacket(header);
/* let the console know it's ok to clear the error(s) at restart */
pcpu->console_data_log_pa = 0;
printk("%s*** %d total error(s) logged\n"
"**** END OF CONSOLE DATA LOG FOR CPU %d ****\n",
err_print_prefix, err, cpu);
}
void __init
cdl_check_console_data_log(void)
{
struct percpu_struct *pcpu;
unsigned long cpu;
for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) {
pcpu = (struct percpu_struct *)
((unsigned long)hwrpb + hwrpb->processor_offset
+ cpu * hwrpb->processor_size);
if (pcpu->console_data_log_pa)
cdl_process_console_data_log(cpu, pcpu);
}
}
int __init
cdl_register_subpacket_annotation(struct el_subpacket_annotation *new)
{
struct el_subpacket_annotation *a = subpacket_annotation_list;
if (a == NULL) subpacket_annotation_list = new;
else {
for (; a->next != NULL; a = a->next) {
if ((a->class == new->class && a->type == new->type) ||
a == new) {
printk("Attempted to re-register "
"subpacket annotation\n");
return -EINVAL;
}
}
a->next = new;
}
new->next = NULL;
return 0;
}
int __init
cdl_register_subpacket_handler(struct el_subpacket_handler *new)
{
struct el_subpacket_handler *h = subpacket_handler_list;
if (h == NULL) subpacket_handler_list = new;
else {
for (; h->next != NULL; h = h->next) {
if (h->class == new->class || h == new) {
printk("Attempted to re-register "
"subpacket handler\n");
return -EINVAL;
}
}
h->next = new;
}
new->next = NULL;
return 0;
}

276
arch/alpha/kernel/err_ev6.c Normal file

@ -0,0 +1,276 @@
/*
* linux/arch/alpha/kernel/err_ev6.c
*
* Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
*
* Error handling code supporting Alpha systems
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev6.h>
#include "err_impl.h"
#include "proto.h"
static int
ev6_parse_ibox(u64 i_stat, int print)
{
int status = MCHK_DISPOSITION_REPORT;
#define EV6__I_STAT__PAR (1UL << 29)
#define EV6__I_STAT__ERRMASK (EV6__I_STAT__PAR)
if (!(i_stat & EV6__I_STAT__ERRMASK))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
if (!print)
return status;
if (i_stat & EV6__I_STAT__PAR)
printk("%s Icache parity error\n", err_print_prefix);
return status;
}
static int
ev6_parse_mbox(u64 mm_stat, u64 d_stat, u64 c_stat, int print)
{
int status = MCHK_DISPOSITION_REPORT;
#define EV6__MM_STAT__DC_TAG_PERR (1UL << 10)
#define EV6__MM_STAT__ERRMASK (EV6__MM_STAT__DC_TAG_PERR)
#define EV6__D_STAT__TPERR_P0 (1UL << 0)
#define EV6__D_STAT__TPERR_P1 (1UL << 1)
#define EV6__D_STAT__ECC_ERR_ST (1UL << 2)
#define EV6__D_STAT__ECC_ERR_LD (1UL << 3)
#define EV6__D_STAT__SEO (1UL << 4)
#define EV6__D_STAT__ERRMASK (EV6__D_STAT__TPERR_P0 | \
EV6__D_STAT__TPERR_P1 | \
EV6__D_STAT__ECC_ERR_ST | \
EV6__D_STAT__ECC_ERR_LD | \
EV6__D_STAT__SEO)
if (!(d_stat & EV6__D_STAT__ERRMASK) &&
!(mm_stat & EV6__MM_STAT__ERRMASK))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
if (!print)
return status;
if (mm_stat & EV6__MM_STAT__DC_TAG_PERR)
printk("%s Dcache tag parity error on probe\n",
err_print_prefix);
if (d_stat & EV6__D_STAT__TPERR_P0)
printk("%s Dcache tag parity error - pipe 0\n",
err_print_prefix);
if (d_stat & EV6__D_STAT__TPERR_P1)
printk("%s Dcache tag parity error - pipe 1\n",
err_print_prefix);
if (d_stat & EV6__D_STAT__ECC_ERR_ST)
printk("%s ECC error occurred on a store\n",
err_print_prefix);
if (d_stat & EV6__D_STAT__ECC_ERR_LD)
printk("%s ECC error occurred on a %s load\n",
err_print_prefix,
c_stat ? "" : "speculative ");
if (d_stat & EV6__D_STAT__SEO)
printk("%s Dcache second error\n", err_print_prefix);
return status;
}
static int
ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
u64 c_stat, u64 c_sts, int print)
{
static const char * const sourcename[] = {
"UNKNOWN", "UNKNOWN", "UNKNOWN",
"MEMORY", "BCACHE", "DCACHE",
"BCACHE PROBE", "BCACHE PROBE"
};
static const char * const streamname[] = { "D", "I" };
static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
int status = MCHK_DISPOSITION_REPORT;
int source = -1, stream = -1, bits = -1;
#define EV6__C_STAT__BC_PERR (0x01)
#define EV6__C_STAT__DC_PERR (0x02)
#define EV6__C_STAT__DSTREAM_MEM_ERR (0x03)
#define EV6__C_STAT__DSTREAM_BC_ERR (0x04)
#define EV6__C_STAT__DSTREAM_DC_ERR (0x05)
#define EV6__C_STAT__PROBE_BC_ERR0 (0x06) /* both 6 and 7 indicate... */
#define EV6__C_STAT__PROBE_BC_ERR1 (0x07) /* ...probe bc error. */
#define EV6__C_STAT__ISTREAM_MEM_ERR (0x0B)
#define EV6__C_STAT__ISTREAM_BC_ERR (0x0C)
#define EV6__C_STAT__DSTREAM_MEM_DBL (0x13)
#define EV6__C_STAT__DSTREAM_BC_DBL (0x14)
#define EV6__C_STAT__ISTREAM_MEM_DBL (0x1B)
#define EV6__C_STAT__ISTREAM_BC_DBL (0x1C)
#define EV6__C_STAT__SOURCE_MEMORY (0x03)
#define EV6__C_STAT__SOURCE_BCACHE (0x04)
#define EV6__C_STAT__SOURCE__S (0)
#define EV6__C_STAT__SOURCE__M (0x07)
#define EV6__C_STAT__ISTREAM__S (3)
#define EV6__C_STAT__ISTREAM__M (0x01)
#define EV6__C_STAT__DOUBLE__S (4)
#define EV6__C_STAT__DOUBLE__M (0x01)
#define EV6__C_STAT__ERRMASK (0x1F)
#define EV6__C_STS__SHARED (1 << 0)
#define EV6__C_STS__DIRTY (1 << 1)
#define EV6__C_STS__VALID (1 << 2)
#define EV6__C_STS__PARITY (1 << 3)
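/*
 * The C_STAT codes above decompose through the __S/__M fields: bits
 * 2:0 give the source, bit 3 the I-stream flag, bit 4 the double-bit
 * flag.  E.g. 0x13 (DSTREAM_MEM_DBL) = double-bit D-stream error
 * sourced from MEMORY.
 */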
if (!(c_stat & EV6__C_STAT__ERRMASK))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
if (!print)
return status;
source = EXTRACT(c_stat, EV6__C_STAT__SOURCE);
stream = EXTRACT(c_stat, EV6__C_STAT__ISTREAM);
bits = EXTRACT(c_stat, EV6__C_STAT__DOUBLE);
if (c_stat & EV6__C_STAT__BC_PERR) {
printk("%s Bcache tag parity error\n", err_print_prefix);
source = -1;
}
if (c_stat & EV6__C_STAT__DC_PERR) {
printk("%s Dcache tag parity error\n", err_print_prefix);
source = -1;
}
if (c_stat == EV6__C_STAT__PROBE_BC_ERR0 ||
c_stat == EV6__C_STAT__PROBE_BC_ERR1) {
printk("%s Bcache single-bit error on a probe hit\n",
err_print_prefix);
source = -1;
}
if (source != -1)
printk("%s %s-STREAM %s-BIT ECC error from %s\n",
err_print_prefix,
streamname[stream], bitsname[bits], sourcename[source]);
printk("%s Address: 0x%016llx\n"
" Syndrome[upper.lower]: %02llx.%02llx\n",
err_print_prefix,
c_addr,
c2_syn, c1_syn);
if (source == EV6__C_STAT__SOURCE_MEMORY ||
source == EV6__C_STAT__SOURCE_BCACHE)
printk("%s Block status: %s%s%s%s\n",
err_print_prefix,
(c_sts & EV6__C_STS__SHARED) ? "SHARED " : "",
(c_sts & EV6__C_STS__DIRTY) ? "DIRTY " : "",
(c_sts & EV6__C_STS__VALID) ? "VALID " : "",
(c_sts & EV6__C_STS__PARITY) ? "PARITY " : "");
return status;
}
void
ev6_register_error_handlers(void)
{
/* None right now. */
}
int
ev6_process_logout_frame(struct el_common *mchk_header, int print)
{
struct el_common_EV6_mcheck *ev6mchk =
(struct el_common_EV6_mcheck *)mchk_header;
int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
status |= ev6_parse_ibox(ev6mchk->I_STAT, print);
status |= ev6_parse_mbox(ev6mchk->MM_STAT, ev6mchk->DC_STAT,
ev6mchk->C_STAT, print);
status |= ev6_parse_cbox(ev6mchk->C_ADDR, ev6mchk->DC1_SYNDROME,
ev6mchk->DC0_SYNDROME, ev6mchk->C_STAT,
ev6mchk->C_STS, print);
if (!print)
return status;
if (status != MCHK_DISPOSITION_DISMISS) {
char *saved_err_prefix = err_print_prefix;
/*
* Dump some additional information from the frame
*/
printk("%s EXC_ADDR: 0x%016lx IER_CM: 0x%016lx"
" ISUM: 0x%016lx\n"
" PAL_BASE: 0x%016lx I_CTL: 0x%016lx"
" PCTX: 0x%016lx\n",
err_print_prefix,
ev6mchk->EXC_ADDR, ev6mchk->IER_CM, ev6mchk->ISUM,
ev6mchk->PAL_BASE, ev6mchk->I_CTL, ev6mchk->PCTX);
if (status == MCHK_DISPOSITION_UNKNOWN_ERROR) {
printk("%s UNKNOWN error, frame follows:\n",
err_print_prefix);
} else {
/* had decode -- downgrade print level for frame */
err_print_prefix = KERN_NOTICE;
}
mchk_dump_logout_frame(mchk_header);
err_print_prefix = saved_err_prefix;
}
return status;
}
void
ev6_machine_check(unsigned long vector, unsigned long la_ptr)
{
struct el_common *mchk_header = (struct el_common *)la_ptr;
/*
* Sync the processor
*/
mb();
draina();
/*
* Parse the logout frame without printing first. If the only error(s)
* found have a disposition of "dismiss", just dismiss them
* and don't print any message.
*/
if (ev6_process_logout_frame(mchk_header, 0) !=
MCHK_DISPOSITION_DISMISS) {
char *saved_err_prefix = err_print_prefix;
err_print_prefix = KERN_CRIT;
/*
* Either a nondismissable error was detected or no
* recognized error was detected in the logout frame
* -- report the error in either case
*/
printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n",
err_print_prefix,
(vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable",
(unsigned int)vector, (int)smp_processor_id());
ev6_process_logout_frame(mchk_header, 1);
dik_show_regs(get_irq_regs(), NULL);
err_print_prefix = saved_err_prefix;
}
/*
* Release the logout frame
*/
wrmces(0x7);
mb();
}

286
arch/alpha/kernel/err_ev7.c Normal file

@ -0,0 +1,286 @@
/*
* linux/arch/alpha/kernel/err_ev7.c
*
* Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
*
* Error handling code supporting Alpha systems
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev7.h>
#include "err_impl.h"
#include "proto.h"
struct ev7_lf_subpackets *
ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr,
struct ev7_lf_subpackets *lf_subpackets)
{
struct el_subpacket *subpacket;
int i;
/*
* A Marvel machine check frame is always packaged in an
* el_subpacket of class HEADER, type LOGOUT_FRAME.
*/
if (el_ptr->class != EL_CLASS__HEADER ||
el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME)
return NULL;
/*
* It is a logout frame header. Look at the one subpacket.
*/
el_ptr = (struct el_subpacket *)
((unsigned long)el_ptr + el_ptr->length);
/*
* It has to be class PAL, type LOGOUT_FRAME.
*/
if (el_ptr->class != EL_CLASS__PAL ||
el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME)
return NULL;
lf_subpackets->logout = (struct ev7_pal_logout_subpacket *)
el_ptr->by_type.raw.data_start;
/*
* Process the subpackets.
*/
subpacket = (struct el_subpacket *)
((unsigned long)el_ptr + el_ptr->length);
for (i = 0;
subpacket && i < lf_subpackets->logout->subpacket_count;
subpacket = (struct el_subpacket *)
((unsigned long)subpacket + subpacket->length), i++) {
/*
* All subpackets should be class PAL.
*/
if (subpacket->class != EL_CLASS__PAL) {
printk("%s**UNEXPECTED SUBPACKET CLASS %d "
"IN LOGOUT FRAME (packet %d\n",
err_print_prefix, subpacket->class, i);
return NULL;
}
/*
* Remember the subpacket.
*/
switch(subpacket->type) {
case EL_TYPE__PAL__EV7_PROCESSOR:
lf_subpackets->ev7 =
(struct ev7_pal_processor_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__EV7_RBOX:
lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__EV7_ZBOX:
lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__EV7_IO:
lf_subpackets->io = (struct ev7_pal_io_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE:
case EL_TYPE__PAL__ENV__AIRMOVER_FAN:
case EL_TYPE__PAL__ENV__VOLTAGE:
case EL_TYPE__PAL__ENV__INTRUSION:
case EL_TYPE__PAL__ENV__POWER_SUPPLY:
case EL_TYPE__PAL__ENV__LAN:
case EL_TYPE__PAL__ENV__HOT_PLUG:
lf_subpackets->env[ev7_lf_env_index(subpacket->type)] =
(struct ev7_pal_environmental_subpacket *)
subpacket->by_type.raw.data_start;
break;
default:
/*
* Don't know what kind of frame this is.
*/
return NULL;
}
}
return lf_subpackets;
}
void
ev7_machine_check(unsigned long vector, unsigned long la_ptr)
{
struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
char *saved_err_prefix = err_print_prefix;
/*
* Sync the processor
*/
mb();
draina();
err_print_prefix = KERN_CRIT;
printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n",
err_print_prefix,
(vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable",
(unsigned int)vector, (int)smp_processor_id());
el_process_subpacket(el_ptr);
err_print_prefix = saved_err_prefix;
/*
* Release the logout frame
*/
wrmces(0x7);
mb();
}
static char *el_ev7_processor_subpacket_annotation[] = {
"Subpacket Header", "I_STAT", "DC_STAT",
"C_ADDR", "C_SYNDROME_1", "C_SYNDROME_0",
"C_STAT", "C_STS", "MM_STAT",
"EXC_ADDR", "IER_CM", "ISUM",
"PAL_BASE", "I_CTL", "PROCESS_CONTEXT",
"CBOX_CTL", "CBOX_STP_CTL", "CBOX_ACC_CTL",
"CBOX_LCL_SET", "CBOX_GLB_SET", "BBOX_CTL",
"BBOX_ERR_STS", "BBOX_ERR_IDX", "CBOX_DDP_ERR_STS",
"BBOX_DAT_RMP", NULL
};
static char *el_ev7_zbox_subpacket_annotation[] = {
"Subpacket Header",
"ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
"ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
"ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR",
"ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL",
"ZBOX(0): reserved / DIFT_ERR_STATUS",
"ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
"ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
"ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR",
"ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL",
"ZBOX(1): reserved / DIFT_ERR_STATUS",
"CBOX_CTL", "CBOX_STP_CTL",
"ZBOX(0)_ERROR_PA", "ZBOX(1)_ERROR_PA",
"ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME",
NULL
};
static char *el_ev7_rbox_subpacket_annotation[] = {
"Subpacket Header", "RBOX_CFG", "RBOX_N_CFG",
"RBOX_S_CFG", "RBOX_E_CFG", "RBOX_W_CFG",
"RBOX_N_ERR", "RBOX_S_ERR", "RBOX_E_ERR",
"RBOX_W_ERR", "RBOX_IO_CFG", "RBOX_IO_ERR",
"RBOX_L_ERR", "RBOX_WHOAMI", "RBOX_IMASL",
"RBOX_INTQ", "RBOX_INT", NULL
};
static char *el_ev7_io_subpacket_annotation[] = {
"Subpacket Header", "IO_ASIC_REV", "IO_SYS_REV",
"IO7_UPH", "HPI_CTL", "CRD_CTL",
"HEI_CTL", "PO7_ERROR_SUM","PO7_UNCRR_SYM",
"PO7_CRRCT_SYM", "PO7_UGBGE_SYM","PO7_ERR_PKT0",
"PO7_ERR_PKT1", "reserved", "reserved",
"PO0_ERR_SUM", "PO0_TLB_ERR", "PO0_SPL_COMPLT",
"PO0_TRANS_SUM", "PO0_FIRST_ERR","PO0_MULT_ERR",
"DM CSR PH", "DM CSR PH", "DM CSR PH",
"DM CSR PH", "reserved",
"PO1_ERR_SUM", "PO1_TLB_ERR", "PO1_SPL_COMPLT",
"PO1_TRANS_SUM", "PO1_FIRST_ERR","PO1_MULT_ERR",
"DM CSR PH", "DM CSR PH", "DM CSR PH",
"DM CSR PH", "reserved",
"PO2_ERR_SUM", "PO2_TLB_ERR", "PO2_SPL_COMPLT",
"PO2_TRANS_SUM", "PO2_FIRST_ERR","PO2_MULT_ERR",
"DM CSR PH", "DM CSR PH", "DM CSR PH",
"DM CSR PH", "reserved",
"PO3_ERR_SUM", "PO3_TLB_ERR", "PO3_SPL_COMPLT",
"PO3_TRANS_SUM", "PO3_FIRST_ERR","PO3_MULT_ERR",
"DM CSR PH", "DM CSR PH", "DM CSR PH",
"DM CSR PH", "reserved",
NULL
};
static struct el_subpacket_annotation el_ev7_pal_annotations[] = {
SUBPACKET_ANNOTATION(EL_CLASS__PAL,
EL_TYPE__PAL__EV7_PROCESSOR,
1,
"EV7 Processor Subpacket",
el_ev7_processor_subpacket_annotation),
SUBPACKET_ANNOTATION(EL_CLASS__PAL,
EL_TYPE__PAL__EV7_ZBOX,
1,
"EV7 ZBOX Subpacket",
el_ev7_zbox_subpacket_annotation),
SUBPACKET_ANNOTATION(EL_CLASS__PAL,
EL_TYPE__PAL__EV7_RBOX,
1,
"EV7 RBOX Subpacket",
el_ev7_rbox_subpacket_annotation),
SUBPACKET_ANNOTATION(EL_CLASS__PAL,
EL_TYPE__PAL__EV7_IO,
1,
"EV7 IO Subpacket",
el_ev7_io_subpacket_annotation)
};
static struct el_subpacket *
ev7_process_pal_subpacket(struct el_subpacket *header)
{
struct ev7_pal_subpacket *packet;
if (header->class != EL_CLASS__PAL) {
printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
err_print_prefix,
header->class, header->type);
return NULL;
}
packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start;
switch(header->type) {
case EL_TYPE__PAL__LOGOUT_FRAME:
printk("%s*** MCHK occurred on LPID %lld (RBOX %llx)\n",
err_print_prefix,
packet->by_type.logout.whami,
packet->by_type.logout.rbox_whami);
el_print_timestamp(&packet->by_type.logout.timestamp);
printk("%s EXC_ADDR: %016llx\n"
" HALT_CODE: %llx\n",
err_print_prefix,
packet->by_type.logout.exc_addr,
packet->by_type.logout.halt_code);
el_process_subpackets(header,
packet->by_type.logout.subpacket_count);
break;
default:
printk("%s ** PAL TYPE %d SUBPACKET\n",
err_print_prefix,
header->type);
el_annotate_subpacket(header);
break;
}
return (struct el_subpacket *)((unsigned long)header + header->length);
}
struct el_subpacket_handler ev7_pal_subpacket_handler =
SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
void __init
ev7_register_error_handlers(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++)
cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
}

87
arch/alpha/kernel/err_impl.h Normal file

@ -0,0 +1,87 @@
/*
* linux/arch/alpha/kernel/err_impl.h
*
* Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
*
* Contains declarations and macros to support Alpha error handling
* implementations.
*/
#include <asm/mce.h>
union el_timestamp;
struct el_subpacket;
struct ev7_lf_subpackets;
struct el_subpacket_annotation {
struct el_subpacket_annotation *next;
u16 class;
u16 type;
u16 revision;
char *description;
char **annotation;
};
#define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)}
struct el_subpacket_handler {
struct el_subpacket_handler *next;
u16 class;
struct el_subpacket *(*handler)(struct el_subpacket *);
};
#define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)}
/*
* Manipulate a field from a register given its name. Defines
* for the LSB (__S - shift count) and bitmask (__M) are required.
*
* EXTRACT(u, f) - extracts the field and places it at bit position 0
* GEN_MASK(f) - creates an in-position mask for the field
*/
#define EXTRACT(u, f) (((u) >> f##__S) & f##__M)
#define GEN_MASK(f) ((u64)f##__M << f##__S)
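/*
 * Example (from err_titan.c): with TITAN__PCHIP_SERROR__SRC__S = 52
 * and TITAN__PCHIP_SERROR__SRC__M = 0x3,
 * EXTRACT(serror, TITAN__PCHIP_SERROR__SRC) returns bits <53:52> and
 * GEN_MASK(TITAN__PCHIP_SERROR__SRC) is 0x3UL << 52.
 */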
/*
* err_common.c
*/
extern char *err_print_prefix;
extern void mchk_dump_mem(void *, size_t, char **);
extern void mchk_dump_logout_frame(struct el_common *);
extern void el_print_timestamp(union el_timestamp *);
extern void el_process_subpackets(struct el_subpacket *, int);
extern struct el_subpacket *el_process_subpacket(struct el_subpacket *);
extern void el_annotate_subpacket(struct el_subpacket *);
extern void cdl_check_console_data_log(void);
extern int cdl_register_subpacket_annotation(struct el_subpacket_annotation *);
extern int cdl_register_subpacket_handler(struct el_subpacket_handler *);
/*
* err_ev7.c
*/
extern struct ev7_lf_subpackets *
ev7_collect_logout_frame_subpackets(struct el_subpacket *,
struct ev7_lf_subpackets *);
extern void ev7_register_error_handlers(void);
extern void ev7_machine_check(unsigned long, unsigned long);
/*
* err_ev6.c
*/
extern void ev6_register_error_handlers(void);
extern int ev6_process_logout_frame(struct el_common *, int);
extern void ev6_machine_check(unsigned long, unsigned long);
/*
* err_marvel.c
*/
extern void marvel_machine_check(unsigned long, unsigned long);
extern void marvel_register_error_handlers(void);
/*
* err_titan.c
*/
extern int titan_process_logout_frame(struct el_common *, int);
extern void titan_machine_check(unsigned long, unsigned long);
extern void titan_register_error_handlers(void);
extern int privateer_process_logout_frame(struct el_common *, int);
extern void privateer_machine_check(unsigned long, unsigned long);

File diff suppressed because it is too large

760
arch/alpha/kernel/err_titan.c Normal file

@ -0,0 +1,760 @@
/*
* linux/arch/alpha/kernel/err_titan.c
*
* Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
*
* Error handling code supporting TITAN systems
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev6.h>
#include <asm/irq_regs.h>
#include "err_impl.h"
#include "proto.h"
static int
titan_parse_c_misc(u64 c_misc, int print)
{
#ifdef CONFIG_VERBOSE_MCHECK
char *src;
int nxs = 0;
#endif
int status = MCHK_DISPOSITION_REPORT;
#define TITAN__CCHIP_MISC__NXM (1UL << 28)
#define TITAN__CCHIP_MISC__NXS__S (29)
#define TITAN__CCHIP_MISC__NXS__M (0x7)
if (!(c_misc & TITAN__CCHIP_MISC__NXM))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
#ifdef CONFIG_VERBOSE_MCHECK
if (!print)
return status;
nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS);
switch(nxs) {
case 0: /* CPU 0 */
case 1: /* CPU 1 */
case 2: /* CPU 2 */
case 3: /* CPU 3 */
src = "CPU";
/* nxs is already the CPU number */
break;
case 4: /* Pchip 0 */
case 5: /* Pchip 1 */
src = "Pchip";
nxs -= 4;
break;
default: /* reserved */
src = "Unknown, NXS =";
/* leave nxs untouched */
break;
}
printk("%s Non-existent memory access from: %s %d\n",
err_print_prefix, src, nxs);
#endif /* CONFIG_VERBOSE_MCHECK */
return status;
}
static int
titan_parse_p_serror(int which, u64 serror, int print)
{
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
static const char * const serror_src[] = {
"GPCI", "APCI", "AGP HP", "AGP LP"
};
static const char * const serror_cmd[] = {
"DMA Read", "DMA RMW", "SGTE Read", "Reserved"
};
#endif /* CONFIG_VERBOSE_MCHECK */
#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
#define TITAN__PCHIP_SERROR__UECC (1UL << 1)
#define TITAN__PCHIP_SERROR__CRE (1UL << 2)
#define TITAN__PCHIP_SERROR__NXIO (1UL << 3)
#define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4)
#define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \
TITAN__PCHIP_SERROR__CRE)
#define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \
TITAN__PCHIP_SERROR__UECC | \
TITAN__PCHIP_SERROR__CRE | \
TITAN__PCHIP_SERROR__NXIO | \
TITAN__PCHIP_SERROR__LOST_CRE)
#define TITAN__PCHIP_SERROR__SRC__S (52)
#define TITAN__PCHIP_SERROR__SRC__M (0x3)
#define TITAN__PCHIP_SERROR__CMD__S (54)
#define TITAN__PCHIP_SERROR__CMD__M (0x3)
#define TITAN__PCHIP_SERROR__SYN__S (56)
#define TITAN__PCHIP_SERROR__SYN__M (0xff)
#define TITAN__PCHIP_SERROR__ADDR__S (15)
#define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL)
if (!(serror & TITAN__PCHIP_SERROR__ERRMASK))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
#ifdef CONFIG_VERBOSE_MCHECK
if (!print)
return status;
printk("%s PChip %d SERROR: %016llx\n",
err_print_prefix, which, serror);
if (serror & TITAN__PCHIP_SERROR__ECCMASK) {
printk("%s %sorrectable ECC Error:\n"
" Source: %-6s Command: %-8s Syndrome: 0x%08x\n"
" Address: 0x%llx\n",
err_print_prefix,
(serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C",
serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)],
serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)],
(unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN),
EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR));
}
if (serror & TITAN__PCHIP_SERROR__NXIO)
printk("%s Non Existent I/O Error\n", err_print_prefix);
if (serror & TITAN__PCHIP_SERROR__LOST_UECC)
printk("%s Lost Uncorrectable ECC Error\n",
err_print_prefix);
if (serror & TITAN__PCHIP_SERROR__LOST_CRE)
printk("%s Lost Correctable ECC Error\n", err_print_prefix);
#endif /* CONFIG_VERBOSE_MCHECK */
return status;
}
static int
titan_parse_p_perror(int which, int port, u64 perror, int print)
{
int cmd;
unsigned long addr;
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
static const char * const perror_cmd[] = {
"Interrupt Acknowledge", "Special Cycle",
"I/O Read", "I/O Write",
"Reserved", "Reserved",
"Memory Read", "Memory Write",
"Reserved", "Reserved",
"Configuration Read", "Configuration Write",
"Memory Read Multiple", "Dual Address Cycle",
"Memory Read Line", "Memory Write and Invalidate"
};
#endif /* CONFIG_VERBOSE_MCHECK */
#define TITAN__PCHIP_PERROR__LOST (1UL << 0)
#define TITAN__PCHIP_PERROR__SERR (1UL << 1)
#define TITAN__PCHIP_PERROR__PERR (1UL << 2)
#define TITAN__PCHIP_PERROR__DCRTO (1UL << 3)
#define TITAN__PCHIP_PERROR__SGE (1UL << 4)
#define TITAN__PCHIP_PERROR__APE (1UL << 5)
#define TITAN__PCHIP_PERROR__TA (1UL << 6)
#define TITAN__PCHIP_PERROR__DPE (1UL << 7)
#define TITAN__PCHIP_PERROR__NDS (1UL << 8)
#define TITAN__PCHIP_PERROR__IPTPR (1UL << 9)
#define TITAN__PCHIP_PERROR__IPTPW (1UL << 10)
#define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \
TITAN__PCHIP_PERROR__SERR | \
TITAN__PCHIP_PERROR__PERR | \
TITAN__PCHIP_PERROR__DCRTO | \
TITAN__PCHIP_PERROR__SGE | \
TITAN__PCHIP_PERROR__APE | \
TITAN__PCHIP_PERROR__TA | \
TITAN__PCHIP_PERROR__DPE | \
TITAN__PCHIP_PERROR__NDS | \
TITAN__PCHIP_PERROR__IPTPR | \
TITAN__PCHIP_PERROR__IPTPW)
#define TITAN__PCHIP_PERROR__DAC (1UL << 47)
#define TITAN__PCHIP_PERROR__MWIN (1UL << 48)
#define TITAN__PCHIP_PERROR__CMD__S (52)
#define TITAN__PCHIP_PERROR__CMD__M (0x0f)
#define TITAN__PCHIP_PERROR__ADDR__S (14)
#define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful)
if (!(perror & TITAN__PCHIP_PERROR__ERRMASK))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD);
addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2;
/*
* Initializing the BIOS on a video card on a bus without
* a south bridge (subtractive decode agent) can result in
* master aborts as the BIOS probes the capabilities of the
* card. XFree86 does such initialization. If the error
* is a master abort (No DevSel as PCI Master) and the command
* is an I/O read or write below the address where we start
* assigning PCI I/O spaces (SRM uses 0x1000), then mark the
* error as dismissible so starting XFree86 doesn't result
* in a series of uncorrectable errors being reported. Also
* dismiss master aborts to VGA frame buffer space
* (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000)
* for the same reason.
*
* Also mark the error dismissible if it looks like the right
* error but only the Lost bit is set. Since the BIOS initialization
* can cause multiple master aborts and the error interrupt can
* be handled on a different CPU than the BIOS code is run on,
* it is possible for a second master abort to occur between the
* time the PALcode reads PERROR and the time it writes PERROR
* to acknowledge the error. If this timing happens, a second
* error will be signalled after the first, and if no additional
* errors occur, will look like a Lost error with no additional
* errors on the same transaction as the previous error.
*/
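/*
 * Decode sketch for the test below, using the perror_cmd[] table above:
 * (cmd & 0xE) == 2 matches commands 2/3 (I/O Read / I/O Write), and
 * (cmd & 0xE) == 6 matches commands 6/7 (Memory Read / Memory Write).
 */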
if (((perror & TITAN__PCHIP_PERROR__NDS) ||
((perror & TITAN__PCHIP_PERROR__ERRMASK) ==
TITAN__PCHIP_PERROR__LOST)) &&
((((cmd & 0xE) == 2) && (addr < 0x1000)) ||
(((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) {
status = MCHK_DISPOSITION_DISMISS;
}
#ifdef CONFIG_VERBOSE_MCHECK
if (!print)
return status;
printk("%s PChip %d %cPERROR: %016llx\n",
err_print_prefix, which,
port ? 'A' : 'G', perror);
if (perror & TITAN__PCHIP_PERROR__IPTPW)
printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__IPTPR)
printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__NDS)
printk("%s No DEVSEL as PCI Master [Master Abort]\n",
err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__DPE)
printk("%s Data Parity Error\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__TA)
printk("%s Target Abort\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__APE)
printk("%s Address Parity Error\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__SGE)
printk("%s Scatter-Gather Error, Invalid PTE\n",
err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__DCRTO)
printk("%s Delayed-Completion Retry Timeout\n",
err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__PERR)
printk("%s PERR Asserted\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__SERR)
printk("%s SERR Asserted\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__LOST)
printk("%s Lost Error\n", err_print_prefix);
printk("%s Command: 0x%x - %s\n"
" Address: 0x%lx\n",
err_print_prefix,
cmd, perror_cmd[cmd],
addr);
if (perror & TITAN__PCHIP_PERROR__DAC)
printk("%s Dual Address Cycle\n", err_print_prefix);
if (perror & TITAN__PCHIP_PERROR__MWIN)
printk("%s Hit in Monster Window\n", err_print_prefix);
#endif /* CONFIG_VERBOSE_MCHECK */
return status;
}
static int
titan_parse_p_agperror(int which, u64 agperror, int print)
{
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
int cmd, len;
unsigned long addr;
static const char * const agperror_cmd[] = {
"Read (low-priority)", "Read (high-priority)",
"Write (low-priority)", "Write (high-priority)",
"Reserved", "Reserved",
"Flush", "Fence"
};
#endif /* CONFIG_VERBOSE_MCHECK */
#define TITAN__PCHIP_AGPERROR__LOST (1UL << 0)
#define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1)
#define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2)
#define TITAN__PCHIP_AGPERROR__RESCMD (1UL << 3)
#define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4)
#define TITAN__PCHIP_AGPERROR__PTP (1UL << 5)
#define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6)
#define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \
TITAN__PCHIP_AGPERROR__LPQFULL | \
TITAN__PCHIP_AGPERROR__HPQFULL | \
TITAN__PCHIP_AGPERROR__RESCMD | \
TITAN__PCHIP_AGPERROR__IPTE | \
TITAN__PCHIP_AGPERROR__PTP | \
TITAN__PCHIP_AGPERROR__NOWINDOW)
#define TITAN__PCHIP_AGPERROR__DAC (1UL << 48)
#define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49)
#define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59)
#define TITAN__PCHIP_AGPERROR__CMD__S (50)
#define TITAN__PCHIP_AGPERROR__CMD__M (0x07)
#define TITAN__PCHIP_AGPERROR__ADDR__S (15)
#define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL)
#define TITAN__PCHIP_AGPERROR__LEN__S (53)
#define TITAN__PCHIP_AGPERROR__LEN__M (0x3f)
if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK))
return MCHK_DISPOSITION_UNKNOWN_ERROR;
#ifdef CONFIG_VERBOSE_MCHECK
if (!print)
return status;
cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD);
addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3;
len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN);
printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix,
which, agperror);
if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW)
printk("%s No Window\n", err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__PTP)
printk("%s Peer-to-Peer set\n", err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__IPTE)
printk("%s Invalid PTE\n", err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__RESCMD)
printk("%s Reserved Command\n", err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL)
printk("%s HP Transaction Received while Queue Full\n",
err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL)
printk("%s LP Transaction Received while Queue Full\n",
err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__LOST)
printk("%s Lost Error\n", err_print_prefix);
printk("%s Command: 0x%x - %s, %d Quadwords%s\n"
" Address: 0x%lx\n",
err_print_prefix, cmd, agperror_cmd[cmd], len,
(agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "",
addr);
if (agperror & TITAN__PCHIP_AGPERROR__DAC)
printk("%s Dual Address Cycle\n", err_print_prefix);
if (agperror & TITAN__PCHIP_AGPERROR__MWIN)
printk("%s Hit in Monster Window\n", err_print_prefix);
#endif /* CONFIG_VERBOSE_MCHECK */
return status;
}
static int
titan_parse_p_chip(int which, u64 serror, u64 gperror,
u64 aperror, u64 agperror, int print)
{
int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
status |= titan_parse_p_serror(which, serror, print);
status |= titan_parse_p_perror(which, 0, gperror, print);
status |= titan_parse_p_perror(which, 1, aperror, print);
status |= titan_parse_p_agperror(which, agperror, print);
return status;
}
int
titan_process_logout_frame(struct el_common *mchk_header, int print)
{
struct el_TITAN_sysdata_mcheck *tmchk =
(struct el_TITAN_sysdata_mcheck *)
((unsigned long)mchk_header + mchk_header->sys_offset);
int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
status |= titan_parse_c_misc(tmchk->c_misc, print);
status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror,
tmchk->p0_aperror, tmchk->p0_agperror,
print);
status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror,
tmchk->p1_aperror, tmchk->p1_agperror,
print);
return status;
}
void
titan_machine_check(unsigned long vector, unsigned long la_ptr)
{
struct el_common *mchk_header = (struct el_common *)la_ptr;
struct el_TITAN_sysdata_mcheck *tmchk =
(struct el_TITAN_sysdata_mcheck *)
((unsigned long)mchk_header + mchk_header->sys_offset);
u64 irqmask;
/*
* Mask of Titan interrupt sources which are reported as machine checks
*
* 63 - CChip Error
* 62 - PChip 0 H_Error
* 61 - PChip 1 H_Error
* 60 - PChip 0 C_Error
* 59 - PChip 1 C_Error
*/
#define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL
/*
* Sync the processor
*/
mb();
draina();
/*
* Only handle system errors here
*/
if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) {
ev6_machine_check(vector, la_ptr);
return;
}
/*
* It's a system error, handle it here
*
* The PALcode has already cleared the error, so just parse it
*/
/*
* Parse the logout frame without printing first. If the only error(s)
* found are classified as "dismissible", then just dismiss them and
* don't print any message.
*/
if (titan_process_logout_frame(mchk_header, 0) !=
MCHK_DISPOSITION_DISMISS) {
char *saved_err_prefix = err_print_prefix;
err_print_prefix = KERN_CRIT;
/*
* Either a non-dismissible error was detected or no
* recognized error was detected in the logout frame
* -- report the error in either case
*/
printk("%s"
"*System %s Error (Vector 0x%x) reported on CPU %d:\n",
err_print_prefix,
(vector == SCB_Q_SYSERR) ? "Correctable" : "Uncorrectable",
(unsigned int)vector, (int)smp_processor_id());
#ifdef CONFIG_VERBOSE_MCHECK
titan_process_logout_frame(mchk_header, alpha_verbose_mcheck);
if (alpha_verbose_mcheck)
dik_show_regs(get_irq_regs(), NULL);
#endif /* CONFIG_VERBOSE_MCHECK */
err_print_prefix = saved_err_prefix;
/*
* Convert any pending interrupts which report as system
* machine checks to interrupts
*/
irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK;
titan_dispatch_irqs(irqmask);
}
/*
* Release the logout frame
*/
wrmces(0x7);
mb();
}
/*
* Subpacket Annotations
*/
static char *el_titan_pchip0_extended_annotation[] = {
"Subpacket Header", "P0_SCTL", "P0_SERREN",
"P0_APCTL", "P0_APERREN", "P0_AGPERREN",
"P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1",
"P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0",
"P0_AWSM1", "P0_AWSM2", "P0_AWSM3",
"P0_ATBA0", "P0_ATBA1", "P0_ATBA2",
"P0_ATBA3", "P0_GPCTL", "P0_GPERREN",
"P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1",
"P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0",
"P0_GWSM1", "P0_GWSM2", "P0_GWSM3",
"P0_GTBA0", "P0_GTBA1", "P0_GTBA2",
"P0_GTBA3", NULL
};
static char *el_titan_pchip1_extended_annotation[] = {
"Subpacket Header", "P1_SCTL", "P1_SERREN",
"P1_APCTL", "P1_APERREN", "P1_AGPERREN",
"P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1",
"P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0",
"P1_AWSM1", "P1_AWSM2", "P1_AWSM3",
"P1_ATBA0", "P1_ATBA1", "P1_ATBA2",
"P1_ATBA3", "P1_GPCTL", "P1_GPERREN",
"P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1",
"P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0",
"P1_GWSM1", "P1_GWSM2", "P1_GWSM3",
"P1_GTBA0", "P1_GTBA1", "P1_GTBA2",
"P1_GTBA3", NULL
};
static char *el_titan_memory_extended_annotation[] = {
"Subpacket Header", "AAR0", "AAR1",
"AAR2", "AAR3", "P0_SCTL",
"P0_GPCTL", "P0_APCTL", "P1_SCTL",
"P1_GPCTL", "P1_SCTL", NULL
};
static struct el_subpacket_annotation el_titan_annotations[] = {
SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED,
1,
"Titan PChip 0 Extended Frame",
el_titan_pchip0_extended_annotation),
SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED,
1,
"Titan PChip 1 Extended Frame",
el_titan_pchip1_extended_annotation),
SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED,
1,
"Titan Memory Extended Frame",
el_titan_memory_extended_annotation),
SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
EL_TYPE__TERMINATION__TERMINATION,
1,
"Termination Subpacket",
NULL)
};
static struct el_subpacket *
el_process_regatta_subpacket(struct el_subpacket *header)
{
if (header->class != EL_CLASS__REGATTA_FAMILY) {
printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
err_print_prefix,
header->class, header->type);
return NULL;
}
switch(header->type) {
case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME:
case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME:
case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME:
case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT:
case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT:
printk("%s ** Occurred on CPU %d:\n",
err_print_prefix,
(int)header->by_type.regatta_frame.cpuid);
privateer_process_logout_frame((struct el_common *)
header->by_type.regatta_frame.data_start, 1);
break;
default:
printk("%s ** REGATTA TYPE %d SUBPACKET\n",
err_print_prefix, header->type);
el_annotate_subpacket(header);
break;
}
return (struct el_subpacket *)((unsigned long)header + header->length);
}
static struct el_subpacket_handler titan_subpacket_handler =
SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY,
el_process_regatta_subpacket);
void __init
titan_register_error_handlers(void)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(el_titan_annotations); i++)
cdl_register_subpacket_annotation(&el_titan_annotations[i]);
cdl_register_subpacket_handler(&titan_subpacket_handler);
ev6_register_error_handlers();
}
/*
* Privateer
*/
static int
privateer_process_680_frame(struct el_common *mchk_header, int print)
{
int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
#ifdef CONFIG_VERBOSE_MCHECK
struct el_PRIVATEER_envdata_mcheck *emchk =
(struct el_PRIVATEER_envdata_mcheck *)
((unsigned long)mchk_header + mchk_header->sys_offset);
/* TODO - categorize errors, for now, no error */
if (!print)
return status;
/* TODO - decode instead of just dumping... */
printk("%s Summary Flags: %016llx\n"
" CChip DIRx: %016llx\n"
" System Management IR: %016llx\n"
" CPU IR: %016llx\n"
" Power Supply IR: %016llx\n"
" LM78 Fault Status: %016llx\n"
" System Doors: %016llx\n"
" Temperature Warning: %016llx\n"
" Fan Control: %016llx\n"
" Fatal Power Down Code: %016llx\n",
err_print_prefix,
emchk->summary,
emchk->c_dirx,
emchk->smir,
emchk->cpuir,
emchk->psir,
emchk->fault,
emchk->sys_doors,
emchk->temp_warn,
emchk->fan_ctrl,
emchk->code);
#endif /* CONFIG_VERBOSE_MCHECK */
return status;
}
int
privateer_process_logout_frame(struct el_common *mchk_header, int print)
{
struct el_common_EV6_mcheck *ev6mchk =
(struct el_common_EV6_mcheck *)mchk_header;
int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
/*
* Machine check codes
*/
#define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */
#define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */
#define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */
#define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */
#define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */
#define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */
#define PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */
#define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */
#define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */
#define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */
switch(ev6mchk->MCHK_Code) {
/*
* Vector 630 - Processor, Correctable
*/
case PRIVATEER_MCHK__CORR_ECC:
case PRIVATEER_MCHK__DC_TAG_PERR:
/*
* Fall through to vector 670 for processing...
*/
/*
* Vector 670 - Processor, Uncorrectable
*/
case PRIVATEER_MCHK__PAL_BUGCHECK:
case PRIVATEER_MCHK__OS_BUGCHECK:
case PRIVATEER_MCHK__PROC_HRD_ERR:
case PRIVATEER_MCHK__ISTREAM_CMOV_PRX:
case PRIVATEER_MCHK__ISTREAM_CMOV_FLT:
status |= ev6_process_logout_frame(mchk_header, print);
break;
/*
* Vector 620 - System, Correctable
*/
case PRIVATEER_MCHK__SYS_CORR_ERR:
/*
* Fall through to vector 660 for processing...
*/
/*
* Vector 660 - System, Uncorrectable
*/
case PRIVATEER_MCHK__SYS_HRD_ERR:
status |= titan_process_logout_frame(mchk_header, print);
break;
/*
* Vector 680 - System, Environmental
*/
case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */
status |= privateer_process_680_frame(mchk_header, print);
break;
/*
* Unknown
*/
default:
status |= MCHK_DISPOSITION_REPORT;
if (print) {
printk("%s** Unknown Error, frame follows\n",
err_print_prefix);
mchk_dump_logout_frame(mchk_header);
}
}
return status;
}
void
privateer_machine_check(unsigned long vector, unsigned long la_ptr)
{
struct el_common *mchk_header = (struct el_common *)la_ptr;
struct el_TITAN_sysdata_mcheck *tmchk =
(struct el_TITAN_sysdata_mcheck *)
(la_ptr + mchk_header->sys_offset);
u64 irqmask;
char *saved_err_prefix = err_print_prefix;
#define PRIVATEER_680_INTERRUPT_MASK (0xE00UL)
#define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL)
/*
* Sync the processor.
*/
mb();
draina();
/*
* Only handle system events here.
*/
if (vector != SCB_Q_SYSEVENT)
return titan_machine_check(vector, la_ptr);
/*
* Report the event - System Events should be reported even if no
* error is indicated since the event could indicate the return
* to normal status.
*/
err_print_prefix = KERN_CRIT;
printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n",
err_print_prefix,
(unsigned int)vector, (int)smp_processor_id());
privateer_process_680_frame(mchk_header, 1);
err_print_prefix = saved_err_prefix;
/*
* Convert any pending interrupts which report as 680 machine
* checks to interrupts.
*/
irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK;
/*
* Dispatch the interrupt(s).
*/
titan_dispatch_irqs(irqmask);
/*
* Release the logout frame.
*/
wrmces(0x7);
mb();
}

49
arch/alpha/kernel/es1888.c Normal file
View file

@ -0,0 +1,49 @@
/*
* linux/arch/alpha/kernel/es1888.c
*
* Init the built-in ES1888 sound chip (SB16 compatible)
*/
#include <linux/init.h>
#include <asm/io.h>
#include "proto.h"
void __init
es1888_init(void)
{
/* Sequence of IO reads to init the audio controller */
inb(0x0229);
inb(0x0229);
inb(0x0229);
inb(0x022b);
inb(0x0229);
inb(0x022b);
inb(0x0229);
inb(0x0229);
inb(0x022b);
inb(0x0229);
inb(0x0220); /* This sets the base address to 0x220 */
/* Sequence to set DMA channels */
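/* Port use below follows the standard Sound Blaster DSP layout for a
   base of 0x220: 0x226 is DSP reset, 0x22c is the DSP write port
   (reading it returns write-buffer status, bit 7 = busy), and 0x22e
   is DSP read-buffer status (bit 7 = data available). */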
outb(0x01, 0x0226); /* reset */
inb(0x0226); /* pause */
outb(0x00, 0x0226); /* release reset */
while (!(inb(0x022e) & 0x80)) /* wait for bit 7 to assert */
continue;
inb(0x022a); /* pause */
outb(0xc6, 0x022c); /* enable extended mode */
inb(0x022a); /* pause, also forces the write */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0xb1, 0x022c); /* setup for write to Interrupt CR */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0x14, 0x022c); /* set IRQ 5 */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0xb2, 0x022c); /* setup for write to DMA CR */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0x18, 0x022c); /* set DMA channel 1 */
inb(0x022c); /* force the write */
}

47
arch/alpha/kernel/gct.c Normal file
View file

@ -0,0 +1,47 @@
/*
* linux/arch/alpha/kernel/gct.c
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/hwrpb.h>
#include <asm/gct.h>
int
gct6_find_nodes(gct6_node *node, gct6_search_struct *search)
{
gct6_search_struct *wanted;
int status = 0;
/* First check the magic number. */
if (node->magic != GCT_NODE_MAGIC) {
printk(KERN_ERR "GCT Node MAGIC incorrect - GCT invalid\n");
return -EINVAL;
}
/* Check against the search struct. */
for (wanted = search;
wanted && (wanted->type | wanted->subtype);
wanted++) {
if (node->type != wanted->type)
continue;
if (node->subtype != wanted->subtype)
continue;
/* Found it -- call out. */
if (wanted->callout)
wanted->callout(node);
}
/* Now walk the tree, siblings first. */
if (node->next)
status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search);
/* Then the children. */
if (node->child)
status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search);
return status;
}
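/*
 * A minimal usage sketch. The type/subtype values, callout name, and
 * root argument below are illustrative only; the real codes and the
 * gct6_search_struct layout come from asm/gct.h. The search list ends
 * with an entry whose type and subtype are both zero:
 *
 *	static void my_callout(gct6_node *node)
 *	{
 *		printk(KERN_INFO "GCT node %d.%d found\n",
 *		       node->type, node->subtype);
 *	}
 *
 *	static gct6_search_struct my_search[] = {
 *		{ .type = 1, .subtype = 2, .callout = my_callout },
 *		{ },
 *	};
 *
 *	gct6_find_nodes(GCT_NODE_PTR(root_offset), my_search);
 */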

98
arch/alpha/kernel/head.S Normal file
View file

@ -0,0 +1,98 @@
/*
* arch/alpha/kernel/head.S
*
* Initial boot stuff. At this point, the bootloader has already
* switched into OSF/1 PAL-code, and loaded us at the correct address
* (START_ADDR). So there isn't much left for us to do: just set up
* the kernel global pointer and jump to the kernel entry-point.
*/
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
#include <asm/setup.h>
__HEAD
.globl _stext
.set noreorder
.globl __start
.ent __start
_stext:
__start:
.prologue 0
br $27,1f
1: ldgp $29,0($27)
/* We need to get current_task_info loaded up... */
lda $8,init_thread_union
/* ... and find our stack ... */
lda $30,0x4000 - SIZEOF_PT_REGS($8)
/* ... and then we can start the kernel. */
jsr $26,start_kernel
call_pal PAL_halt
.end __start
#ifdef CONFIG_SMP
.align 3
.globl __smp_callin
.ent __smp_callin
/* On entry here from SRM console, the HWPCB of the per-cpu
slot for this processor has been loaded. We've arranged
for the UNIQUE value for this process to contain the PCBB
of the target idle task. */
__smp_callin:
.prologue 1
ldgp $29,0($27) # First order of business, load the GP.
call_pal PAL_rduniq # Grab the target PCBB.
mov $0,$16 # Install it.
call_pal PAL_swpctx
lda $8,0x3fff # Find "current".
bic $30,$8,$8
jsr $26,smp_callin
call_pal PAL_halt
.end __smp_callin
#endif /* CONFIG_SMP */
#
# The following two functions are needed for supporting SRM PALcode
# on the PC164 (at least), since that PALcode manages the interrupt
# masking, and we cannot duplicate the effort without causing problems
#
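# A note on the calling convention used below: the irq argument
# arrives in $16 and is copied to $17, then the PAL_cserve function
# code (52 to enable, 53 to disable) is loaded into $16 for call_pal.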
.align 3
.globl cserve_ena
.ent cserve_ena
cserve_ena:
.prologue 0
bis $16,$16,$17
lda $16,52($31)
call_pal PAL_cserve
ret ($26)
.end cserve_ena
.align 3
.globl cserve_dis
.ent cserve_dis
cserve_dis:
.prologue 0
bis $16,$16,$17
lda $16,53($31)
call_pal PAL_cserve
ret ($26)
.end cserve_dis
#
# It is handy, on occasion, to make halt actually just loop.
# Putting it here means we don't have to recompile the whole
# kernel.
#
.align 3
.globl halt
.ent halt
halt:
.prologue 0
call_pal PAL_halt
.end halt

630
arch/alpha/kernel/io.c Normal file
View file

@ -0,0 +1,630 @@
/*
* Alpha IO and memory functions.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/io.h>
/* Out-of-line versions of the i/o routines that redirect into the
platform-specific version. Note that "platform-specific" may mean
"generic", which bumps through the machine vector. */
unsigned int
ioread8(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
unsigned int ioread16(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
unsigned int ioread32(void __iomem *addr)
{
unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
void iowrite8(u8 b, void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
mb();
}
void iowrite16(u16 b, void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
mb();
}
void iowrite32(u32 b, void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
mb();
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite32);
u8 inb(unsigned long port)
{
return ioread8(ioport_map(port, 1));
}
u16 inw(unsigned long port)
{
return ioread16(ioport_map(port, 2));
}
u32 inl(unsigned long port)
{
return ioread32(ioport_map(port, 4));
}
void outb(u8 b, unsigned long port)
{
iowrite8(b, ioport_map(port, 1));
}
void outw(u16 b, unsigned long port)
{
iowrite16(b, ioport_map(port, 2));
}
void outl(u32 b, unsigned long port)
{
iowrite32(b, ioport_map(port, 4));
}
EXPORT_SYMBOL(inb);
EXPORT_SYMBOL(inw);
EXPORT_SYMBOL(inl);
EXPORT_SYMBOL(outb);
EXPORT_SYMBOL(outw);
EXPORT_SYMBOL(outl);
u8 __raw_readb(const volatile void __iomem *addr)
{
return IO_CONCAT(__IO_PREFIX,readb)(addr);
}
u16 __raw_readw(const volatile void __iomem *addr)
{
return IO_CONCAT(__IO_PREFIX,readw)(addr);
}
u32 __raw_readl(const volatile void __iomem *addr)
{
return IO_CONCAT(__IO_PREFIX,readl)(addr);
}
u64 __raw_readq(const volatile void __iomem *addr)
{
return IO_CONCAT(__IO_PREFIX,readq)(addr);
}
void __raw_writeb(u8 b, volatile void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}
void __raw_writew(u16 b, volatile void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}
void __raw_writel(u32 b, volatile void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}
void __raw_writeq(u64 b, volatile void __iomem *addr)
{
IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}
EXPORT_SYMBOL(__raw_readb);
EXPORT_SYMBOL(__raw_readw);
EXPORT_SYMBOL(__raw_readl);
EXPORT_SYMBOL(__raw_readq);
EXPORT_SYMBOL(__raw_writeb);
EXPORT_SYMBOL(__raw_writew);
EXPORT_SYMBOL(__raw_writel);
EXPORT_SYMBOL(__raw_writeq);
u8 readb(const volatile void __iomem *addr)
{
u8 ret = __raw_readb(addr);
mb();
return ret;
}
u16 readw(const volatile void __iomem *addr)
{
u16 ret = __raw_readw(addr);
mb();
return ret;
}
u32 readl(const volatile void __iomem *addr)
{
u32 ret = __raw_readl(addr);
mb();
return ret;
}
u64 readq(const volatile void __iomem *addr)
{
u64 ret = __raw_readq(addr);
mb();
return ret;
}
void writeb(u8 b, volatile void __iomem *addr)
{
__raw_writeb(b, addr);
mb();
}
void writew(u16 b, volatile void __iomem *addr)
{
__raw_writew(b, addr);
mb();
}
void writel(u32 b, volatile void __iomem *addr)
{
__raw_writel(b, addr);
mb();
}
void writeq(u64 b, volatile void __iomem *addr)
{
__raw_writeq(b, addr);
mb();
}
EXPORT_SYMBOL(readb);
EXPORT_SYMBOL(readw);
EXPORT_SYMBOL(readl);
EXPORT_SYMBOL(readq);
EXPORT_SYMBOL(writeb);
EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
*/
void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
{
while ((unsigned long)dst & 0x3) {
if (!count)
return;
count--;
*(unsigned char *)dst = ioread8(port);
dst += 1;
}
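/* Pack four byte reads into one aligned 32-bit store. The bytes are
   assembled low byte first, i.e. little-endian order, which is what
   Alpha uses. */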
while (count >= 4) {
unsigned int w;
count -= 4;
w = ioread8(port);
w |= ioread8(port) << 8;
w |= ioread8(port) << 16;
w |= ioread8(port) << 24;
*(unsigned int *)dst = w;
dst += 4;
}
while (count) {
--count;
*(unsigned char *)dst = ioread8(port);
dst += 1;
}
}
void insb(unsigned long port, void *dst, unsigned long count)
{
ioread8_rep(ioport_map(port, 1), dst, count);
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(insb);
/*
* Read COUNT 16-bit words from port PORT into memory starting at
* SRC. SRC must be at least short aligned. This is used by the
* IDE driver to read disk sectors. Performance is important, but
* the interface seems to be slow: just using the inlined version
* of the inw() breaks things.
*/
void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
{
if (unlikely((unsigned long)dst & 0x3)) {
if (!count)
return;
BUG_ON((unsigned long)dst & 0x1);
count--;
*(unsigned short *)dst = ioread16(port);
dst += 2;
}
while (count >= 2) {
unsigned int w;
count -= 2;
w = ioread16(port);
w |= ioread16(port) << 16;
*(unsigned int *)dst = w;
dst += 4;
}
if (count) {
*(unsigned short*)dst = ioread16(port);
}
}
void insw(unsigned long port, void *dst, unsigned long count)
{
ioread16_rep(ioport_map(port, 2), dst, count);
}
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(insw);
/*
* Read COUNT 32-bit words from port PORT into memory starting at
* SRC. Now works with any alignment in SRC. Performance is important,
* but the interface seems to be slow: just using the inlined version
* of the inl() breaks things.
*/
void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
{
if (unlikely((unsigned long)dst & 0x3)) {
while (count--) {
struct S { int x __attribute__((packed)); };
((struct S *)dst)->x = ioread32(port);
dst += 4;
}
} else {
/* Buffer 32-bit aligned. */
while (count--) {
*(unsigned int *)dst = ioread32(port);
dst += 4;
}
}
}
void insl(unsigned long port, void *dst, unsigned long count)
{
ioread32_rep(ioport_map(port, 4), dst, count);
}
EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(insl);
/*
* Like insb but in the opposite direction.
* Don't worry as much about doing aligned memory transfers:
* doing byte reads the "slow" way isn't nearly as slow as
* doing byte writes the slow way (no r-m-w cycle).
*/
void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
{
const unsigned char *src = xsrc;
while (count--)
iowrite8(*src++, port);
}
void outsb(unsigned long port, const void *src, unsigned long count)
{
iowrite8_rep(ioport_map(port, 1), src, count);
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(outsb);
/*
* Like insw but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Performance is important, but the
* interface seems to be slow: just using the inlined version of the
* outw() breaks things.
*/
void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
{
if (unlikely((unsigned long)src & 0x3)) {
if (!count)
return;
BUG_ON((unsigned long)src & 0x1);
iowrite16(*(unsigned short *)src, port);
src += 2;
--count;
}
while (count >= 2) {
unsigned int w;
count -= 2;
w = *(unsigned int *)src;
src += 4;
iowrite16(w >> 0, port);
iowrite16(w >> 16, port);
}
if (count) {
iowrite16(*(unsigned short *)src, port);
}
}
void outsw(unsigned long port, const void *src, unsigned long count)
{
iowrite16_rep(ioport_map(port, 2), src, count);
}
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(outsw);
/*
* Like insl but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Works with any alignment in SRC.
* Performance is important, but the interface seems to be slow:
* just using the inlined version of the outl() breaks things.
*/
void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
{
if (unlikely((unsigned long)src & 0x3)) {
while (count--) {
struct S { int x __attribute__((packed)); };
iowrite32(((struct S *)src)->x, port);
src += 4;
}
} else {
/* Buffer 32-bit aligned. */
while (count--) {
iowrite32(*(unsigned int *)src, port);
src += 4;
}
}
}
void outsl(unsigned long port, const void *src, unsigned long count)
{
iowrite32_rep(ioport_map(port, 4), src, count);
}
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(outsl);
/*
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
/* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
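/* Bias the count by 8 up front so the loop below can simply test
   count >= 0; the bias is undone once the loop exits. */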
count -= 8;
do {
*(u64 *)to = __raw_readq(from);
count -= 8;
to += 8;
from += 8;
} while (count >= 0);
count += 8;
}
if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
count -= 4;
do {
*(u32 *)to = __raw_readl(from);
count -= 4;
to += 4;
from += 4;
} while (count >= 0);
count += 4;
}
if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
count -= 2;
do {
*(u16 *)to = __raw_readw(from);
count -= 2;
to += 2;
from += 2;
} while (count >= 0);
count += 2;
}
while (count > 0) {
*(u8 *) to = __raw_readb(from);
count--;
to++;
from++;
}
mb();
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
/* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
/* FIXME -- align FROM. */
if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
count -= 8;
do {
__raw_writeq(*(const u64 *)from, to);
count -= 8;
to += 8;
from += 8;
} while (count >= 0);
count += 8;
}
if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
count -= 4;
do {
__raw_writel(*(const u32 *)from, to);
count -= 4;
to += 4;
from += 4;
} while (count >= 0);
count += 4;
}
if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
count -= 2;
do {
__raw_writew(*(const u16 *)from, to);
count -= 2;
to += 2;
from += 2;
} while (count >= 0);
count += 2;
}
while (count > 0) {
__raw_writeb(*(const u8 *) from, to);
count--;
to++;
from++;
}
mb();
}
EXPORT_SYMBOL(memcpy_toio);
/*
* "memset" on IO memory space.
*/
void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
{
/* Handle any initial odd byte */
if (count > 0 && ((u64)to & 1)) {
__raw_writeb(c, to);
to++;
count--;
}
/* Handle any initial odd halfword */
if (count >= 2 && ((u64)to & 2)) {
__raw_writew(c, to);
to += 2;
count -= 2;
}
/* Handle any initial odd word */
if (count >= 4 && ((u64)to & 4)) {
__raw_writel(c, to);
to += 4;
count -= 4;
}
/* Handle all full-sized quadwords: we're aligned
(or have a small count) */
count -= 8;
if (count >= 0) {
do {
__raw_writeq(c, to);
to += 8;
count -= 8;
} while (count >= 0);
}
count += 8;
/* The tail is word-aligned if we still have count >= 4 */
if (count >= 4) {
__raw_writel(c, to);
to += 4;
count -= 4;
}
/* The tail is half-word aligned if we have count >= 2 */
if (count >= 2) {
__raw_writew(c, to);
to += 2;
count -= 2;
}
/* And finally, one last byte.. */
if (count) {
__raw_writeb(c, to);
}
mb();
}
EXPORT_SYMBOL(_memset_c_io);
/* A version of memcpy used by the vga console routines to move data around
arbitrarily between screen and main memory. */
void
scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
const u16 __iomem *ios = (const u16 __iomem *) s;
u16 __iomem *iod = (u16 __iomem *) d;
int s_isio = __is_ioaddr(s);
int d_isio = __is_ioaddr(d);
if (s_isio) {
if (d_isio) {
/* FIXME: Should handle unaligned ops and
operation widening. */
count /= 2;
while (count--) {
u16 tmp = __raw_readw(ios++);
__raw_writew(tmp, iod++);
}
}
else
memcpy_fromio(d, ios, count);
} else {
if (d_isio)
memcpy_toio(iod, s, count);
else
memcpy(d, s, count);
}
}
EXPORT_SYMBOL(scr_memcpyw);
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}
void ioport_unmap(void __iomem *addr)
{
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);

123
arch/alpha/kernel/irq.c Normal file
View file

@ -0,0 +1,123 @@
/*
* linux/arch/alpha/kernel/irq.c
*
* Copyright (C) 1995 Linus Torvalds
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQs should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>
volatile unsigned long irq_err_count;
DEFINE_PER_CPU(unsigned long, irq_pmi_count);
void ack_bad_irq(unsigned int irq)
{
irq_err_count++;
printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}
#ifdef CONFIG_SMP
static char irq_user_affinity[NR_IRQS];
int irq_select_affinity(unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
struct irq_chip *chip;
static int last_cpu;
int cpu = last_cpu + 1;
if (!data)
return 1;
chip = irq_data_get_irq_chip(data);
if (!chip->irq_set_affinity || irq_user_affinity[irq])
return 1;
while (!cpu_possible(cpu) ||
!cpumask_test_cpu(cpu, irq_default_affinity))
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
cpumask_copy(data->affinity, cpumask_of(cpu));
chip->irq_set_affinity(data, cpumask_of(cpu), false);
return 0;
}
#endif /* CONFIG_SMP */
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#ifdef CONFIG_SMP
seq_puts(p, "IPI: ");
for_each_online_cpu(j)
seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
seq_putc(p, '\n');
#endif
seq_puts(p, "PMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
seq_puts(p, " Performance Monitoring\n");
seq_printf(p, "ERR: %10lu\n", irq_err_count);
return 0;
}
/*
* handle_irq handles all normal device IRQs (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
#define MAX_ILLEGAL_IRQS 16
void
handle_irq(int irq)
{
/*
* We ack quickly, we don't want the irq controller
* thinking we're snobs just because some other CPU has
* disabled global interrupts (we have already done the
* INT_ACK cycles, it's too late to try to pretend to the
* controller that we aren't taking the interrupt).
*
* A 0 return value means that this irq is already being
* handled by some other CPU (or is disabled).
*/
static unsigned int illegal_count = 0;
struct irq_desc *desc = irq_to_desc(irq);
if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
illegal_count < MAX_ILLEGAL_IRQS)) {
irq_err_count++;
illegal_count++;
printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
irq);
return;
}
irq_enter();
generic_handle_irq_desc(irq, desc);
irq_exit();
}

243
arch/alpha/kernel/irq_alpha.c Normal file
View file

@ -0,0 +1,243 @@
/*
* Alpha specific irq code.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <asm/machvec.h>
#include <asm/dma.h>
#include <asm/perf_event.h>
#include <asm/mce.h>
#include "proto.h"
#include "irq_impl.h"
/* Hack minimum IPL during interrupt processing for broken hardware. */
#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
int __min_ipl;
EXPORT_SYMBOL(__min_ipl);
#endif
/*
* Performance counter hook. A module can override this to
* do something useful.
*/
static void
dummy_perf(unsigned long vector, struct pt_regs *regs)
{
irq_err_count++;
printk(KERN_CRIT "Performance counter interrupt!\n");
}
void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
EXPORT_SYMBOL(perf_irq);
/*
* The main interrupt entry point.
*/
asmlinkage void
do_entInt(unsigned long type, unsigned long vector,
unsigned long la_ptr, struct pt_regs *regs)
{
struct pt_regs *old_regs;
/*
* Disable interrupts during IRQ handling.
* Note that there is no matching local_irq_enable() due to
* severe problems with RTI at IPL0 and some MILO PALcode
* (namely LX164).
*/
local_irq_disable();
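/*
 * Interrupt "type" codes delivered by the PALcode, as dispatched
 * below: 0 = interprocessor, 1 = clock, 2 = machine check,
 * 3 = I/O device, 4 = performance counter.
 */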
switch (type) {
case 0:
#ifdef CONFIG_SMP
handle_ipi(regs);
return;
#else
irq_err_count++;
printk(KERN_CRIT "Interprocessor interrupt? "
"You must be kidding!\n");
#endif
break;
case 1:
old_regs = set_irq_regs(regs);
handle_irq(RTC_IRQ);
set_irq_regs(old_regs);
return;
case 2:
old_regs = set_irq_regs(regs);
alpha_mv.machine_check(vector, la_ptr);
set_irq_regs(old_regs);
return;
case 3:
old_regs = set_irq_regs(regs);
alpha_mv.device_interrupt(vector);
set_irq_regs(old_regs);
return;
case 4:
perf_irq(la_ptr, regs);
return;
default:
printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
type, vector);
}
printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
}
void __init
common_init_isa_dma(void)
{
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(0, DMA1_CLR_MASK_REG);
outb(0, DMA2_CLR_MASK_REG);
}
void __init
init_IRQ(void)
{
/* Just in case the platform init_irq() causes interrupts/mchecks
(as is the case with RAWHIDE, at least). */
wrent(entInt, 0);
alpha_mv.init_irq();
}
/*
* machine error checks
*/
#define MCHK_K_TPERR 0x0080
#define MCHK_K_TCPERR 0x0082
#define MCHK_K_HERR 0x0084
#define MCHK_K_ECC_C 0x0086
#define MCHK_K_ECC_NC 0x0088
#define MCHK_K_OS_BUGCHECK 0x008A
#define MCHK_K_PAL_BUGCHECK 0x0090
#ifndef CONFIG_SMP
struct mcheck_info __mcheck_info;
#endif
void
process_mcheck_info(unsigned long vector, unsigned long la_ptr,
const char *machine, int expected)
{
struct el_common *mchk_header;
const char *reason;
/*
* See if the machine check is due to a badaddr() and if so,
* ignore it.
*/
#ifdef CONFIG_VERBOSE_MCHECK
if (alpha_verbose_mcheck > 1) {
printk(KERN_CRIT "%s machine check %s\n", machine,
expected ? "expected." : "NOT expected!!!");
}
#endif
if (expected) {
int cpu = smp_processor_id();
mcheck_expected(cpu) = 0;
mcheck_taken(cpu) = 1;
return;
}
mchk_header = (struct el_common *)la_ptr;
printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%x\n",
machine, vector, get_irq_regs()->pc, mchk_header->code);
switch (mchk_header->code) {
/* Machine check reasons. Defined according to PALcode sources. */
case 0x80: reason = "tag parity error"; break;
case 0x82: reason = "tag control parity error"; break;
case 0x84: reason = "generic hard error"; break;
case 0x86: reason = "correctable ECC error"; break;
case 0x88: reason = "uncorrectable ECC error"; break;
case 0x8A: reason = "OS-specific PAL bugcheck"; break;
case 0x90: reason = "callsys in kernel mode"; break;
case 0x96: reason = "i-cache read retryable error"; break;
case 0x98: reason = "processor detected hard error"; break;
/* System specific (these are for Alcor, at least): */
case 0x202: reason = "system detected hard error"; break;
case 0x203: reason = "system detected uncorrectable ECC error"; break;
case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
case 0x205: reason = "parity error detected by core logic"; break;
case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
case 0x207: reason = "non-existent memory error"; break;
case 0x208: reason = "MCHK_K_DCSR"; break;
case 0x209: reason = "PCI SERR detected"; break;
case 0x20b: reason = "PCI data parity error detected"; break;
case 0x20d: reason = "PCI address parity error detected"; break;
case 0x20f: reason = "PCI master abort error"; break;
case 0x211: reason = "PCI target abort error"; break;
case 0x213: reason = "scatter/gather PTE invalid error"; break;
case 0x215: reason = "flash ROM write error"; break;
case 0x217: reason = "IOA timeout detected"; break;
case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
case 0x21b: reason = "EISA fail-safe timer timeout"; break;
case 0x21d: reason = "EISA bus time-out"; break;
case 0x21f: reason = "EISA software generated NMI"; break;
case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
default: reason = "unknown"; break;
}
printk(KERN_CRIT "machine check type: %s%s\n",
reason, mchk_header->retry ? " (retryable)" : "");
dik_show_regs(get_irq_regs(), NULL);
#ifdef CONFIG_VERBOSE_MCHECK
if (alpha_verbose_mcheck > 1) {
/* Dump the logout area to give all info. */
unsigned long *ptr = (unsigned long *)la_ptr;
long i;
for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
printk(KERN_CRIT " +%8lx %016lx %016lx\n",
i*sizeof(long), ptr[i], ptr[i+1]);
}
}
#endif /* CONFIG_VERBOSE_MCHECK */
}
/*
* The special RTC interrupt type. The interrupt itself was
* processed by PALcode, and comes in via entInt vector 1.
*/
struct irqaction timer_irqaction = {
.handler = rtc_timer_interrupt,
.name = "timer",
};
void __init
init_rtc_irq(void)
{
irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
handle_percpu_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
/* Dummy irqactions. */
struct irqaction isa_cascade_irqaction = {
.handler = no_action,
.name = "isa-cascade"
};
struct irqaction timer_cascade_irqaction = {
.handler = no_action,
.name = "timer-cascade"
};
struct irqaction halt_switch_irqaction = {
.handler = no_action,
.name = "halt-switch"
};

166
arch/alpha/kernel/irq_i8259.c Normal file
View file

@ -0,0 +1,166 @@
/*
* linux/arch/alpha/kernel/irq_i8259.c
*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
* present in the majority of PC/AT boxes.
*
* Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c.
*/
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include "proto.h"
#include "irq_impl.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned int cached_irq_mask = 0xffff;
static DEFINE_SPINLOCK(i8259_irq_lock);
static inline void
i8259_update_irq_hw(unsigned int irq, unsigned long mask)
{
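/* IRQs 0-7 are masked at the master PIC (port 0x21), IRQs 8-15 at
   the slave (port 0xA1); only the relevant mask byte is written. */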
int port = 0x21;
if (irq & 8) mask >>= 8;
if (irq & 8) port = 0xA1;
outb(mask, port);
}
inline void
i8259a_enable_irq(struct irq_data *d)
{
spin_lock(&i8259_irq_lock);
i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
spin_unlock(&i8259_irq_lock);
}
static inline void
__i8259a_disable_irq(unsigned int irq)
{
i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
}
void
i8259a_disable_irq(struct irq_data *d)
{
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(d->irq);
spin_unlock(&i8259_irq_lock);
}
void
i8259a_mask_and_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(irq);
/* Ack the interrupt making it the lowest priority. */
if (irq >= 8) {
outb(0xE0 | (irq - 8), 0xa0); /* ack the slave */
irq = 2;
}
outb(0xE0 | irq, 0x20); /* ack the master */
spin_unlock(&i8259_irq_lock);
}
struct irq_chip i8259a_irq_type = {
.name = "XT-PIC",
.irq_unmask = i8259a_enable_irq,
.irq_mask = i8259a_disable_irq,
.irq_mask_ack = i8259a_mask_and_ack_irq,
};
void __init
init_i8259a_irqs(void)
{
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
};
long i;
outb(0xff, 0x21); /* mask all of 8259A-1 */
outb(0xff, 0xA1); /* mask all of 8259A-2 */
for (i = 0; i < 16; i++) {
irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
}
setup_irq(2, &cascade);
}
#if defined(CONFIG_ALPHA_GENERIC)
# define IACK_SC alpha_mv.iack_sc
#elif defined(CONFIG_ALPHA_APECS)
# define IACK_SC APECS_IACK_SC
#elif defined(CONFIG_ALPHA_LCA)
# define IACK_SC LCA_IACK_SC
#elif defined(CONFIG_ALPHA_CIA)
# define IACK_SC CIA_IACK_SC
#elif defined(CONFIG_ALPHA_PYXIS)
# define IACK_SC PYXIS_IACK_SC
#elif defined(CONFIG_ALPHA_TITAN)
# define IACK_SC TITAN_IACK_SC
#elif defined(CONFIG_ALPHA_TSUNAMI)
# define IACK_SC TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_IRONGATE)
# define IACK_SC IRONGATE_IACK_SC
#endif
/* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since
sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason. */
#if defined(IACK_SC)
void
isa_device_interrupt(unsigned long vector)
{
/*
* Generate a PCI interrupt acknowledge cycle. The PIC will
* respond with the interrupt vector of the highest priority
* interrupt that is pending. The PALcode sets up the
* interrupt vectors such that irq level L generates vector L.
*/
int j = *(vuip) IACK_SC;
j &= 0xff;
handle_irq(j);
}
#endif
#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
void
isa_no_iack_sc_device_interrupt(unsigned long vector)
{
unsigned long pic;
/*
* It seems to me that the probability of two or more *device*
* interrupts occurring at almost exactly the same time is
* pretty low. So why pay the price of checking for
* additional interrupts here if the common case can be
* handled so much easier?
*/
/*
* The first read gives you *all* interrupting lines. Therefore,
* read the mask register and AND out those lines that are not
* enabled. Note that some documentation describes ports 0x21 and
* 0xA1 as write only. This is not true.
*/
pic = inb(0x20) | (inb(0xA0) << 8); /* read isr */
pic &= 0xFFFB; /* mask out cascade & hibits */
while (pic) {
int j = ffz(~pic);
pic &= pic - 1;
handle_irq(j);
}
}
#endif

40
arch/alpha/kernel/irq_impl.h Normal file
View file

@ -0,0 +1,40 @@
/*
* linux/arch/alpha/kernel/irq_impl.h
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1998, 2000 Richard Henderson
*
* This file contains declarations and inline functions for interfacing
* with the IRQ handling routines in irq.c.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/profile.h>
#define RTC_IRQ 8
extern void isa_device_interrupt(unsigned long);
extern void isa_no_iack_sc_device_interrupt(unsigned long);
extern void srm_device_interrupt(unsigned long);
extern void pyxis_device_interrupt(unsigned long);
extern struct irqaction timer_irqaction;
extern struct irqaction isa_cascade_irqaction;
extern struct irqaction timer_cascade_irqaction;
extern struct irqaction halt_switch_irqaction;
extern void init_srm_irqs(long, unsigned long);
extern void init_pyxis_irqs(unsigned long);
extern void init_rtc_irq(void);
extern void common_init_isa_dma(void);
extern void i8259a_enable_irq(struct irq_data *d);
extern void i8259a_disable_irq(struct irq_data *d);
extern void i8259a_mask_and_ack_irq(struct irq_data *d);
extern struct irq_chip i8259a_irq_type;
extern void init_i8259a_irqs(void);
extern void handle_irq(int irq);

110
arch/alpha/kernel/irq_pyxis.c Normal file
View file

@ -0,0 +1,110 @@
/*
* linux/arch/alpha/kernel/irq_pyxis.c
*
* Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
*
* IRQ Code common to all PYXIS core logic chips.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/core_cia.h>
#include "proto.h"
#include "irq_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
static inline void
pyxis_update_irq_hw(unsigned long mask)
{
*(vulp)PYXIS_INT_MASK = mask;
mb();
*(vulp)PYXIS_INT_MASK;
}
static inline void
pyxis_enable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
pyxis_disable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
pyxis_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit = 1UL << (d->irq - 16);
unsigned long mask = cached_irq_mask &= ~bit;
/* Disable the interrupt. */
*(vulp)PYXIS_INT_MASK = mask;
wmb();
/* Ack PYXIS PCI interrupt. */
*(vulp)PYXIS_INT_REQ = bit;
mb();
/* Re-read to force both writes. */
*(vulp)PYXIS_INT_MASK;
}
static struct irq_chip pyxis_irq_type = {
.name = "PYXIS",
.irq_mask_ack = pyxis_mask_and_ack_irq,
.irq_mask = pyxis_disable_irq,
.irq_unmask = pyxis_enable_irq,
};
void
pyxis_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary register of PYXIS */
pld = *(vulp)PYXIS_INT_REQ;
pld &= cached_irq_mask;
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
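/* ffz(~pld) returns the index of the lowest set bit in pld. */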
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 7)
isa_device_interrupt(vector);
else
handle_irq(16+i);
}
}
void __init
init_pyxis_irqs(unsigned long ignore_mask)
{
long i;
*(vulp)PYXIS_INT_MASK = 0; /* disable all */
*(vulp)PYXIS_INT_REQ = -1; /* flush all */
mb();
/* Send -INTA pulses to clear any pending interrupts ... */
*(vuip) CIA_IACK_SC;
for (i = 16; i < 48; ++i) {
if ((ignore_mask >> i) & 1)
continue;
irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
setup_irq(16+7, &isa_cascade_irqaction);
}

64
arch/alpha/kernel/irq_srm.c Normal file
View file

@ -0,0 +1,64 @@
/*
* Handle interrupts from the SRM, assuming no additional weirdness.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include "proto.h"
#include "irq_impl.h"
/*
* Is the PALcode SMP-safe? In other words: can we call cserve_ena/dis
* at the same time on multiple CPUs? To be safe I added a spinlock,
* but it can be removed trivially if the PALcode is robust against SMP.
*/
DEFINE_SPINLOCK(srm_irq_lock);
static inline void
srm_enable_irq(struct irq_data *d)
{
spin_lock(&srm_irq_lock);
cserve_ena(d->irq - 16);
spin_unlock(&srm_irq_lock);
}
static void
srm_disable_irq(struct irq_data *d)
{
spin_lock(&srm_irq_lock);
cserve_dis(d->irq - 16);
spin_unlock(&srm_irq_lock);
}
/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct irq_chip srm_irq_type = {
.name = "SRM",
.irq_unmask = srm_enable_irq,
.irq_mask = srm_disable_irq,
.irq_mask_ack = srm_disable_irq,
};
void __init
init_srm_irqs(long max, unsigned long ignore_mask)
{
long i;
if (NR_IRQS <= 16)
return;
for (i = 16; i < max; ++i) {
if (i < 64 && ((ignore_mask >> i) & 1))
continue;
irq_set_chip_and_handler(i, &srm_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
void
srm_device_interrupt(unsigned long vector)
{
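/* SRM console device vectors start at 0x800 and are spaced 0x10
   apart, so 0x800 maps to irq 0, 0x810 to irq 1, and so on. */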
int irq = (vector - 0x800) >> 4;
handle_irq(irq);
}

152
arch/alpha/kernel/machvec_impl.h Normal file
View file

@ -0,0 +1,152 @@
/*
* linux/arch/alpha/kernel/machvec_impl.h
*
* Copyright (C) 1997, 1998 Richard Henderson
*
* This file has goodies to help simplify instantiation of machine vectors.
*/
#include <asm/pgalloc.h>
/* Whee. These systems don't have an HAE:
IRONGATE, MARVEL, POLARIS, TSUNAMI, TITAN, WILDFIRE
Fix things up for the GENERIC kernel by defining the HAE address
to be that of the cache. Now we can read and write it as we like. ;-) */
#define IRONGATE_HAE_ADDRESS (&alpha_mv.hae_cache)
#define MARVEL_HAE_ADDRESS (&alpha_mv.hae_cache)
#define POLARIS_HAE_ADDRESS (&alpha_mv.hae_cache)
#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache)
#define TITAN_HAE_ADDRESS (&alpha_mv.hae_cache)
#define WILDFIRE_HAE_ADDRESS (&alpha_mv.hae_cache)
#ifdef CIA_ONE_HAE_WINDOW
#define CIA_HAE_ADDRESS (&alpha_mv.hae_cache)
#endif
#ifdef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache)
#endif
#ifdef T2_ONE_HAE_WINDOW
#define T2_HAE_ADDRESS (&alpha_mv.hae_cache)
#endif
/* Only a few systems don't define IACK_SC, handling all interrupts through
the SRM console. But splitting out that one case from IO() below
seems like such a pain. Define this to get things to compile. */
#define JENSEN_IACK_SC 1
#define T2_IACK_SC 1
#define WILDFIRE_IACK_SC 1 /* FIXME */
/*
* Some helpful macros for filling in the blanks.
*/
#define CAT1(x,y) x##y
#define CAT(x,y) CAT1(x,y)
#define DO_DEFAULT_RTC .rtc_port = 0x70
#define DO_EV4_MMU \
.max_asn = EV4_MAX_ASN, \
.mv_switch_mm = ev4_switch_mm, \
.mv_activate_mm = ev4_activate_mm, \
.mv_flush_tlb_current = ev4_flush_tlb_current, \
.mv_flush_tlb_current_page = ev4_flush_tlb_current_page
#define DO_EV5_MMU \
.max_asn = EV5_MAX_ASN, \
.mv_switch_mm = ev5_switch_mm, \
.mv_activate_mm = ev5_activate_mm, \
.mv_flush_tlb_current = ev5_flush_tlb_current, \
.mv_flush_tlb_current_page = ev5_flush_tlb_current_page
#define DO_EV6_MMU \
.max_asn = EV6_MAX_ASN, \
.mv_switch_mm = ev5_switch_mm, \
.mv_activate_mm = ev5_activate_mm, \
.mv_flush_tlb_current = ev5_flush_tlb_current, \
.mv_flush_tlb_current_page = ev5_flush_tlb_current_page
#define DO_EV7_MMU \
.max_asn = EV6_MAX_ASN, \
.mv_switch_mm = ev5_switch_mm, \
.mv_activate_mm = ev5_activate_mm, \
.mv_flush_tlb_current = ev5_flush_tlb_current, \
.mv_flush_tlb_current_page = ev5_flush_tlb_current_page
#define IO_LITE(UP,low) \
.hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \
.iack_sc = CAT(UP,_IACK_SC), \
.mv_ioread8 = CAT(low,_ioread8), \
.mv_ioread16 = CAT(low,_ioread16), \
.mv_ioread32 = CAT(low,_ioread32), \
.mv_iowrite8 = CAT(low,_iowrite8), \
.mv_iowrite16 = CAT(low,_iowrite16), \
.mv_iowrite32 = CAT(low,_iowrite32), \
.mv_readb = CAT(low,_readb), \
.mv_readw = CAT(low,_readw), \
.mv_readl = CAT(low,_readl), \
.mv_readq = CAT(low,_readq), \
.mv_writeb = CAT(low,_writeb), \
.mv_writew = CAT(low,_writew), \
.mv_writel = CAT(low,_writel), \
.mv_writeq = CAT(low,_writeq), \
.mv_ioportmap = CAT(low,_ioportmap), \
.mv_ioremap = CAT(low,_ioremap), \
.mv_iounmap = CAT(low,_iounmap), \
.mv_is_ioaddr = CAT(low,_is_ioaddr), \
.mv_is_mmio = CAT(low,_is_mmio) \
#define IO(UP,low) \
IO_LITE(UP,low), \
.pci_ops = &CAT(low,_pci_ops), \
.mv_pci_tbi = CAT(low,_pci_tbi)
#define DO_APECS_IO IO(APECS,apecs)
#define DO_CIA_IO IO(CIA,cia)
#define DO_IRONGATE_IO IO(IRONGATE,irongate)
#define DO_LCA_IO IO(LCA,lca)
#define DO_MARVEL_IO IO(MARVEL,marvel)
#define DO_MCPCIA_IO IO(MCPCIA,mcpcia)
#define DO_POLARIS_IO IO(POLARIS,polaris)
#define DO_T2_IO IO(T2,t2)
#define DO_TSUNAMI_IO IO(TSUNAMI,tsunami)
#define DO_TITAN_IO IO(TITAN,titan)
#define DO_WILDFIRE_IO IO(WILDFIRE,wildfire)
#define DO_PYXIS_IO IO_LITE(CIA,cia_bwx), \
.pci_ops = &cia_pci_ops, \
.mv_pci_tbi = cia_pci_tbi
/*
* In a GENERIC kernel, we have lots of these vectors floating about,
* all but one of which we want to go away. In a non-GENERIC kernel,
* we want only one, ever.
*
* Accomplish this in the GENERIC kernel by putting all of the vectors
* in the .init.data section where they'll go away. We'll copy the
* one we want to the real alpha_mv vector in setup_arch.
*
* Accomplish this in a non-GENERIC kernel by ifdef'ing out all but
* one of the vectors, which will not reside in .init.data. We then
* alias this one vector to alpha_mv, so no copy is needed.
*
* Upshot: set __initdata to nothing for non-GENERIC kernels.
*/
#ifdef CONFIG_ALPHA_GENERIC
#define __initmv __initdata
#define ALIAS_MV(x)
#else
#define __initmv __initdata_refok
/* GCC actually has a syntax for defining aliases, but is under some
delusion that you shouldn't be able to declare it extern somewhere
else beforehand. Fine. We'll do it ourselves. */
#if 0
#define ALIAS_MV(system) \
struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
#else
#define ALIAS_MV(system) \
asm(".global alpha_mv\nalpha_mv = " #system "_mv");
#endif
#endif /* GENERIC */
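For reference, a condensed sketch of how a sys_*.c file consumes these helpers; the vector name and hook functions below are illustrative and most fields are omitted:
struct alpha_machine_vector example_mv __initmv = {
	.vector_name		= "Example",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_PYXIS_IO,
	.machine_check		= cia_machine_check,	/* core_cia.c */
	.init_irq		= example_init_irq,
	/* ... nr_irqs, device_interrupt, init_arch, pci_map_irq, ... */
};
ALIAS_MV(example)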

282
arch/alpha/kernel/module.c Normal file
@@ -0,0 +1,282 @@
/* Kernel module help for Alpha.
Copyright (C) 2002 Richard Henderson.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
/* Allocate the GOT at the end of the core sections. */
struct got_entry {
struct got_entry *next;
Elf64_Sxword r_addend;
int got_offset;
};
static inline void
process_reloc_for_got(Elf64_Rela *rela,
struct got_entry *chains, Elf64_Xword *poffset)
{
unsigned long r_sym = ELF64_R_SYM (rela->r_info);
unsigned long r_type = ELF64_R_TYPE (rela->r_info);
Elf64_Sxword r_addend = rela->r_addend;
struct got_entry *g;
if (r_type != R_ALPHA_LITERAL)
return;
for (g = chains + r_sym; g ; g = g->next)
if (g->r_addend == r_addend) {
if (g->got_offset == 0) {
g->got_offset = *poffset;
*poffset += 8;
}
goto found_entry;
}
g = kmalloc (sizeof (*g), GFP_KERNEL);
g->next = chains[r_sym].next;
g->r_addend = r_addend;
g->got_offset = *poffset;
*poffset += 8;
chains[r_sym].next = g;
found_entry:
/* Trick: most of the ELF64_R_TYPE field is unused. There are
42 valid relocation types, and a 32-bit field. Co-opt the
bits above 256 to store the got offset for this reloc. */
rela->r_info |= g->got_offset << 8;
}
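/* Example: if a LITERAL reloc's GOT slot lands at offset 0x18, the OR
   above turns ELF64_R_TYPE(r_info) into (0x18 << 8) | R_ALPHA_LITERAL;
   apply_relocate_add() below recovers the offset with r_type >> 8 and
   the real type with r_type & 0xff. */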
int
module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
char *secstrings, struct module *me)
{
struct got_entry *chains;
Elf64_Rela *rela;
Elf64_Shdr *esechdrs, *symtab, *s, *got;
unsigned long nsyms, nrela, i;
esechdrs = sechdrs + hdr->e_shnum;
symtab = got = NULL;
/* Find out how large the symbol table is. Allocate one got_entry
head per symbol. Normally this will be enough, but not always.
We'll chain different offsets for the symbol down each head. */
for (s = sechdrs; s < esechdrs; ++s)
if (s->sh_type == SHT_SYMTAB)
symtab = s;
else if (!strcmp(".got", secstrings + s->sh_name)) {
got = s;
me->arch.gotsecindex = s - sechdrs;
}
if (!symtab) {
printk(KERN_ERR "module %s: no symbol table\n", me->name);
return -ENOEXEC;
}
if (!got) {
printk(KERN_ERR "module %s: no got section\n", me->name);
return -ENOEXEC;
}
nsyms = symtab->sh_size / sizeof(Elf64_Sym);
chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL);
if (!chains) {
printk(KERN_ERR
"module %s: no memory for symbol chain buffer\n",
me->name);
return -ENOMEM;
}
got->sh_size = 0;
got->sh_addralign = 8;
got->sh_type = SHT_NOBITS;
/* Examine all LITERAL relocations to find out what GOT entries
are required. This sizes the GOT section as well. */
for (s = sechdrs; s < esechdrs; ++s)
if (s->sh_type == SHT_RELA) {
nrela = s->sh_size / sizeof(Elf64_Rela);
rela = (void *)hdr + s->sh_offset;
for (i = 0; i < nrela; ++i)
process_reloc_for_got(rela+i, chains,
&got->sh_size);
}
/* Free the memory we allocated. */
for (i = 0; i < nsyms; ++i) {
struct got_entry *g, *n;
for (g = chains[i].next; g ; g = n) {
n = g->next;
kfree(g);
}
}
kfree(chains);
return 0;
}
int
apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela);
Elf64_Sym *symtab, *sym;
void *base, *location;
unsigned long got, gp;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
/* The small sections were sorted to the end of the segment.
The following should definitely cover them. */
gp = (u64)me->module_core + me->core_size - 0x8000;
got = sechdrs[me->arch.gotsecindex].sh_addr;
for (i = 0; i < n; i++) {
unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
unsigned long r_type = ELF64_R_TYPE (rela[i].r_info);
unsigned long r_got_offset = r_type >> 8;
unsigned long value, hi, lo;
r_type &= 0xff;
/* This is where to make the change. */
location = base + rela[i].r_offset;
/* This is the symbol it is referring to. Note that all
unresolved symbols have been resolved. */
sym = symtab + r_sym;
value = sym->st_value + rela[i].r_addend;
switch (r_type) {
case R_ALPHA_NONE:
break;
case R_ALPHA_REFQUAD:
/* BUG() can produce misaligned relocations. */
((u32 *)location)[0] = value;
((u32 *)location)[1] = value >> 32;
break;
case R_ALPHA_GPREL32:
value -= gp;
if ((int)value != value)
goto reloc_overflow;
*(u32 *)location = value;
break;
case R_ALPHA_LITERAL:
hi = got + r_got_offset;
lo = hi - gp;
if ((short)lo != lo)
goto reloc_overflow;
*(u16 *)location = lo;
*(u64 *)hi = value;
break;
case R_ALPHA_LITUSE:
break;
case R_ALPHA_GPDISP:
value = gp - (u64)location;
lo = (short)value;
hi = (int)(value - lo);
if (hi + lo != value)
goto reloc_overflow;
*(u16 *)location = hi >> 16;
*(u16 *)(location + rela[i].r_addend) = lo;
break;
case R_ALPHA_BRSGP:
/* BRSGP is only allowed to bind to local symbols.
If the section is undef, this means that the
value was resolved from somewhere else. */
if (sym->st_shndx == SHN_UNDEF)
goto reloc_overflow;
if ((sym->st_other & STO_ALPHA_STD_GPLOAD) ==
STO_ALPHA_STD_GPLOAD)
/* Omit the prologue. */
value += 8;
/* FALLTHRU */
case R_ALPHA_BRADDR:
value -= (u64)location + 4;
if (value & 3)
goto reloc_overflow;
value = (long)value >> 2;
if (value + (1<<21) >= 1<<22)
goto reloc_overflow;
value &= 0x1fffff;
value |= *(u32 *)location & ~0x1fffff;
*(u32 *)location = value;
break;
case R_ALPHA_HINT:
break;
case R_ALPHA_SREL32:
value -= (u64)location;
if ((int)value != value)
goto reloc_overflow;
*(u32 *)location = value;
break;
case R_ALPHA_SREL64:
value -= (u64)location;
*(u64 *)location = value;
break;
case R_ALPHA_GPRELHIGH:
value = (long)(value - gp + 0x8000) >> 16;
if ((short) value != value)
goto reloc_overflow;
*(u16 *)location = value;
break;
case R_ALPHA_GPRELLOW:
value -= gp;
*(u16 *)location = value;
break;
case R_ALPHA_GPREL16:
value -= gp;
if ((short) value != value)
goto reloc_overflow;
*(u16 *)location = value;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %lu\n",
me->name, r_type);
return -ENOEXEC;
reloc_overflow:
if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
printk(KERN_ERR
"module %s: Relocation (type %lu) overflow vs section %d\n",
me->name, r_type, sym->st_shndx);
else
printk(KERN_ERR
"module %s: Relocation (type %lu) overflow vs %s\n",
me->name, r_type, strtab + sym->st_name);
return -ENOEXEC;
}
}
return 0;
}
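The R_ALPHA_GPDISP case above splits a 32-bit displacement into a sign-adjusted ldah/lda pair; a standalone self-check of that arithmetic (plain userspace C, not part of the kernel sources):
#include <assert.h>

/* Split VALUE the way the R_ALPHA_GPDISP case does: LO is the
   sign-extended low 16 bits, HI the remainder (a multiple of 0x10000),
   so HI + LO == VALUE whenever the displacement fits in 32 bits. */
static void split_gpdisp(long value, long *hi, short *lo)
{
	*lo = (short)value;
	*hi = (long)(int)(value - *lo);
}

int main(void)
{
	long hi;
	short lo;

	split_gpdisp(0x1234ffffL, &hi, &lo);
	assert(lo == -1 && hi == 0x12350000L && hi + lo == 0x1234ffffL);
	return 0;
}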

1445
arch/alpha/kernel/osf_sys.c Normal file

File diff suppressed because it is too large

88
arch/alpha/kernel/pc873xx.c Normal file
@@ -0,0 +1,88 @@
#include <linux/ioport.h>
#include <asm/io.h>
#include "pc873xx.h"
static unsigned pc873xx_probelist[] = {0x398, 0x26e, 0};
static char *pc873xx_names[] = {
"PC87303", "PC87306", "PC87312", "PC87332", "PC87334"
};
static unsigned int base, model;
unsigned int __init pc873xx_get_base(void)
{
return base;
}
char *__init pc873xx_get_model(void)
{
return pc873xx_names[model];
}
static unsigned char __init pc873xx_read(unsigned int base, int reg)
{
outb(reg, base);
return inb(base + 1);
}
static void __init pc873xx_write(unsigned int base, int reg, unsigned char data)
{
unsigned long flags;
local_irq_save(flags);
outb(reg, base);
outb(data, base + 1);
outb(data, base + 1); /* Must be written twice */
local_irq_restore(flags);
}
int __init pc873xx_probe(void)
{
int val, index = 0;
while ((base = pc873xx_probelist[index++])) {
if (request_region(base, 2, "Super IO PC873xx") == NULL)
continue;
val = pc873xx_read(base, REG_SID);
if ((val & 0xf0) == 0x10) {
model = PC87332;
break;
} else if ((val & 0xf8) == 0x70) {
model = PC87306;
break;
} else if ((val & 0xf8) == 0x50) {
model = PC87334;
break;
} else if ((val & 0xf8) == 0x40) {
model = PC87303;
break;
}
release_region(base, 2);
}
return (base == 0) ? -1 : 1;
}
void __init pc873xx_enable_epp19(void)
{
unsigned char data;
printk(KERN_INFO "PC873xx enabling EPP v1.9\n");
data = pc873xx_read(base, REG_PCR);
pc873xx_write(base, REG_PCR, (data & 0xFC) | 0x02);
}
void __init pc873xx_enable_ide(void)
{
unsigned char data;
printk(KERN_INFO "PC873xx enabling IDE interrupt\n");
data = pc873xx_read(base, REG_FER);
pc873xx_write(base, REG_FER, data | 0x40);
}
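Board code typically pairs pc873xx_probe() with one of the enable helpers; a minimal sketch of that calling sequence (the function name is illustrative):
static void __init example_enable_ide(void)
{
	if (pc873xx_probe() == -1) {
		printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
		return;
	}
	printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
	       pc873xx_get_model(), pc873xx_get_base());
	pc873xx_enable_ide();
}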

35
arch/alpha/kernel/pc873xx.h Normal file
@@ -0,0 +1,35 @@
#ifndef _PC873xx_H_
#define _PC873xx_H_
/*
* Control Register Values
*/
#define REG_FER 0x00
#define REG_FAR 0x01
#define REG_PTR 0x02
#define REG_FCR 0x03
#define REG_PCR 0x04
#define REG_KRR 0x05
#define REG_PMC 0x06
#define REG_TUP 0x07
#define REG_SID 0x08
#define REG_ASC 0x09
#define REG_IRC 0x0e
/*
* Model numbers
*/
#define PC87303 0
#define PC87306 1
#define PC87312 2
#define PC87332 3
#define PC87334 4
int pc873xx_probe(void);
unsigned int pc873xx_get_base(void);
char *pc873xx_get_model(void);
void pc873xx_enable_epp19(void);
void pc873xx_enable_ide(void);
#endif

189
arch/alpha/kernel/pci-noop.c Normal file
@@ -0,0 +1,189 @@
/*
* linux/arch/alpha/kernel/pci-noop.c
*
* Stub PCI interfaces for Jensen-specific kernels.
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "proto.h"
/*
* The PCI controller list.
*/
struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;
struct pci_controller * __init
alloc_pci_controller(void)
{
struct pci_controller *hose;
hose = alloc_bootmem(sizeof(*hose));
*hose_tail = hose;
hose_tail = &hose->next;
return hose;
}
struct resource * __init
alloc_resource(void)
{
struct resource *res;
res = alloc_bootmem(sizeof(*res));
return res;
}
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
struct pci_controller *hose;
/* from hose or from bus.devfn */
if (which & IOBASE_FROM_HOSE) {
for (hose = hose_head; hose; hose = hose->next)
if (hose->index == bus)
break;
if (!hose)
return -ENODEV;
} else {
/* Special hook for ISA access. */
if (bus == 0 && dfn == 0)
hose = pci_isa_hose;
else
return -ENODEV;
}
switch (which & ~IOBASE_FROM_HOSE) {
case IOBASE_HOSE:
return hose->index;
case IOBASE_SPARSE_MEM:
return hose->sparse_mem_base;
case IOBASE_DENSE_MEM:
return hose->dense_mem_base;
case IOBASE_SPARSE_IO:
return hose->sparse_io_base;
case IOBASE_DENSE_IO:
return hose->dense_io_base;
case IOBASE_ROOT_BUS:
return hose->bus->number;
}
return -EOPNOTSUPP;
}
asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
unsigned long off, unsigned long len, void *buf)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
else
return -ENODEV;
}
asmlinkage long
sys_pciconfig_write(unsigned long bus, unsigned long dfn,
unsigned long off, unsigned long len, void *buf)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
else
return -ENODEV;
}
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
if (!dev || *dev->dma_mask >= 0xffffffffUL)
gfp &= ~GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret) {
memset(ret, 0, size);
*dma_handle = virt_to_phys(ret);
}
return ret;
}
static void alpha_noop_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
return page_to_pa(page) + offset;
}
static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
void *va;
BUG_ON(!sg_page(sg));
va = sg_virt(sg);
sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
sg_dma_len(sg) = sg->length;
}
return nents;
}
static int alpha_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static int alpha_noop_supported(struct device *dev, u64 mask)
{
return mask < 0x00ffffffUL ? 0 : 1;
}
static int alpha_noop_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
struct dma_map_ops alpha_noop_ops = {
.alloc = alpha_noop_alloc_coherent,
.free = alpha_noop_free_coherent,
.map_page = alpha_noop_map_page,
.map_sg = alpha_noop_map_sg,
.mapping_error = alpha_noop_mapping_error,
.dma_supported = alpha_noop_supported,
.set_dma_mask = alpha_noop_set_mask,
};
struct dma_map_ops *dma_ops = &alpha_noop_ops;
EXPORT_SYMBOL(dma_ops);

368
arch/alpha/kernel/pci-sysfs.c Normal file
@@ -0,0 +1,368 @@
/*
* arch/alpha/kernel/pci-sysfs.c
*
* Copyright (C) 2009 Ivan Kokshaysky
*
* Alpha PCI resource files.
*
* Loosely based on generic HAVE_PCI_MMAP implementation in
* drivers/pci/pci-sysfs.c
*/
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pci.h>
static int hose_mmap_page_range(struct pci_controller *hose,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_type, int sparse)
{
unsigned long base;
if (mmap_type == pci_mmap_mem)
base = sparse ? hose->sparse_mem_base : hose->dense_mem_base;
else
base = sparse ? hose->sparse_io_base : hose->dense_io_base;
vma->vm_pgoff += base >> PAGE_SHIFT;
return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
static int __pci_mmap_fits(struct pci_dev *pdev, int num,
struct vm_area_struct *vma, int sparse)
{
unsigned long nr, start, size;
int shift = sparse ? 5 : 0;
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1;
if (start < size && size - start >= nr)
return 1;
WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d "
"(size 0x%08lx)\n",
current->comm, sparse ? " sparse" : "", start, start + nr,
pci_name(pdev), num, size);
return 0;
}
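/* Example: with Alpha's 8 KB pages (PAGE_SHIFT == 13), a 4 MB BAR
   mapped sparse needs ((4M - 1) >> (13 - 5)) + 1 = 16384 pages, i.e.
   128 MB of user address space -- the 32-fold sparse expansion that
   the shift of 5 accounts for. */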
/**
* pci_mmap_resource - map a PCI resource into user memory space
* @kobj: kobject for mapping
* @attr: struct bin_attribute for the file being mapped
* @vma: struct vm_area_struct passed into the mmap
* @sparse: address space type
*
* Use the bus mapping routines to map a PCI resource into userspace.
*/
static int pci_mmap_resource(struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma, int sparse)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
struct resource *res = attr->private;
enum pci_mmap_state mmap_type;
struct pci_bus_region bar;
int i;
for (i = 0; i < PCI_ROM_RESOURCE; i++)
if (res == &pdev->resource[i])
break;
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
if (!__pci_mmap_fits(pdev, i, vma, sparse))
return -EINVAL;
if (iomem_is_exclusive(res->start))
return -EINVAL;
pcibios_resource_to_bus(pdev->bus, &bar, res);
vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0));
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
}
static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
}
static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
/**
* pci_remove_resource_files - cleanup resource files
 * @pdev: device to clean up
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
*/
void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
struct bin_attribute *res_attr;
res_attr = pdev->res_attr[i];
if (res_attr) {
sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
kfree(res_attr);
}
res_attr = pdev->res_attr_wc[i];
if (res_attr) {
sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
kfree(res_attr);
}
}
}
static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num)
{
struct pci_bus_region bar;
struct pci_controller *hose = pdev->sysdata;
long dense_offset;
unsigned long sparse_size;
pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]);
/* All core logic chips have 4G sparse address space, except
CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM
definitions in asm/core_xxx.h files). This corresponds
to 128M or 512M of the bus space. */
dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base);
sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000;
return bar.end < sparse_size;
}
static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name,
char *suffix, struct bin_attribute *res_attr,
unsigned long sparse)
{
size_t size = pci_resource_len(pdev, num);
sprintf(name, "resource%d%s", num, suffix);
res_attr->mmap = sparse ? pci_mmap_resource_sparse :
pci_mmap_resource_dense;
res_attr->attr.name = name;
res_attr->attr.mode = S_IRUSR | S_IWUSR;
res_attr->size = sparse ? size << 5 : size;
res_attr->private = &pdev->resource[num];
return sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
}
static int pci_create_attr(struct pci_dev *pdev, int num)
{
/* allocate attribute structure, piggyback attribute name */
int retval, nlen1, nlen2 = 0, res_count = 1;
unsigned long sparse_base, dense_base;
struct bin_attribute *attr;
struct pci_controller *hose = pdev->sysdata;
char *suffix, *attr_name;
suffix = ""; /* Assume bwx machine, normal resourceN files. */
nlen1 = 10;
if (pdev->resource[num].flags & IORESOURCE_MEM) {
sparse_base = hose->sparse_mem_base;
dense_base = hose->dense_mem_base;
if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) {
sparse_base = 0;
suffix = "_dense";
nlen1 = 16; /* resourceN_dense */
}
} else {
sparse_base = hose->sparse_io_base;
dense_base = hose->dense_io_base;
}
if (sparse_base) {
suffix = "_sparse";
nlen1 = 17;
if (dense_base) {
nlen2 = 16; /* resourceN_dense */
res_count = 2;
}
}
attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC);
if (!attr)
return -ENOMEM;
/* Create bwx, sparse or single dense file */
attr_name = (char *)(attr + res_count);
pdev->res_attr[num] = attr;
retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr,
sparse_base);
if (retval || res_count == 1)
return retval;
/* Create dense file */
attr_name += nlen1;
attr++;
pdev->res_attr_wc[num] = attr;
return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0);
}
/**
 * pci_create_resource_files - create resource files in sysfs for @pdev
 * @pdev: device in question
 *
 * Walk the resources in @pdev creating files for each resource available.
*/
int pci_create_resource_files(struct pci_dev *pdev)
{
int i;
int retval;
/* Expose the PCI resources from this device as files */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
/* skip empty resources */
if (!pci_resource_len(pdev, i))
continue;
retval = pci_create_attr(pdev, i);
if (retval) {
pci_remove_resource_files(pdev);
return retval;
}
}
return 0;
}
/* Legacy I/O bus mapping stuff. */
static int __legacy_mmap_fits(struct pci_controller *hose,
struct vm_area_struct *vma,
unsigned long res_size, int sparse)
{
unsigned long nr, start, size;
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
size = ((res_size - 1) >> PAGE_SHIFT) + 1;
if (start < size && size - start >= nr)
return 1;
WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %d "
"(size 0x%08lx)\n",
current->comm, sparse ? " sparse" : "", start, start + nr,
hose->index, size);
return 0;
}
static inline int has_sparse(struct pci_controller *hose,
enum pci_mmap_state mmap_type)
{
unsigned long base;
base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base :
hose->sparse_io_base;
return base != 0;
}
int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
enum pci_mmap_state mmap_type)
{
struct pci_controller *hose = bus->sysdata;
int sparse = has_sparse(hose, mmap_type);
unsigned long res_size;
res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size :
bus->legacy_io->size;
if (!__legacy_mmap_fits(hose, vma, res_size, sparse))
return -EINVAL;
return hose_mmap_page_range(hose, vma, mmap_type, sparse);
}
/**
* pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @bus: bus whose legacy files are being adjusted
* @mmap_type: I/O port or memory
*
* Adjust file name and size for sparse mappings.
*/
void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
{
struct pci_controller *hose = bus->sysdata;
if (!has_sparse(hose, mmap_type))
return;
if (mmap_type == pci_mmap_mem) {
bus->legacy_mem->attr.name = "legacy_mem_sparse";
bus->legacy_mem->size <<= 5;
} else {
bus->legacy_io->attr.name = "legacy_io_sparse";
bus->legacy_io->size <<= 5;
}
return;
}
/* Legacy I/O bus read/write functions */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
struct pci_controller *hose = bus->sysdata;
port += hose->io_space->start;
switch(size) {
case 1:
*((u8 *)val) = inb(port);
return 1;
case 2:
if (port & 1)
return -EINVAL;
*((u16 *)val) = inw(port);
return 2;
case 4:
if (port & 3)
return -EINVAL;
*((u32 *)val) = inl(port);
return 4;
}
return -EINVAL;
}
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
struct pci_controller *hose = bus->sysdata;
port += hose->io_space->start;
switch(size) {
case 1:
outb(val, port);	/* out*() take (value, port) */
return 1;
case 2:
if (port & 1)
return -EINVAL;
outw(val, port);
return 2;
case 4:
if (port & 3)
return -EINVAL;
outl(val, port);
return 4;
}
return -EINVAL;
}

436
arch/alpha/kernel/pci.c Normal file
@@ -0,0 +1,436 @@
/*
* linux/arch/alpha/kernel/pci.c
*
* Extruded from code written by
* Dave Rusling (david.rusling@reo.mts.dec.com)
* David Mosberger (davidm@cs.arizona.edu)
*/
/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */
/*
* Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
* PCI-PCI bridges cleanup
*/
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/machvec.h>
#include "proto.h"
#include "pci_impl.h"
/*
* Some string constants used by the various core logics.
*/
const char *const pci_io_names[] = {
"PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
"PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
};
const char *const pci_mem_names[] = {
"PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
"PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
};
const char pci_hae0_name[] = "HAE0";
/*
* If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
* assignments.
*/
/*
* The PCI controller list.
*/
struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;
/*
* Quirks.
*/
static void quirk_isa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);
static void quirk_cypress(struct pci_dev *dev)
{
/* The Notorious Cy82C693 chip. */
/* The generic legacy mode IDE fixup in drivers/pci/probe.c
doesn't work correctly with the Cypress IDE controller as
it has non-standard register layout. Fix that. */
if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
dev->resource[2].start = dev->resource[3].start = 0;
dev->resource[2].end = dev->resource[3].end = 0;
dev->resource[2].flags = dev->resource[3].flags = 0;
if (PCI_FUNC(dev->devfn) == 2) {
dev->resource[0].start = 0x170;
dev->resource[0].end = 0x177;
dev->resource[1].start = 0x376;
dev->resource[1].end = 0x376;
}
}
/* The Cypress bridge responds on the PCI bus in the address range
0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no
way to turn this off. The bridge also supports several extended
BIOS ranges (disabled after power-up), and some consoles do turn
them on. So if we use a large direct-map window, or a large SG
window, we must avoid the entire 0xfff00000-0xffffffff region. */
if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
__direct_map_size = 0xfff00000UL - __direct_map_base;
else {
struct pci_controller *hose = dev->sysdata;
struct pci_iommu_arena *pci = hose->sg_pci;
if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
pci->size = 0xfff00000UL - pci->dma_base;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);
/* Called for each device after PCI setup is done. */
static void pcibios_fixup_final(struct pci_dev *dev)
{
unsigned int class = dev->class >> 8;
if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
isa_bridge = dev;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
/* Just declaring that the power-of-ten prefixes are actually the
power-of-two ones doesn't make it true :) */
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_controller *hose = dev->sysdata;
unsigned long alignto;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
/* Make sure we start at our min on all hoses */
if (start - hose->io_space->start < PCIBIOS_MIN_IO)
start = PCIBIOS_MIN_IO + hose->io_space->start;
/*
* Put everything into 0x00-0xff region modulo 0x400
*/
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
}
else if (res->flags & IORESOURCE_MEM) {
/* Make sure we start at our min on all hoses */
if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
start = PCIBIOS_MIN_MEM + hose->mem_space->start;
/*
* The following holds at least for the Low Cost
* Alpha implementation of the PCI interface:
*
* In sparse memory address space, the first
* octant (16MB) of every 128MB segment is
* aliased to the very first 16 MB of the
* address space (i.e., it aliases the ISA
* memory address space). Thus, we try to
* avoid allocating PCI devices in that range.
* Can be allocated in 2nd-7th octant only.
* Devices that need more than 112MB of
* address space must be accessed through
* dense memory space only!
*/
/* Align to multiple of size of minimum base. */
alignto = max_t(resource_size_t, 0x1000, align);
start = ALIGN(start, alignto);
if (hose->sparse_mem_base && size <= 7 * 16*MB) {
if (((start / (16*MB)) & 0x7) == 0) {
start &= ~(128*MB - 1);
start += 16*MB;
start = ALIGN(start, alignto);
}
if (start/(128*MB) != (start + size - 1)/(128*MB)) {
start &= ~(128*MB - 1);
start += (128 + 16)*MB;
start = ALIGN(start, alignto);
}
}
}
return start;
}
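/* Worked example of the sparse-alias avoidance above: a memory request
   that would start at 256 MB sits in the first 16 MB octant of its
   128 MB segment (which aliases ISA memory in sparse space), so it is
   bumped to 256 MB + 16 MB and then re-aligned. */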
#undef KB
#undef MB
#undef GB
static int __init
pcibios_init(void)
{
if (alpha_mv.init_pci)
alpha_mv.init_pci();
return 0;
}
subsys_initcall(pcibios_init);
#ifdef ALPHA_RESTORE_SRM_SETUP
static struct pdev_srm_saved_conf *srm_saved_configs;
void pdev_save_srm_config(struct pci_dev *dev)
{
struct pdev_srm_saved_conf *tmp;
static int printed = 0;
if (!alpha_using_srm || pci_has_flag(PCI_PROBE_ONLY))
return;
if (!printed) {
printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
printed = 1;
}
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
printk(KERN_ERR "%s: kmalloc() failed!\n", __func__);
return;
}
tmp->next = srm_saved_configs;
tmp->dev = dev;
pci_save_state(dev);
srm_saved_configs = tmp;
}
void
pci_restore_srm_config(void)
{
struct pdev_srm_saved_conf *tmp;
/* No need to restore if probed only. */
if (pci_has_flag(PCI_PROBE_ONLY))
return;
/* Restore SRM config. */
for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
pci_restore_state(tmp->dev);
}
}
#endif
void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
pci_read_bridge_bases(bus);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
pdev_save_srm_config(dev);
}
}
/*
* If we set up a device for bus mastering, we need to check the latency
* timer as certain firmware forgets to set it properly, as seen
* on SX164 and LX164 with SRM.
*/
void
pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat >= 16) return;
printk("PCI: Setting latency timer of device %s to 64\n",
pci_name(dev));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
}
void __init
pcibios_claim_one_bus(struct pci_bus *b)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
list_for_each_entry(dev, &b->devices, bus_list) {
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r = &dev->resource[i];
if (r->parent || !r->start || !r->flags)
continue;
if (pci_has_flag(PCI_PROBE_ONLY) ||
(r->flags & IORESOURCE_PCI_FIXED))
pci_claim_resource(dev, i);
}
}
list_for_each_entry(child_bus, &b->children, node)
pcibios_claim_one_bus(child_bus);
}
static void __init
pcibios_claim_console_setup(void)
{
struct pci_bus *b;
list_for_each_entry(b, &pci_root_buses, node)
pcibios_claim_one_bus(b);
}
void __init
common_init_pci(void)
{
struct pci_controller *hose;
struct list_head resources;
struct pci_bus *bus;
int next_busno;
int need_domain_info = 0;
u32 pci_mem_end;
u32 sg_base;
unsigned long end;
/* Scan all of the recorded PCI controllers. */
for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
/* Adjust hose mem_space limit to prevent PCI allocations
in the iommu windows. */
pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
end = hose->mem_space->start + pci_mem_end;
if (hose->mem_space->end > end)
hose->mem_space->end = end;
INIT_LIST_HEAD(&resources);
pci_add_resource_offset(&resources, hose->io_space,
hose->io_space->start);
pci_add_resource_offset(&resources, hose->mem_space,
hose->mem_space->start);
bus = pci_scan_root_bus(NULL, next_busno, alpha_mv.pci_ops,
hose, &resources);
hose->bus = bus;
hose->need_domain_info = need_domain_info;
next_busno = bus->busn_res.end + 1;
/* Don't allow 8-bit bus number overflow inside the hose -
reserve some space for bridges. */
if (next_busno > 224) {
next_busno = 0;
need_domain_info = 1;
}
}
pcibios_claim_console_setup();
pci_assign_unassigned_resources();
pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
}
struct pci_controller * __init
alloc_pci_controller(void)
{
struct pci_controller *hose;
hose = alloc_bootmem(sizeof(*hose));
*hose_tail = hose;
hose_tail = &hose->next;
return hose;
}
struct resource * __init
alloc_resource(void)
{
struct resource *res;
res = alloc_bootmem(sizeof(*res));
return res;
}
/* Provide information on locations of various I/O regions in physical
memory. Do this on a per-card basis so that we choose the right hose. */
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
struct pci_controller *hose;
struct pci_dev *dev;
/* from hose or from bus.devfn */
if (which & IOBASE_FROM_HOSE) {
for(hose = hose_head; hose; hose = hose->next)
if (hose->index == bus) break;
if (!hose) return -ENODEV;
} else {
/* Special hook for ISA access. */
if (bus == 0 && dfn == 0) {
hose = pci_isa_hose;
} else {
dev = pci_get_bus_and_slot(bus, dfn);
if (!dev)
return -ENODEV;
hose = dev->sysdata;
pci_dev_put(dev);
}
}
switch (which & ~IOBASE_FROM_HOSE) {
case IOBASE_HOSE:
return hose->index;
case IOBASE_SPARSE_MEM:
return hose->sparse_mem_base;
case IOBASE_DENSE_MEM:
return hose->dense_mem_base;
case IOBASE_SPARSE_IO:
return hose->sparse_io_base;
case IOBASE_DENSE_IO:
return hose->dense_io_base;
case IOBASE_ROOT_BUS:
return hose->bus->number;
}
return -EOPNOTSUPP;
}
/* Destroy an __iomem token. Not copied from lib/iomap.c. */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (__is_mmio(addr))
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);

201
arch/alpha/kernel/pci_impl.h Normal file
@@ -0,0 +1,201 @@
/*
* linux/arch/alpha/kernel/pci_impl.h
*
* This file contains declarations and inline functions for interfacing
* with the PCI initialization routines.
*/
struct pci_dev;
struct pci_controller;
struct pci_iommu_arena;
/*
* We can't just blindly use 64K for machines with EISA busses; they
* may also have PCI-PCI bridges present, and then we'd configure the
* bridge incorrectly.
*
 * Also, we start at 0x8000 or 0x9000, in hopes of getting all devices'
* IO space areas allocated *before* 0xC000; this is because certain
* BIOSes (Millennium for one) use PCI Config space "mechanism #2"
* accesses to probe the bus. If a device's registers appear at 0xC000,
* it may see an INx/OUTx at that address during BIOS emulation of the
* VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense.
*/
#define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */
#define DEFAULT_IO_BASE 0x8000 /* start at 8th slot */
/*
* We try to make the DEFAULT_MEM_BASE addresses *always* have more than
* a single bit set. This is so that devices like the broken Myrinet card
 * will always have a PCI memory address that will never match an IDSEL
* address in PCI Config space, which can cause problems with early rev cards.
*/
/*
* An XL is AVANTI (APECS) family, *but* it has only 27 bits of ISA address
* that get passed through the PCI<->ISA bridge chip. Although this causes
* us to set the PCI->Mem window bases lower than normal, we still allocate
* PCI bus devices' memory addresses *below* the low DMA mapping window,
* and hope they fit below 64Mb (to avoid conflicts), and so that they can
* be accessed via SPARSE space.
*
* We accept the risk that a broken Myrinet card will be put into a true XL
* and thus can more easily run into the problem described below.
*/
#define XL_DEFAULT_MEM_BASE ((16+2)*1024*1024) /* 16M to 64M-1 is avail */
/*
* APECS and LCA have only 34 bits for physical addresses, thus limiting PCI
* bus memory addresses for SPARSE access to be less than 128Mb.
*/
#define APECS_AND_LCA_DEFAULT_MEM_BASE ((16+2)*1024*1024)
/*
* Because MCPCIA and T2 core logic support more bits for
* physical addresses, they should allow an expanded range of SPARSE
* memory addresses. However, we do not use them all, in order to
* avoid the HAE manipulation that would be needed.
*/
#define MCPCIA_DEFAULT_MEM_BASE ((32+2)*1024*1024)
#define T2_DEFAULT_MEM_BASE ((16+1)*1024*1024)
/*
* Because CIA and PYXIS have more bits for physical addresses,
* they support an expanded range of SPARSE memory addresses.
*/
#define DEFAULT_MEM_BASE ((128+16)*1024*1024)
/* ??? Experimenting with no HAE for CIA. */
#define CIA_DEFAULT_MEM_BASE ((32+2)*1024*1024)
#define IRONGATE_DEFAULT_MEM_BASE ((256*8-16)*1024*1024)
#define DEFAULT_AGP_APER_SIZE (64*1024*1024)
/*
* A small note about bridges and interrupts. The DECchip 21050 (and
* later) adheres to the PCI-PCI bridge specification. This says that
* the interrupts on the other side of a bridge are swizzled in the
* following manner:
*
* Dev Interrupt Interrupt
* Pin on Pin on
* Device Connector
*
* 4 A A
* B B
* C C
* D D
*
* 5 A B
* B C
* C D
* D A
*
* 6 A C
* B D
* C A
* D B
*
* 7 A D
* B A
* C B
* D C
*
* Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
* Thus, each swizzle is ((pin-1) + (device#-4)) % 4
*
* pci_swizzle_interrupt_pin() swizzles for exactly one bridge. The routine
* pci_common_swizzle() handles multiple bridges. But there are a
* couple boards that do strange things.
*/
/* The following macro is used to implement the table-based irq mapping
function for all single-bus Alphas. */
#define COMMON_TABLE_LOOKUP \
({ long _ctl_ = -1; \
if (slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot) \
_ctl_ = irq_tab[slot - min_idsel][pin]; \
_ctl_; })
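A typical single-bus board's pci_map_irq hook supplies the names the macro expects (irq_tab, min_idsel, max_idsel, irqs_per_slot); a condensed sketch with made-up routing values:
static int __init
example_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[3][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{ 16+0,  16+0,  16+1,  16+2,  16+3 },	/* IdSel 5 */
		{ 16+4,  16+4,  16+5,  16+6,  16+7 },	/* IdSel 6 */
		{   -1,    -1,    -1,    -1,    -1 },	/* IdSel 7, empty */
	};
	const long min_idsel = 5, max_idsel = 7, irqs_per_slot = 5;

	return COMMON_TABLE_LOOKUP;
}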
/* A PCI IOMMU allocation arena. There are typically two of these
regions per bus. */
/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
lives directly on the host bridge (no tlb?). We don't support this
machine, but if we ever did, we'd need to parameterize all this quite
a bit further. Probably with per-bus operation tables. */
struct pci_iommu_arena
{
spinlock_t lock;
struct pci_controller *hose;
#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */
#define IOMMU_RESERVED_PTE 0xface
unsigned long *ptes;
dma_addr_t dma_base;
unsigned int size;
unsigned int next_entry;
unsigned int align_entry;
};
#if defined(CONFIG_ALPHA_SRM) && \
(defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
# define NEED_SRM_SAVE_RESTORE
#else
# undef NEED_SRM_SAVE_RESTORE
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(NEED_SRM_SAVE_RESTORE)
# define ALPHA_RESTORE_SRM_SETUP
#else
# undef ALPHA_RESTORE_SRM_SETUP
#endif
#ifdef ALPHA_RESTORE_SRM_SETUP
/* Store PCI device configuration left by SRM here. */
struct pdev_srm_saved_conf
{
struct pdev_srm_saved_conf *next;
struct pci_dev *dev;
};
extern void pci_restore_srm_config(void);
#else
#define pdev_save_srm_config(dev) do {} while (0)
#define pci_restore_srm_config() do {} while (0)
#endif
/* The hose list. */
extern struct pci_controller *hose_head, **hose_tail;
extern struct pci_controller *pci_isa_hose;
extern unsigned long alpha_agpgart_size;
extern void common_init_pci(void);
#define common_swizzle pci_common_swizzle
extern struct pci_controller *alloc_pci_controller(void);
extern struct resource *alloc_resource(void);
extern struct pci_iommu_arena *iommu_arena_new_node(int,
struct pci_controller *,
dma_addr_t, unsigned long,
unsigned long);
extern struct pci_iommu_arena *iommu_arena_new(struct pci_controller *,
dma_addr_t, unsigned long,
unsigned long);
extern const char *const pci_io_names[];
extern const char *const pci_mem_names[];
extern const char pci_hae0_name[];
extern unsigned long size_for_memory(unsigned long max);
extern int iommu_reserve(struct pci_iommu_arena *, long, long);
extern int iommu_release(struct pci_iommu_arena *, long, long);
extern int iommu_bind(struct pci_iommu_arena *, long, long, struct page **);
extern int iommu_unbind(struct pci_iommu_arena *, long, long);

965
arch/alpha/kernel/pci_iommu.c Normal file
@@ -0,0 +1,965 @@
/*
* linux/arch/alpha/kernel/pci_iommu.c
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...) printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...) printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define ISA_DMA_MASK 0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
return (paddr >> (PAGE_SHIFT-1)) | 1;
}
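/* Example: with 8 KB pages, paddr 0x2000 (PFN 1) yields
   (0x2000 >> 12) | 1 == 0x3 -- the PFN in bits 1 and up, the valid
   bit in bit 0. */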
/* Return the minimum of MAX or the first power of two larger
than main memory. */
unsigned long
size_for_memory(unsigned long max)
{
unsigned long mem = max_low_pfn << PAGE_SHIFT;
if (mem < max)
max = roundup_pow_of_two(mem);
return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
unsigned long window_size, unsigned long align)
{
unsigned long mem_size;
struct pci_iommu_arena *arena;
mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
/* Note that the TLB lookup logic uses bitwise concatenation,
not addition, so the required arena alignment is based on
the size of the window. Retain the align parameter so that
particular systems can over-align the arena. */
if (align < mem_size)
align = mem_size;
#ifdef CONFIG_DISCONTIGMEM
arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
if (!NODE_DATA(nid) || !arena) {
printk("%s: couldn't allocate arena from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
arena = alloc_bootmem(sizeof(*arena));
}
arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
if (!NODE_DATA(nid) || !arena->ptes) {
printk("%s: couldn't allocate arena ptes from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
arena->ptes = __alloc_bootmem(mem_size, align, 0);
}
#else /* CONFIG_DISCONTIGMEM */
arena = alloc_bootmem(sizeof(*arena));
arena->ptes = __alloc_bootmem(mem_size, align, 0);
#endif /* CONFIG_DISCONTIGMEM */
spin_lock_init(&arena->lock);
arena->hose = hose;
arena->dma_base = base;
arena->size = window_size;
arena->next_entry = 0;
/* Align allocations to a multiple of a page size. Not needed
unless there are chip bugs. */
arena->align_entry = 1;
return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
unsigned long window_size, unsigned long align)
{
return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
long n, long mask)
{
unsigned long *ptes;
long i, p, nent;
int pass = 0;
unsigned long base;
unsigned long boundary_size;
base = arena->dma_base >> PAGE_SHIFT;
if (dev) {
boundary_size = dma_get_seg_boundary(dev) + 1;
boundary_size >>= PAGE_SHIFT;
} else {
boundary_size = 1UL << (32 - PAGE_SHIFT);
}
/* Search forward for the first mask-aligned sequence of N free ptes */
ptes = arena->ptes;
nent = arena->size >> PAGE_SHIFT;
p = ALIGN(arena->next_entry, mask + 1);
i = 0;
again:
while (i < n && p+i < nent) {
if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
p = ALIGN(p + 1, mask + 1);
goto again;
}
if (ptes[p+i])
p = ALIGN(p + i + 1, mask + 1), i = 0;
else
i = i + 1;
}
if (i < n) {
if (pass < 1) {
/*
* Reached the end. Flush the TLB and restart
* the search from the beginning.
*/
alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
pass++;
p = 0;
i = 0;
goto again;
} else
return -1;
}
/* Success. It's the responsibility of the caller to mark them
in use before releasing the lock */
return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
unsigned int align)
{
unsigned long flags;
unsigned long *ptes;
long i, p, mask;
spin_lock_irqsave(&arena->lock, flags);
/* Search for N empty ptes */
ptes = arena->ptes;
mask = max(align, arena->align_entry) - 1;
p = iommu_arena_find_pages(dev, arena, n, mask);
if (p < 0) {
spin_unlock_irqrestore(&arena->lock, flags);
return -1;
}
/* Success. Mark them all in use, ie not zero and invalid
for the iommu tlb that could load them from under us.
The chip specific bits will fill this in with something
kosher when we return. */
for (i = 0; i < n; ++i)
ptes[p+i] = IOMMU_INVALID_PTE;
arena->next_entry = p + n;
spin_unlock_irqrestore(&arena->lock, flags);
return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
unsigned long *p;
long i;
p = arena->ptes + ofs;
for (i = 0; i < n; ++i)
p[i] = 0;
}
/*
* True if the machine supports DAC addressing, and DEV can
* make use of it given MASK.
*/
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
int ok = 1;
/* If this is not set, the machine doesn't support DAC at all. */
if (dac_offset == 0)
ok = 0;
/* The device has to be able to address our DAC bit. */
if ((dac_offset & dev->dma_mask) != dac_offset)
ok = 0;
/* If both conditions above are met, we are fine. */
DBGA("pci_dac_dma_supported %s from %pf\n",
ok ? "yes" : "no", __builtin_return_address(0));
return ok;
}
/* Map a single buffer of the indicated size for PCI DMA in streaming
mode. The 32-bit PCI bus mastering address to use is returned.
Once the device is given the dma address, the device owns this memory
until either pci_unmap_single or pci_dma_sync_single is performed. */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
int dac_allowed)
{
struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
struct pci_iommu_arena *arena;
long npages, dma_ofs, i;
unsigned long paddr;
dma_addr_t ret;
unsigned int align = 0;
struct device *dev = pdev ? &pdev->dev : NULL;
paddr = __pa(cpu_addr);
#if !DEBUG_NODIRECT
/* First check to see if we can use the direct map window. */
if (paddr + size + __direct_map_base - 1 <= max_dma
&& paddr + size <= __direct_map_size) {
ret = paddr + __direct_map_base;
DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
cpu_addr, size, ret, __builtin_return_address(0));
return ret;
}
#endif
/* Next, use DAC if selected earlier. */
if (dac_allowed) {
ret = paddr + alpha_mv.pci_dac_offset;
DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
cpu_addr, size, ret, __builtin_return_address(0));
return ret;
}
/* If the machine doesn't define a pci_tbi routine, we have to
assume it doesn't support sg mapping, and, since we tried to
use direct_map above, it now must be considered an error. */
if (! alpha_mv.mv_pci_tbi) {
printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
return 0;
}
arena = hose->sg_pci;
if (!arena || arena->dma_base + arena->size - 1 > max_dma)
arena = hose->sg_isa;
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
/* Force allocation to 64KB boundary for ISA bridges. */
if (pdev && pdev == isa_bridge)
align = 8;
dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
if (dma_ofs < 0) {
printk(KERN_WARNING "pci_map_single failed: "
"could not allocate dma page tables\n");
return 0;
}
paddr &= PAGE_MASK;
for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
ret = arena->dma_base + dma_ofs * PAGE_SIZE;
ret += (unsigned long)cpu_addr & ~PAGE_MASK;
DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
cpu_addr, size, npages, ret, __builtin_return_address(0));
return ret;
}
/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
if (dev && dev_is_pci(dev))
return to_pci_dev(dev);
/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
BUG() otherwise. */
BUG_ON(!isa_bridge);
/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
bridge is bus master then). */
if (!dev || !dev->dma_mask || !*dev->dma_mask)
return isa_bridge;
/* For EISA bus masters, return isa_bridge (it might have smaller
dma_mask due to wiring limitations). */
if (*dev->dma_mask >= isa_bridge->dma_mask)
return isa_bridge;
/* This assumes ISA bus master with dma_mask 0xffffff. */
return NULL;
}
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
int dac_allowed;
BUG_ON(dir == PCI_DMA_NONE);
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
return pci_map_single_1(pdev, (char *)page_address(page) + offset,
size, dac_allowed);
}
/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
SIZE must match what was provided for in a previous pci_map_single
call. All other usages are undefined. After this call, reads by
the cpu to the buffer are guaranteed to see whatever the device
wrote there. */
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long flags;
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
struct pci_iommu_arena *arena;
long dma_ofs, npages;
BUG_ON(dir == PCI_DMA_NONE);
if (dma_addr >= __direct_map_base
&& dma_addr < __direct_map_base + __direct_map_size) {
/* Nothing to do. */
DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
dma_addr, size, __builtin_return_address(0));
return;
}
if (dma_addr > 0xffffffff) {
DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
dma_addr, size, __builtin_return_address(0));
return;
}
arena = hose->sg_pci;
if (!arena || dma_addr < arena->dma_base)
arena = hose->sg_isa;
dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
if (dma_ofs * PAGE_SIZE >= arena->size) {
printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
" base %llx size %x\n",
dma_addr, arena->dma_base, arena->size);
return;
}
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
spin_lock_irqsave(&arena->lock, flags);
iommu_arena_free(arena, dma_ofs, npages);
/* If we're freeing ptes above the `next_entry' pointer (they
may have snuck back into the TLB since the last wrap flush),
we need to flush the TLB before reallocating the latter. */
if (dma_ofs >= arena->next_entry)
alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
spin_unlock_irqrestore(&arena->lock, flags);
DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
dma_addr, size, npages, __builtin_return_address(0));
}
/* Allocate and map kernel buffer using consistent mode DMA for PCI
device. Returns non-NULL cpu-view pointer to the buffer if
successful and sets *DMA_ADDRP to the pci side dma address as well,
else DMA_ADDRP is undefined. */
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
void *cpu_addr;
long order = get_order(size);
gfp &= ~GFP_DMA;
try_again:
cpu_addr = (void *)__get_free_pages(gfp, order);
if (! cpu_addr) {
printk(KERN_INFO "pci_alloc_consistent: "
"get_free_pages failed from %pf\n",
__builtin_return_address(0));
/* ??? Really atomic allocation? Otherwise we could play
with vmalloc and sg if we can't find contiguous memory. */
return NULL;
}
memset(cpu_addr, 0, size);
*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
if (*dma_addrp == 0) {
free_pages((unsigned long)cpu_addr, order);
if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
return NULL;
/* The address doesn't fit required mask and we
do not have iommu. Try again with GFP_DMA. */
gfp |= GFP_DMA;
goto try_again;
}
DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
size, cpu_addr, *dma_addrp, __builtin_return_address(0));
return cpu_addr;
}
/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
be values that were returned from pci_alloc_consistent. SIZE must
be the same as what as passed into pci_alloc_consistent.
References to the memory and mappings associated with CPU_ADDR or
DMA_ADDR past this call are illegal. */
static void alpha_pci_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
free_pages((unsigned long)cpu_addr, get_order(size));
DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
dma_addr, size, __builtin_return_address(0));
}
/* Classify the elements of the scatterlist. Write dma_address
of each element with:
0 : Followers all physically adjacent.
1 : Followers all virtually adjacent.
-1 : Not leader, physically adjacent to previous.
-2 : Not leader, virtually adjacent to previous.
Write dma_length of each leader with the combined lengths of
the mergable followers. */
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
int virt_ok)
{
unsigned long next_paddr;
struct scatterlist *leader;
long leader_flag, leader_length;
unsigned int max_seg_size;
leader = sg;
leader_flag = 0;
leader_length = leader->length;
next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
/* We will not merge sg entries when no device is given. */
max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
for (++sg; sg < end; ++sg) {
unsigned long addr, len;
addr = SG_ENT_PHYS_ADDRESS(sg);
len = sg->length;
if (leader_length + len > max_seg_size)
goto new_segment;
if (next_paddr == addr) {
sg->dma_address = -1;
leader_length += len;
} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
sg->dma_address = -2;
leader_flag = 1;
leader_length += len;
} else {
new_segment:
leader->dma_address = leader_flag;
leader->dma_length = leader_length;
leader = sg;
leader_flag = 0;
leader_length = len;
}
next_paddr = addr + len;
}
leader->dma_address = leader_flag;
leader->dma_length = leader_length;
}
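/* Example: three 8 KB entries at consecutive physical addresses (on a
   device whose max segment size allows 24 KB) collapse into one leader
   with dma_address 0 and dma_length 24 KB; the two followers are
   marked -1 so the mapping loop skips them. */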
/* Given a scatterlist leader, choose an allocation method and fill
in the blanks. */
static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
struct scatterlist *out, struct pci_iommu_arena *arena,
dma_addr_t max_dma, int dac_allowed)
{
unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
long size = leader->dma_length;
struct scatterlist *sg;
unsigned long *ptes;
long npages, dma_ofs, i;
#if !DEBUG_NODIRECT
/* If everything is physically contiguous, and the addresses
fall into the direct-map window, use it. */
if (leader->dma_address == 0
&& paddr + size + __direct_map_base - 1 <= max_dma
&& paddr + size <= __direct_map_size) {
out->dma_address = paddr + __direct_map_base;
out->dma_length = size;
DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
__va(paddr), size, out->dma_address);
return 0;
}
#endif
/* If physically contiguous and DAC is available, use it. */
if (leader->dma_address == 0 && dac_allowed) {
out->dma_address = paddr + alpha_mv.pci_dac_offset;
out->dma_length = size;
DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
__va(paddr), size, out->dma_address);
return 0;
}
/* Otherwise, we'll use the iommu to make the pages virtually
contiguous. */
paddr &= ~PAGE_MASK;
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
if (dma_ofs < 0) {
/* If we attempted a direct map above but failed, die. */
if (leader->dma_address == 0)
return -1;
/* Otherwise, break up the remaining virtually contiguous
hunks into individual direct maps and retry. */
sg_classify(dev, leader, end, 0);
return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
}
out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
out->dma_length = size;
DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
__va(paddr), size, out->dma_address, npages);
/* All virtually contiguous. We need to find the length of each
physically contiguous subsegment to fill in the ptes. */
ptes = &arena->ptes[dma_ofs];
sg = leader;
do {
#if DEBUG_ALLOC > 0
struct scatterlist *last_sg = sg;
#endif
size = sg->length;
paddr = SG_ENT_PHYS_ADDRESS(sg);
while (sg+1 < end && (int) sg[1].dma_address == -1) {
size += sg[1].length;
sg++;
}
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
paddr &= PAGE_MASK;
for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
*ptes++ = mk_iommu_pte(paddr);
#if DEBUG_ALLOC > 0
DBGA(" (%ld) [%p,%x] np %ld\n",
last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
last_sg->length, npages);
while (++last_sg <= sg) {
DBGA(" (%ld) [%p,%x] cont\n",
last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
last_sg->length);
}
#endif
} while (++sg < end && (int) sg->dma_address < 0);
return 1;
}
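/* Editorial note: sg_fill() returns -1 when even the per-entry
   direct-map retry cannot allocate iommu space, 0 when the leader was
   satisfied by the direct-map window or a DAC address, and 1 when an
   iommu window was allocated and its ptes filled in; the caller below
   only distinguishes failure from success. */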
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
struct scatterlist *start, *end, *out;
struct pci_controller *hose;
struct pci_iommu_arena *arena;
dma_addr_t max_dma;
int dac_allowed;
BUG_ON(dir == PCI_DMA_NONE);
dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg->dma_length = sg->length;
sg->dma_address
= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
sg->length, dac_allowed);
return sg->dma_address != 0;
}
start = sg;
end = sg + nents;
/* First, prepare information about the entries. */
sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
/* Second, figure out where we're going to map things. */
if (alpha_mv.mv_pci_tbi) {
hose = pdev ? pdev->sysdata : pci_isa_hose;
max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
arena = hose->sg_pci;
if (!arena || arena->dma_base + arena->size - 1 > max_dma)
arena = hose->sg_isa;
} else {
max_dma = -1;
arena = NULL;
hose = NULL;
}
/* Third, iterate over the scatterlist leaders and allocate
dma space as needed. */
for (out = sg; sg < end; ++sg) {
if ((int) sg->dma_address < 0)
continue;
if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
goto error;
out++;
}
/* Mark the end of the list for pci_unmap_sg. */
if (out < end)
out->dma_length = 0;
if (out - start == 0)
printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
DBGA("pci_map_sg: %ld entries\n", out - start);
return out - start;
error:
printk(KERN_WARNING "pci_map_sg failed: "
"could not allocate dma page tables\n");
/* Some allocation failed while mapping the scatterlist
entries. Unmap them now. */
if (out > start)
pci_unmap_sg(pdev, start, out - start, dir);
return 0;
}
/* Unmap a set of streaming mode DMA translations. Again, cpu read
rules concerning calls here are the same as for pci_unmap_single()
above. */
static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
unsigned long flags;
struct pci_controller *hose;
struct pci_iommu_arena *arena;
struct scatterlist *end;
dma_addr_t max_dma;
dma_addr_t fbeg, fend;
BUG_ON(dir == PCI_DMA_NONE);
if (! alpha_mv.mv_pci_tbi)
return;
hose = pdev ? pdev->sysdata : pci_isa_hose;
max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
arena = hose->sg_pci;
if (!arena || arena->dma_base + arena->size - 1 > max_dma)
arena = hose->sg_isa;
fbeg = -1, fend = 0;
spin_lock_irqsave(&arena->lock, flags);
for (end = sg + nents; sg < end; ++sg) {
dma_addr_t addr;
size_t size;
long npages, ofs;
dma_addr_t tend;
addr = sg->dma_address;
size = sg->dma_length;
if (!size)
break;
if (addr > 0xffffffff) {
/* It's a DAC address -- nothing to do. */
DBGA(" (%ld) DAC [%llx,%zx]\n",
sg - end + nents, addr, size);
continue;
}
if (addr >= __direct_map_base
&& addr < __direct_map_base + __direct_map_size) {
/* Nothing to do. */
DBGA(" (%ld) direct [%llx,%zx]\n",
sg - end + nents, addr, size);
continue;
}
DBGA(" (%ld) sg [%llx,%zx]\n",
sg - end + nents, addr, size);
npages = iommu_num_pages(addr, size, PAGE_SIZE);
ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
iommu_arena_free(arena, ofs, npages);
tend = addr + size - 1;
if (fbeg > addr) fbeg = addr;
if (fend < tend) fend = tend;
}
/* If we're freeing ptes above the `next_entry' pointer (they
may have snuck back into the TLB since the last wrap flush),
we need to flush the TLB before reallocating the latter. */
if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
alpha_mv.mv_pci_tbi(hose, fbeg, fend);
spin_unlock_irqrestore(&arena->lock, flags);
DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
/* Return whether the given PCI device DMA address mask can be
supported properly. */
static int alpha_pci_supported(struct device *dev, u64 mask)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
struct pci_controller *hose;
struct pci_iommu_arena *arena;
/* If there exists a direct map, and the mask fits either
the entire direct mapped space or the total system memory as
shifted by the map base */
if (__direct_map_size != 0
&& (__direct_map_base + __direct_map_size - 1 <= mask ||
__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
return 1;
/* Check that we have a scatter-gather arena that fits. */
hose = pdev ? pdev->sysdata : pci_isa_hose;
arena = hose->sg_isa;
if (arena && arena->dma_base + arena->size - 1 <= mask)
return 1;
arena = hose->sg_pci;
if (arena && arena->dma_base + arena->size - 1 <= mask)
return 1;
/* As last resort try ZONE_DMA. */
if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
return 1;
return 0;
}
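/* Illustrative arithmetic (not from the original source): with a
   direct-map window at __direct_map_base = 1GB of size
   __direct_map_size = 1GB, a 32-bit mask of 0xffffffff passes the
   first test above because 0x40000000 + 0x40000000 - 1 <= 0xffffffff,
   so no scatter-gather arena is needed for such a device. */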
/*
* AGP GART extensions to the IOMMU
*/
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
unsigned long flags;
unsigned long *ptes;
long i, p;
if (!arena) return -EINVAL;
spin_lock_irqsave(&arena->lock, flags);
/* Search for N empty ptes. */
ptes = arena->ptes;
p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
if (p < 0) {
spin_unlock_irqrestore(&arena->lock, flags);
return -1;
}
/* Success. Mark them all reserved (i.e. non-zero and invalid)
   so that the iommu tlb cannot load them from under us.
   They will be filled in with valid bits by _bind(). */
for (i = 0; i < pg_count; ++i)
ptes[p+i] = IOMMU_RESERVED_PTE;
arena->next_entry = p + pg_count;
spin_unlock_irqrestore(&arena->lock, flags);
return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
unsigned long *ptes;
long i;
if (!arena) return -EINVAL;
ptes = arena->ptes;
/* Make sure they're all reserved first... */
for(i = pg_start; i < pg_start + pg_count; i++)
if (ptes[i] != IOMMU_RESERVED_PTE)
return -EBUSY;
iommu_arena_free(arena, pg_start, pg_count);
return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
struct page **pages)
{
unsigned long flags;
unsigned long *ptes;
long i, j;
if (!arena) return -EINVAL;
spin_lock_irqsave(&arena->lock, flags);
ptes = arena->ptes;
for(j = pg_start; j < pg_start + pg_count; j++) {
if (ptes[j] != IOMMU_RESERVED_PTE) {
spin_unlock_irqrestore(&arena->lock, flags);
return -EBUSY;
}
}
for(i = 0, j = pg_start; i < pg_count; i++, j++)
ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
spin_unlock_irqrestore(&arena->lock, flags);
return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
unsigned long *p;
long i;
if (!arena) return -EINVAL;
p = arena->ptes + pg_start;
for(i = 0; i < pg_count; i++)
p[i] = IOMMU_RESERVED_PTE;
return 0;
}
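/* Sketch of the intended calling sequence for the GART hooks above
   (editorial; the arena pointer would come from the platform's hose,
   e.g. hose->sg_pci):

	long pg = iommu_reserve(arena, npages, align_mask);
	if (pg >= 0) {
		iommu_bind(arena, pg, npages, pages);	// ptes now valid
		... perform AGP/DMA accesses ...
		iommu_unbind(arena, pg, npages);	// back to reserved
		iommu_release(arena, pg, npages);	// ptes freed
	}
 */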
static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == 0;
}
static int alpha_pci_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask ||
!pci_dma_supported(alpha_gendev_to_pci(dev), mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
struct dma_map_ops alpha_pci_ops = {
.alloc = alpha_pci_alloc_coherent,
.free = alpha_pci_free_coherent,
.map_page = alpha_pci_map_page,
.unmap_page = alpha_pci_unmap_page,
.map_sg = alpha_pci_map_sg,
.unmap_sg = alpha_pci_unmap_sg,
.mapping_error = alpha_pci_mapping_error,
.dma_supported = alpha_pci_supported,
.set_dma_mask = alpha_pci_set_mask,
};
struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);

903
arch/alpha/kernel/perf_event.c Normal file

@ -0,0 +1,903 @@
/*
* Hardware performance events for the Alpha.
*
* We implement HW counts on the EV67 and subsequent CPUs only.
*
* (C) 2010 Michael J. Cree
*
* Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
* ARM code, which are copyright by their respective authors.
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>
/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1
/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
int enabled;
/* Number of events scheduled; also the number of valid entries in the arrays below. */
int n_events;
/* Number of events added since the last hw_perf_disable(). */
int n_added;
/* Events currently scheduled. */
struct perf_event *event[MAX_HWEVENTS];
/* Event type of each scheduled event. */
unsigned long evtype[MAX_HWEVENTS];
/* Current index of each scheduled event; if not yet determined
* contains PMC_NO_INDEX.
*/
int current_idx[MAX_HWEVENTS];
/* The active PMCs' config for easy use with wrperfmon(). */
unsigned long config;
/* The active counters' indices for easy use with wrperfmon(). */
unsigned long idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/*
* A structure to hold the description of the PMCs available on a particular
* type of Alpha CPU.
*/
struct alpha_pmu_t {
/* Mapping of the perf system hw event types to indigenous event types */
const int *event_map;
/* The number of entries in the event_map */
int max_events;
/* The number of PMCs on this Alpha */
int num_pmcs;
/*
* All PMC counters reside in the IBOX register PCTR. This is the
* LSB of the counter.
*/
int pmc_count_shift[MAX_HWEVENTS];
/*
* The mask that isolates the PMC bits when the LSB of the counter
* is shifted to bit 0.
*/
unsigned long pmc_count_mask[MAX_HWEVENTS];
/* The maximum period the PMC can count. */
unsigned long pmc_max_period[MAX_HWEVENTS];
/*
* The maximum value that may be written to the counter due to
* hardware restrictions is pmc_max_period - pmc_left.
*/
long pmc_left[3];
/* Subroutine for allocation of PMCs. Enforces constraints. */
int (*check_constraints)(struct perf_event **, unsigned long *, int);
/* Subroutine for checking validity of a raw event for this PMU. */
int (*raw_event_valid)(u64 config);
};
/*
* The Alpha CPU PMU description currently in operation. This is set during
* the boot process to the specific CPU of the machine.
*/
static const struct alpha_pmu_t *alpha_pmu;
#define HW_OP_UNSUPPORTED -1
/*
* The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
* follow. Since they are identical we refer to them collectively as the
* EV67 henceforth.
*/
/*
* EV67 PMC event types
*
* There is no one-to-one mapping of the possible hw event types to the
* actual codes that are used to program the PMCs hence we introduce our
* own hw event type identifiers.
*/
enum ev67_pmc_event_type {
EV67_CYCLES = 1,
EV67_INSTRUCTIONS,
EV67_BCACHEMISS,
EV67_MBOXREPLAY,
EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
};
struct ev67_mapping_t {
int config;
int idx;
};
/*
* The mapping used for one event only - these must be in the same order as the enum
* ev67_pmc_event_type definition.
*/
static const struct ev67_mapping_t ev67_mapping[] = {
{EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */
{EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */
{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
{EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */
};
/*
* Check that a group of events can be simultaneously scheduled on to the
* EV67 PMU. Also allocate counter indices and config.
*/
static int ev67_check_constraints(struct perf_event **event,
unsigned long *evtype, int n_ev)
{
int idx0;
unsigned long config;
idx0 = ev67_mapping[evtype[0]-1].idx;
config = ev67_mapping[evtype[0]-1].config;
if (n_ev == 1)
goto success;
BUG_ON(n_ev != 2);
if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
/* MBOX replay traps must be on PMC 1 */
idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
/* Only cycles can accompany MBOX replay traps */
if (evtype[idx0] == EV67_CYCLES) {
config = EV67_PCTR_CYCLES_MBOX;
goto success;
}
}
if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
/* Bcache misses must be on PMC 1 */
idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
/* Only instructions can accompany Bcache misses */
if (evtype[idx0] == EV67_INSTRUCTIONS) {
config = EV67_PCTR_INSTR_BCACHEMISS;
goto success;
}
}
if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
/* Instructions must be on PMC 0 */
idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
/* By this point only cycles can accompany instructions */
if (evtype[idx0^1] == EV67_CYCLES) {
config = EV67_PCTR_INSTR_CYCLES;
goto success;
}
}
/* Otherwise, darn it, there is a conflict. */
return -1;
success:
event[0]->hw.idx = idx0;
event[0]->hw.config_base = config;
if (n_ev == 2) {
event[1]->hw.idx = idx0 ^ 1;
event[1]->hw.config_base = config;
}
return 0;
}
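/* Example (editorial): scheduling {EV67_INSTRUCTIONS, EV67_CYCLES}
   places instructions on PMC0 and cycles on PMC1 with config
   EV67_PCTR_INSTR_CYCLES; pairing EV67_BCACHEMISS with anything other
   than EV67_INSTRUCTIONS falls through to the conflict return above. */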
static int ev67_raw_event_valid(u64 config)
{
return config >= EV67_CYCLES && config < EV67_LAST_ET;
}
static const struct alpha_pmu_t ev67_pmu = {
.event_map = ev67_perfmon_event_map,
.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
.num_pmcs = 2,
.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
.pmc_left = {16, 4, 0},
.check_constraints = ev67_check_constraints,
.raw_event_valid = ev67_raw_event_valid,
};
/*
* Helper routines to ensure that we read/write only the correct PMC bits
* when calling the wrperfmon PALcall.
*/
static inline void alpha_write_pmc(int idx, unsigned long val)
{
val &= alpha_pmu->pmc_count_mask[idx];
val <<= alpha_pmu->pmc_count_shift[idx];
val |= (1<<idx);
wrperfmon(PERFMON_CMD_WRITE, val);
}
static inline unsigned long alpha_read_pmc(int idx)
{
unsigned long val;
val = wrperfmon(PERFMON_CMD_READ, 0);
val >>= alpha_pmu->pmc_count_shift[idx];
val &= alpha_pmu->pmc_count_mask[idx];
return val;
}
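/* Illustration (editorial): on the EV67 both counters share the one
   PCTR register, so alpha_write_pmc(1, v) masks v to the 20-bit
   field, shifts it into PMC1's position and sets bit 1 so that the
   wrperfmon PALcall touches only that counter; alpha_read_pmc(1)
   undoes the shift-and-mask on the PERFMON_CMD_READ result. */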
/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
long left = local64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Hardware restrictions require that the counters must not be
* written with values that are too close to the maximum period.
*/
if (unlikely(left < alpha_pmu->pmc_left[idx]))
left = alpha_pmu->pmc_left[idx];
if (left > (long)alpha_pmu->pmc_max_period[idx])
left = alpha_pmu->pmc_max_period[idx];
local64_set(&hwc->prev_count, (unsigned long)(-left));
alpha_write_pmc(idx, (unsigned long)(-left));
perf_event_update_userpage(event);
return ret;
}
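/* Example (editorial): with a sample_period of 10000 the PMC is
   programmed with -10000 truncated to its 20-bit field, so it
   overflows -- raising the PMI -- after exactly 10000 events.  The
   pmc_left[] clamp above keeps us from programming values the
   hardware cannot start counting from. */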
/*
* Calculates the count (the 'delta') since the last time the PMC was read.
*
* As the PMCs' full period can easily be exceeded within the perf system
* sampling period we cannot use any high order bits as a guard bit in the
* PMCs to detect overflow as is done by other architectures. The code here
* calculates the delta on the basis that there is no overflow when ovf is
* zero. The value passed via ovf by the interrupt handler corrects for
* overflow.
*
* This can be racy on rare occasions -- a call to this routine can occur
* with an overflowed counter just before the PMI service routine is called.
* The check for delta negative hopefully always rectifies this situation.
*/
static unsigned long alpha_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx, long ovf)
{
long prev_raw_count, new_raw_count;
long delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = alpha_read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
/* It is possible on very rare occasions that the PMC has overflowed
* but the interrupt is yet to come. Detect and fix this situation.
*/
if (unlikely(delta < 0)) {
delta += alpha_pmu->pmc_max_period[idx] + 1;
}
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
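/* Illustration (editorial): if prev_count held -100 (0xfff9c in the
   20-bit field) and the counter has since wrapped to 5, the raw
   difference is negative; adding pmc_max_period + 1 recovers the true
   delta of 105 for the case where the overflow has not yet been
   credited via ovf. */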
/*
* Collect all HW events into the array event[].
*/
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *event[], unsigned long *evtype,
int *current_idx)
{
struct perf_event *pe;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
event[n] = group;
evtype[n] = group->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
list_for_each_entry(pe, &group->sibling_list, group_entry) {
if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
event[n] = pe;
evtype[n] = pe->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
}
return n;
}
/*
* Check that a group of events can be simultaneously scheduled on to the PMU.
*/
static int alpha_check_constraints(struct perf_event **events,
unsigned long *evtypes, int n_ev)
{
/* No HW events is possible from hw_perf_group_sched_in(). */
if (n_ev == 0)
return 0;
if (n_ev > alpha_pmu->num_pmcs)
return -1;
return alpha_pmu->check_constraints(events, evtypes, n_ev);
}
/*
* If new events have been scheduled then update cpuc with the new
* configuration. This may involve shifting cycle counts from one PMC to
* another.
*/
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
int j;
if (cpuc->n_added == 0)
return;
/* Find counters that are moving to another PMC and update */
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
if (cpuc->current_idx[j] != PMC_NO_INDEX &&
cpuc->current_idx[j] != pe->hw.idx) {
alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
cpuc->current_idx[j] = PMC_NO_INDEX;
}
}
/* Assign to counters all unassigned events. */
cpuc->idx_mask = 0;
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
struct hw_perf_event *hwc = &pe->hw;
int idx = hwc->idx;
if (cpuc->current_idx[j] == PMC_NO_INDEX) {
alpha_perf_event_set_period(pe, hwc, idx);
cpuc->current_idx[j] = idx;
}
if (!(hwc->state & PERF_HES_STOPPED))
cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
}
cpuc->config = cpuc->event[0]->hw.config_base;
}
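/* Editorial note: when an event moves to a different PMC, the count
   accumulated on its old PMC is banked by alpha_perf_event_update()
   in the first loop before alpha_perf_event_set_period() programs the
   new PMC in the second, so no counts are lost across the move. */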
/* Schedule perf HW event on to PMU.
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static int alpha_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
unsigned long irq_flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
* disable, however this can lead to an overflowed counter with the
* PMI disabled on rare occasions. The alpha_perf_event_update()
* routine should detect this situation by noting a negative delta,
* nevertheless we disable the PMCs first to enable a potential
* final PMI to occur before we disable interrupts.
*/
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
/* Default to error to be returned */
ret = -EAGAIN;
/* Insert event on to PMU and if successful modify ret to valid return */
n0 = cpuc->n_events;
if (n0 < alpha_pmu->num_pmcs) {
cpuc->event[n0] = event;
cpuc->evtype[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PMC_NO_INDEX;
if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
}
}
hwc->state = PERF_HES_UPTODATE;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_STOPPED;
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
return ret;
}
/* Disable performance monitoring unit
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static void alpha_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
unsigned long irq_flags;
int j;
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
int idx = cpuc->current_idx[j];
/* Shift remaining entries down into the existing
* slot.
*/
while (++j < cpuc->n_events) {
cpuc->event[j - 1] = cpuc->event[j];
cpuc->evtype[j - 1] = cpuc->evtype[j];
cpuc->current_idx[j - 1] =
cpuc->current_idx[j];
}
/* Absorb the final count and turn off the event. */
alpha_perf_event_update(event, hwc, idx, 0);
perf_event_update_userpage(event);
cpuc->idx_mask &= ~(1UL<<idx);
cpuc->n_events--;
break;
}
}
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
}
static void alpha_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
alpha_perf_event_update(event, hwc, hwc->idx, 0);
}
static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
cpuc->idx_mask &= ~(1UL<<hwc->idx);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
alpha_perf_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_UPTODATE;
}
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}
static void alpha_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
alpha_perf_event_set_period(event, hwc, hwc->idx);
}
hwc->state = 0;
cpuc->idx_mask |= 1UL<<hwc->idx;
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
/*
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
* EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
{
struct percpu_struct *cpu;
unsigned long cputype;
/* Get cpu type from HW */
cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
cputype = cpu->type & 0xffffffff;
/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Nothing to be done! */
return;
}
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
struct perf_event *evts[MAX_HWEVENTS];
unsigned long evtypes[MAX_HWEVENTS];
int idx_rubbish_bin[MAX_HWEVENTS];
int ev;
int n;
/* We only support a limited range of HARDWARE event types, with the
 * rest programmable only via a RAW event type.
*/
if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= alpha_pmu->max_events)
return -EINVAL;
ev = alpha_pmu->event_map[attr->config];
} else if (attr->type == PERF_TYPE_HW_CACHE) {
return -EOPNOTSUPP;
} else if (attr->type == PERF_TYPE_RAW) {
if (!alpha_pmu->raw_event_valid(attr->config))
return -EINVAL;
ev = attr->config;
} else {
return -EOPNOTSUPP;
}
if (ev < 0) {
return ev;
}
/* The EV67 does not support mode exclusion */
if (attr->exclude_kernel || attr->exclude_user
|| attr->exclude_hv || attr->exclude_idle) {
return -EPERM;
}
/*
* We place the event type in event_base here and leave calculation
* of the codes to programme the PMU for alpha_pmu_enable() because
* it is only then we will know what HW events are actually
* scheduled on to the PMU. At that point the code to programme the
* PMU is put into config_base and the PMC to use is placed into
* idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
* it is yet to be determined.
*/
hwc->event_base = ev;
/* Collect events in a group together suitable for calling
* alpha_check_constraints() to verify that the group as a whole can
* be scheduled on to the PMU.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
alpha_pmu->num_pmcs - 1,
evts, evtypes, idx_rubbish_bin);
if (n < 0)
return -EINVAL;
}
evtypes[n] = hwc->event_base;
evts[n] = event;
if (alpha_check_constraints(evts, evtypes, n + 1))
return -EINVAL;
/* Indicate that PMU config and idx are yet to be determined. */
hwc->config_base = 0;
hwc->idx = PMC_NO_INDEX;
event->destroy = hw_perf_event_destroy;
/*
* Most architectures reserve the PMU for their use at this point.
* As there is no existing mechanism to arbitrate usage and there
* appears to be no other user of the Alpha PMU we just assume
* that we can just use it, hence a NO-OP here.
*
* Maybe an alpha_reserve_pmu() routine should be implemented but is
* anything else ever going to use it?
*/
if (!hwc->sample_period) {
hwc->sample_period = alpha_pmu->pmc_max_period[0];
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
/*
* Main entry point to initialise a HW performance event.
*/
static int alpha_pmu_event_init(struct perf_event *event)
{
int err;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!alpha_pmu)
return -ENODEV;
/* Do the real initialisation work. */
err = __hw_perf_event_init(event);
return err;
}
/*
* Main entry point - enable HW performance counters.
*/
static void alpha_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
if (cpuc->n_events > 0) {
/* Update cpuc with information from any new scheduled events. */
maybe_change_configuration(cpuc);
/* Start counting the desired events. */
wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}
}
/*
* Main entry point - disable HW performance counters.
*/
static void alpha_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
static struct pmu pmu = {
.pmu_enable = alpha_pmu_enable,
.pmu_disable = alpha_pmu_disable,
.event_init = alpha_pmu_event_init,
.add = alpha_pmu_add,
.del = alpha_pmu_del,
.start = alpha_pmu_start,
.stop = alpha_pmu_stop,
.read = alpha_pmu_read,
};
/*
* Main entry point - don't know when this is called but it
* obviously dumps debug info.
*/
void perf_event_print_debug(void)
{
unsigned long flags;
unsigned long pcr;
int pcr0, pcr1;
int cpu;
if (!supported_cpu())
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = wrperfmon(PERFMON_CMD_READ, 0);
pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
local_irq_restore(flags);
}
/*
* Performance Monitoring Interrupt Service Routine called when a PMC
* overflows. The PMC that overflowed is passed in la_ptr.
*/
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__this_cpu_inc(irq_pmi_count);
cpuc = this_cpu_ptr(&cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
perf_sample_data_init(&data, 0, hwc->last_period);
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
/*
* Init call to initialise performance events at kernel startup.
*/
int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
if (!supported_cpu()) {
pr_cont("No support for your CPU.\n");
return 0;
}
pr_cont("Supported CPU type!\n");
/* Override performance counter IRQ vector */
perf_irq = alpha_perf_event_irq_handler;
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);

407
arch/alpha/kernel/process.c Normal file

@ -0,0 +1,407 @@
/*
* linux/arch/alpha/kernel/process.c
*
* Copyright (C) 1995 Linus Torvalds
*/
/*
* This file handles the architecture-dependent parts of process handling.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/time.h>
#include <linux/major.h>
#include <linux/stat.h>
#include <linux/vt.h>
#include <linux/mman.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>
#include "proto.h"
#include "pci_impl.h"
/*
* Power off function, if any
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
#ifdef CONFIG_ALPHA_WTINT
/*
* Sleep the CPU.
* EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts.
*/
void arch_cpu_idle(void)
{
wtint(0);
local_irq_enable();
}
void arch_cpu_idle_dead(void)
{
wtint(INT_MAX);
}
#endif /* ALPHA_WTINT */
struct halt_info {
int mode;
char *restart_cmd;
};
static void
common_shutdown_1(void *generic_ptr)
{
struct halt_info *how = (struct halt_info *)generic_ptr;
struct percpu_struct *cpup;
unsigned long *pflags, flags;
int cpuid = smp_processor_id();
/* No point in taking interrupts anymore. */
local_irq_disable();
cpup = (struct percpu_struct *)
((unsigned long)hwrpb + hwrpb->processor_offset
+ hwrpb->processor_size * cpuid);
pflags = &cpup->flags;
flags = *pflags;
/* Clear reason to "default"; clear "bootstrap in progress". */
flags &= ~0x00ff0001UL;
#ifdef CONFIG_SMP
/* Secondaries halt here. */
if (cpuid != boot_cpuid) {
flags |= 0x00040000UL; /* "remain halted" */
*pflags = flags;
set_cpu_present(cpuid, false);
set_cpu_possible(cpuid, false);
halt();
}
#endif
if (how->mode == LINUX_REBOOT_CMD_RESTART) {
if (!how->restart_cmd) {
flags |= 0x00020000UL; /* "cold bootstrap" */
} else {
/* For SRM, we could probably set environment
variables to get this to work. We'd have to
delay this until after srm_paging_stop unless
we ever got srm_fixup working.
At the moment, SRM will use the last boot device,
but the file and flags will be the defaults, when
doing a "warm" bootstrap. */
flags |= 0x00030000UL; /* "warm bootstrap" */
}
} else {
flags |= 0x00040000UL; /* "remain halted" */
}
*pflags = flags;
#ifdef CONFIG_SMP
/* Wait for the secondaries to halt. */
set_cpu_present(boot_cpuid, false);
set_cpu_possible(boot_cpuid, false);
while (cpumask_weight(cpu_present_mask))
barrier();
#endif
/* If booted from SRM, reset some of the original environment. */
if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
/* If we've gotten here after SysRq-b, leave interrupt
context before taking over the console. */
if (in_interrupt())
irq_exit();
/* This has the effect of resetting the VGA video origin. */
console_lock();
do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
console_unlock();
#endif
pci_restore_srm_config();
set_hae(srm_hae);
}
if (alpha_mv.kill_arch)
alpha_mv.kill_arch(how->mode);
if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
/* Unfortunately, since MILO doesn't currently understand
the hwrpb bits above, we can't reliably halt the
processor and keep it halted. So just loop. */
return;
}
if (alpha_using_srm)
srm_paging_stop();
halt();
}
static void
common_shutdown(int mode, char *restart_cmd)
{
struct halt_info args;
args.mode = mode;
args.restart_cmd = restart_cmd;
on_each_cpu(common_shutdown_1, &args, 0);
}
void
machine_restart(char *restart_cmd)
{
common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
}
void
machine_halt(void)
{
common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
}
void
machine_power_off(void)
{
common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
}
/* Used by sysrq-p, among others. I don't believe r9-r15 are ever
saved in the context in which it's used. */
void
show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
dik_show_regs(regs, NULL);
}
/*
* Re-start a thread when doing execve()
*/
void
start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
regs->pc = pc;
regs->ps = 8;
wrusp(sp);
}
EXPORT_SYMBOL(start_thread);
/*
* Free current thread data structures etc..
*/
void
exit_thread(void)
{
}
void
flush_thread(void)
{
/* Arrange for each exec'ed process to start off with a clean slate
   with respect to the FPU. That is, all exceptions disabled. */
current_thread_info()->ieee_state = 0;
wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
/* Clean slate for TLS. */
current_thread_info()->pcb.unique = 0;
}
void
release_thread(struct task_struct *dead_task)
{
}
/*
* Copy an alpha thread..
*/
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg,
struct task_struct *p)
{
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
struct thread_info *childti = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
struct pt_regs *regs = current_pt_regs();
struct switch_stack *childstack, *stack;
childstack = ((struct switch_stack *) childregs) - 1;
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
childstack->r26 = (unsigned long) ret_from_kernel_thread;
childstack->r9 = usp; /* function */
childstack->r10 = arg;
childregs->hae = alpha_mv.hae_cache;
childti->pcb.usp = 0;
return 0;
}
/* Note: if CLONE_SETTLS is not set, then we must inherit the
value from the parent, which will have been set by the block
copy in dup_task_struct. This is non-intuitive, but is
required for proper operation in the case of a threaded
application calling fork. */
if (clone_flags & CLONE_SETTLS)
childti->pcb.unique = regs->r20;
childti->pcb.usp = usp ?: rdusp();
*childregs = *regs;
childregs->r0 = 0;
childregs->r19 = 0;
childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
regs->r20 = 0;
stack = ((struct switch_stack *) regs) - 1;
*childstack = *stack;
childstack->r26 = (unsigned long) ret_from_fork;
return 0;
}
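/* Illustration (editorial): a kernel thread wakes up in
   ret_from_kernel_thread, which calls the function stashed in r9 with
   the argument in r10.  A user fork returns twice: the child's copied
   pt_regs carry r0 = 0 and r20 = 1 while the parent's r20 is cleared
   -- the OSF/1 convention for telling the two returns apart. */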
/*
* Fill in the user structure for a ELF core dump.
*/
void
dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
{
/* switch stack follows right below pt_regs: */
struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
dest[ 0] = pt->r0;
dest[ 1] = pt->r1;
dest[ 2] = pt->r2;
dest[ 3] = pt->r3;
dest[ 4] = pt->r4;
dest[ 5] = pt->r5;
dest[ 6] = pt->r6;
dest[ 7] = pt->r7;
dest[ 8] = pt->r8;
dest[ 9] = sw->r9;
dest[10] = sw->r10;
dest[11] = sw->r11;
dest[12] = sw->r12;
dest[13] = sw->r13;
dest[14] = sw->r14;
dest[15] = sw->r15;
dest[16] = pt->r16;
dest[17] = pt->r17;
dest[18] = pt->r18;
dest[19] = pt->r19;
dest[20] = pt->r20;
dest[21] = pt->r21;
dest[22] = pt->r22;
dest[23] = pt->r23;
dest[24] = pt->r24;
dest[25] = pt->r25;
dest[26] = pt->r26;
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = pt->pc;
/* Once upon a time this was the PS value. Which is stupid
since that is always 8 for usermode. Usurped for the more
useful value of the thread's UNIQUE field. */
dest[32] = ti->pcb.unique;
}
EXPORT_SYMBOL(dump_elf_thread);
int
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
{
dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
return 1;
}
EXPORT_SYMBOL(dump_elf_task);
int
dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
{
struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1;
memcpy(dest, sw->fp, 32 * 8);
return 1;
}
EXPORT_SYMBOL(dump_elf_task_fp);
/*
* Return saved PC of a blocked thread. This assumes the frame
* pointer is the 6th saved long on the kernel stack and that the
* saved return address is the first long in the frame. This all
* holds provided the thread blocked through a call to schedule() ($15
* is the frame pointer in schedule() and $15 is saved at offset 48 by
* entry.S:do_switch_stack).
*
* Under heavy swap load I've seen this lose in an ugly way. So do
* some extra sanity checking on the ranges we expect these pointers
* to be in so that we can fail gracefully. This is just for ps after
* all. -- r~
*/
unsigned long
thread_saved_pc(struct task_struct *t)
{
unsigned long base = (unsigned long)task_stack_page(t);
unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
if (sp > base && sp+6*8 < base + 16*1024) {
fp = ((unsigned long*)sp)[6];
if (fp > sp && fp < base + 16*1024)
return *(unsigned long *)fp;
}
return 0;
}
unsigned long
get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* This one depends on the frame size of schedule(). Do a
* "disass schedule" in gdb to find the frame size. Also, the
* code assumes that sleep_on() follows immediately after
* interruptible_sleep_on() and that add_timer() follows
* immediately after interruptible_sleep(). Ugly, isn't it?
* Maybe adding a wchan field to task_struct would be better,
* after all...
*/
pc = thread_saved_pc(p);
if (in_sched_functions(pc)) {
schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
return ((unsigned long *)schedule_frame)[12];
}
return pc;
}

222
arch/alpha/kernel/proto.h Normal file

@ -0,0 +1,222 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/pgtable.h>
/* Prototypes of functions used across modules here in this directory. */
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vip volatile int *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
struct pt_regs;
struct task_struct;
struct pci_dev;
struct pci_controller;
/* core_apecs.c */
extern struct pci_ops apecs_pci_ops;
extern void apecs_init_arch(void);
extern void apecs_pci_clr_err(void);
extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr);
extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
/* core_cia.c */
extern struct pci_ops cia_pci_ops;
extern void cia_init_pci(void);
extern void cia_init_arch(void);
extern void pyxis_init_arch(void);
extern void cia_kill_arch(int);
extern void cia_machine_check(unsigned long vector, unsigned long la_ptr);
extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
/* core_irongate.c */
extern struct pci_ops irongate_pci_ops;
extern int irongate_pci_clr_err(void);
extern void irongate_init_arch(void);
#define irongate_pci_tbi ((void *)0)
/* core_lca.c */
extern struct pci_ops lca_pci_ops;
extern void lca_init_arch(void);
extern void lca_machine_check(unsigned long vector, unsigned long la_ptr);
extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
/* core_marvel.c */
extern struct pci_ops marvel_pci_ops;
extern void marvel_init_arch(void);
extern void marvel_kill_arch(int);
extern void marvel_machine_check(unsigned long, unsigned long);
extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
extern int marvel_pa_to_nid(unsigned long);
extern int marvel_cpuid_to_nid(int);
extern unsigned long marvel_node_mem_start(int);
extern unsigned long marvel_node_mem_size(int);
extern struct _alpha_agp_info *marvel_agp_info(void);
struct io7 *marvel_find_io7(int pe);
struct io7 *marvel_next_io7(struct io7 *prev);
void io7_clear_errors(struct io7 *io7);
/* core_mcpcia.c */
extern struct pci_ops mcpcia_pci_ops;
extern void mcpcia_init_arch(void);
extern void mcpcia_init_hoses(void);
extern void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr);
extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
/* core_polaris.c */
extern struct pci_ops polaris_pci_ops;
extern int polaris_read_config_dword(struct pci_dev *, int, u32 *);
extern int polaris_write_config_dword(struct pci_dev *, int, u32);
extern void polaris_init_arch(void);
extern void polaris_machine_check(unsigned long vector, unsigned long la_ptr);
#define polaris_pci_tbi ((void *)0)
/* core_t2.c */
extern struct pci_ops t2_pci_ops;
extern void t2_init_arch(void);
extern void t2_kill_arch(int);
extern void t2_machine_check(unsigned long vector, unsigned long la_ptr);
extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
/* core_titan.c */
extern struct pci_ops titan_pci_ops;
extern void titan_init_arch(void);
extern void titan_kill_arch(int);
extern void titan_machine_check(unsigned long, unsigned long);
extern void titan_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
extern struct _alpha_agp_info *titan_agp_info(void);
/* core_tsunami.c */
extern struct pci_ops tsunami_pci_ops;
extern void tsunami_init_arch(void);
extern void tsunami_kill_arch(int);
extern void tsunami_machine_check(unsigned long vector, unsigned long la_ptr);
extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
/* core_wildfire.c */
extern struct pci_ops wildfire_pci_ops;
extern void wildfire_init_arch(void);
extern void wildfire_kill_arch(int);
extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
extern int wildfire_pa_to_nid(unsigned long);
extern int wildfire_cpuid_to_nid(int);
extern unsigned long wildfire_node_mem_start(int);
extern unsigned long wildfire_node_mem_size(int);
/* console.c */
#ifdef CONFIG_VGA_HOSE
extern void find_console_vga_hose(void);
extern void locate_and_init_vga(void *(*)(void *, void *));
#else
static inline void find_console_vga_hose(void) { }
static inline void locate_and_init_vga(void *(*sel_func)(void *, void *)) { }
#endif
/* setup.c */
extern unsigned long srm_hae;
extern int boot_cpuid;
#ifdef CONFIG_VERBOSE_MCHECK
extern unsigned long alpha_verbose_mcheck;
#endif
/* srmcons.c */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
extern void register_srm_console(void);
extern void unregister_srm_console(void);
#else
#define register_srm_console()
#define unregister_srm_console()
#endif
/* smp.c */
extern void setup_smp(void);
extern void handle_ipi(struct pt_regs *);
/* bios32.c */
/* extern void reset_for_srm(void); */
/* time.c */
extern irqreturn_t rtc_timer_interrupt(int irq, void *dev);
extern void init_clockevent(void);
extern void common_init_rtc(void);
extern unsigned long est_cycle_freq;
/* smc37c93x.c */
extern void SMC93x_Init(void);
/* smc37c669.c */
extern void SMC669_Init(int);
/* es1888.c */
extern void es1888_init(void);
/* ../lib/fpreg.c */
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
extern unsigned long alpha_read_fp_reg (unsigned long reg);
/* head.S */
extern void wrmces(unsigned long mces);
extern void cserve_ena(unsigned long);
extern void cserve_dis(unsigned long);
extern void __smp_callin(unsigned long);
/* entry.S */
extern void entArith(void);
extern void entIF(void);
extern void entInt(void);
extern void entMM(void);
extern void entSys(void);
extern void entUna(void);
extern void entDbg(void);
/* ptrace.c */
extern int ptrace_set_bpt (struct task_struct *child);
extern int ptrace_cancel_bpt (struct task_struct *child);
/* traps.c */
extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15);
extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);
/* sys_titan.c */
extern void titan_dispatch_irqs(u64);
/* ../mm/init.c */
extern void switch_to_system_map(void);
extern void srm_paging_stop(void);
static inline int
__alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
unsigned long size, unsigned long flags)
{
pgprot_t prot;
prot = __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE
| _PAGE_KWE | flags);
return ioremap_page_range(address, address + size, phys_addr, prot);
}
/* irq.c */
#ifdef CONFIG_SMP
#define mcheck_expected(cpu) (cpu_data[cpu].mcheck_expected)
#define mcheck_taken(cpu) (cpu_data[cpu].mcheck_taken)
#define mcheck_extra(cpu) (cpu_data[cpu].mcheck_extra)
#else
extern struct mcheck_info
{
unsigned char expected __attribute__((aligned(8)));
unsigned char taken;
unsigned char extra;
} __mcheck_info;
#define mcheck_expected(cpu) (*((void)(cpu), &__mcheck_info.expected))
#define mcheck_taken(cpu) (*((void)(cpu), &__mcheck_info.taken))
#define mcheck_extra(cpu) (*((void)(cpu), &__mcheck_info.extra))
#endif
extern void process_mcheck_info(unsigned long vector, unsigned long la_ptr,
const char *machine, int expected);

334
arch/alpha/kernel/ptrace.c Normal file

@ -0,0 +1,334 @@
/* ptrace.c */
/* By Ross Biro 1/23/92 */
/* edited by Linus Torvalds */
/* mangled further by Bob Manson (manson@santafe.edu) */
/* more mutilation by David Mosberger (davidm@azstarnet.com) */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
#include "proto.h"
#define DEBUG DBG_MEM
#undef DEBUG
#ifdef DEBUG
enum {
DBG_MEM = (1<<0),
DBG_BPT = (1<<1),
DBG_MEM_ALL = (1<<2)
};
#define DBG(fac,args) {if ((fac) & DEBUG) printk args;}
#else
#define DBG(fac,args)
#endif
#define BREAKINST 0x00000080 /* call_pal bpt */
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/*
* Processes always block with the following stack-layout:
*
* +================================+ <---- task + 2*PAGE_SIZE
* | PALcode saved frame (ps, pc, | ^
* | gp, a0, a1, a2) | |
* +================================+ | struct pt_regs
* | | |
* | frame generated by SAVE_ALL | |
* | | v
* +================================+
* | | ^
* | frame saved by do_switch_stack | | struct switch_stack
* | | v
* +================================+
*/
/*
* The following table maps a register index into the stack offset at
* which the register is saved. Register indices are 0-31 for integer
* regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and
* zero have no stack-slot and need to be treated specially (see
* get_reg/put_reg below).
*/
enum {
REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
};
#define PT_REG(reg) \
(PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
#define SW_REG(reg) \
(PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+ offsetof(struct switch_stack, reg))
static int regoff[] = {
PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3),
PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7),
PT_REG( r8), SW_REG( r9), SW_REG( r10), SW_REG( r11),
SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15),
PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19),
PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23),
PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27),
PT_REG( r28), PT_REG( gp), -1, -1,
SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]),
SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]),
SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]),
SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]),
SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]),
SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]),
SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]),
SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]),
PT_REG( pc)
};
static unsigned long zero;
/*
* Get address of register REGNO in task TASK.
*/
static unsigned long *
get_reg_addr(struct task_struct * task, unsigned long regno)
{
unsigned long *addr;
if (regno == 30) {
addr = &task_thread_info(task)->pcb.usp;
} else if (regno == 65) {
addr = &task_thread_info(task)->pcb.unique;
} else if (regno == 31 || regno > 65) {
zero = 0;
addr = &zero;
} else {
addr = task_stack_page(task) + regoff[regno];
}
return addr;
}
/*
* Get contents of register REGNO in task TASK.
*/
static unsigned long
get_reg(struct task_struct * task, unsigned long regno)
{
/* Special hack for fpcr -- combine hardware and software bits. */
if (regno == 63) {
unsigned long fpcr = *get_reg_addr(task, regno);
unsigned long swcr
= task_thread_info(task)->ieee_state & IEEE_SW_MASK;
swcr = swcr_update_status(swcr, fpcr);
return fpcr | swcr;
}
return *get_reg_addr(task, regno);
}
/*
* Write contents of register REGNO in task TASK.
*/
static int
put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
if (regno == 63) {
task_thread_info(task)->ieee_state
= ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK)
| (data & IEEE_SW_MASK));
data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
}
*get_reg_addr(task, regno) = data;
return 0;
}
static inline int
read_int(struct task_struct *task, unsigned long addr, int * data)
{
int copied = access_process_vm(task, addr, data, sizeof(int), 0);
return (copied == sizeof(int)) ? 0 : -EIO;
}
static inline int
write_int(struct task_struct *task, unsigned long addr, int data)
{
int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
return (copied == sizeof(int)) ? 0 : -EIO;
}
/*
* Set breakpoint.
*/
int
ptrace_set_bpt(struct task_struct * child)
{
int displ, i, res, reg_b, nsaved = 0;
unsigned int insn, op_code;
unsigned long pc;
pc = get_reg(child, REG_PC);
res = read_int(child, pc, (int *) &insn);
if (res < 0)
return res;
op_code = insn >> 26;
if (op_code >= 0x30) {
/*
* It's a branch: instead of trying to figure out
* whether the branch will be taken or not, we'll put
* a breakpoint at either location. This is simpler,
* more reliable, and probably not a whole lot slower
* than the alternative approach of emulating the
* branch (emulation can be tricky for fp branches).
*/
displ = ((s32)(insn << 11)) >> 9;
task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
if (displ) /* guard against unoptimized code */
task_thread_info(child)->bpt_addr[nsaved++]
= pc + 4 + displ;
DBG(DBG_BPT, ("execing branch\n"));
} else if (op_code == 0x1a) {
reg_b = (insn >> 16) & 0x1f;
task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b);
DBG(DBG_BPT, ("execing jump\n"));
} else {
task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
DBG(DBG_BPT, ("execing normal insn\n"));
}
/* install breakpoints: */
for (i = 0; i < nsaved; ++i) {
res = read_int(child, task_thread_info(child)->bpt_addr[i],
(int *) &insn);
if (res < 0)
return res;
task_thread_info(child)->bpt_insn[i] = insn;
DBG(DBG_BPT, (" -> next_pc=%lx\n",
task_thread_info(child)->bpt_addr[i]));
res = write_int(child, task_thread_info(child)->bpt_addr[i],
BREAKINST);
if (res < 0)
return res;
}
task_thread_info(child)->bpt_nsaved = nsaved;
return 0;
}
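/* Worked example (editorial): Alpha conditional branches keep a
   21-bit signed displacement, in units of instructions, in the low
   bits of the insn.  The expression ((s32)(insn << 11)) >> 9 used
   above shifts that field to the top of a 32-bit word and
   arithmetically shifts it back, yielding the sign-extended byte
   displacement: a field of -1 (0x1fffff) gives displ = -4, one
   instruction back. */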
/*
* Ensure no single-step breakpoint is pending. Returns non-zero
* value if child was being single-stepped.
*/
int
ptrace_cancel_bpt(struct task_struct * child)
{
int i, nsaved = task_thread_info(child)->bpt_nsaved;
task_thread_info(child)->bpt_nsaved = 0;
if (nsaved > 2) {
printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
nsaved = 2;
}
for (i = 0; i < nsaved; ++i) {
write_int(child, task_thread_info(child)->bpt_addr[i],
task_thread_info(child)->bpt_insn[i]);
}
return (nsaved != 0);
}
void user_enable_single_step(struct task_struct *child)
{
/* Mark single stepping. */
task_thread_info(child)->bpt_nsaved = -1;
}
void user_disable_single_step(struct task_struct *child)
{
ptrace_cancel_bpt(child);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure the single step bit is not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long tmp;
size_t copied;
long ret;
switch (request) {
/* When I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
break;
force_successful_syscall_return();
ret = tmp;
break;
/* Read register number ADDR. */
case PTRACE_PEEKUSR:
force_successful_syscall_return();
ret = get_reg(child, addr);
DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret));
break;
/* When I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the specified register */
DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data));
ret = put_reg(child, addr, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage unsigned long syscall_trace_enter(void)
{
unsigned long ret = 0;
struct pt_regs *regs = current_pt_regs();
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(current_pt_regs()))
ret = -1UL;
audit_syscall_entry(regs->r0, regs->r16, regs->r17, regs->r18, regs->r19);
return ret ?: current_pt_regs()->r0;
}
asmlinkage void
syscall_trace_leave(void)
{
audit_syscall_exit(current_pt_regs());
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(current_pt_regs(), 0);
}

323
arch/alpha/kernel/rtc.c Normal file

@ -0,0 +1,323 @@
/*
* linux/arch/alpha/kernel/rtc.c
*
* Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
*
* This file contains date handling.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mc146818rtc.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/rtc.h>
#include "proto.h"
/*
* Support for the RTC device.
*
* We don't want to use the rtc-cmos driver, because we don't want to support
* alarms, as that would be indistinguishable from timer interrupts.
*
* Further, generic code is really, really tied to a 1900 epoch. This is
* true in __get_rtc_time as well as the users of struct rtc_time e.g.
* rtc_tm_to_time. Thankfully all of the other epochs in use are later
* than 1900, and so it's easy to adjust.
*/
static unsigned long rtc_epoch;
static int __init
specify_epoch(char *str)
{
unsigned long epoch = simple_strtoul(str, NULL, 0);
if (epoch < 1900)
printk("Ignoring invalid user specified epoch %lu\n", epoch);
else
rtc_epoch = epoch;
return 1;
}
__setup("epoch=", specify_epoch);
static void __init
init_rtc_epoch(void)
{
int epoch, year, ctrl;
if (rtc_epoch != 0) {
/* The epoch was specified on the command-line. */
return;
}
/* Detect the epoch in use on this computer. */
ctrl = CMOS_READ(RTC_CONTROL);
year = CMOS_READ(RTC_YEAR);
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
year = bcd2bin(year);
/* PC-like is standard; used for year >= 70 */
epoch = 1900;
if (year < 20) {
epoch = 2000;
} else if (year >= 20 && year < 48) {
/* NT epoch */
epoch = 1980;
} else if (year >= 48 && year < 70) {
/* Digital UNIX epoch */
epoch = 1952;
}
rtc_epoch = epoch;
printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year);
}
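/* A worked example of the detection above: a CMOS year of 95 keeps
   the PC epoch 1900 (1995); year 10 selects 2000 (2010); year 30
   selects the NT epoch 1980 (2010); year 60 selects the Digital
   UNIX epoch 1952 (2012). */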
static int
alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
__get_rtc_time(tm);
/* Adjust for non-default epochs. It's easier to depend on the
generic __get_rtc_time and adjust the epoch here than to create
a copy of __get_rtc_time with the edits we need. */
if (rtc_epoch != 1900) {
int year = tm->tm_year;
/* Undo the century adjustment made in __get_rtc_time. */
if (year >= 100)
year -= 100;
year += rtc_epoch - 1900;
/* Redo the century adjustment with the epoch in place. */
if (year <= 69)
year += 100;
tm->tm_year = year;
}
return rtc_valid_tm(tm);
}
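/* A worked example of the adjustment above, assuming rtc_epoch == 1980:
   a raw RTC year of 13 makes __get_rtc_time return tm_year = 113
   (13 < 70, so it added a century).  We undo that (year = 13), add
   the epoch offset (13 + 80 = 93), and since 93 > 69 no re-adjustment
   is needed, so tm_year = 93, i.e. 1993. */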
static int
alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct rtc_time xtm;
if (rtc_epoch != 1900) {
xtm = *tm;
xtm.tm_year -= rtc_epoch - 1900;
tm = &xtm;
}
return __set_rtc_time(tm);
}
static int
alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
{
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
unsigned char save_control, save_freq_select;
/* Note: This code only updates minutes and seconds. Comments
indicate this was to avoid messing with unknown time zones,
and with the epoch nonsense described above. In order for
this to work, the existing clock cannot be off by more than
15 minutes.
??? This choice may be out of date.  The x86 port does
not have problems with timezones, and the epoch processing has
now been fixed in alpha_rtc_set_time.
In either case, one can always force a full rtc update with
the userland hwclock program, so surely 15 minute accuracy
is no real burden. */
/* In order to set the CMOS clock precisely, we have to be called
500 ms after the second nowtime has started, because when
nowtime is written into the registers of the CMOS clock, it will
jump to the next second precisely 500 ms later. Check the Motorola
MC146818A or Dallas DS12887 data sheet for details. */
/* irq are locally disabled here */
spin_lock(&rtc_lock);
/* Tell the clock it's being set */
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
/* Stop and reset prescaler */
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
cmos_minutes = CMOS_READ(RTC_MINUTES);
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
cmos_minutes = bcd2bin(cmos_minutes);
real_seconds = nowtime % 60;
real_minutes = nowtime / 60;
if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) {
/* correct for half hour time zone */
real_minutes += 30;
}
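/* Example of the half-hour correction: if the CMOS clock reads
   minute 50 and the true minute is 20, the difference is 30, so
   ((30 + 15) / 30) & 1 == 1 and real_minutes becomes 50, matching
   a half-hour time zone offset. */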
real_minutes %= 60;
if (abs(real_minutes - cmos_minutes) < 30) {
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
real_seconds = bin2bcd(real_seconds);
real_minutes = bin2bcd(real_minutes);
}
CMOS_WRITE(real_seconds,RTC_SECONDS);
CMOS_WRITE(real_minutes,RTC_MINUTES);
} else {
printk_once(KERN_NOTICE
"set_rtc_mmss: can't update from %d to %d\n",
cmos_minutes, real_minutes);
retval = -1;
}
/* The following flags have to be released exactly in this order,
* otherwise the DS12887 (popular MC146818A clone with integrated
* battery and quartz) will not reset the oscillator and will not
* update precisely 500 ms later. You won't find this mentioned in
* the Dallas Semiconductor data sheets, but who believes data
* sheets anyway ... -- Markus Kuhn
*/
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
spin_unlock(&rtc_lock);
return retval;
}
static int
alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case RTC_EPOCH_READ:
return put_user(rtc_epoch, (unsigned long __user *)arg);
case RTC_EPOCH_SET:
if (arg < 1900)
return -EINVAL;
rtc_epoch = arg;
return 0;
default:
return -ENOIOCTLCMD;
}
}
static const struct rtc_class_ops alpha_rtc_ops = {
.read_time = alpha_rtc_read_time,
.set_time = alpha_rtc_set_time,
.set_mmss = alpha_rtc_set_mmss,
.ioctl = alpha_rtc_ioctl,
};
/*
* Similarly, except do the actual CMOS access on the boot cpu only.
* This requires marshalling the data across an interprocessor call.
*/
#if defined(CONFIG_SMP) && \
(defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL))
# define HAVE_REMOTE_RTC 1
union remote_data {
struct rtc_time *tm;
unsigned long now;
long retval;
};
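/* Only one member is live per call: tm or now carries the argument
   over to the boot cpu, retval carries the result back. */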
static void
do_remote_read(void *data)
{
union remote_data *x = data;
x->retval = alpha_rtc_read_time(NULL, x->tm);
}
static int
remote_read_time(struct device *dev, struct rtc_time *tm)
{
union remote_data x;
if (smp_processor_id() != boot_cpuid) {
x.tm = tm;
smp_call_function_single(boot_cpuid, do_remote_read, &x, 1);
return x.retval;
}
return alpha_rtc_read_time(NULL, tm);
}
static void
do_remote_set(void *data)
{
union remote_data *x = data;
x->retval = alpha_rtc_set_time(NULL, x->tm);
}
static int
remote_set_time(struct device *dev, struct rtc_time *tm)
{
union remote_data x;
if (smp_processor_id() != boot_cpuid) {
x.tm = tm;
smp_call_function_single(boot_cpuid, do_remote_set, &x, 1);
return x.retval;
}
return alpha_rtc_set_time(NULL, tm);
}
static void
do_remote_mmss(void *data)
{
union remote_data *x = data;
x->retval = alpha_rtc_set_mmss(NULL, x->now);
}
static int
remote_set_mmss(struct device *dev, unsigned long now)
{
union remote_data x;
if (smp_processor_id() != boot_cpuid) {
x.now = now;
smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1);
return x.retval;
}
return alpha_rtc_set_mmss(NULL, now);
}
static const struct rtc_class_ops remote_rtc_ops = {
.read_time = remote_read_time,
.set_time = remote_set_time,
.set_mmss = remote_set_mmss,
.ioctl = alpha_rtc_ioctl,
};
#endif
static int __init
alpha_rtc_init(void)
{
const struct rtc_class_ops *ops;
struct platform_device *pdev;
struct rtc_device *rtc;
const char *name;
init_rtc_epoch();
name = "rtc-alpha";
ops = &alpha_rtc_ops;
#ifdef HAVE_REMOTE_RTC
if (alpha_mv.rtc_boot_cpu_only)
ops = &remote_rtc_ops;
#endif
pdev = platform_device_register_simple(name, -1, NULL, 0);
rtc = devm_rtc_device_register(&pdev->dev, name, ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
platform_set_drvdata(pdev, rtc);
return 0;
}
device_initcall(alpha_rtc_init);

1531
arch/alpha/kernel/setup.c Normal file

File diff suppressed because it is too large

551
arch/alpha/kernel/signal.c Normal file
View file

@ -0,0 +1,551 @@
/*
* linux/arch/alpha/kernel/signal.c
*
* Copyright (C) 1995 Linus Torvalds
*
* 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include "proto.h"
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage void ret_from_sys_call(void);
/*
* The OSF/1 sigprocmask calling sequence is different from the
* C sigprocmask() sequence..
*/
SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
{
sigset_t oldmask;
sigset_t mask;
unsigned long res;
siginitset(&mask, newmask & _BLOCKABLE);
res = sigprocmask(how, &mask, &oldmask);
if (!res) {
force_successful_syscall_return();
res = oldmask.sig[0];
}
return res;
}
SYSCALL_DEFINE3(osf_sigaction, int, sig,
const struct osf_sigaction __user *, act,
struct osf_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
new_ka.ka_restorer = NULL;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
}
return ret;
}
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
struct sigaction __user *, oact,
size_t, sigsetsize, void __user *, restorer)
{
struct k_sigaction new_ka, old_ka;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (act) {
new_ka.ka_restorer = restorer;
if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
return -EFAULT;
}
return ret;
}
/*
* Do a signal return; undo the signal stack.
*/
#if _NSIG_WORDS > 1
# error "Non SA_SIGINFO frame needs rearranging"
#endif
struct sigframe
{
struct sigcontext sc;
unsigned int retcode[3];
};
struct rt_sigframe
{
struct siginfo info;
struct ucontext uc;
unsigned int retcode[3];
};
/* If this changes, userland unwinders that Know Things about our signal
frame will break.  Do not undertake this lightly.  It also implies an ABI
change wrt the size of siginfo_t, which may cause some pain. */
extern char compile_time_assert
[offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1];
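/* The array above has size 1 when the offset is 176 and size -1
   otherwise, so a mismatch fails at compile time rather than at
   run time. */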
#define INSN_MOV_R30_R16 0x47fe0410
#define INSN_LDI_R0 0x201f0000
#define INSN_CALLSYS 0x00000083
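/* A sketch of the userland return stub these three words encode:
       mov $30,$16              # a0 = stack pointer (the sigframe)
       ldi $0,__NR_sigreturn    # (or __NR_rt_sigreturn below)
       callsys
   0x47fe0410 is "bis $31,$30,$16", 0x00000083 is the PALcode
   callsys, and the syscall number is added into the LDI
   displacement when the stub is written out. */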
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
unsigned long usp;
struct switch_stack *sw = (struct switch_stack *)regs - 1;
long i, err = __get_user(regs->pc, &sc->sc_pc);
current_thread_info()->restart_block.fn = do_no_restart_syscall;
sw->r26 = (unsigned long) ret_from_sys_call;
err |= __get_user(regs->r0, sc->sc_regs+0);
err |= __get_user(regs->r1, sc->sc_regs+1);
err |= __get_user(regs->r2, sc->sc_regs+2);
err |= __get_user(regs->r3, sc->sc_regs+3);
err |= __get_user(regs->r4, sc->sc_regs+4);
err |= __get_user(regs->r5, sc->sc_regs+5);
err |= __get_user(regs->r6, sc->sc_regs+6);
err |= __get_user(regs->r7, sc->sc_regs+7);
err |= __get_user(regs->r8, sc->sc_regs+8);
err |= __get_user(sw->r9, sc->sc_regs+9);
err |= __get_user(sw->r10, sc->sc_regs+10);
err |= __get_user(sw->r11, sc->sc_regs+11);
err |= __get_user(sw->r12, sc->sc_regs+12);
err |= __get_user(sw->r13, sc->sc_regs+13);
err |= __get_user(sw->r14, sc->sc_regs+14);
err |= __get_user(sw->r15, sc->sc_regs+15);
err |= __get_user(regs->r16, sc->sc_regs+16);
err |= __get_user(regs->r17, sc->sc_regs+17);
err |= __get_user(regs->r18, sc->sc_regs+18);
err |= __get_user(regs->r19, sc->sc_regs+19);
err |= __get_user(regs->r20, sc->sc_regs+20);
err |= __get_user(regs->r21, sc->sc_regs+21);
err |= __get_user(regs->r22, sc->sc_regs+22);
err |= __get_user(regs->r23, sc->sc_regs+23);
err |= __get_user(regs->r24, sc->sc_regs+24);
err |= __get_user(regs->r25, sc->sc_regs+25);
err |= __get_user(regs->r26, sc->sc_regs+26);
err |= __get_user(regs->r27, sc->sc_regs+27);
err |= __get_user(regs->r28, sc->sc_regs+28);
err |= __get_user(regs->gp, sc->sc_regs+29);
err |= __get_user(usp, sc->sc_regs+30);
wrusp(usp);
for (i = 0; i < 31; i++)
err |= __get_user(sw->fp[i], sc->sc_fpregs+i);
err |= __get_user(sw->fp[31], &sc->sc_fpcr);
return err;
}
/* Note that this syscall is also used by setcontext(3) to install
a given sigcontext.  This is because it's impossible to set *all*
registers and transfer control from userland. */
asmlinkage void
do_sigreturn(struct sigcontext __user *sc)
{
struct pt_regs *regs = current_pt_regs();
sigset_t set;
/* Verify that it's a good sigcontext before using it */
if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
goto give_sigsegv;
if (__get_user(set.sig[0], &sc->sc_mask))
goto give_sigsegv;
set_current_blocked(&set);
if (restore_sigcontext(sc, regs))
goto give_sigsegv;
/* Send SIGTRAP if we're single-stepping: */
if (ptrace_cancel_bpt (current)) {
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) regs->pc;
info.si_trapno = 0;
send_sig_info(SIGTRAP, &info, current);
}
return;
give_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void
do_rt_sigreturn(struct rt_sigframe __user *frame)
{
struct pt_regs *regs = current_pt_regs();
sigset_t set;
/* Verify that it's a good ucontext_t before using it */
if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc)))
goto give_sigsegv;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto give_sigsegv;
set_current_blocked(&set);
if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
goto give_sigsegv;
/* Send SIGTRAP if we're single-stepping: */
if (ptrace_cancel_bpt (current)) {
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) regs->pc;
info.si_trapno = 0;
send_sig_info(SIGTRAP, &info, current);
}
return;
give_sigsegv:
force_sig(SIGSEGV, current);
}
/*
* Set up a signal frame.
*/
static inline void __user *
get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size)
{
return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul);
}
static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask, unsigned long sp)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
long i, err = 0;
err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack);
err |= __put_user(mask, &sc->sc_mask);
err |= __put_user(regs->pc, &sc->sc_pc);
err |= __put_user(8, &sc->sc_ps);
err |= __put_user(regs->r0 , sc->sc_regs+0);
err |= __put_user(regs->r1 , sc->sc_regs+1);
err |= __put_user(regs->r2 , sc->sc_regs+2);
err |= __put_user(regs->r3 , sc->sc_regs+3);
err |= __put_user(regs->r4 , sc->sc_regs+4);
err |= __put_user(regs->r5 , sc->sc_regs+5);
err |= __put_user(regs->r6 , sc->sc_regs+6);
err |= __put_user(regs->r7 , sc->sc_regs+7);
err |= __put_user(regs->r8 , sc->sc_regs+8);
err |= __put_user(sw->r9 , sc->sc_regs+9);
err |= __put_user(sw->r10 , sc->sc_regs+10);
err |= __put_user(sw->r11 , sc->sc_regs+11);
err |= __put_user(sw->r12 , sc->sc_regs+12);
err |= __put_user(sw->r13 , sc->sc_regs+13);
err |= __put_user(sw->r14 , sc->sc_regs+14);
err |= __put_user(sw->r15 , sc->sc_regs+15);
err |= __put_user(regs->r16, sc->sc_regs+16);
err |= __put_user(regs->r17, sc->sc_regs+17);
err |= __put_user(regs->r18, sc->sc_regs+18);
err |= __put_user(regs->r19, sc->sc_regs+19);
err |= __put_user(regs->r20, sc->sc_regs+20);
err |= __put_user(regs->r21, sc->sc_regs+21);
err |= __put_user(regs->r22, sc->sc_regs+22);
err |= __put_user(regs->r23, sc->sc_regs+23);
err |= __put_user(regs->r24, sc->sc_regs+24);
err |= __put_user(regs->r25, sc->sc_regs+25);
err |= __put_user(regs->r26, sc->sc_regs+26);
err |= __put_user(regs->r27, sc->sc_regs+27);
err |= __put_user(regs->r28, sc->sc_regs+28);
err |= __put_user(regs->gp , sc->sc_regs+29);
err |= __put_user(sp, sc->sc_regs+30);
err |= __put_user(0, sc->sc_regs+31);
for (i = 0; i < 31; i++)
err |= __put_user(sw->fp[i], sc->sc_fpregs+i);
err |= __put_user(0, sc->sc_fpregs+31);
err |= __put_user(sw->fp[31], &sc->sc_fpcr);
err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0);
err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1);
err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2);
return err;
}
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
unsigned long oldsp, r26, err = 0;
struct sigframe __user *frame;
oldsp = rdusp();
frame = get_sigframe(ksig, oldsp, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0], oldsp);
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
r26 = (unsigned long) ksig->ka.ka_restorer;
if (!r26) {
err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0);
err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1);
err |= __put_user(INSN_CALLSYS, frame->retcode+2);
imb();
r26 = (unsigned long) frame->retcode;
}
/* Check that everything was written properly. */
if (err)
return err;
/* "Return" to the handler */
regs->r26 = r26;
regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
regs->r16 = ksig->sig; /* a0: signal number */
regs->r17 = 0; /* a1: exception code */
regs->r18 = (unsigned long) &frame->sc; /* a2: sigcontext pointer */
wrusp((unsigned long) frame);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
current->comm, current->pid, frame, regs->pc, regs->r26);
#endif
return 0;
}
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
unsigned long oldsp, r26, err = 0;
struct rt_sigframe __user *frame;
oldsp = rdusp();
frame = get_sigframe(ksig, oldsp, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask);
err |= __save_altstack(&frame->uc.uc_stack, oldsp);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs,
set->sig[0], oldsp);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
r26 = (unsigned long) ksig->ka.ka_restorer;
if (!r26) {
err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0);
err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn,
frame->retcode+1);
err |= __put_user(INSN_CALLSYS, frame->retcode+2);
imb();
r26 = (unsigned long) frame->retcode;
}
if (err)
return -EFAULT;
/* "Return" to the handler */
regs->r26 = r26;
regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
regs->r16 = ksig->sig; /* a0: signal number */
regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */
regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */
wrusp((unsigned long) frame);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
current->comm, current->pid, frame, regs->pc, regs->r26);
#endif
return 0;
}
/*
* OK, we're invoking a handler.
*/
static inline void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(ksig, oldset, regs);
else
ret = setup_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, 0);
}
static inline void
syscall_restart(unsigned long r0, unsigned long r19,
struct pt_regs *regs, struct k_sigaction *ka)
{
switch (regs->r0) {
case ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
case ERESTARTNOHAND:
regs->r0 = EINTR;
break;
}
/* fallthrough */
case ERESTARTNOINTR:
regs->r0 = r0; /* reset v0 and a3 and replay syscall */
regs->r19 = r19;
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
regs->r0 = EINTR;
break;
}
}
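/* A sketch of the flow above: ERESTARTSYS with SA_RESTART falls
   through to ERESTARTNOINTR and replays the syscall (pc -= 4 backs
   up over the callsys insn); without SA_RESTART it drops into the
   ERESTARTNOHAND label and the handler sees EINTR, as does
   ERESTART_RESTARTBLOCK. */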
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle.  Thus you cannot kill init even with SIGKILL, even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*
* "r0" and "r19" are the registers we need to restore for system call
* restart. "r0" is also used as an indicator whether we can restart at
* all (if we get here from anything but a syscall return, it will be 0)
*/
static void
do_signal(struct pt_regs *regs, unsigned long r0, unsigned long r19)
{
unsigned long single_stepping = ptrace_cancel_bpt(current);
struct ksignal ksig;
/* This lets the debugger run, ... */
if (get_signal(&ksig)) {
/* ... so re-check the single stepping. */
single_stepping |= ptrace_cancel_bpt(current);
/* Whee! Actually deliver the signal. */
if (r0)
syscall_restart(r0, r19, regs, &ksig.ka);
handle_signal(&ksig, regs);
} else {
single_stepping |= ptrace_cancel_bpt(current);
if (r0) {
switch (regs->r0) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
/* Reset v0 and a3 and replay syscall. */
regs->r0 = r0;
regs->r19 = r19;
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
/* Set v0 to the restart_syscall and replay */
regs->r0 = __NR_restart_syscall;
regs->pc -= 4;
break;
}
}
restore_saved_sigmask();
}
if (single_stepping)
ptrace_set_bpt(current); /* re-set breakpoint */
}
void
do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
unsigned long r0, unsigned long r19)
{
do {
if (thread_flags & _TIF_NEED_RESCHED) {
schedule();
} else {
local_irq_enable();
if (thread_flags & _TIF_SIGPENDING) {
do_signal(regs, r0, r19);
r0 = 0;
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}
local_irq_disable();
thread_flags = current_thread_info()->flags;
} while (thread_flags & _TIF_WORK_MASK);
}

File diff suppressed because it is too large

274
arch/alpha/kernel/smc37c93x.c Normal file
View file

@ -0,0 +1,274 @@
/*
* SMC 37C93X initialization code
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/hwrpb.h>
#include <asm/io.h>
#include <asm/segment.h>
#define SMC_DEBUG 0
#if SMC_DEBUG
# define DBG_DEVS(args) printk args
#else
# define DBG_DEVS(args)
#endif
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)
/* device "activate" register contents */
#define DEVICE_ON 1
#define DEVICE_OFF 0
/* configuration on/off keys */
#define CONFIG_ON_KEY 0x55
#define CONFIG_OFF_KEY 0xaa
/* configuration space device definitions */
#define FDC 0
#define IDE1 1
#define IDE2 2
#define PARP 3
#define SER1 4
#define SER2 5
#define RTCL 6
#define KYBD 7
#define AUXIO 8
/* Chip register offsets from base */
#define CONFIG_CONTROL 0x02
#define INDEX_ADDRESS 0x03
#define LOGICAL_DEVICE_NUMBER 0x07
#define DEVICE_ID 0x20
#define DEVICE_REV 0x21
#define POWER_CONTROL 0x22
#define POWER_MGMT 0x23
#define OSC 0x24
#define ACTIVATE 0x30
#define ADDR_HI 0x60
#define ADDR_LO 0x61
#define INTERRUPT_SEL 0x70
#define INTERRUPT_SEL_2 0x72 /* KYBD/MOUS only */
#define DMA_CHANNEL_SEL 0x74 /* FDC/PARP only */
#define FDD_MODE_REGISTER 0x90
#define FDD_OPTION_REGISTER 0x91
/* values that we read back that are expected ... */
#define VALID_DEVICE_ID 2
/* default device addresses */
#define KYBD_INTERRUPT 1
#define MOUS_INTERRUPT 12
#define COM2_BASE 0x2f8
#define COM2_INTERRUPT 3
#define COM1_BASE 0x3f8
#define COM1_INTERRUPT 4
#define PARP_BASE 0x3bc
#define PARP_INTERRUPT 7
static unsigned long __init SMCConfigState(unsigned long baseAddr)
{
unsigned char devId;
unsigned long configPort;
unsigned long indexPort;
unsigned long dataPort;
int i;
configPort = indexPort = baseAddr;
dataPort = configPort + 1;
#define NUM_RETRIES 5
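/* Writing the key twice at the config port puts the 37C93X into
   configuration mode; reading back DEVICE_ID == VALID_DEVICE_ID
   confirms the part answered.  SMCRunState() below writes the off
   key to leave configuration mode. */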
for (i = 0; i < NUM_RETRIES; i++)
{
outb(CONFIG_ON_KEY, configPort);
outb(CONFIG_ON_KEY, configPort);
outb(DEVICE_ID, indexPort);
devId = inb(dataPort);
if (devId == VALID_DEVICE_ID) {
outb(DEVICE_REV, indexPort);
/* unsigned char devRev = */ inb(dataPort);
break;
}
else
udelay(100);
}
return (i != NUM_RETRIES) ? baseAddr : 0L;
}
static void __init SMCRunState(unsigned long baseAddr)
{
outb(CONFIG_OFF_KEY, baseAddr);
}
static unsigned long __init SMCDetectUltraIO(void)
{
unsigned long baseAddr;
baseAddr = 0x3F0;
if ((baseAddr = SMCConfigState(baseAddr)) == 0x3F0)
return baseAddr;
baseAddr = 0x370;
if ((baseAddr = SMCConfigState(baseAddr)) == 0x370)
return baseAddr;
return 0;
}
static void __init SMCEnableDevice(unsigned long baseAddr,
unsigned long device,
unsigned long portaddr,
unsigned long interrupt)
{
unsigned long indexPort;
unsigned long dataPort;
indexPort = baseAddr;
dataPort = baseAddr + 1;
outb(LOGICAL_DEVICE_NUMBER, indexPort);
outb(device, dataPort);
outb(ADDR_LO, indexPort);
outb(( portaddr & 0xFF ), dataPort);
outb(ADDR_HI, indexPort);
outb((portaddr >> 8) & 0xFF, dataPort);
outb(INTERRUPT_SEL, indexPort);
outb(interrupt, dataPort);
outb(ACTIVATE, indexPort);
outb(DEVICE_ON, dataPort);
}
static void __init SMCEnableKYBD(unsigned long baseAddr)
{
unsigned long indexPort;
unsigned long dataPort;
indexPort = baseAddr;
dataPort = baseAddr + 1;
outb(LOGICAL_DEVICE_NUMBER, indexPort);
outb(KYBD, dataPort);
outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */
outb(KYBD_INTERRUPT, dataPort);
outb(INTERRUPT_SEL_2, indexPort); /* Secondary interrupt select */
outb(MOUS_INTERRUPT, dataPort);
outb(ACTIVATE, indexPort);
outb(DEVICE_ON, dataPort);
}
static void __init SMCEnableFDC(unsigned long baseAddr)
{
unsigned long indexPort;
unsigned long dataPort;
unsigned char oldValue;
indexPort = baseAddr;
dataPort = baseAddr + 1;
outb(LOGICAL_DEVICE_NUMBER, indexPort);
outb(FDC, dataPort);
outb(FDD_MODE_REGISTER, indexPort);
oldValue = inb(dataPort);
oldValue |= 0x0E; /* Enable burst mode */
outb(oldValue, dataPort);
outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */
outb(0x06, dataPort );
outb(DMA_CHANNEL_SEL, indexPort); /* DMA channel select */
outb(0x02, dataPort);
outb(ACTIVATE, indexPort);
outb(DEVICE_ON, dataPort);
}
#if SMC_DEBUG
static void __init SMCReportDeviceStatus(unsigned long baseAddr)
{
unsigned long indexPort;
unsigned long dataPort;
unsigned char currentControl;
indexPort = baseAddr;
dataPort = baseAddr + 1;
outb(POWER_CONTROL, indexPort);
currentControl = inb(dataPort);
printk(currentControl & (1 << FDC)
? "\t+FDC Enabled\n" : "\t-FDC Disabled\n");
printk(currentControl & (1 << IDE1)
? "\t+IDE1 Enabled\n" : "\t-IDE1 Disabled\n");
printk(currentControl & (1 << IDE2)
? "\t+IDE2 Enabled\n" : "\t-IDE2 Disabled\n");
printk(currentControl & (1 << PARP)
? "\t+PARP Enabled\n" : "\t-PARP Disabled\n");
printk(currentControl & (1 << SER1)
? "\t+SER1 Enabled\n" : "\t-SER1 Disabled\n");
printk(currentControl & (1 << SER2)
? "\t+SER2 Enabled\n" : "\t-SER2 Disabled\n");
printk( "\n" );
}
#endif
int __init SMC93x_Init(void)
{
unsigned long SMCUltraBase;
unsigned long flags;
local_irq_save(flags);
if ((SMCUltraBase = SMCDetectUltraIO()) != 0UL) {
#if SMC_DEBUG
SMCReportDeviceStatus(SMCUltraBase);
#endif
SMCEnableDevice(SMCUltraBase, SER1, COM1_BASE, COM1_INTERRUPT);
DBG_DEVS(("SMC FDC37C93X: SER1 done\n"));
SMCEnableDevice(SMCUltraBase, SER2, COM2_BASE, COM2_INTERRUPT);
DBG_DEVS(("SMC FDC37C93X: SER2 done\n"));
SMCEnableDevice(SMCUltraBase, PARP, PARP_BASE, PARP_INTERRUPT);
DBG_DEVS(("SMC FDC37C93X: PARP done\n"));
/* On PC164, IDE on the SMC is not enabled;
the CMD646 (PCI) controller on the motherboard is used instead. */
SMCEnableKYBD(SMCUltraBase);
DBG_DEVS(("SMC FDC37C93X: KYB done\n"));
SMCEnableFDC(SMCUltraBase);
DBG_DEVS(("SMC FDC37C93X: FDC done\n"));
#if SMC_DEBUG
SMCReportDeviceStatus(SMCUltraBase);
#endif
SMCRunState(SMCUltraBase);
local_irq_restore(flags);
printk("SMC FDC37C93X Ultra I/O Controller found @ 0x%lx\n",
SMCUltraBase);
return 1;
}
else {
local_irq_restore(flags);
DBG_DEVS(("No SMC FDC37C93X Ultra I/O Controller found\n"));
return 0;
}
}

785
arch/alpha/kernel/smp.c Normal file
View file

@ -0,0 +1,785 @@
/*
* linux/arch/alpha/kernel/smp.c
*
* 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
* Renamed modified smp_call_function to smp_call_function_on_cpu()
* Created a function that conforms to the old calling convention
* of smp_call_function().
*
* This is helpful for DCPI.
*
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args) printk args
#else
#define DBGS(args)
#endif
/* A collection of per-processor data. */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);
/* A collection of single bit ipi messages. */
static struct {
unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online. */
static int smp_secondary_alive = 0;
int smp_num_probed; /* Internal processor count */
int smp_num_cpus = 1; /* Number that came online. */
EXPORT_SYMBOL(smp_num_cpus);
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
static inline void __init
smp_store_cpu_info(int cpuid)
{
cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
cpu_data[cpuid].need_new_asn = 0;
cpu_data[cpuid].asn_lock = 0;
}
/*
* Ideally sets up per-cpu profiling hooks. Doesn't do much now...
*/
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
cpu_data[cpuid].prof_counter = 1;
cpu_data[cpuid].prof_multiplier = 1;
}
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
unsigned long stop = jiffies + 10*HZ;
while (time_before(jiffies, stop)) {
if (!smp_secondary_alive)
return;
barrier();
}
printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
for (;;)
barrier();
}
/*
* Where secondaries begin a life of C.
*/
void
smp_callin(void)
{
int cpuid = hard_smp_processor_id();
if (cpu_online(cpuid)) {
printk("??, cpu 0x%x already present??\n", cpuid);
BUG();
}
set_cpu_online(cpuid, true);
/* Turn on machine checks. */
wrmces(7);
/* Set trap vectors. */
trap_init();
/* Set interrupt vector. */
wrent(entInt, 0);
/* Get our local ticker going. */
smp_setup_percpu_timer(cpuid);
init_clockevent();
/* Call platform-specific callin, if specified */
if (alpha_mv.smp_callin)
alpha_mv.smp_callin();
/* All kernel threads share the same mm context. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
/* inform the notifiers about the new cpu */
notify_cpu_starting(cpuid);
/* Must have completely accurate bogos. */
local_irq_enable();
/* Wait for the boot CPU to stop, with irqs enabled, before running
calibrate_delay. */
wait_boot_cpu_to_stop(cpuid);
mb();
calibrate_delay();
smp_store_cpu_info(cpuid);
/* Allow the master to continue only after we've written loops_per_jiffy. */
wmb();
smp_secondary_alive = 1;
DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
cpuid, current, current->active_mm));
preempt_disable();
cpu_startup_entry(CPUHP_ONLINE);
}
/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
static int
wait_for_txrdy (unsigned long cpumask)
{
unsigned long timeout;
if (!(hwrpb->txrdy & cpumask))
return 0;
timeout = jiffies + 10*HZ;
while (time_before(jiffies, timeout)) {
if (!(hwrpb->txrdy & cpumask))
return 0;
udelay(10);
barrier();
}
return -1;
}
/*
* Send a message to a secondary's console. "START" is one such
* interesting message. ;-)
*/
static void
send_secondary_console_msg(char *str, int cpuid)
{
struct percpu_struct *cpu;
register char *cp1, *cp2;
unsigned long cpumask;
size_t len;
cpu = (struct percpu_struct *)
((char*)hwrpb
+ hwrpb->processor_offset
+ cpuid * hwrpb->processor_size);
cpumask = (1UL << cpuid);
if (wait_for_txrdy(cpumask))
goto timeout;
cp2 = str;
len = strlen(cp2);
*(unsigned int *)&cpu->ipc_buffer[0] = len;
cp1 = (char *) &cpu->ipc_buffer[1];
memcpy(cp1, cp2, len);
/* atomic test and set */
wmb();
set_bit(cpuid, &hwrpb->rxrdy);
if (wait_for_txrdy(cpumask))
goto timeout;
return;
timeout:
printk("Processor %x not ready\n", cpuid);
}
/*
* A secondary console wants to send a message. Receive it.
*/
static void
recv_secondary_console_msg(void)
{
int mycpu, i, cnt;
unsigned long txrdy = hwrpb->txrdy;
char *cp1, *cp2, buf[80];
struct percpu_struct *cpu;
DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
mycpu = hard_smp_processor_id();
for (i = 0; i < NR_CPUS; i++) {
if (!(txrdy & (1UL << i)))
continue;
DBGS(("recv_secondary_console_msg: "
"TXRDY contains CPU %d.\n", i));
cpu = (struct percpu_struct *)
((char*)hwrpb
+ hwrpb->processor_offset
+ i * hwrpb->processor_size);
DBGS(("recv_secondary_console_msg: on %d from %d"
" HALT_REASON 0x%lx FLAGS 0x%lx\n",
mycpu, i, cpu->halt_reason, cpu->flags));
cnt = cpu->ipc_buffer[0] >> 32;
if (cnt <= 0 || cnt >= 80)
strcpy(buf, "<<< BOGUS MSG >>>");
else {
cp1 = (char *) &cpu->ipc_buffer[1];
cp2 = buf;
memcpy(cp2, cp1, cnt);
cp2[cnt] = '\0';
while ((cp2 = strchr(cp2, '\r')) != 0) {
*cp2 = ' ';
if (cp2[1] == '\n')
cp2[1] = ' ';
}
}
DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
"message is '%s'\n", mycpu, buf));
}
hwrpb->txrdy = 0;
}
/*
* Convince the console to have a secondary cpu begin execution.
*/
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
struct percpu_struct *cpu;
struct pcb_struct *hwpcb, *ipcb;
unsigned long timeout;
cpu = (struct percpu_struct *)
((char*)hwrpb
+ hwrpb->processor_offset
+ cpuid * hwrpb->processor_size);
hwpcb = (struct pcb_struct *) cpu->hwpcb;
ipcb = &task_thread_info(idle)->pcb;
/* Initialize the CPU's HWPCB to something just good enough for
us to get started. Immediately after starting, we'll swpctx
to the target idle task's pcb.  Reuse the stack in the
meantime.  Precalculate the target PCBB. */
hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
hwpcb->usp = 0;
hwpcb->ptbr = ipcb->ptbr;
hwpcb->pcc = 0;
hwpcb->asn = 0;
hwpcb->unique = virt_to_phys(ipcb);
hwpcb->flags = ipcb->flags;
hwpcb->res1 = hwpcb->res2 = 0;
#if 0
DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
cpuid, idle->state, ipcb->flags));
/* Setup HWRPB fields that SRM uses to activate secondary CPU */
hwrpb->CPU_restart = __smp_callin;
hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
/* Recalculate and update the HWRPB checksum */
hwrpb_update_checksum(hwrpb);
/*
* Send a "start" command to the specified processor.
*/
/* SRM III 3.4.1.3 */
cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */
cpu->flags &= ~1; /* turn off Bootstrap In Progress */
wmb();
send_secondary_console_msg("START\r\n", cpuid);
/* Wait 10 seconds for an ACK from the console. */
timeout = jiffies + 10*HZ;
while (time_before(jiffies, timeout)) {
if (cpu->flags & 1)
goto started;
udelay(10);
barrier();
}
printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
return -1;
started:
DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
return 0;
}
/*
* Bring one cpu online.
*/
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
unsigned long timeout;
/* Signal the secondary to wait a moment. */
smp_secondary_alive = -1;
/* Whirrr, whirrr, whirrrrrrrrr... */
if (secondary_cpu_start(cpuid, idle))
return -1;
/* Notify the secondary CPU it can run calibrate_delay. */
mb();
smp_secondary_alive = 0;
/* We've been acked by the console; wait one second for
the task to start up for real. */
timeout = jiffies + 1*HZ;
while (time_before(jiffies, timeout)) {
if (smp_secondary_alive == 1)
goto alive;
udelay(10);
barrier();
}
/* We failed to boot the CPU. */
printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
return -1;
alive:
/* Another "Red Snapper". */
return 0;
}
/*
* Called from setup_arch. Detect an SMP system and which processors
* are present.
*/
void __init
setup_smp(void)
{
struct percpu_struct *cpubase, *cpu;
unsigned long i;
if (boot_cpuid != 0) {
printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
boot_cpuid);
}
if (hwrpb->nr_processors > 1) {
int boot_cpu_palrev;
DBGS(("setup_smp: nr_processors %ld\n",
hwrpb->nr_processors));
cpubase = (struct percpu_struct *)
((char*)hwrpb + hwrpb->processor_offset);
boot_cpu_palrev = cpubase->pal_revision;
for (i = 0; i < hwrpb->nr_processors; i++) {
cpu = (struct percpu_struct *)
((char *)cpubase + i*hwrpb->processor_size);
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
set_cpu_possible(i, true);
set_cpu_present(i, true);
cpu->pal_revision = boot_cpu_palrev;
}
DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
i, cpu->flags, cpu->type));
DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
i, cpu->pal_revision));
}
} else {
smp_num_probed = 1;
}
printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}
/*
* Called by smp_init to prepare the secondaries
*/
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
/* Take care of some initial bookkeeping. */
memset(ipi_data, 0, sizeof(ipi_data));
current_thread_info()->cpu = boot_cpuid;
smp_store_cpu_info(boot_cpuid);
smp_setup_percpu_timer(boot_cpuid);
/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
init_cpu_possible(cpumask_of(boot_cpuid));
init_cpu_present(cpumask_of(boot_cpuid));
printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
printk(KERN_INFO "SMP starting up secondaries.\n");
smp_num_cpus = smp_num_probed;
}
void
smp_prepare_boot_cpu(void)
{
}
int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
int cpu;
unsigned long bogosum = 0;
for (cpu = 0; cpu < NR_CPUS; cpu++)
if (cpu_online(cpu))
bogosum += cpu_data[cpu].loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
num_online_cpus(),
(bogosum + 2500) / (500000/HZ),
((bogosum + 2500) / (5000/HZ)) % 100);
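/* "BogoMIPS" here is the summed loops_per_jiffy scaled by
   HZ/500000, printed with two decimal places via integer
   division; the +2500 term rounds rather than truncates. */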
}
int
setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
int i;
mb();
for_each_cpu(i, to_whom)
set_bit(operation, &ipi_data[i].bits);
mb();
for_each_cpu(i, to_whom)
wripir(i);
}
void
handle_ipi(struct pt_regs *regs)
{
int this_cpu = smp_processor_id();
unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
unsigned long ops;
#if 0
DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
this_cpu, *pending_ipis, regs->pc));
#endif
mb(); /* Order interrupt and bit testing. */
while ((ops = xchg(pending_ipis, 0)) != 0) {
mb(); /* Order bit clearing and data access. */
do {
unsigned long which;
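/* ops & -ops isolates the lowest set bit, e.g. 0b0110 -> 0b0010;
   __ffs() then converts that bit to its message number. */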
which = ops & -ops;
ops &= ~which;
which = __ffs(which);
switch (which) {
case IPI_RESCHEDULE:
scheduler_ipi();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_STOP:
halt();
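/* halt() does not return, so the fall through to the
   default case is unreachable. */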
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
this_cpu, which);
break;
}
} while (ops);
mb(); /* Order data access and bit testing. */
}
cpu_data[this_cpu].ipi_count++;
if (hwrpb->txrdy)
recv_secondary_console_msg();
}
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
if (cpu == hard_smp_processor_id())
printk(KERN_WARNING
"smp_send_reschedule: Sending IPI to self.\n");
#endif
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
cpumask_t to_whom;
cpumask_copy(&to_whom, cpu_possible_mask);
cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
if (hard_smp_processor_id() != boot_cpuid)
printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
send_ipi_message(&to_whom, IPI_CPU_STOP);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
static void
ipi_imb(void *ignored)
{
imb();
}
void
smp_imb(void)
{
/* Must wait for the other processors to flush their icaches before continuing. */
if (on_each_cpu(ipi_imb, NULL, 1))
printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
static void
ipi_flush_tlb_all(void *ignored)
{
tbia();
}
void
flush_tlb_all(void)
{
/* Although we don't have any data to pass, we do want to
synchronize with the other processors. */
if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
printk(KERN_CRIT "flush_tlb_all: timed out\n");
}
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
static void
ipi_flush_tlb_mm(void *x)
{
struct mm_struct *mm = (struct mm_struct *) x;
if (mm == current->active_mm && !asn_locked())
flush_tlb_current(mm);
else
flush_tlb_other(mm);
}
void
flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
if (mm == current->active_mm) {
flush_tlb_current(mm);
if (atomic_read(&mm->mm_users) <= 1) {
int cpu, this_cpu = smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu) || cpu == this_cpu)
continue;
if (mm->context[cpu])
mm->context[cpu] = 0;
}
preempt_enable();
return;
}
}
if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
printk(KERN_CRIT "flush_tlb_mm: timed out\n");
}
preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
struct flush_tlb_page_struct {
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long addr;
};
static void
ipi_flush_tlb_page(void *x)
{
struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
struct mm_struct * mm = data->mm;
if (mm == current->active_mm && !asn_locked())
flush_tlb_current_page(mm, data->vma, data->addr);
else
flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
struct flush_tlb_page_struct data;
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
if (mm == current->active_mm) {
flush_tlb_current_page(mm, vma, addr);
if (atomic_read(&mm->mm_users) <= 1) {
int cpu, this_cpu = smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu) || cpu == this_cpu)
continue;
if (mm->context[cpu])
mm->context[cpu] = 0;
}
preempt_enable();
return;
}
}
data.vma = vma;
data.mm = mm;
data.addr = addr;
if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
printk(KERN_CRIT "flush_tlb_page: timed out\n");
}
preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
/* On the Alpha we always flush the whole user tlb. */
flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
static void
ipi_flush_icache_page(void *x)
{
struct mm_struct *mm = (struct mm_struct *) x;
if (mm == current->active_mm && !asn_locked())
__load_new_mm_context(mm);
else
flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
struct mm_struct *mm = vma->vm_mm;
if ((vma->vm_flags & VM_EXEC) == 0)
return;
preempt_disable();
if (mm == current->active_mm) {
__load_new_mm_context(mm);
if (atomic_read(&mm->mm_users) <= 1) {
int cpu, this_cpu = smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu) || cpu == this_cpu)
continue;
if (mm->context[cpu])
mm->context[cpu] = 0;
}
preempt_enable();
return;
}
}
if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
printk(KERN_CRIT "flush_icache_page: timed out\n");
}
preempt_enable();
}

235
arch/alpha/kernel/srm_env.c Normal file
View file

@ -0,0 +1,235 @@
/*
* srm_env.c - Access to SRM environment
* variables through linux' procfs
*
* (C) 2001,2002,2006 by Jan-Benedict Glaw <jbglaw@lug-owl.de>
*
* This driver is a modified version of Erik Mouw's example proc
* interface, so: thank you, Erik! He can be reached via email at
* <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
* provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
* included a patch like this as well. Thanks for the idea!
*
* This program is free software; you can redistribute
* it and/or modify it under the terms of the GNU General
* Public License version 2 as published by the Free Software
* Foundation.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place,
* Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/console.h>
#include <asm/uaccess.h>
#include <asm/machvec.h>
#define BASE_DIR "srm_environment" /* Subdir in /proc/ */
#define NAMED_DIR "named_variables" /* Subdir for known variables */
#define NUMBERED_DIR "numbered_variables" /* Subdir for all variables */
#define VERSION "0.0.6" /* Module version */
#define NAME "srm_env" /* Module name */
MODULE_AUTHOR("Jan-Benedict Glaw <jbglaw@lug-owl.de>");
MODULE_DESCRIPTION("Accessing Alpha SRM environment through procfs interface");
MODULE_LICENSE("GPL");
typedef struct _srm_env {
char *name;
unsigned long id;
} srm_env_t;
static struct proc_dir_entry *base_dir;
static struct proc_dir_entry *named_dir;
static struct proc_dir_entry *numbered_dir;
static srm_env_t srm_named_entries[] = {
{ "auto_action", ENV_AUTO_ACTION },
{ "boot_dev", ENV_BOOT_DEV },
{ "bootdef_dev", ENV_BOOTDEF_DEV },
{ "booted_dev", ENV_BOOTED_DEV },
{ "boot_file", ENV_BOOT_FILE },
{ "booted_file", ENV_BOOTED_FILE },
{ "boot_osflags", ENV_BOOT_OSFLAGS },
{ "booted_osflags", ENV_BOOTED_OSFLAGS },
{ "boot_reset", ENV_BOOT_RESET },
{ "dump_dev", ENV_DUMP_DEV },
{ "enable_audit", ENV_ENABLE_AUDIT },
{ "license", ENV_LICENSE },
{ "char_set", ENV_CHAR_SET },
{ "language", ENV_LANGUAGE },
{ "tty_dev", ENV_TTY_DEV },
{ NULL, 0 },
};
static int srm_env_proc_show(struct seq_file *m, void *v)
{
unsigned long ret;
unsigned long id = (unsigned long)m->private;
char *page;
page = (char *)__get_free_page(GFP_USER);
if (!page)
return -ENOMEM;
ret = callback_getenv(id, page, PAGE_SIZE);
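/* SRM callbacks return status in the top three bits of the result;
   0 there means success, and the low bits then hold the byte count
   written into the page. */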
if ((ret >> 61) == 0) {
seq_write(m, page, ret);
ret = 0;
} else
ret = -EFAULT;
free_page((unsigned long)page);
return ret;
}
static int srm_env_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, srm_env_proc_show, PDE_DATA(inode));
}
static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
int res;
unsigned long id = (unsigned long)PDE_DATA(file_inode(file));
char *buf = (char *) __get_free_page(GFP_USER);
unsigned long ret1, ret2;
if (!buf)
return -ENOMEM;
res = -EINVAL;
if (count >= PAGE_SIZE)
goto out;
res = -EFAULT;
if (copy_from_user(buf, buffer, count))
goto out;
buf[count] = '\0';
ret1 = callback_setenv(id, buf, count);
if ((ret1 >> 61) == 0) {
do
ret2 = callback_save_env();
while ((ret2 >> 61) == 1);
res = (int) ret1;
}
out:
free_page((unsigned long)buf);
return res;
}
static const struct file_operations srm_env_proc_fops = {
.owner = THIS_MODULE,
.open = srm_env_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = srm_env_proc_write,
};
static int __init
srm_env_init(void)
{
srm_env_t *entry;
unsigned long var_num;
/*
* Check system
*/
if (!alpha_using_srm) {
printk(KERN_INFO "%s: This Alpha system doesn't "
"know about SRM (or you've booted "
"SRM->MILO->Linux, which gets "
"misdetected)...\n", __func__);
return -ENODEV;
}
/*
* Create base directory
*/
base_dir = proc_mkdir(BASE_DIR, NULL);
if (!base_dir) {
printk(KERN_ERR "Couldn't create base dir /proc/%s\n",
BASE_DIR);
return -ENOMEM;
}
/*
* Create per-name subdirectory
*/
named_dir = proc_mkdir(NAMED_DIR, base_dir);
if (!named_dir) {
printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n",
BASE_DIR, NAMED_DIR);
goto cleanup;
}
/*
* Create per-number subdirectory
*/
numbered_dir = proc_mkdir(NUMBERED_DIR, base_dir);
if (!numbered_dir) {
printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n",
BASE_DIR, NUMBERED_DIR);
goto cleanup;
}
/*
* Create all named nodes
*/
entry = srm_named_entries;
while (entry->name && entry->id) {
if (!proc_create_data(entry->name, 0644, named_dir,
&srm_env_proc_fops, (void *)entry->id))
goto cleanup;
entry++;
}
/*
* Create all numbered nodes
*/
for (var_num = 0; var_num <= 255; var_num++) {
char name[4];
sprintf(name, "%ld", var_num);
if (!proc_create_data(name, 0644, numbered_dir,
&srm_env_proc_fops, (void *)var_num))
goto cleanup;
}
printk(KERN_INFO "%s: version %s loaded successfully\n", NAME,
VERSION);
return 0;
cleanup:
remove_proc_subtree(BASE_DIR, NULL);
return -ENOMEM;
}
static void __exit
srm_env_exit(void)
{
remove_proc_subtree(BASE_DIR, NULL);
printk(KERN_INFO "%s: unloaded successfully\n", NAME);
}
module_init(srm_env_init);
module_exit(srm_env_exit);

297
arch/alpha/kernel/srmcons.c Normal file
View file

@ -0,0 +1,297 @@
/*
* linux/arch/alpha/kernel/srmcons.c
*
* Callback based driver for SRM Console console device.
* (TTY driver and console driver)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <asm/console.h>
#include <asm/uaccess.h>
static DEFINE_SPINLOCK(srmcons_callback_lock);
static int srm_is_registered_console = 0;
/*
* The TTY driver
*/
#define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */
struct srmcons_private {
struct tty_port port;
struct timer_list timer;
} srmcons_singleton;
typedef union _srmcons_result {
struct {
unsigned long c :61;
unsigned long status :3;
} bits;
long as_long;
} srmcons_result;
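/* SRM console callbacks pack a 3-bit status above a 61-bit payload
   (a character or a count).  As the loop in srmcons_do_receive_chars()
   reads it, status < 2 means a byte was returned and the low status
   bit set means more input is pending. */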
/* called with callback_lock held */
static int
srmcons_do_receive_chars(struct tty_port *port)
{
srmcons_result result;
int count = 0, loops = 0;
do {
result.as_long = callback_getc(0);
if (result.bits.status < 2) {
tty_insert_flip_char(port, (char)result.bits.c, 0);
count++;
}
} while ((result.bits.status & 1) && (++loops < 10));
if (count)
tty_schedule_flip(port);
return count;
}
static void
srmcons_receive_chars(unsigned long data)
{
struct srmcons_private *srmconsp = (struct srmcons_private *)data;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
int incr = 10;
local_irq_save(flags);
if (spin_trylock(&srmcons_callback_lock)) {
if (!srmcons_do_receive_chars(port))
incr = 100;
spin_unlock(&srmcons_callback_lock);
}
spin_lock(&port->lock);
if (port->tty)
mod_timer(&srmconsp->timer, jiffies + incr);
spin_unlock(&port->lock);
local_irq_restore(flags);
}
/* called with callback_lock held */
static int
srmcons_do_write(struct tty_port *port, const char *buf, int count)
{
static char str_cr[1] = "\r";
long c, remaining = count;
srmcons_result result;
char *cur;
int need_cr;
for (cur = (char *)buf; remaining > 0; ) {
need_cr = 0;
/*
* Break it up into reasonable size chunks to allow a chance
* for input to get in
*/
for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++)
if (cur[c] == '\n')
need_cr = 1;
while (c > 0) {
result.as_long = callback_puts(0, cur, c);
c -= result.bits.c;
remaining -= result.bits.c;
cur += result.bits.c;
/*
* Check for pending input iff a tty port was provided
*/
if (port)
srmcons_do_receive_chars(port);
}
while (need_cr) {
result.as_long = callback_puts(0, str_cr, 1);
if (result.bits.c > 0)
need_cr = 0;
}
}
return count;
}
static int
srmcons_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
srmcons_do_write(tty->port, (const char *) buf, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
return count;
}
static int
srmcons_write_room(struct tty_struct *tty)
{
return 512;
}
static int
srmcons_chars_in_buffer(struct tty_struct *tty)
{
return 0;
}
static int
srmcons_open(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = &srmcons_singleton;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (!port->tty) {
tty->driver_data = srmconsp;
tty->port = port;
port->tty = tty; /* XXX proper refcounting */
mod_timer(&srmconsp->timer, jiffies + 10);
}
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void
srmcons_close(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = tty->driver_data;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (tty->count == 1) {
port->tty = NULL;
del_timer(&srmconsp->timer);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static struct tty_driver *srmcons_driver;
static const struct tty_operations srmcons_ops = {
.open = srmcons_open,
.close = srmcons_close,
.write = srmcons_write,
.write_room = srmcons_write_room,
.chars_in_buffer= srmcons_chars_in_buffer,
};
static int __init
srmcons_init(void)
{
setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
(unsigned long)&srmcons_singleton);
if (srm_is_registered_console) {
struct tty_driver *driver;
int err;
driver = alloc_tty_driver(MAX_SRM_CONSOLE_DEVICES);
if (!driver)
return -ENOMEM;
tty_port_init(&srmcons_singleton.port);
driver->driver_name = "srm";
driver->name = "srm";
driver->major = 0; /* dynamic */
driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_SYSCONS;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &srmcons_ops);
tty_port_link_device(&srmcons_singleton.port, driver, 0);
err = tty_register_driver(driver);
if (err) {
put_tty_driver(driver);
tty_port_destroy(&srmcons_singleton.port);
return err;
}
srmcons_driver = driver;
return 0;
}
return -ENODEV;
}
module_init(srmcons_init);
/*
* The console driver
*/
static void
srm_console_write(struct console *co, const char *s, unsigned count)
{
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
srmcons_do_write(NULL, s, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
}
static struct tty_driver *
srm_console_device(struct console *co, int *index)
{
*index = co->index;
return srmcons_driver;
}
static int
srm_console_setup(struct console *co, char *options)
{
return 0;
}
static struct console srmcons = {
.name = "srm",
.write = srm_console_write,
.device = srm_console_device,
.setup = srm_console_setup,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
void __init
register_srm_console(void)
{
if (!srm_is_registered_console) {
callback_open_console();
register_console(&srmcons);
srm_is_registered_console = 1;
}
}
void __init
unregister_srm_console(void)
{
if (srm_is_registered_console) {
callback_close_console();
unregister_console(&srmcons);
srm_is_registered_console = 0;
}
}

308
arch/alpha/kernel/sys_alcor.c Normal file
View file

@ -0,0 +1,308 @@
/*
* linux/arch/alpha/kernel/sys_alcor.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the ALCOR and XLT (XL-300/366/433).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
static inline void
alcor_update_irq_hw(unsigned long mask)
{
*(vuip)GRU_INT_MASK = mask;
mb();
}
static inline void
alcor_enable_irq(struct irq_data *d)
{
alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
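/* Illustrative example (not in the original source): enabling IRQ 18
sets GRU mask bit 18 - 16 = 2, the same bit that alcor_device_interrupt
later reports back as handle_irq(16 + 2). */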
static void
alcor_disable_irq(struct irq_data *d)
{
alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
alcor_mask_and_ack_irq(struct irq_data *d)
{
alcor_disable_irq(d);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
*(vuip)GRU_INT_CLEAR = 0; mb();
}
static void
alcor_isa_mask_and_ack_irq(struct irq_data *d)
{
i8259a_mask_and_ack_irq(d);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
*(vuip)GRU_INT_CLEAR = 0; mb();
}
static struct irq_chip alcor_irq_type = {
.name = "ALCOR",
.irq_unmask = alcor_enable_irq,
.irq_mask = alcor_disable_irq,
.irq_mask_ack = alcor_mask_and_ack_irq,
};
static void
alcor_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary register of the GRU */
pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS;
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 31) {
isa_device_interrupt(vector);
} else {
handle_irq(16 + i);
}
}
}
static void __init
alcor_init_irq(void)
{
long i;
if (alpha_using_srm)
alpha_mv.device_interrupt = srm_device_interrupt;
*(vuip)GRU_INT_MASK = 0; mb(); /* all disabled */
*(vuip)GRU_INT_EDGE = 0; mb(); /* all are level */
*(vuip)GRU_INT_HILO = 0x80000000U; mb(); /* ISA only HI */
*(vuip)GRU_INT_CLEAR = 0; mb(); /* all clear */
for (i = 16; i < 48; ++i) {
/* On Alcor, at least, lines 20..30 are not connected
and can generate spurious interrupts if we turn them
on while IRQ probing. */
if (i >= 16+20 && i <= 16+30)
continue;
irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
init_i8259a_irqs();
common_init_isa_dma();
setup_irq(16+31, &isa_cascade_irqaction);
}
/*
* PCI Fixup configuration.
*
* Summary @ GRU_INT_REQ:
* Bit Meaning
* 0 Interrupt Line A from slot 2
* 1 Interrupt Line B from slot 2
* 2 Interrupt Line C from slot 2
* 3 Interrupt Line D from slot 2
* 4 Interrupt Line A from slot 1
* 5 Interrupt Line B from slot 1
* 6 Interrupt Line C from slot 1
* 7 Interrupt Line D from slot 1
* 8 Interrupt Line A from slot 0
* 9 Interrupt Line B from slot 0
*10 Interrupt Line C from slot 0
*11 Interrupt Line D from slot 0
*12 Interrupt Line A from slot 4
*13 Interrupt Line B from slot 4
*14 Interrupt Line C from slot 4
*15 Interrupt Line D from slot 4
*16 Interrupt Line A from slot 3
*17 Interrupt Line B from slot 3
*18 Interrupt Line C from slot 3
*19 Interrupt Line D from slot 3
*20-30 Reserved
*31 EISA interrupt
*
* The device to slot mapping looks like:
*
* Slot Device
* 6 built-in TULIP (XLT only)
* 7 PCI on board slot 0
* 8 PCI on board slot 3
* 9 PCI on board slot 4
* 10 PCEB (PCI-EISA bridge)
* 11 PCI on board slot 2
* 12 PCI on board slot 1
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static int __init
alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] __initdata = {
/*INT INTA INTB INTC INTD */
/* note: IDSEL 17 is XLT only */
{16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
{ 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */
{16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 19, slot 3 */
{16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 20, slot 4 */
{ -1, -1, -1, -1, -1}, /* IdSel 21, PCEB */
{ 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */
{ 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */
};
const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static void
alcor_kill_arch(int mode)
{
cia_kill_arch(mode);
#ifndef ALPHA_RESTORE_SRM_SETUP
switch(mode) {
case LINUX_REBOOT_CMD_RESTART:
/* Who said DEC engineers have no sense of humor? ;-) */
if (alpha_using_srm) {
*(vuip) GRU_RESET = 0x0000dead;
mb();
}
break;
case LINUX_REBOOT_CMD_HALT:
break;
case LINUX_REBOOT_CMD_POWER_OFF:
break;
}
halt();
#endif
}
static void __init
alcor_init_pci(void)
{
struct pci_dev *dev;
cia_init_pci();
/*
* Now we can look to see if we are really running on an XLT-type
* motherboard, by looking for a 21040 TULIP in slot 6, which is
* built into XLT and BRET/MAVERICK, but not available on ALCOR.
*/
dev = pci_get_device(PCI_VENDOR_ID_DEC,
PCI_DEVICE_ID_DEC_TULIP,
NULL);
if (dev && dev->devfn == PCI_DEVFN(6,0)) {
alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS;
printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
__func__);
}
pci_dev_put(dev);
}
/*
* The System Vectors
*/
struct alpha_machine_vector alcor_mv __initmv = {
.vector_name = "Alcor",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 48,
.device_interrupt = alcor_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = alcor_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alcor_init_pci,
.kill_arch = alcor_kill_arch,
.pci_map_irq = alcor_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .cia = {
.gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS
}}
};
ALIAS_MV(alcor)
struct alpha_machine_vector xlt_mv __initmv = {
.vector_name = "XLT",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 48,
.device_interrupt = alcor_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = alcor_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alcor_init_pci,
.kill_arch = alcor_kill_arch,
.pci_map_irq = alcor_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .cia = {
.gru_int_req_bits = XLT_GRU_INT_REQ_BITS
}}
};
/* No alpha_mv alias for XLT, since we compile it in unconditionally
with ALCOR; setup_arch knows how to cope. */

443
arch/alpha/kernel/sys_cabriolet.c Normal file
View file
@ -0,0 +1,443 @@
/*
* linux/arch/alpha/kernel/sys_cabriolet.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164,
* PC164 and LX164.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/core_lca.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "pc873xx.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask = ~0UL;
static inline void
cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
{
int ofs = (irq - 16) / 8;
outb(mask >> (16 + ofs * 8), 0x804 + ofs);
}
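/* Illustrative example (not in the original source): irq 21 gives
ofs = (21 - 16) / 8 = 0, so mask bits <23:16> are written to port
0x804; irq 30 gives ofs = 1, so bits <31:24> go to port 0x805. */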
static inline void
cabriolet_enable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
}
static void
cabriolet_disable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
}
static struct irq_chip cabriolet_irq_type = {
.name = "CABRIOLET",
.irq_unmask = cabriolet_enable_irq,
.irq_mask = cabriolet_disable_irq,
.irq_mask_ack = cabriolet_disable_irq,
};
static void
cabriolet_device_interrupt(unsigned long v)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary registers */
pld = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16);
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 4) {
isa_device_interrupt(v);
} else {
handle_irq(16 + i);
}
}
}
static void __init
common_init_irq(void (*srm_dev_int)(unsigned long v))
{
init_i8259a_irqs();
if (alpha_using_srm) {
alpha_mv.device_interrupt = srm_dev_int;
init_srm_irqs(35, 0);
}
else {
long i;
outb(0xff, 0x804);
outb(0xff, 0x805);
outb(0xff, 0x806);
for (i = 16; i < 35; ++i) {
irq_set_chip_and_handler(i, &cabriolet_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
common_init_isa_dma();
setup_irq(16+4, &isa_cascade_irqaction);
}
#ifndef CONFIG_ALPHA_PC164
static void __init
cabriolet_init_irq(void)
{
common_init_irq(srm_device_interrupt);
}
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
/* In theory, the PC164 has the same interrupt hardware as the other
Cabriolet based systems. However, something got screwed up late
in the development cycle which broke the interrupt masking hardware.
Repeat, it is not possible to mask and ack interrupts. At all.
In an attempt to work around this, while processing interrupts,
we do not allow the IPL to drop below what it is currently. This
prevents the possibility of recursion.
??? Another option might be to force all PCI devices to use edge
triggered rather than level triggered interrupts. That might be
too invasive though. */
static void
pc164_srm_device_interrupt(unsigned long v)
{
__min_ipl = getipl();
srm_device_interrupt(v);
__min_ipl = 0;
}
static void
pc164_device_interrupt(unsigned long v)
{
__min_ipl = getipl();
cabriolet_device_interrupt(v);
__min_ipl = 0;
}
static void __init
pc164_init_irq(void)
{
common_init_irq(pc164_srm_device_interrupt);
}
#endif
/*
* The EB66+ is very similar to the EB66 except that it does not have
* the on-board NCR and Tulip chips. In the code below, I have used
* slot number to refer to the id select line and *not* the slot
* number used in the EB66+ documentation. However, in the table,
* I've given the slot number, the id select line and the Jxx number
* that's printed on the board. The interrupt pins from the PCI slots
* are wired into 3 interrupt summary registers at 0x804, 0x805 and
* 0x806 ISA.
*
* In the table, -1 means don't assign an IRQ number. This is usually
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
static inline int __init
eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
/*INT INTA INTB INTC INTD */
{16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
{16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 9, slot 2, J27 */
{16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 10, slot 3, J28 */
};
const long min_idsel = 6, max_idsel = 10, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
/*
* The AlphaPC64 is very similar to the EB66+ except that its slots
* are numbered differently. In the code below, I have used slot
* number to refer to the id select line and *not* the slot number
* used in the AlphaPC64 documentation. However, in the table, I've
* given the slot number, the id select line and the Jxx number that's
* printed on the board. The interrupt pins from the PCI slots are
* wired into 3 interrupt summary registers at 0x804, 0x805 and 0x806
* ISA.
*
* In the table, -1 means don't assign an IRQ number. This is usually
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
static inline int __init
cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
{ 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
{ 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{ 16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 9, slot 3, J22 */
};
const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static inline void __init
cabriolet_enable_ide(void)
{
if (pc873xx_probe() == -1) {
printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
} else {
printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
pc873xx_get_model(), pc873xx_get_base());
pc873xx_enable_ide();
}
}
static inline void __init
cabriolet_init_pci(void)
{
common_init_pci();
cabriolet_enable_ide();
}
static inline void __init
cia_cab_init_pci(void)
{
cia_init_pci();
cabriolet_enable_ide();
}
/*
* The PC164 and LX164 have 19 PCI interrupts, four from each of the four
* PCI slots, the SIO, PCI/IDE, and USB.
*
* Each of the interrupts can be individually masked. This is
* accomplished by setting the appropriate bit in the mask register.
* A bit is set by writing a "1" to the desired position in the mask
* register and cleared by writing a "0". There are 3 mask registers
* located at ISA address 804h, 805h and 806h.
*
* An I/O read at ISA address 804h, 805h, 806h will return the
* state of the 11 PCI interrupts and not the state of the MASKED
* interrupts.
*
* Note: A write to I/O 804h, 805h, and 806h the mask register will be
* updated.
*
*
* ISA DATA<7:0>
* ISA +--------------------------------------------------------------+
* ADDRESS | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* +==============================================================+
* 0x804 | INTB0 | USB | IDE | SIO | INTA3 |INTA2 | INTA1 | INTA0 |
* +--------------------------------------------------------------+
* 0x805 | INTD0 | INTC3 | INTC2 | INTC1 | INTC0 |INTB3 | INTB2 | INTB1 |
* +--------------------------------------------------------------+
* 0x806 | Rsrv | Rsrv | Rsrv | Rsrv | Rsrv |INTD3 | INTD2 | INTD1 |
* +--------------------------------------------------------------+
* * Rsrv = reserved bits
* Note: The mask register is write-only.
*
* IdSel
* 5 32 bit PCI option slot 2
* 6 64 bit PCI option slot 0
* 7 64 bit PCI option slot 1
* 8 Saturn I/O
* 9 32 bit PCI option slot 3
* 10 USB
* 11 IDE
*
*/
static inline int __init
alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
{ 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
{ 16+1, 16+1, 16+8, 16+12, 16+16}, /* IdSel 7, slot 1, J26 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{ 16+3, 16+3, 16+10, 16+14, 16+18}, /* IdSel 9, slot 3, J19 */
{ 16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 10, USB */
{ 16+5, 16+5, 16+5, 16+5, 16+5} /* IdSel 11, IDE */
};
const long min_idsel = 5, max_idsel = 11, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static inline void __init
alphapc164_init_pci(void)
{
cia_init_pci();
SMC93x_Init();
}
/*
* The System Vector
*/
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
struct alpha_machine_vector cabriolet_mv __initmv = {
.vector_name = "Cabriolet",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = apecs_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cabriolet_init_pci,
.pci_map_irq = cabriolet_map_irq,
.pci_swizzle = common_swizzle,
};
#ifndef CONFIG_ALPHA_EB64P
ALIAS_MV(cabriolet)
#endif
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164)
struct alpha_machine_vector eb164_mv __initmv = {
.vector_name = "EB164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cia_cab_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = cabriolet_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb164)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P)
struct alpha_machine_vector eb66p_mv __initmv = {
.vector_name = "EB66+",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
.machine_check = lca_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = lca_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cabriolet_init_pci,
.pci_map_irq = eb66p_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb66p)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164)
struct alpha_machine_vector lx164_mv __initmv = {
.vector_name = "LX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = PYXIS_DAC_OFFSET,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = pyxis_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alphapc164_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = alphapc164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(lx164)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
struct alpha_machine_vector pc164_mv __initmv = {
.vector_name = "PC164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = pc164_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = pc164_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alphapc164_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = alphapc164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(pc164)
#endif

665
arch/alpha/kernel/sys_dp264.c Normal file
View file
@ -0,0 +1,665 @@
/*
* linux/arch/alpha/kernel/sys_dp264.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996, 1999 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Modified by Christopher C. Chimelis, 2001 to
* add support for the addition of Shark to the
* Tsunami family.
*
* Code supporting the DP264 (EV6+TSUNAMI).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
/* dp264 boards handle at most four CPUs */
static unsigned long cpu_irq_affinity[4] = { 0UL, 0UL, 0UL, 0UL };
DEFINE_SPINLOCK(dp264_irq_lock);
static void
tsunami_update_irq_hw(unsigned long mask)
{
register tsunami_cchip *cchip = TSUNAMI_cchip;
unsigned long isa_enable = 1UL << 55;
register int bcpu = boot_cpuid;
#ifdef CONFIG_SMP
volatile unsigned long *dim0, *dim1, *dim2, *dim3;
unsigned long mask0, mask1, mask2, mask3, dummy;
mask &= ~isa_enable;
mask0 = mask & cpu_irq_affinity[0];
mask1 = mask & cpu_irq_affinity[1];
mask2 = mask & cpu_irq_affinity[2];
mask3 = mask & cpu_irq_affinity[3];
if (bcpu == 0) mask0 |= isa_enable;
else if (bcpu == 1) mask1 |= isa_enable;
else if (bcpu == 2) mask2 |= isa_enable;
else mask3 |= isa_enable;
dim0 = &cchip->dim0.csr;
dim1 = &cchip->dim1.csr;
dim2 = &cchip->dim2.csr;
dim3 = &cchip->dim3.csr;
if (!cpu_possible(0)) dim0 = &dummy;
if (!cpu_possible(1)) dim1 = &dummy;
if (!cpu_possible(2)) dim2 = &dummy;
if (!cpu_possible(3)) dim3 = &dummy;
*dim0 = mask0;
*dim1 = mask1;
*dim2 = mask2;
*dim3 = mask3;
mb();
*dim0;
*dim1;
*dim2;
*dim3;
#else
volatile unsigned long *dimB;
if (bcpu == 0) dimB = &cchip->dim0.csr;
else if (bcpu == 1) dimB = &cchip->dim1.csr;
else if (bcpu == 2) dimB = &cchip->dim2.csr;
else dimB = &cchip->dim3.csr;
*dimB = mask | isa_enable;
mb();
*dimB;
#endif
}
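/* Note (illustrative, not in the original source): isa_enable is DRIR
bit 55, the ISA summary interrupt; it is ORed into the boot CPU's DIM
only, so ISA interrupts are always delivered to the boot processor. */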
static void
dp264_enable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask |= 1UL << d->irq;
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
dp264_disable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask &= ~(1UL << d->irq);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
clipper_enable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask |= 1UL << (d->irq - 16);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
clipper_disable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask &= ~(1UL << (d->irq - 16));
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
int cpu;
for (cpu = 0; cpu < 4; cpu++) {
unsigned long aff = cpu_irq_affinity[cpu];
if (cpumask_test_cpu(cpu, &affinity))
aff |= 1UL << irq;
else
aff &= ~(1UL << irq);
cpu_irq_affinity[cpu] = aff;
}
}
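/* Illustrative example (not in the original source): routing irq 5 to
CPUs 0 and 2 sets bit 5 in cpu_irq_affinity[0] and [2] and clears it
in [1] and [3]; the change takes effect on the next call to
tsunami_update_irq_hw. */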
static int
dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&dp264_irq_lock);
cpu_set_irq_affinity(d->irq, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
return 0;
}
static int
clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&dp264_irq_lock);
cpu_set_irq_affinity(d->irq - 16, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
return 0;
}
static struct irq_chip dp264_irq_type = {
.name = "DP264",
.irq_unmask = dp264_enable_irq,
.irq_mask = dp264_disable_irq,
.irq_mask_ack = dp264_disable_irq,
.irq_set_affinity = dp264_set_affinity,
};
static struct irq_chip clipper_irq_type = {
.name = "CLIPPER",
.irq_unmask = clipper_enable_irq,
.irq_mask = clipper_disable_irq,
.irq_mask_ack = clipper_disable_irq,
.irq_set_affinity = clipper_set_affinity,
};
static void
dp264_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary register of TSUNAMI */
pld = TSUNAMI_cchip->dir0.csr;
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 55)
isa_device_interrupt(vector);
else
handle_irq(16 + i);
}
}
static void
dp264_srm_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
/*
* The SRM console reports PCI interrupts with a vector calculated by:
*
* 0x900 + (0x10 * DRIR-bit)
*
* So bit 16 shows up as IRQ 32, etc.
*
* On DP264/BRICK/MONET, we adjust it down by 16 because at least
* that many of the low order bits of the DRIR are not used, and
* so we don't count them.
*/
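/* Worked example (illustrative, not in the original source): DRIR
bit 16 is reported as vector 0x900 + 0x10 * 16 = 0xa00, so
irq = (0xa00 - 0x800) >> 4 = 32, which the test below folds down
to IRQ 16. */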
if (irq >= 32)
irq -= 16;
handle_irq(irq);
}
static void
clipper_srm_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
/*
* The SRM console reports PCI interrupts with a vector calculated by:
*
* 0x900 + (0x10 * DRIR-bit)
*
* So bit 16 shows up as IRQ 32, etc.
*
* CLIPPER uses bits 8-47 for PCI interrupts, so we do not need
* to scale down the vector reported, we just use it.
*
* Eg IRQ 24 is DRIR bit 8, etc, etc
*/
handle_irq(irq);
}
static void __init
init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
irq_set_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
static void __init
dp264_init_irq(void)
{
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
if (alpha_using_srm)
alpha_mv.device_interrupt = dp264_srm_device_interrupt;
tsunami_update_irq_hw(0);
init_i8259a_irqs();
init_tsunami_irqs(&dp264_irq_type, 16, 47);
}
static void __init
clipper_init_irq(void)
{
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
if (alpha_using_srm)
alpha_mv.device_interrupt = clipper_srm_device_interrupt;
tsunami_update_irq_hw(0);
init_i8259a_irqs();
init_tsunami_irqs(&clipper_irq_type, 24, 63);
}
/*
* PCI Fixup configuration.
*
* Summary @ TSUNAMI_CSR_DIM0:
* Bit Meaning
* 0-17 Unused
*18 Interrupt SCSI B (Adaptec 7895 builtin)
*19 Interrupt SCSI A (Adaptec 7895 builtin)
*20 Interrupt Line D from slot 2 PCI0
*21 Interrupt Line C from slot 2 PCI0
*22 Interrupt Line B from slot 2 PCI0
*23 Interrupt Line A from slot 2 PCI0
*24 Interrupt Line D from slot 1 PCI0
*25 Interrupt Line C from slot 1 PCI0
*26 Interrupt Line B from slot 1 PCI0
*27 Interrupt Line A from slot 1 PCI0
*28 Interrupt Line D from slot 0 PCI0
*29 Interrupt Line C from slot 0 PCI0
*30 Interrupt Line B from slot 0 PCI0
*31 Interrupt Line A from slot 0 PCI0
*
*32 Interrupt Line D from slot 3 PCI1
*33 Interrupt Line C from slot 3 PCI1
*34 Interrupt Line B from slot 3 PCI1
*35 Interrupt Line A from slot 3 PCI1
*36 Interrupt Line D from slot 2 PCI1
*37 Interrupt Line C from slot 2 PCI1
*38 Interrupt Line B from slot 2 PCI1
*39 Interrupt Line A from slot 2 PCI1
*40 Interrupt Line D from slot 1 PCI1
*41 Interrupt Line C from slot 1 PCI1
*42 Interrupt Line B from slot 1 PCI1
*43 Interrupt Line A from slot 1 PCI1
*44 Interrupt Line D from slot 0 PCI1
*45 Interrupt Line C from slot 0 PCI1
*46 Interrupt Line B from slot 0 PCI1
*47 Interrupt Line A from slot 0 PCI1
*48-52 Unused
*53 PCI0 NMI (from Cypress)
*54 PCI0 SMI INT (from Cypress)
*55 PCI0 ISA Interrupt (from Cypress)
*56-60 Unused
*61 PCI1 Bus Error
*62 PCI0 Bus Error
*63 Reserved
*
* IdSel
* 5 Cypress Bridge I/O
* 6 SCSI Adaptec builtin
* 7 64 bit PCI option slot 0 (all busses)
* 8 64 bit PCI option slot 1 (all busses)
* 9 64 bit PCI option slot 2 (all busses)
* 10 64 bit PCI option slot 3 (not bus 0)
*/
static int __init
isa_irq_fixup(const struct pci_dev *dev, int irq)
{
u8 irq8;
if (irq > 0)
return irq;
/* This interrupt is routed via ISA bridge, so we'll
just have to trust whatever value the console might
have assigned. */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8);
return irq8 & 0xf;
}
static int __init
dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[6][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
{ 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
{ 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */
{ 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */
{ 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4}, /* IdSel 9 slot 2 */
{ 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0} /* IdSel 10 slot 3 */
};
const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;
struct pci_controller *hose = dev->sysdata;
int irq = COMMON_TABLE_LOOKUP;
if (irq > 0)
irq += 16 * hose->index;
return isa_irq_fixup(dev, irq);
}
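/* Illustrative example (not in the original source): INTA from slot 0
(IdSel 7) reads 16+15 = 31 from the table; on hose 1 this becomes
31 + 16 * 1 = 47. */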
static int __init
monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[13][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
{ -1, -1, -1, -1, -1}, /* IdSel 4 unused */
{ -1, -1, -1, -1, -1}, /* IdSel 5 unused */
{ 47, 47, 47, 47, 47}, /* IdSel 6 SCSI PCI1 */
{ -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
{ -1, -1, -1, -1, -1}, /* IdSel 8 P2P PCI1 */
#if 1
{ 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/
{ 24, 24, 25, 26, 27}, /* IdSel 15 slot 5 PCI2*/
#else
{ -1, -1, -1, -1, -1}, /* IdSel 9 unused */
{ -1, -1, -1, -1, -1}, /* IdSel 10 unused */
#endif
{ 40, 40, 41, 42, 43}, /* IdSel 11 slot 1 PCI0*/
{ 36, 36, 37, 38, 39}, /* IdSel 12 slot 2 PCI0*/
{ 32, 32, 33, 34, 35}, /* IdSel 13 slot 3 PCI0*/
{ 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/
{ 24, 24, 25, 26, 27} /* IdSel 15 slot 5 PCI2*/
};
const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5;
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
static u8 __init
monet_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
int slot, pin = *pinp;
if (!dev->bus->parent) {
slot = PCI_SLOT(dev->devfn);
}
/* Check for the built-in bridge on hose 1. */
else if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) {
slot = PCI_SLOT(dev->devfn);
} else {
/* Must be a card-based bridge. */
do {
/* Check for built-in bridge on hose 1. */
if (hose->index == 1 &&
PCI_SLOT(dev->bus->self->devfn) == 8) {
slot = PCI_SLOT(dev->devfn);
break;
}
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;
/* Slot of the next bridge. */
slot = PCI_SLOT(dev->devfn);
} while (dev->bus->self);
}
*pinp = pin;
return slot;
}
static int __init
webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[13][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
{ -1, -1, -1, -1, -1}, /* IdSel 8 unused */
{ 29, 29, 29, 29, 29}, /* IdSel 9 21143 #1 */
{ -1, -1, -1, -1, -1}, /* IdSel 10 unused */
{ 30, 30, 30, 30, 30}, /* IdSel 11 21143 #2 */
{ -1, -1, -1, -1, -1}, /* IdSel 12 unused */
{ -1, -1, -1, -1, -1}, /* IdSel 13 unused */
{ 35, 35, 34, 33, 32}, /* IdSel 14 slot 0 */
{ 39, 39, 38, 37, 36}, /* IdSel 15 slot 1 */
{ 43, 43, 42, 41, 40}, /* IdSel 16 slot 2 */
{ 47, 47, 46, 45, 44}, /* IdSel 17 slot 3 */
};
const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5;
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
static int __init
clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
{ 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
{ 16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 3 slot 3 */
{ 16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 4 slot 4 */
{ 16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 5 slot 5 */
{ 16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 6 slot 6 */
{ -1, -1, -1, -1, -1} /* IdSel 7 ISA Bridge */
};
const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5;
struct pci_controller *hose = dev->sysdata;
int irq = COMMON_TABLE_LOOKUP;
if (irq > 0)
irq += 16 * hose->index;
return isa_irq_fixup(dev, irq);
}
static void __init
dp264_init_pci(void)
{
common_init_pci();
SMC669_Init(0);
locate_and_init_vga(NULL);
}
static void __init
monet_init_pci(void)
{
common_init_pci();
SMC669_Init(1);
es1888_init();
locate_and_init_vga(NULL);
}
static void __init
clipper_init_pci(void)
{
common_init_pci();
locate_and_init_vga(NULL);
}
static void __init
webbrick_init_arch(void)
{
tsunami_init_arch();
/* Tsunami caches 4 PTEs at a time; DS10 has only 1 hose. */
hose_head->sg_isa->align_entry = 4;
hose_head->sg_pci->align_entry = 4;
}
/*
* The System Vectors
*/
struct alpha_machine_vector dp264_mv __initmv = {
.vector_name = "DP264",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
.machine_check = tsunami_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TSUNAMI_DAC_OFFSET,
.nr_irqs = 64,
.device_interrupt = dp264_device_interrupt,
.init_arch = tsunami_init_arch,
.init_irq = dp264_init_irq,
.init_rtc = common_init_rtc,
.init_pci = dp264_init_pci,
.kill_arch = tsunami_kill_arch,
.pci_map_irq = dp264_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(dp264)
struct alpha_machine_vector monet_mv __initmv = {
.vector_name = "Monet",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
.machine_check = tsunami_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TSUNAMI_DAC_OFFSET,
.nr_irqs = 64,
.device_interrupt = dp264_device_interrupt,
.init_arch = tsunami_init_arch,
.init_irq = dp264_init_irq,
.init_rtc = common_init_rtc,
.init_pci = monet_init_pci,
.kill_arch = tsunami_kill_arch,
.pci_map_irq = monet_map_irq,
.pci_swizzle = monet_swizzle,
};
struct alpha_machine_vector webbrick_mv __initmv = {
.vector_name = "Webbrick",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
.machine_check = tsunami_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TSUNAMI_DAC_OFFSET,
.nr_irqs = 64,
.device_interrupt = dp264_device_interrupt,
.init_arch = webbrick_init_arch,
.init_irq = dp264_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = tsunami_kill_arch,
.pci_map_irq = webbrick_map_irq,
.pci_swizzle = common_swizzle,
};
struct alpha_machine_vector clipper_mv __initmv = {
.vector_name = "Clipper",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
.machine_check = tsunami_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TSUNAMI_DAC_OFFSET,
.nr_irqs = 64,
.device_interrupt = dp264_device_interrupt,
.init_arch = tsunami_init_arch,
.init_irq = clipper_init_irq,
.init_rtc = common_init_rtc,
.init_pci = clipper_init_pci,
.kill_arch = tsunami_kill_arch,
.pci_map_irq = clipper_map_irq,
.pci_swizzle = common_swizzle,
};
/* Sharks strongly resemble Clipper, at least as far
* as interrupt routing etc. goes, so we use the
* same functions as Clipper does.
*/
struct alpha_machine_vector shark_mv __initmv = {
.vector_name = "Shark",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
.machine_check = tsunami_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TSUNAMI_DAC_OFFSET,
.nr_irqs = 64,
.device_interrupt = dp264_device_interrupt,
.init_arch = tsunami_init_arch,
.init_irq = clipper_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = tsunami_kill_arch,
.pci_map_irq = clipper_map_irq,
.pci_swizzle = common_swizzle,
};
/* No alpha_mv alias for webbrick/monet/clipper, since we compile them
in unconditionally with DP264; setup_arch knows how to cope. */

237
arch/alpha/kernel/sys_eb64p.c Normal file
View file
@ -0,0 +1,237 @@
/*
* linux/arch/alpha/kernel/sys_eb64p.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the EB64+ and EB66.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned int cached_irq_mask = -1;
static inline void
eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
{
outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26));
}
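/* Illustrative example (not in the original source): irq 20 writes mask
bits <23:16> to summary register 0x26, while irq 26 writes bits <31:24>
to register 0x27. */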
static inline void
eb64p_enable_irq(struct irq_data *d)
{
eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
}
static void
eb64p_disable_irq(struct irq_data *d)
{
eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
}
static struct irq_chip eb64p_irq_type = {
.name = "EB64P",
.irq_unmask = eb64p_enable_irq,
.irq_mask = eb64p_disable_irq,
.irq_mask_ack = eb64p_disable_irq,
};
static void
eb64p_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary registers */
pld = inb(0x26) | (inb(0x27) << 8);
/*
* Now, for every possible bit set, work through
* them and call the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 5) {
isa_device_interrupt(vector);
} else {
handle_irq(16 + i);
}
}
}
static void __init
eb64p_init_irq(void)
{
long i;
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
/*
* CABRIO SRM may not set variation correctly, so here we test
* the high word of the interrupt summary register for the RAZ
* bits, and hope that a true EB64+ would read all ones...
*/
if (inw(0x806) != 0xffff) {
extern struct alpha_machine_vector cabriolet_mv;
printk("Detected Cabriolet: correcting HWRPB.\n");
hwrpb->sys_variation |= 2L << 10;
hwrpb_update_checksum(hwrpb);
alpha_mv = cabriolet_mv;
alpha_mv.init_irq();
return;
}
#endif /* GENERIC */
outb(0xff, 0x26);
outb(0xff, 0x27);
init_i8259a_irqs();
for (i = 16; i < 32; ++i) {
irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
setup_irq(16+5, &isa_cascade_irqaction);
}
/*
* PCI Fixup configuration.
*
* There are two 8 bit external summary registers as follows:
*
* Summary @ 0x26:
* Bit Meaning
* 0 Interrupt Line A from slot 0
* 1 Interrupt Line A from slot 1
* 2 Interrupt Line B from slot 0
* 3 Interrupt Line B from slot 1
* 4 Interrupt Line C from slot 0
* 5 Interrupt line from the two ISA PICs
* 6 Tulip
* 7 NCR SCSI
*
* Summary @ 0x27
* Bit Meaning
* 0 Interrupt Line C from slot 1
* 1 Interrupt Line D from slot 0
* 2 Interrupt Line D from slot 1
* 3 RAZ
* 4 RAZ
* 5 RAZ
* 6 RAZ
* 7 RAZ
*
* The device to slot mapping looks like:
*
* Slot Device
* 5 NCR SCSI controller
* 6 PCI on board slot 0
* 7 PCI on board slot 1
* 8 Intel SIO PCI-ISA bridge chip
* 9 Tulip - DECchip 21040 Ethernet controller
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static int __init
eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
/*INT INTA INTB INTC INTD */
{16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
{16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
{16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 9, TULIP */
};
const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
/*
* The System Vector
*/
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P)
struct alpha_machine_vector eb64p_mv __initmv = {
.vector_name = "EB64+",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = apecs_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 32,
.device_interrupt = eb64p_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = eb64p_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = NULL,
.pci_map_irq = eb64p_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb64p)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66)
struct alpha_machine_vector eb66_mv __initmv = {
.vector_name = "EB66",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
.machine_check = lca_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 32,
.device_interrupt = eb64p_device_interrupt,
.init_arch = lca_init_arch,
.init_irq = eb64p_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.pci_map_irq = eb64p_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb66)
#endif

226
arch/alpha/kernel/sys_eiger.c Normal file
View file
@ -0,0 +1,226 @@
/*
* linux/arch/alpha/kernel/sys_eiger.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996, 1999 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
* Copyright (C) 1999 Iain Grant
*
* Code supporting the EIGER (EV6+TSUNAMI).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note that this interrupt code is identical to TAKARA. */
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask[2] = { -1, -1 };
static inline void
eiger_update_irq_hw(unsigned long irq, unsigned long mask)
{
int regaddr;
mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
outl(mask & 0xffff0000UL, regaddr);
}
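/* Illustrative example (not in the original source): for irq 35,
(35 - 16) & 0x30 = 0x10, so the cached mask is shifted right by 16 and
bits <47:32> land in <31:16>; regaddr = 0x510 + 4 = 0x514. */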
static inline void
eiger_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
eiger_update_irq_hw(irq, mask);
}
static void
eiger_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
eiger_update_irq_hw(irq, mask);
}
static struct irq_chip eiger_irq_type = {
.name = "EIGER",
.irq_unmask = eiger_enable_irq,
.irq_mask = eiger_disable_irq,
.irq_mask_ack = eiger_disable_irq,
};
static void
eiger_device_interrupt(unsigned long vector)
{
unsigned intstatus;
/*
* The PALcode will have passed us vectors 0x800 or 0x810,
* which are fairly arbitrary values and serve only to tell
* us whether an interrupt has come in on IRQ0 or IRQ1. If
* it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
* probably ISA, but PCI interrupts can come through IRQ0
* as well if the interrupt controller isn't in accelerated
* mode.
*
* OTOH, the accelerator thing doesn't seem to be working
* overly well, so what we'll do instead is try directly
* examining the Master Interrupt Register to see if it's a
* PCI interrupt, and if _not_ then we'll pass it on to the
* ISA handler.
*/
intstatus = inw(0x500) & 15;
if (intstatus) {
/*
* This is a PCI interrupt. Check each bit and
* despatch an interrupt if it's set.
*/
if (intstatus & 8) handle_irq(16+3);
if (intstatus & 4) handle_irq(16+2);
if (intstatus & 2) handle_irq(16+1);
if (intstatus & 1) handle_irq(16+0);
} else {
isa_device_interrupt(vector);
}
}
static void
eiger_srm_device_interrupt(unsigned long vector)
{
int irq = (vector - 0x800) >> 4;
handle_irq(irq);
}
static void __init
eiger_init_irq(void)
{
long i;
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
if (alpha_using_srm)
alpha_mv.device_interrupt = eiger_srm_device_interrupt;
for (i = 16; i < 128; i += 16)
eiger_update_irq_hw(i, -1);
init_i8259a_irqs();
for (i = 16; i < 128; ++i) {
irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
static int __init
eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u8 irq_orig;
/* The SRM console has already calculated the IRQ values for
option cards. Since this works, just read the value it has already
set and convert it to a value usable by Linux.
All the IRQ values generated by the console are greater than 0x90,
so we subtract 0x80, i.e. 0x90 minus the 0x10 IRQs allocated to ISA. */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig);
return irq_orig - 0x80;
}
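/* Illustrative example (not in the original source): a console-assigned
interrupt line of 0x9e yields Linux IRQ 0x9e - 0x80 = 0x1e (30). */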
static u8 __init
eiger_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
int slot, pin = *pinp;
int bridge_count = 0;
/* Find the number of backplane bridges. */
int backplane = inw(0x502) & 0x0f;
switch (backplane)
{
case 0x00: bridge_count = 0; break; /* No bridges */
case 0x01: bridge_count = 1; break; /* 1 */
case 0x03: bridge_count = 2; break; /* 2 */
case 0x07: bridge_count = 3; break; /* 3 */
case 0x0f: bridge_count = 4; break; /* 4 */
};
slot = PCI_SLOT(dev->devfn);
while (dev->bus->self) {
/* Check for built-in bridges on hose 0. */
if (hose->index == 0
&& (PCI_SLOT(dev->bus->self->devfn)
> 20 - bridge_count)) {
slot = PCI_SLOT(dev->devfn);
break;
}
/* Must be a card-based bridge. */
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;
}
*pinp = pin;
return slot;
}
/*
* The System Vectors
*/
struct alpha_machine_vector eiger_mv __initmv = {
.vector_name = "Eiger",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TSUNAMI_IO,
.machine_check = tsunami_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TSUNAMI_DAC_OFFSET,
.nr_irqs = 128,
.device_interrupt = eiger_device_interrupt,
.init_arch = tsunami_init_arch,
.init_irq = eiger_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = tsunami_kill_arch,
.pci_map_irq = eiger_map_irq,
.pci_swizzle = eiger_swizzle,
};
ALIAS_MV(eiger)

237
arch/alpha/kernel/sys_jensen.c Normal file
View file
@ -0,0 +1,237 @@
/*
* linux/arch/alpha/kernel/sys_jensen.c
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the Jensen.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/jensen.h>
#undef __EXTERN_INLINE
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/*
* Jensen is special: the vector is 0x8X0 for EISA interrupt X, and
* 0x9X0 for the local motherboard interrupts.
*
* Note especially that those local interrupts CANNOT be masked,
* which causes much of the pain below...
*
* 0x660 - NMI
*
* 0x800 - IRQ0 interval timer (not used, as we use the RTC timer)
* 0x810 - IRQ1 line printer (duh..)
* 0x860 - IRQ6 floppy disk
*
* 0x900 - COM1
* 0x920 - COM2
* 0x980 - keyboard
* 0x990 - mouse
*
* PCI-based systems are more sane: they don't have the local
* interrupts at all, and have only normal PCI interrupts from
* devices. Happily it's easy enough to do a sane mapping from the
* Jensen.
*
* Note that this means that we may have to do a hardware
* "local_op" to a different interrupt than we report to the rest of the
* world.
*/
static void
jensen_local_enable(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (d->irq == 7)
i8259a_enable_irq(d);
}
static void
jensen_local_disable(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (d->irq == 7)
i8259a_disable_irq(d);
}
static void
jensen_local_mask_ack(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (d->irq == 7)
i8259a_mask_and_ack_irq(d);
}
static struct irq_chip jensen_local_irq_type = {
.name = "LOCAL",
.irq_unmask = jensen_local_enable,
.irq_mask = jensen_local_disable,
.irq_mask_ack = jensen_local_mask_ack,
};
static void
jensen_device_interrupt(unsigned long vector)
{
int irq;
switch (vector) {
case 0x660:
printk("Whee.. NMI received. Probable hardware error\n");
printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461));
return;
/* local device interrupts: */
case 0x900: irq = 4; break; /* com1 -> irq 4 */
case 0x920: irq = 3; break; /* com2 -> irq 3 */
case 0x980: irq = 1; break; /* kbd -> irq 1 */
case 0x990: irq = 9; break; /* mouse -> irq 9 */
default:
if (vector > 0x900) {
printk("Unknown local interrupt %lx\n", vector);
return;
}
irq = (vector - 0x800) >> 4;
if (irq == 1)
irq = 7;
break;
}
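/* Illustrative example (not in the original source): the line-printer
vector 0x810 decodes to (0x810 - 0x800) >> 4 = 1 and is then remapped
to IRQ 7 above. */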
/* If there is no handler yet... */
if (!irq_has_action(irq)) {
/* If it is a local interrupt that cannot be masked... */
if (vector >= 0x900)
{
/* Clear keyboard/mouse state */
inb(0x64);
inb(0x60);
/* Reset serial ports */
inb(0x3fa);
inb(0x2fa);
outb(0x0c, 0x3fc);
outb(0x0c, 0x2fc);
/* Clear NMI */
outb(0,0x61);
outb(0,0x461);
}
}
#if 0
/* A useful bit of code to find out if an interrupt is going wild. */
{
static unsigned int last_msg = 0, last_cc = 0;
static int last_irq = -1, count = 0;
unsigned int cc;
__asm __volatile("rpcc %0" : "=r"(cc));
++count;
#define JENSEN_CYCLES_PER_SEC (150000000)
if (cc - last_msg > ((JENSEN_CYCLES_PER_SEC) * 3) ||
irq != last_irq) {
printk(KERN_CRIT " irq %d count %d cc %u @ %lx\n",
irq, count, cc-last_cc, get_irq_regs()->pc);
count = 0;
last_msg = cc;
last_irq = irq;
}
last_cc = cc;
}
#endif
handle_irq(irq);
}
static void __init
jensen_init_irq(void)
{
init_i8259a_irqs();
irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
common_init_isa_dma();
}
static void __init
jensen_init_arch(void)
{
struct pci_controller *hose;
#ifdef CONFIG_PCI
static struct pci_dev fake_isa_bridge = { .dma_mask = 0xffffffffUL, };
isa_bridge = &fake_isa_bridge;
#endif
/* Create a hose so that we can report i/o base addresses to
userland. */
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
hose->index = 0;
hose->sparse_mem_base = EISA_MEM - IDENT_ADDR;
hose->dense_mem_base = 0;
hose->sparse_io_base = EISA_IO - IDENT_ADDR;
hose->dense_io_base = 0;
hose->sg_isa = hose->sg_pci = NULL;
__direct_map_base = 0;
__direct_map_size = 0xffffffff;
}
static void
jensen_machine_check(unsigned long vector, unsigned long la)
{
printk(KERN_CRIT "Machine check\n");
}
/*
* The System Vector
*/
struct alpha_machine_vector jensen_mv __initmv = {
.vector_name = "Jensen",
DO_EV4_MMU,
IO_LITE(JENSEN,jensen),
.machine_check = jensen_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.rtc_port = 0x170,
.nr_irqs = 16,
.device_interrupt = jensen_device_interrupt,
.init_arch = jensen_init_arch,
.init_irq = jensen_init_irq,
.init_rtc = common_init_rtc,
.init_pci = NULL,
.kill_arch = NULL,
};
ALIAS_MV(jensen)

470
arch/alpha/kernel/sys_marvel.c Normal file
View file
@ -0,0 +1,470 @@
/*
* linux/arch/alpha/kernel/sys_marvel.c
*
* Marvel / IO7 support
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_marvel.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>
#include "proto.h"
#include "err_impl.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#if NR_IRQS < MARVEL_NR_IRQS
# error NR_IRQS < MARVEL_NR_IRQS !!!
#endif
/*
* Interrupt handling.
*/
static void
io7_device_interrupt(unsigned long vector)
{
unsigned int pid;
unsigned int irq;
/*
* Vector is 0x800 + (interrupt)
*
* where (interrupt) is:
*
* ...16|15 14|13 4|3 0
* -----+-----+--------+---
* PE | 0 | irq | 0
*
* where (irq) is
*
* 0x0800 - 0x0ff0 - 0x0800 + (LSI id << 4)
* 0x1000 - 0x2ff0 - 0x1000 + (MSI_DAT<8:0> << 4)
*/
pid = vector >> 16;
irq = ((vector & 0xffff) - 0x800) >> 4;
irq += 16; /* offset for legacy */
irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */
irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
handle_irq(irq);
}
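/* Illustrative decode (not in the original source): vector 0x20880 is
PE 2 with LSI id 8, so irq = ((0x880 - 0x800) >> 4) + 16 = 24, merged
with the PE to (2 << MARVEL_IRQ_VEC_PE_SHIFT) | 24. */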
static volatile unsigned long *
io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
{
volatile unsigned long *ctl;
unsigned int pid;
struct io7 *io7;
pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;
if (!(io7 = marvel_find_io7(pid))) {
printk(KERN_ERR
"%s for nonexistent io7 -- vec %x, pid %d\n",
__func__, irq, pid);
return NULL;
}
irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */
irq -= 16; /* subtract legacy bias */
if (irq >= 0x180) {
printk(KERN_ERR
"%s for invalid irq -- pid %d adjusted irq %x\n",
__func__, pid, irq);
return NULL;
}
ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */
if (irq >= 0x80) /* MSI */
ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;
if (pio7) *pio7 = io7;
return ctl;
}
static void
io7_enable_irq(struct irq_data *d)
{
volatile unsigned long *ctl;
unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
if (!ctl || !io7) {
printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
__func__, irq);
return;
}
spin_lock(&io7->irq_lock);
*ctl |= 1UL << 24;
mb();
*ctl;
spin_unlock(&io7->irq_lock);
}
static void
io7_disable_irq(struct irq_data *d)
{
volatile unsigned long *ctl;
unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
if (!ctl || !io7) {
printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
__func__, irq);
return;
}
spin_lock(&io7->irq_lock);
*ctl &= ~(1UL << 24);
mb();
*ctl;
spin_unlock(&io7->irq_lock);
}
static void
marvel_irq_noop(struct irq_data *d)
{
return;
}
static struct irq_chip marvel_legacy_irq_type = {
.name = "LEGACY",
.irq_mask = marvel_irq_noop,
.irq_unmask = marvel_irq_noop,
};
static struct irq_chip io7_lsi_irq_type = {
.name = "LSI",
.irq_unmask = io7_enable_irq,
.irq_mask = io7_disable_irq,
.irq_mask_ack = io7_disable_irq,
};
static struct irq_chip io7_msi_irq_type = {
.name = "MSI",
.irq_unmask = io7_enable_irq,
.irq_mask = io7_disable_irq,
.irq_ack = marvel_irq_noop,
};
static void
io7_redirect_irq(struct io7 *io7,
volatile unsigned long *csr,
unsigned int where)
{
unsigned long val;
val = *csr;
val &= ~(0x1ffUL << 24); /* clear the target pid */
val |= ((unsigned long)where << 24); /* set the new target pid */
*csr = val;
mb();
*csr;
}
static void
io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
unsigned long val;
/*
* LSI_CTL has target PID @ 14
*/
val = io7->csrs->PO7_LSI_CTL[which].csr;
val &= ~(0x1ffUL << 14); /* clear the target pid */
val |= ((unsigned long)where << 14); /* set the new target pid */
io7->csrs->PO7_LSI_CTL[which].csr = val;
mb();
io7->csrs->PO7_LSI_CTL[which].csr;
}
static void
io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
unsigned long val;
/*
* MSI_CTL has target PID @ 14
*/
val = io7->csrs->PO7_MSI_CTL[which].csr;
val &= ~(0x1ffUL << 14); /* clear the target pid */
val |= ((unsigned long)where << 14); /* set the new target pid */
io7->csrs->PO7_MSI_CTL[which].csr = val;
mb();
io7->csrs->PO7_MSI_CTL[which].csr;
}
static void __init
init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
/*
* LSI_CTL has target PID @ 14
*/
io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
mb();
io7->csrs->PO7_LSI_CTL[which].csr;
}
static void __init
init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
/*
* MSI_CTL has target PID @ 14
*/
io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
mb();
io7->csrs->PO7_MSI_CTL[which].csr;
}
static void __init
init_io7_irqs(struct io7 *io7,
struct irq_chip *lsi_ops,
struct irq_chip *msi_ops)
{
long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
long i;
printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
io7->pe, base);
/*
* Where should interrupts from this IO7 go?
*
* They really should be sent to the local CPU to avoid having to
* traverse the mesh, but if it's not an SMP kernel, they have to
* go to the boot CPU. Send them all to the boot CPU for now;
* as each secondary starts, it can redirect its local device
* interrupts.
*/
printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid);
spin_lock(&io7->irq_lock);
/* set up the error irqs */
io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);
/* Set up the lsi irqs. */
for (i = 0; i < 128; ++i) {
irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq);
irq_set_status_flags(base + i, IRQ_LEVEL);
}
/* Disable the implemented irqs in hardware. */
for (i = 0; i < 0x60; ++i)
init_one_io7_lsi(io7, i, boot_cpuid);
init_one_io7_lsi(io7, 0x74, boot_cpuid);
init_one_io7_lsi(io7, 0x75, boot_cpuid);
/* Set up the msi irqs. */
for (i = 128; i < (128 + 512); ++i) {
irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq);
irq_set_status_flags(base + i, IRQ_LEVEL);
}
for (i = 0; i < 16; ++i)
init_one_io7_msi(io7, i, boot_cpuid);
spin_unlock(&io7->irq_lock);
}
static void __init
marvel_init_irq(void)
{
int i;
struct io7 *io7 = NULL;
/* Reserve the legacy irqs. */
for (i = 0; i < 16; ++i) {
irq_set_chip_and_handler(i, &marvel_legacy_irq_type,
handle_level_irq);
}
/* Init the io7 irqs. */
for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
}
static int
marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
{
struct pci_dev *dev = (struct pci_dev *)cdev;
struct pci_controller *hose = dev->sysdata;
struct io7_port *io7_port = hose->sysdata;
struct io7 *io7 = io7_port->io7;
int msi_loc, msi_data_off;
u16 msg_ctl;
u16 msg_dat;
u8 intline;
int irq;
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
irq = intline;
msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
msg_ctl = 0;
if (msi_loc)
pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
msi_data_off = PCI_MSI_DATA_32;
if (msg_ctl & PCI_MSI_FLAGS_64BIT)
msi_data_off = PCI_MSI_DATA_64;
pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);
irq = msg_dat & 0x1ff; /* we use msg_data<8:0> */
irq += 0x80; /* offset for lsi */
#if 1
printk("PCI:%d:%d:%d (hose %d) is using MSI\n",
dev->bus->number,
PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn),
hose->index);
printk(" %d message(s) from 0x%04x\n",
1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
msg_dat);
printk(" reporting on %d IRQ(s) from %d (0x%x)\n",
1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
(irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
(irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
#endif
#if 0
pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
irq = intline;
printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
#endif
}
irq += 16; /* offset for legacy */
irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */
return irq;
}
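/*
 * Editor's sketch, not part of the original file: the vector arithmetic
 * in marvel_map_irq() above, pulled out for clarity.  An enabled MSI
 * with msg_data<8:0> = 5 on PE 1 becomes
 * (5 + 0x80 + 16) | (1 << MARVEL_IRQ_VEC_PE_SHIFT).
 * The helper name is illustrative.
 */
static inline int marvel_compose_irq_example(unsigned int msg_dat,
                                             unsigned int pe)
{
        int irq = msg_dat & 0x1ff;      /* msg_data<8:0> selects the vector */
        irq += 0x80;                    /* MSIs sit above the 128 LSIs */
        irq += 16;                      /* skip the 16 legacy irqs */
        return irq | (pe << MARVEL_IRQ_VEC_PE_SHIFT);   /* merge the PID */
}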
static void __init
marvel_init_pci(void)
{
struct io7 *io7;
marvel_register_error_handlers();
/* Indicate that we trust the console to configure things properly */
pci_set_flags(PCI_PROBE_ONLY);
common_init_pci();
locate_and_init_vga(NULL);
/* Clear any io7 errors. */
for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
io7_clear_errors(io7);
}
static void __init
marvel_init_rtc(void)
{
init_rtc_irq();
}
static void
marvel_smp_callin(void)
{
int cpuid = hard_smp_processor_id();
struct io7 *io7 = marvel_find_io7(cpuid);
unsigned int i;
if (!io7)
return;
/*
* There is a local IO7 - redirect all of its interrupts here.
*/
printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);
/* Redirect the error IRQS here. */
io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);
/* Redirect the implemented LSIs here. */
for (i = 0; i < 0x60; ++i)
io7_redirect_one_lsi(io7, i, cpuid);
io7_redirect_one_lsi(io7, 0x74, cpuid);
io7_redirect_one_lsi(io7, 0x75, cpuid);
/* Redirect the MSIs here. */
for (i = 0; i < 16; ++i)
io7_redirect_one_msi(io7, i, cpuid);
}
/*
* System Vectors
*/
struct alpha_machine_vector marvel_ev7_mv __initmv = {
.vector_name = "MARVEL/EV7",
DO_EV7_MMU,
.rtc_port = 0x70,
.rtc_boot_cpu_only = 1,
DO_MARVEL_IO,
.machine_check = marvel_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = IO7_DAC_OFFSET,
.nr_irqs = MARVEL_NR_IRQS,
.device_interrupt = io7_device_interrupt,
.agp_info = marvel_agp_info,
.smp_callin = marvel_smp_callin,
.init_arch = marvel_init_arch,
.init_irq = marvel_init_irq,
.init_rtc = marvel_init_rtc,
.init_pci = marvel_init_pci,
.kill_arch = marvel_kill_arch,
.pci_map_irq = marvel_map_irq,
.pci_swizzle = common_swizzle,
.pa_to_nid = marvel_pa_to_nid,
.cpuid_to_nid = marvel_cpuid_to_nid,
.node_mem_start = marvel_node_mem_start,
.node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)


@ -0,0 +1,292 @@
/*
* linux/arch/alpha/kernel/sys_miata.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code supporting the MIATA (EV56+PYXIS).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
static void
miata_srm_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
/*
* I really hate to do this, but the MIATA SRM console ignores the
* low 8 bits in the interrupt summary register, and reports the
* vector 0x80 *lower* than I expected from the bit numbering in
* the documentation.
* This was done because the low 8 summary bits really aren't used
* for reporting any interrupts (the PCI-ISA bridge, bit 7, isn't
* used for this purpose, as PIC interrupts are delivered as the
* vectors 0x800-0x8f0).
* But I really don't want to change the fixup code for allocation
* of IRQs, nor the alpha_irq_mask maintenance stuff, both of which
* look nice and clean now.
* So, here's this grotty hack... :-(
*/
if (irq >= 16)
irq = irq + 8;
handle_irq(irq);
}
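/*
 * Editor's sketch, not part of the original file: the decode above as
 * plain arithmetic.  Vector 0x900 gives (0x900 - 0x800) >> 4 = 16,
 * which the +8 fixup maps to kernel irq 24, matching the summary-bit
 * numbering described below.  The helper name is illustrative.
 */
static inline int miata_vector_to_irq_example(unsigned long vector)
{
        int irq = (vector - 0x800) >> 4;
        return irq >= 16 ? irq + 8 : irq;       /* skip the unused low bits */
}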
static void __init
miata_init_irq(void)
{
if (alpha_using_srm)
alpha_mv.device_interrupt = miata_srm_device_interrupt;
#if 0
/* These break on MiataGL so we'll try not to do it at all. */
*(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */
*(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */
#endif
init_i8259a_irqs();
/* Not interested in the bogus interrupts (3,10), Fan Fault (0),
NMI (1), or EIDE (9).
We also disable the risers (4,5), since we don't know how to
route the interrupts behind the bridge. */
init_pyxis_irqs(0x63b0000);
common_init_isa_dma();
setup_irq(16+2, &halt_switch_irqaction); /* SRM only? */
setup_irq(16+6, &timer_cascade_irqaction);
}
/*
* PCI Fixup configuration.
*
* Summary @ PYXIS_INT_REQ:
* Bit Meaning
* 0 Fan Fault
* 1 NMI
* 2 Halt/Reset switch
* 3 none
* 4 CID0 (Riser ID)
* 5 CID1 (Riser ID)
* 6 Interval timer
* 7 PCI-ISA Bridge
* 8 Ethernet
* 9 EIDE (deprecated, ISA 14/15 used)
*10 none
*11 USB
*12 Interrupt Line A from slot 4
*13 Interrupt Line B from slot 4
*14 Interrupt Line C from slot 4
*15 Interrupt Line D from slot 4
*16 Interrupt Line A from slot 5
*17 Interrupt line B from slot 5
*18 Interrupt Line C from slot 5
*19 Interrupt Line D from slot 5
*20 Interrupt Line A from slot 1
*21 Interrupt Line B from slot 1
*22 Interrupt Line C from slot 1
*23 Interrupt Line D from slot 1
*24 Interrupt Line A from slot 2
*25 Interrupt Line B from slot 2
*26 Interrupt Line C from slot 2
*27 Interrupt Line D from slot 2
*28 Interrupt Line A from slot 3
*29 Interrupt Line B from slot 3
*30 Interrupt Line C from slot 3
*31 Interrupt Line D from slot 3
*
* The device to slot mapping looks like:
*
* Slot Device
* 3 DC21142 Ethernet
* 4 EIDE CMD646
* 5 none
* 6 USB
* 7 PCI-ISA bridge
* 8 PCI-PCI Bridge (SBU Riser)
* 9 none
* 10 none
* 11 PCI on board slot 4 (SBU Riser)
* 12 PCI on board slot 5 (SBU Riser)
*
* These are behind the bridge, so I'm not sure what to do...
*
* 13 PCI on board slot 1 (SBU Riser)
* 14 PCI on board slot 2 (SBU Riser)
* 15 PCI on board slot 3 (SBU Riser)
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static int __init
miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[18][5] __initdata = {
/*INT INTA INTB INTC INTD */
{16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
{ -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
{ -1, -1, -1, -1, -1}, /* IdSel 16, none */
{ -1, -1, -1, -1, -1}, /* IdSel 17, none */
{ -1, -1, -1, -1, -1}, /* IdSel 18, PCI-ISA */
{ -1, -1, -1, -1, -1}, /* IdSel 19, PCI-PCI */
{ -1, -1, -1, -1, -1}, /* IdSel 20, none */
{ -1, -1, -1, -1, -1}, /* IdSel 21, none */
{16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 22, slot 4 */
{16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 23, slot 5 */
/* the next 7 are actually on PCI bus 1, across the bridge */
{16+11, 16+11, 16+11, 16+11, 16+11}, /* IdSel 24, QLISP/GL*/
{ -1, -1, -1, -1, -1}, /* IdSel 25, none */
{ -1, -1, -1, -1, -1}, /* IdSel 26, none */
{ -1, -1, -1, -1, -1}, /* IdSel 27, none */
{16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 28, slot 1 */
{16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 29, slot 2 */
{16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 30, slot 3 */
/* This bridge is on the main bus of the later orig MIATA */
{ -1, -1, -1, -1, -1}, /* IdSel 31, PCI-PCI */
};
const long min_idsel = 3, max_idsel = 20, irqs_per_slot = 5;
/* the USB function of the 82c693 has its interrupt connected to
the 2nd 8259 controller. So we have to check for it first. */
if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) {
u8 irq=0;
struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7);
if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) {
pci_dev_put(pdev);
return -1;
}
else {
pci_dev_put(pdev);
return irq;
}
}
return COMMON_TABLE_LOOKUP;
}
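/*
 * Editor's sketch, not part of the original file: COMMON_TABLE_LOOKUP
 * (a macro from pci_impl.h) amounts to the bounds-checked lookup below
 * over the min_idsel/max_idsel/irqs_per_slot locals.  For the table
 * above, slot 3 (the DC21142, IdSel 14) with pin 1 (INTA) indexes row
 * 0, column 1 and yields irq 16+8.  The helper name is illustrative.
 */
static inline long miata_table_lookup_example(char tab[][5], long slot, u8 pin,
                                              long min_idsel, long max_idsel,
                                              long irqs_per_slot)
{
        if (slot < min_idsel || slot > max_idsel || pin >= irqs_per_slot)
                return -1;
        return tab[slot - min_idsel][pin];
}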
static u8 __init
miata_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
if (dev->bus->number == 0) {
slot = PCI_SLOT(dev->devfn);
}
/* Check for the built-in bridge. */
else if ((PCI_SLOT(dev->bus->self->devfn) == 8) ||
(PCI_SLOT(dev->bus->self->devfn) == 20)) {
slot = PCI_SLOT(dev->devfn) + 9;
}
else
{
/* Must be a card-based bridge. */
do {
if ((PCI_SLOT(dev->bus->self->devfn) == 8) ||
(PCI_SLOT(dev->bus->self->devfn) == 20)) {
slot = PCI_SLOT(dev->devfn) + 9;
break;
}
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;
/* Slot of the next bridge. */
slot = PCI_SLOT(dev->devfn);
} while (dev->bus->self);
}
*pinp = pin;
return slot;
}
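/*
 * Editor's sketch, not part of the original file: for a device at
 * PCI_SLOT 2 directly behind the built-in bridge, the swizzle above
 * returns 2 + 9 = 11, i.e. the IdSel 22 row of miata_map_irq()'s
 * table, and *pinp is left untouched.  The helper name is illustrative.
 */
static inline int miata_behind_bridge_slot_example(int devfn_slot)
{
        return devfn_slot + 9;          /* e.g. 2 -> 11 (IdSel 22) */
}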
static void __init
miata_init_pci(void)
{
cia_init_pci();
SMC669_Init(0); /* it might be a GL (fails harmlessly if not) */
es1888_init();
}
static void
miata_kill_arch(int mode)
{
cia_kill_arch(mode);
#ifndef ALPHA_RESTORE_SRM_SETUP
switch(mode) {
case LINUX_REBOOT_CMD_RESTART:
/* Who said DEC engineers have no sense of humor? ;-) */
if (alpha_using_srm) {
*(vuip) PYXIS_RESET = 0x0000dead;
mb();
}
break;
case LINUX_REBOOT_CMD_HALT:
break;
case LINUX_REBOOT_CMD_POWER_OFF:
break;
}
halt();
#endif
}
/*
* The System Vector
*/
struct alpha_machine_vector miata_mv __initmv = {
.vector_name = "Miata",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = PYXIS_DAC_OFFSET,
.nr_irqs = 48,
.device_interrupt = pyxis_device_interrupt,
.init_arch = pyxis_init_arch,
.init_irq = miata_init_irq,
.init_rtc = common_init_rtc,
.init_pci = miata_init_pci,
.kill_arch = miata_kill_arch,
.pci_map_irq = miata_map_irq,
.pci_swizzle = miata_swizzle,
};
ALIAS_MV(miata)


@ -0,0 +1,247 @@
/*
* linux/arch/alpha/kernel/sys_mikasa.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the MIKASA (AlphaServer 1000).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/mce.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static int cached_irq_mask;
static inline void
mikasa_update_irq_hw(int mask)
{
outw(mask, 0x536);
}
static inline void
mikasa_enable_irq(struct irq_data *d)
{
mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
}
static void
mikasa_disable_irq(struct irq_data *d)
{
mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
}
static struct irq_chip mikasa_irq_type = {
.name = "MIKASA",
.irq_unmask = mikasa_enable_irq,
.irq_mask = mikasa_disable_irq,
.irq_mask_ack = mikasa_disable_irq,
};
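/*
 * Editor's sketch, not part of the original file: the mask register at
 * 0x536 carries one bit per kernel irq above 15 (bit = irq - 16, set
 * means enabled), so enabling irq 18 writes 0x0004.  The helper name
 * is illustrative.
 */
static inline int mikasa_irq_to_mask_bit_example(int irq)
{
        return 1 << (irq - 16);         /* e.g. irq 18 -> 0x0004 */
}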
static void
mikasa_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary registers */
pld = (((~inw(0x534) & 0x0000ffffUL) << 16)
| (((unsigned long) inb(0xa0)) << 8)
| inb(0x20));
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i < 16) {
isa_device_interrupt(vector);
} else {
handle_irq(i);
}
}
}
static void __init
mikasa_init_irq(void)
{
long i;
if (alpha_using_srm)
alpha_mv.device_interrupt = srm_device_interrupt;
mikasa_update_irq_hw(0);
for (i = 16; i < 32; ++i) {
irq_set_chip_and_handler(i, &mikasa_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
common_init_isa_dma();
}
/*
* PCI Fixup configuration.
*
* Summary @ 0x536:
* Bit Meaning
* 0 Interrupt Line A from slot 0
* 1 Interrupt Line B from slot 0
* 2 Interrupt Line C from slot 0
* 3 Interrupt Line D from slot 0
* 4 Interrupt Line A from slot 1
* 5 Interrupt line B from slot 1
* 6 Interrupt Line C from slot 1
* 7 Interrupt Line D from slot 1
* 8 Interrupt Line A from slot 2
* 9 Interrupt Line B from slot 2
*10 Interrupt Line C from slot 2
*11 Interrupt Line D from slot 2
*12 NCR 810 SCSI
*13 Power Supply Fail
*14 Temperature Warn
*15 Reserved
*
* The device to slot mapping looks like:
*
* Slot Device
* 6 NCR SCSI controller
* 7 Intel PCI-EISA bridge chip
* 11 PCI on board slot 0
* 12 PCI on board slot 1
* 13 PCI on board slot 2
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static int __init
mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[8][5] __initdata = {
/*INT INTA INTB INTC INTD */
{16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
{ -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 19, ???? */
{ -1, -1, -1, -1, -1}, /* IdSel 20, ???? */
{ -1, -1, -1, -1, -1}, /* IdSel 21, ???? */
{ 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 0 */
{ 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */
{ 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 24, slot 2 */
};
const long min_idsel = 6, max_idsel = 13, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
static void
mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr)
{
#define MCHK_NO_DEVSEL 0x205U
#define MCHK_NO_TABT 0x204U
struct el_common *mchk_header;
unsigned int code;
mchk_header = (struct el_common *)la_ptr;
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
draina();
apecs_pci_clr_err();
wrmces(0x7);
mb();
code = mchk_header->code;
process_mcheck_info(vector, la_ptr, "MIKASA APECS",
(mcheck_expected(0)
&& (code == MCHK_NO_DEVSEL
|| code == MCHK_NO_TABT)));
}
#endif
/*
* The System Vector
*/
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector mikasa_mv __initmv = {
.vector_name = "Mikasa",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = mikasa_apecs_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 32,
.device_interrupt = mikasa_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = mikasa_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.pci_map_irq = mikasa_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(mikasa)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector mikasa_primo_mv __initmv = {
.vector_name = "Mikasa-Primo",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 32,
.device_interrupt = mikasa_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = mikasa_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cia_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = mikasa_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(mikasa_primo)
#endif


@ -0,0 +1,283 @@
/*
* linux/arch/alpha/kernel/sys_nautilus.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1998 Richard Henderson
* Copyright (C) 1999 Alpha Processor, Inc.,
* (David Daniel, Stig Telfer, Soohoon Lee)
*
* Code supporting NAUTILUS systems.
*
*
* NAUTILUS has the following I/O features:
*
* a) Driven by AMD 751 aka IRONGATE (northbridge):
* 4 PCI slots
* 1 AGP slot
*
* b) Driven by ALI M1543C (southbridge)
* 2 ISA slots
* 2 IDE connectors
* 1 dual drive capable FDD controller
* 2 serial ports
* 1 ECP/EPP/SP parallel port
* 2 USB ports
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/pgtable.h>
#include <asm/core_irongate.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "err_impl.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
static void __init
nautilus_init_irq(void)
{
if (alpha_using_srm) {
alpha_mv.device_interrupt = srm_device_interrupt;
}
init_i8259a_irqs();
common_init_isa_dma();
}
static int __init
nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/* Preserve the IRQ set up by the console. */
u8 irq;
/* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as
console reports. Check the device id of AGP bridge to distinguish
UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */
if (slot == 1 && pin == 2 &&
dev->bus->self && dev->bus->self->device == 0x700f)
return 5;
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
return irq;
}
void
nautilus_kill_arch(int mode)
{
struct pci_bus *bus = pci_isa_hose->bus;
u32 pmuport;
int off;
switch (mode) {
case LINUX_REBOOT_CMD_RESTART:
if (! alpha_using_srm) {
u8 t8;
pci_bus_read_config_byte(bus, 0x38, 0x43, &t8);
pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80);
outb(1, 0x92);
outb(0, 0x92);
/* NOTREACHED */
}
break;
case LINUX_REBOOT_CMD_POWER_OFF:
/* Assume M1543C */
off = 0x2000; /* SLP_TYPE = 0, SLP_EN = 1 */
pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport);
if (!pmuport) {
/* M1535D/D+ */
off = 0x3400; /* SLP_TYPE = 5, SLP_EN = 1 */
pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport);
}
pmuport &= 0xfffe;
outw(0xffff, pmuport); /* Clear pending events. */
outw(off, pmuport + 4);
/* NOTREACHED */
break;
}
}
/* Perform analysis of a machine check that arrived from the system (NMI) */
static void
naut_sys_machine_check(unsigned long vector, unsigned long la_ptr,
struct pt_regs *regs)
{
printk("PC %lx RA %lx\n", regs->pc, regs->r26);
irongate_pci_clr_err();
}
/* Machine checks can come from two sources - those on the CPU and those
in the system. They are analysed separately, but all start here. */
void
nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
{
char *mchk_class;
/* Now for some analysis. Machine checks fall into two classes --
those picked up by the system, and those picked up by the CPU.
Add to that the two levels of severity - correctable or not. */
if (vector == SCB_Q_SYSMCHK
&& ((IRONGATE0->dramms & 0x300) == 0x300)) {
unsigned long nmi_ctl;
/* Clear ALI NMI */
nmi_ctl = inb(0x61);
nmi_ctl |= 0x0c;
outb(nmi_ctl, 0x61);
nmi_ctl &= ~0x0c;
outb(nmi_ctl, 0x61);
/* Write again clears error bits. */
IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100;
mb();
IRONGATE0->stat_cmd;
/* Write again clears error bits. */
IRONGATE0->dramms = IRONGATE0->dramms;
mb();
IRONGATE0->dramms;
draina();
wrmces(0x7);
mb();
return;
}
if (vector == SCB_Q_SYSERR)
mchk_class = "Correctable";
else if (vector == SCB_Q_SYSMCHK)
mchk_class = "Fatal";
else {
ev6_machine_check(vector, la_ptr);
return;
}
printk(KERN_CRIT "NAUTILUS Machine check 0x%lx "
"[%s System Machine Check (NMI)]\n",
vector, mchk_class);
naut_sys_machine_check(vector, la_ptr, get_irq_regs());
/* Tell the PALcode to clear the machine check */
draina();
wrmces(0x7);
mb();
}
extern void pcibios_claim_one_bus(struct pci_bus *);
static struct resource irongate_io = {
.name = "Irongate PCI IO",
.flags = IORESOURCE_IO,
};
static struct resource irongate_mem = {
.name = "Irongate PCI MEM",
.flags = IORESOURCE_MEM,
};
void __init
nautilus_init_pci(void)
{
struct pci_controller *hose = hose_head;
struct pci_bus *bus;
struct pci_dev *irongate;
unsigned long bus_align, bus_size, pci_mem;
unsigned long memtop = max_low_pfn << PAGE_SHIFT;
/* Scan our single hose. */
bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
hose->bus = bus;
pcibios_claim_one_bus(bus);
irongate = pci_get_bus_and_slot(0, 0);
bus->self = irongate;
bus->resource[0] = &irongate_io;
bus->resource[1] = &irongate_mem;
pci_bus_size_bridges(bus);
/* IO port range. */
bus->resource[0]->start = 0;
bus->resource[0]->end = 0xffff;
/* Set up PCI memory range - limit is hardwired to 0xffffffff,
base must be aligned to 16Mb. */
bus_align = bus->resource[1]->start;
bus_size = bus->resource[1]->end + 1 - bus_align;
if (bus_align < 0x1000000UL)
bus_align = 0x1000000UL;
pci_mem = (0x100000000UL - bus_size) & -bus_align;
bus->resource[1]->start = pci_mem;
bus->resource[1]->end = 0xffffffffUL;
if (request_resource(&iomem_resource, bus->resource[1]) < 0)
printk(KERN_ERR "Failed to request MEM on hose 0\n");
if (pci_mem < memtop)
memtop = pci_mem;
if (memtop > alpha_mv.min_mem_address) {
free_reserved_area(__va(alpha_mv.min_mem_address),
__va(memtop), -1, NULL);
printk("nautilus_init_pci: %ldk freed\n",
(memtop - alpha_mv.min_mem_address) >> 10);
}
if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */
IRONGATE0->pci_mem = pci_mem;
pci_bus_assign_resources(bus);
/* pci_common_swizzle() relies on bus->self being NULL
for the root bus, so just clear it. */
bus->self = NULL;
pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
}
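/*
 * Editor's sketch, not part of the original file: the window placement
 * above.  (0x100000000UL - bus_size) & -bus_align is the highest
 * bus_align-aligned base below 4Gb that still fits the window; for
 * bus_size = 0x2000000 and bus_align = 0x1000000 it yields 0xfe000000.
 * The helper name is illustrative.
 */
static inline unsigned long nautilus_mem_base_example(unsigned long bus_size,
                                                      unsigned long bus_align)
{
        if (bus_align < 0x1000000UL)    /* enforce the 16Mb minimum */
                bus_align = 0x1000000UL;
        return (0x100000000UL - bus_size) & -bus_align;
}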
/*
* The System Vectors
*/
struct alpha_machine_vector nautilus_mv __initmv = {
.vector_name = "Nautilus",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_IRONGATE_IO,
.machine_check = nautilus_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = IRONGATE_DEFAULT_MEM_BASE,
.nr_irqs = 16,
.device_interrupt = isa_device_interrupt,
.init_arch = irongate_init_arch,
.init_irq = nautilus_init_irq,
.init_rtc = common_init_rtc,
.init_pci = nautilus_init_pci,
.kill_arch = nautilus_kill_arch,
.pci_map_irq = nautilus_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(nautilus)


@ -0,0 +1,336 @@
/*
* linux/arch/alpha/kernel/sys_noritake.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the NORITAKE (AlphaServer 1000A),
* CORELLE (AlphaServer 800), and ALCOR Primo (AlphaStation 600A).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/mce.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static int cached_irq_mask;
static inline void
noritake_update_irq_hw(int irq, int mask)
{
int port = 0x54a;
if (irq >= 32) {
mask >>= 16;
port = 0x54c;
}
outw(mask, port);
}
static void
noritake_enable_irq(struct irq_data *d)
{
noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
}
static void
noritake_disable_irq(struct irq_data *d)
{
noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
}
static struct irq_chip noritake_irq_type = {
.name = "NORITAKE",
.irq_unmask = noritake_enable_irq,
.irq_mask = noritake_disable_irq,
.irq_mask_ack = noritake_disable_irq,
};
static void
noritake_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary registers of NORITAKE */
pld = (((unsigned long) inw(0x54c) << 32)
| ((unsigned long) inw(0x54a) << 16)
| ((unsigned long) inb(0xa0) << 8)
| inb(0x20));
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i < 16) {
isa_device_interrupt(vector);
} else {
handle_irq(i);
}
}
}
static void
noritake_srm_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
/*
* I really hate to do this, too, but the NORITAKE SRM console also
* reports PCI vectors *lower* than I expected from the bit numbers
* in the documentation.
* But I really don't want to change the fixup code for allocation
* of IRQs, nor the alpha_irq_mask maintenance stuff, both of which
* look nice and clean now.
* So, here's this additional grotty hack... :-(
*/
if (irq >= 16)
irq = irq + 1;
handle_irq(irq);
}
static void __init
noritake_init_irq(void)
{
long i;
if (alpha_using_srm)
alpha_mv.device_interrupt = noritake_srm_device_interrupt;
outw(0, 0x54a);
outw(0, 0x54c);
for (i = 16; i < 48; ++i) {
irq_set_chip_and_handler(i, &noritake_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
common_init_isa_dma();
}
/*
* PCI Fixup configuration.
*
* Summary @ 0x542, summary register #1:
* Bit Meaning
* 0 All valid ints from summary regs 2 & 3
* 1 QLOGIC ISP1020A SCSI
* 2 Interrupt Line A from slot 0
* 3 Interrupt Line B from slot 0
* 4 Interrupt Line A from slot 1
* 5 Interrupt line B from slot 1
* 6 Interrupt Line A from slot 2
* 7 Interrupt Line B from slot 2
* 8 Interrupt Line A from slot 3
* 9 Interrupt Line B from slot 3
*10 Interrupt Line A from slot 4
*11 Interrupt Line B from slot 4
*12 Interrupt Line A from slot 5
*13 Interrupt Line B from slot 5
*14 Interrupt Line A from slot 6
*15 Interrupt Line B from slot 6
*
* Summary @ 0x544, summary register #2:
* Bit Meaning
* 0 OR of all unmasked ints in SR #2
* 1 OR of secondary bus ints
* 2 Interrupt Line C from slot 0
* 3 Interrupt Line D from slot 0
* 4 Interrupt Line C from slot 1
* 5 Interrupt line D from slot 1
* 6 Interrupt Line C from slot 2
* 7 Interrupt Line D from slot 2
* 8 Interrupt Line C from slot 3
* 9 Interrupt Line D from slot 3
*10 Interrupt Line C from slot 4
*11 Interrupt Line D from slot 4
*12 Interrupt Line C from slot 5
*13 Interrupt Line D from slot 5
*14 Interrupt Line C from slot 6
*15 Interrupt Line D from slot 6
*
* The device to slot mapping looks like:
*
* Slot Device
* 7 Intel PCI-EISA bridge chip
* 8 DEC PCI-PCI bridge chip
* 11 PCI on board slot 0
* 12 PCI on board slot 1
* 13 PCI on board slot 2
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
static int __init
noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[15][5] __initdata = {
/*INT INTA INTB INTC INTD */
/* note: IDSELs 16, 17, and 25 are CORELLE only */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
{ -1, -1, -1, -1, -1}, /* IdSel 17, S3 Trio64 */
{ -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 19, PPB */
{ -1, -1, -1, -1, -1}, /* IdSel 20, ???? */
{ -1, -1, -1, -1, -1}, /* IdSel 21, ???? */
{ 16+2, 16+2, 16+3, 32+2, 32+3}, /* IdSel 22, slot 0 */
{ 16+4, 16+4, 16+5, 32+4, 32+5}, /* IdSel 23, slot 1 */
{ 16+6, 16+6, 16+7, 32+6, 32+7}, /* IdSel 24, slot 2 */
{ 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 25, slot 3 */
/* The following 5 are actually on PCI bus 1, which is
across the built-in bridge of the NORITAKE only. */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
{ 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 17, slot 3 */
{16+10, 16+10, 16+11, 32+10, 32+11}, /* IdSel 18, slot 4 */
{16+12, 16+12, 16+13, 32+12, 32+13}, /* IdSel 19, slot 5 */
{16+14, 16+14, 16+15, 32+14, 32+15}, /* IdSel 20, slot 6 */
};
const long min_idsel = 5, max_idsel = 19, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static u8 __init
noritake_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
if (dev->bus->number == 0) {
slot = PCI_SLOT(dev->devfn);
}
/* Check for the built-in bridge */
else if (PCI_SLOT(dev->bus->self->devfn) == 8) {
slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */
}
else
{
/* Must be a card-based bridge. */
do {
if (PCI_SLOT(dev->bus->self->devfn) == 8) {
slot = PCI_SLOT(dev->devfn) + 15;
break;
}
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;
/* Slot of the next bridge. */
slot = PCI_SLOT(dev->devfn);
} while (dev->bus->self);
}
*pinp = pin;
return slot;
}
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
static void
noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr)
{
#define MCHK_NO_DEVSEL 0x205U
#define MCHK_NO_TABT 0x204U
struct el_common *mchk_header;
unsigned int code;
mchk_header = (struct el_common *)la_ptr;
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
draina();
apecs_pci_clr_err();
wrmces(0x7);
mb();
code = mchk_header->code;
process_mcheck_info(vector, la_ptr, "NORITAKE APECS",
(mcheck_expected(0)
&& (code == MCHK_NO_DEVSEL
|| code == MCHK_NO_TABT)));
}
#endif
/*
* The System Vectors
*/
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector noritake_mv __initmv = {
.vector_name = "Noritake",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = noritake_apecs_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 48,
.device_interrupt = noritake_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = noritake_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.pci_map_irq = noritake_map_irq,
.pci_swizzle = noritake_swizzle,
};
ALIAS_MV(noritake)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector noritake_primo_mv __initmv = {
.vector_name = "Noritake-Primo",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 48,
.device_interrupt = noritake_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = noritake_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cia_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = noritake_map_irq,
.pci_swizzle = noritake_swizzle,
};
ALIAS_MV(noritake_primo)
#endif


@ -0,0 +1,271 @@
/*
* linux/arch/alpha/kernel/sys_rawhide.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the RAWHIDE.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_mcpcia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/*
* HACK ALERT! only the boot cpu is used for interrupts.
*/
/* Note mask bit is true for ENABLED irqs. */
static unsigned int hose_irq_masks[4] = {
0xff0000, 0xfe0000, 0xff0000, 0xff0000
};
static unsigned int cached_irq_masks[4];
DEFINE_SPINLOCK(rawhide_irq_lock);
static inline void
rawhide_update_irq_hw(int hose, int mask)
{
*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)) = mask;
mb();
*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose));
}
#define hose_exists(h) \
(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
static inline void
rawhide_enable_irq(struct irq_data *d)
{
unsigned int mask, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
if (!hose_exists(hose)) /* if hose non-existent, exit */
return;
irq -= hose * 24;
mask = 1 << irq;
spin_lock(&rawhide_irq_lock);
mask |= cached_irq_masks[hose];
cached_irq_masks[hose] = mask;
rawhide_update_irq_hw(hose, mask);
spin_unlock(&rawhide_irq_lock);
}
static void
rawhide_disable_irq(struct irq_data *d)
{
unsigned int mask, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
if (!hose_exists(hose)) /* if hose non-existent, exit */
return;
irq -= hose * 24;
mask = ~(1 << irq) | hose_irq_masks[hose];
spin_lock(&rawhide_irq_lock);
mask &= cached_irq_masks[hose];
cached_irq_masks[hose] = mask;
rawhide_update_irq_hw(hose, mask);
spin_unlock(&rawhide_irq_lock);
}
static void
rawhide_mask_and_ack_irq(struct irq_data *d)
{
unsigned int mask, mask1, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
if (!hose_exists(hose)) /* if hose non-existent, exit */
return;
irq -= hose * 24;
mask1 = 1 << irq;
mask = ~mask1 | hose_irq_masks[hose];
spin_lock(&rawhide_irq_lock);
mask &= cached_irq_masks[hose];
cached_irq_masks[hose] = mask;
rawhide_update_irq_hw(hose, mask);
/* Clear the interrupt. */
*(vuip)MCPCIA_INT_REQ(MCPCIA_HOSE2MID(hose)) = mask1;
spin_unlock(&rawhide_irq_lock);
}
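/*
 * Editor's sketch, not part of the original file: the decomposition
 * shared by the three handlers above.  Kernel irq 16 + 24*hose + bit
 * maps back as below; e.g. irq 45 -> hose 1, mask bit 5.  The helper
 * name is illustrative.
 */
static inline void rawhide_irq_decompose_example(unsigned int irq,
                                                 unsigned int *hose,
                                                 unsigned int *bit)
{
        irq -= 16;              /* skip the 16 ISA irqs */
        *hose = irq / 24;       /* 24 mask bits per hose */
        *bit = irq % 24;
}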
static struct irq_chip rawhide_irq_type = {
.name = "RAWHIDE",
.irq_unmask = rawhide_enable_irq,
.irq_mask = rawhide_disable_irq,
.irq_mask_ack = rawhide_mask_and_ack_irq,
};
static void
rawhide_srm_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
/*
* The RAWHIDE SRM console reports PCI interrupts with a vector
* 0x80 *higher* than one might expect, as PCI IRQ 0 (ie bit 0)
* shows up as IRQ 24, etc, etc. We adjust it down by 8 to have
* it line up with the actual bit numbers from the REQ registers,
* which is how we manage the interrupts/mask. Sigh...
*
* Also, PCI #1 interrupts are offset some more... :-(
*/
if (irq == 52) {
/* SCSI on PCI1 is special. */
irq = 72;
}
/* Adjust by which hose it is from. */
irq -= ((irq + 16) >> 2) & 0x38;
handle_irq(irq);
}
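/*
 * Editor's sketch, not part of the original file: the fixup above with
 * numbers.  SRM reports hose-0 PCI bit 0 as irq 24; ((24+16)>>2) & 0x38
 * is 8, so the handler sees kernel irq 16.  Each further hose adds
 * another 8 to the correction.  The helper name is illustrative.
 */
static inline int rawhide_srm_adjust_example(int irq)
{
        if (irq == 52)          /* SCSI on PCI1 is special */
                irq = 72;
        return irq - (((irq + 16) >> 2) & 0x38);
}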
static void __init
rawhide_init_irq(void)
{
struct pci_controller *hose;
long i;
mcpcia_init_hoses();
/* Clear them all; only hoses that exist will be non-zero. */
for (i = 0; i < MCPCIA_MAX_HOSES; i++) cached_irq_masks[i] = 0;
for (hose = hose_head; hose; hose = hose->next) {
unsigned int h = hose->index;
unsigned int mask = hose_irq_masks[h];
cached_irq_masks[h] = mask;
*(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask;
*(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0;
}
for (i = 16; i < 128; ++i) {
irq_set_chip_and_handler(i, &rawhide_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
common_init_isa_dma();
}
/*
* PCI Fixup configuration.
*
* Summary @ MCPCIA_PCI0_INT_REQ:
* Bit Meaning
* 0 Interrupt Line A from slot 2 PCI0
* 1 Interrupt Line B from slot 2 PCI0
* 2 Interrupt Line C from slot 2 PCI0
* 3 Interrupt Line D from slot 2 PCI0
* 4 Interrupt Line A from slot 3 PCI0
* 5 Interrupt Line B from slot 3 PCI0
* 6 Interrupt Line C from slot 3 PCI0
* 7 Interrupt Line D from slot 3 PCI0
* 8 Interrupt Line A from slot 4 PCI0
* 9 Interrupt Line B from slot 4 PCI0
* 10 Interrupt Line C from slot 4 PCI0
* 11 Interrupt Line D from slot 4 PCI0
* 12 Interrupt Line A from slot 5 PCI0
* 13 Interrupt Line B from slot 5 PCI0
* 14 Interrupt Line C from slot 5 PCI0
* 15 Interrupt Line D from slot 5 PCI0
* 16 EISA interrupt (PCI 0) or SCSI interrupt (PCI 1)
* 17-23 NA
*
* IdSel
* 1 EISA bridge (PCI bus 0 only)
* 2 PCI option slot 2
* 3 PCI option slot 3
* 4 PCI option slot 4
* 5 PCI option slot 5
*
*/
static int __init
rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
{ 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
{ 16+ 4, 16+ 4, 16+ 5, 16+ 6, 16+ 7}, /* IdSel 3 slot 3 */
{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 4 slot 4 */
{ 16+12, 16+12, 16+13, 16+14, 16+15} /* IdSel 5 slot 5 */
};
const long min_idsel = 1, max_idsel = 5, irqs_per_slot = 5;
struct pci_controller *hose = dev->sysdata;
int irq = COMMON_TABLE_LOOKUP;
if (irq >= 0)
irq += 24 * hose->index;
return irq;
}
/*
* The System Vector
*/
struct alpha_machine_vector rawhide_mv __initmv = {
.vector_name = "Rawhide",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_MCPCIA_IO,
.machine_check = mcpcia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = MCPCIA_DEFAULT_MEM_BASE,
.pci_dac_offset = MCPCIA_DAC_OFFSET,
.nr_irqs = 128,
.device_interrupt = rawhide_srm_device_interrupt,
.init_arch = mcpcia_init_arch,
.init_irq = rawhide_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = NULL,
.pci_map_irq = rawhide_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(rawhide)


@ -0,0 +1,239 @@
/*
* linux/arch/alpha/kernel/sys_ruffian.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code supporting the RUFFIAN.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
static void __init
ruffian_init_irq(void)
{
/* Invert 6&7 for i82371 */
*(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb();
*(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb(); /* all clear */
outb(0x11,0xA0); /* slave ICW1: edge triggered, cascaded, ICW4 needed */
outb(0x08,0xA1); /* slave ICW2: vector base 0x08 */
outb(0x02,0xA1); /* slave ICW3: slave ID 2 */
outb(0x01,0xA1); /* slave ICW4: 8086 mode */
outb(0xFF,0xA1); /* slave OCW1: mask all inputs */
outb(0x11,0x20); /* master ICW1: edge triggered, cascaded, ICW4 needed */
outb(0x00,0x21); /* master ICW2: vector base 0x00 */
outb(0x04,0x21); /* master ICW3: slave on IR2 */
outb(0x01,0x21); /* master ICW4: 8086 mode */
outb(0xFF,0x21); /* master OCW1: mask all inputs */
/* Finish writing the 82C59A PIC Operation Control Words */
outb(0x20,0xA0);
outb(0x20,0x20);
init_i8259a_irqs();
/* Not interested in the bogus interrupts (0,3,6),
NMI (1), HALT (2), flash (5), or 21142 (8). */
init_pyxis_irqs(0x16f0000);
common_init_isa_dma();
}
#define RUFFIAN_LATCH DIV_ROUND_CLOSEST(PIT_TICK_RATE, HZ)
static void __init
ruffian_init_rtc(void)
{
/* Ruffian does not have the RTC connected to the CPU timer
interrupt. Instead, it uses the PIT connected to IRQ 0. */
/* Setup interval timer. */
outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
outb(RUFFIAN_LATCH & 0xff, 0x40); /* LSB */
outb(RUFFIAN_LATCH >> 8, 0x40); /* MSB */
outb(0xb6, 0x43); /* pit counter 2: speaker */
outb(0x31, 0x42);
outb(0x13, 0x42);
setup_irq(0, &timer_irqaction);
}
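/*
 * Editor's note, not part of the original file: RUFFIAN_LATCH is the
 * classic PIT divisor.  With the usual PIT_TICK_RATE of 1193182 and
 * HZ = 1024 (the common alpha setting), DIV_ROUND_CLOSEST gives 1165,
 * i.e. a timer tick of about 1024.2 Hz.  A minimal sketch of the same
 * rounding; the helper name is illustrative.
 */
static inline unsigned int pit_latch_example(unsigned int tick_rate,
                                             unsigned int hz)
{
        return (tick_rate + hz / 2) / hz;       /* 1193182, 1024 -> 1165 */
}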
static void
ruffian_kill_arch (int mode)
{
cia_kill_arch(mode);
#if 0
/* This only causes re-entry to ARCSBIOS */
/* Perhaps this works for other PYXIS as well? */
*(vuip) PYXIS_RESET = 0x0000dead;
mb();
#endif
}
/*
* Interrupt routing:
*
* Primary bus
* IdSel INTA INTB INTC INTD
* 21052 13 - - - -
* SIO 14 23 - - -
* 21143 15 44 - - -
* Slot 0 17 43 42 41 40
*
* Secondary bus
* IdSel INTA INTB INTC INTD
* Slot 0 8 (18) 19 18 17 16
* Slot 1 9 (19) 31 30 29 28
* Slot 2 10 (20) 27 26 25 24
* Slot 3 11 (21) 39 38 37 36
* Slot 4 12 (22) 35 34 33 32
* 53c875 13 (23) 20 - - -
*
*/
static int __init
ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[11][5] __initdata = {
/*INT INTA INTB INTC INTD */
{-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
{-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
{44, 44, 44, 44, 44}, /* IdSel 15, 21143 */
{-1, -1, -1, -1, -1}, /* IdSel 16, none */
{43, 43, 42, 41, 40}, /* IdSel 17, 64-bit slot */
/* the next 6 are actually on PCI bus 1, across the bridge */
{19, 19, 18, 17, 16}, /* IdSel 8, slot 0 */
{31, 31, 30, 29, 28}, /* IdSel 9, slot 1 */
{27, 27, 26, 25, 24}, /* IdSel 10, slot 2 */
{39, 39, 38, 37, 36}, /* IdSel 11, slot 3 */
{35, 35, 34, 33, 32}, /* IdSel 12, slot 4 */
{20, 20, 20, 20, 20}, /* IdSel 13, 53c875 */
};
const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static u8 __init
ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
if (dev->bus->number == 0) {
slot = PCI_SLOT(dev->devfn);
}
/* Check for the built-in bridge. */
else if (PCI_SLOT(dev->bus->self->devfn) == 13) {
slot = PCI_SLOT(dev->devfn) + 10;
}
else
{
/* Must be a card-based bridge. */
do {
if (PCI_SLOT(dev->bus->self->devfn) == 13) {
slot = PCI_SLOT(dev->devfn) + 10;
break;
}
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;
/* Slot of the next bridge. */
slot = PCI_SLOT(dev->devfn);
} while (dev->bus->self);
}
*pinp = pin;
return slot;
}
#ifdef BUILDING_FOR_MILO
/*
* The DeskStation Ruffian motherboard firmware does not place
* the memory size in the PALimpure area. Therefore, we use
* the Bank Configuration Registers in PYXIS to obtain the size.
*/
static unsigned long __init
ruffian_get_bank_size(unsigned long offset)
{
unsigned long bank_addr, bank, ret = 0;
/* Valid offsets are: 0x800, 0x840 and 0x880
since Ruffian only uses three banks. */
bank_addr = (unsigned long)PYXIS_MCR + offset;
bank = *(vulp)bank_addr;
/* Check BANK_ENABLE */
if (bank & 0x01) {
static unsigned long size[] __initdata = {
0x40000000UL, /* 0x00, 1G */
0x20000000UL, /* 0x02, 512M */
0x10000000UL, /* 0x04, 256M */
0x08000000UL, /* 0x06, 128M */
0x04000000UL, /* 0x08, 64M */
0x02000000UL, /* 0x0a, 32M */
0x01000000UL, /* 0x0c, 16M */
0x00800000UL, /* 0x0e, 8M */
0x80000000UL, /* 0x10, 2G */
};
bank = (bank & 0x1e) >> 1;
if (bank < ARRAY_SIZE(size))
ret = size[bank];
}
return ret;
}
#endif /* BUILDING_FOR_MILO */
/*
* The System Vector
*/
struct alpha_machine_vector ruffian_mv __initmv = {
.vector_name = "Ruffian",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = PYXIS_DAC_OFFSET,
.nr_irqs = 48,
.device_interrupt = pyxis_device_interrupt,
.init_arch = pyxis_init_arch,
.init_irq = ruffian_init_irq,
.init_rtc = ruffian_init_rtc,
.init_pci = cia_init_pci,
.kill_arch = ruffian_kill_arch,
.pci_map_irq = ruffian_map_irq,
.pci_swizzle = ruffian_swizzle,
};
ALIAS_MV(ruffian)


@ -0,0 +1,202 @@
/*
* linux/arch/alpha/kernel/sys_rx164.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the RX164 (PCA56+POLARIS).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_polaris.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
static inline void
rx164_update_irq_hw(unsigned long mask)
{
volatile unsigned int *irq_mask;
irq_mask = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x74);
*irq_mask = mask;
mb();
*irq_mask;
}
static inline void
rx164_enable_irq(struct irq_data *d)
{
rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
rx164_disable_irq(struct irq_data *d)
{
rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static struct irq_chip rx164_irq_type = {
.name = "RX164",
.irq_unmask = rx164_enable_irq,
.irq_mask = rx164_disable_irq,
.irq_mask_ack = rx164_disable_irq,
};
static void
rx164_device_interrupt(unsigned long vector)
{
unsigned long pld;
volatile unsigned int *dirr;
long i;
/* Read the interrupt summary register. On Polaris, this is
the DIRR register in PCI config space (offset 0x84). */
dirr = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x84);
pld = *dirr;
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 20) {
isa_no_iack_sc_device_interrupt(vector);
} else {
handle_irq(16+i);
}
}
}
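/*
 * Editor's sketch, not part of the original file: DIRR bit i dispatches
 * kernel irq 16+i, except bit 20, which is the ISA cascade and goes
 * through the i8259 path instead; e.g. bit 3 set in the DIRR delivers
 * kernel irq 19.  The helper name is illustrative.
 */
static inline int rx164_dirr_bit_to_irq_example(long i)
{
        return i == 20 ? -1 /* ISA cascade */ : 16 + i;
}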
static void __init
rx164_init_irq(void)
{
long i;
rx164_update_irq_hw(0);
for (i = 16; i < 40; ++i) {
irq_set_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
common_init_isa_dma();
setup_irq(16+20, &isa_cascade_irqaction);
}
/*
* The RX164 changed its interrupt routing between pass1 and pass2...
*
* PASS1:
*
* Slot IDSEL INTA INTB INTC INTD
* 0 6 5 10 15 20
* 1 7 4 9 14 19
* 2 5 3 8 13 18
* 3 9 2 7 12 17
* 4 10 1 6 11 16
*
* PASS2:
* Slot IDSEL INTA INTB INTC INTD
* 0 5 1 7 12 17
* 1 6 2 8 13 18
* 2 8 3 9 14 19
* 3 9 4 10 15 20
* 4 10 5 11 16 6
*
*/
/*
* IdSel
* 5 32 bit PCI option slot 0
* 6 64 bit PCI option slot 1
* 7 PCI-ISA bridge
* 8 64 bit PCI option slot 2
* 9 32 bit PCI option slot 3
* 10 PCI-PCI bridge
*
*/
static int __init
rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
#if 0
static char irq_tab_pass1[6][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+3, 16+3, 16+8, 16+13, 16+18}, /* IdSel 5, slot 2 */
{ 16+5, 16+5, 16+10, 16+15, 16+20}, /* IdSel 6, slot 0 */
{ 16+4, 16+4, 16+9, 16+14, 16+19}, /* IdSel 7, slot 1 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, PCI/ISA bridge */
{ 16+2, 16+2, 16+7, 16+12, 16+17}, /* IdSel 9, slot 3 */
{ 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
};
#else
static char irq_tab[6][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
{ 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
{ -1, -1, -1, -1, -1}, /* IdSel 7, PCI/ISA bridge */
{ 16+2, 16+2, 16+8, 16+13, 16+18}, /* IdSel 8, slot 2 */
{ 16+3, 16+3, 16+9, 16+14, 16+19}, /* IdSel 9, slot 3 */
{ 16+4, 16+4, 16+10, 16+15, 16+5}, /* IdSel 10, PCI-PCI */
};
#endif
const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5;
/* JRP - Need to figure out how to distinguish pass1 from pass2,
and use the correct table. */
return COMMON_TABLE_LOOKUP;
}
/*
* The System Vector
*/
struct alpha_machine_vector rx164_mv __initmv = {
.vector_name = "RX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_POLARIS_IO,
.machine_check = polaris_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = rx164_device_interrupt,
.init_arch = polaris_init_arch,
.init_irq = rx164_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = NULL,
.pci_map_irq = rx164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(rx164)


@ -0,0 +1,635 @@
/*
* linux/arch/alpha/kernel/sys_sable.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the Sable, Sable-Gamma, and Lynx systems.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_t2.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
DEFINE_SPINLOCK(sable_lynx_irq_lock);
typedef struct irq_swizzle_struct
{
char irq_to_mask[64];
char mask_to_irq[64];
/* Note mask bit is true for DISABLED irqs. */
unsigned long shadow_mask;
void (*update_irq_hw)(unsigned long bit, unsigned long mask);
void (*ack_irq_hw)(unsigned long bit);
} irq_swizzle_t;
static irq_swizzle_t *sable_lynx_irq_swizzle;
static void sable_lynx_init_irq(int nr_of_irqs);
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
/***********************************************************************/
/*
* For SABLE, which is really baroque, we manage 40 IRQs, but the
* hardware really only supports 24, not via normal ISA PIC,
* but cascaded custom 8259's, etc.
* 0-7 (char at 536)
* 8-15 (char at 53a)
* 16-23 (char at 53c)
*
* Summary Registers (536/53a/53c):
*
* Bit Meaning Kernel IRQ
*------------------------------------------
* 0 PCI slot 0 34
* 1 NCR810 (builtin) 33
* 2 TULIP (builtin) 32
* 3 mouse 12
* 4 PCI slot 1 35
* 5 PCI slot 2 36
* 6 keyboard 1
* 7 floppy 6
* 8 COM2 3
* 9 parallel port 7
*10 EISA irq 3 -
*11 EISA irq 4 -
*12 EISA irq 5 5
*13 EISA irq 6 -
*14 EISA irq 7 -
*15 COM1 4
*16 EISA irq 9 9
*17 EISA irq 10 10
*18 EISA irq 11 11
*19 EISA irq 12 -
*20 EISA irq 13 -
*21 EISA irq 14 14
*22 NC 15
*23 IIC -
*/
static void
sable_update_irq_hw(unsigned long bit, unsigned long mask)
{
int port = 0x537;
if (bit >= 16) {
port = 0x53d;
mask >>= 16;
} else if (bit >= 8) {
port = 0x53b;
mask >>= 8;
}
outb(mask, port);
}
static void
sable_ack_irq_hw(unsigned long bit)
{
int port, val1, val2;
if (bit >= 16) {
port = 0x53c;
val1 = 0xE0 | (bit - 16);
val2 = 0xE0 | 4;
} else if (bit >= 8) {
port = 0x53a;
val1 = 0xE0 | (bit - 8);
val2 = 0xE0 | 3;
} else {
port = 0x536;
val1 = 0xE0 | (bit - 0);
val2 = 0xE0 | 1;
}
outb(val1, port); /* ack the slave */
outb(val2, 0x534); /* ack the master */
}
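/*
 * Editor's sketch, not part of the original file: the ack above in one
 * table.  Summary bit 18 lives on slave 2 (port 0x53c): the slave gets
 * the specific EOI 0xE0 | (18 - 16) = 0xE2 and the master at 0x534
 * gets 0xE0 | 4 for cascade input 4.  The helper name is illustrative.
 */
static inline void sable_ack_values_example(unsigned long bit, int *port,
                                            int *val1, int *val2)
{
        static const int ports[3] = { 0x536, 0x53a, 0x53c };
        static const int cascade[3] = { 1, 3, 4 };
        int group = bit >> 3;           /* eight summary bits per slave */
        *port = ports[group];
        *val1 = 0xE0 | (bit & 7);       /* specific EOI on the slave */
        *val2 = 0xE0 | cascade[group];  /* specific EOI on the master */
}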
static irq_swizzle_t sable_irq_swizzle = {
{
-1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
-1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */
2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1 /* */
},
{
34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */
3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1 /* */
},
-1,
sable_update_irq_hw,
sable_ack_irq_hw
};
static void __init
sable_init_irq(void)
{
outb(-1, 0x537); /* slave 0 */
outb(-1, 0x53b); /* slave 1 */
outb(-1, 0x53d); /* slave 2 */
outb(0x44, 0x535); /* enable cascades in master */
sable_lynx_irq_swizzle = &sable_irq_swizzle;
sable_lynx_init_irq(40);
}
/*
* PCI Fixup configuration for ALPHA SABLE (2100).
*
* The device to slot mapping looks like:
*
* Slot Device
* 0 TULIP
* 1 SCSI
* 2 PCI-EISA bridge
* 3 none
* 4 none
* 5 none
* 6 PCI on board slot 0
* 7 PCI on board slot 1
* 8 PCI on board slot 2
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
/*
* NOTE: the IRQ assignments below are arbitrary, but need to be consistent
* with the values in the irq swizzling tables above.
*/
static int __init
sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[9][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
{ 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
{ -1, -1, -1, -1, -1}, /* IdSel 2, SIO */
{ -1, -1, -1, -1, -1}, /* IdSel 3, none */
{ -1, -1, -1, -1, -1}, /* IdSel 4, none */
{ -1, -1, -1, -1, -1}, /* IdSel 5, none */
{ 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */
{ 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */
{ 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */
};
long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
/***********************************************************************/
/* LYNX hardware specifics
*/
/*
* For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC.
*
* Bit Meaning Kernel IRQ
*------------------------------------------
* 0
* 1
* 2
* 3 mouse 12
* 4
* 5
* 6 keyboard 1
* 7 floppy 6
* 8 COM2 3
* 9 parallel port 7
*10 EISA irq 3 -
*11 EISA irq 4 -
*12 EISA irq 5 5
*13 EISA irq 6 -
*14 EISA irq 7 -
*15 COM1 4
*16 EISA irq 9 9
*17 EISA irq 10 10
*18 EISA irq 11 11
*19 EISA irq 12 -
*20
*21 EISA irq 14 14
*22 EISA irq 15 15
*23 IIC -
*24 VGA (builtin) -
*25
*26
*27
*28 NCR810 (builtin) 28
*29
*30
*31
*32 PCI 0 slot 4 A primary bus 32
*33 PCI 0 slot 4 B primary bus 33
*34 PCI 0 slot 4 C primary bus 34
*35 PCI 0 slot 4 D primary bus
*36 PCI 0 slot 5 A primary bus
*37 PCI 0 slot 5 B primary bus
*38 PCI 0 slot 5 C primary bus
*39 PCI 0 slot 5 D primary bus
*40 PCI 0 slot 6 A primary bus
*41 PCI 0 slot 6 B primary bus
*42 PCI 0 slot 6 C primary bus
*43 PCI 0 slot 6 D primary bus
*44 PCI 0 slot 7 A primary bus
*45 PCI 0 slot 7 B primary bus
*46 PCI 0 slot 7 C primary bus
*47 PCI 0 slot 7 D primary bus
*48 PCI 0 slot 0 A secondary bus
*49 PCI 0 slot 0 B secondary bus
*50 PCI 0 slot 0 C secondary bus
*51 PCI 0 slot 0 D secondary bus
*52 PCI 0 slot 1 A secondary bus
*53 PCI 0 slot 1 B secondary bus
*54 PCI 0 slot 1 C secondary bus
*55 PCI 0 slot 1 D secondary bus
*56 PCI 0 slot 2 A secondary bus
*57 PCI 0 slot 2 B secondary bus
*58 PCI 0 slot 2 C secondary bus
*59 PCI 0 slot 2 D secondary bus
*60 PCI 0 slot 3 A secondary bus
*61 PCI 0 slot 3 B secondary bus
*62 PCI 0 slot 3 C secondary bus
*63 PCI 0 slot 3 D secondary bus
*/
static void
lynx_update_irq_hw(unsigned long bit, unsigned long mask)
{
/*
* Write the AIR register on the T3/T4 with the
* address of the IC mask register (offset 0x40)
*/
*(vulp)T2_AIR = 0x40;
mb();
*(vulp)T2_AIR; /* re-read to force write */
mb();
*(vulp)T2_DIR = mask;
mb();
mb();
}
static void
lynx_ack_irq_hw(unsigned long bit)
{
*(vulp)T2_VAR = (u_long) bit;
mb();
mb();
}
static irq_swizzle_t lynx_irq_swizzle = {
{ /* irq_to_mask */
-1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
-1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */
-1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */
32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
},
{ /* mask_to_irq */
-1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */
3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
-1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */
32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
},
-1,
lynx_update_irq_hw,
lynx_ack_irq_hw
};
static void __init
lynx_init_irq(void)
{
sable_lynx_irq_swizzle = &lynx_irq_swizzle;
sable_lynx_init_irq(64);
}
/*
* PCI Fixup configuration for ALPHA LYNX (2100A)
*
* The device to slot mapping looks like:
*
* Slot Device
* 0 none
* 1 none
* 2 PCI-EISA bridge
* 3 PCI-PCI bridge
* 4 NCR 810 (Demi-Lynx only)
* 5 none
* 6 PCI on board slot 4
* 7 PCI on board slot 5
* 8 PCI on board slot 6
* 9 PCI on board slot 7
*
* And behind the PPB we have:
*
* 11 PCI on board slot 0
* 12 PCI on board slot 1
* 13 PCI on board slot 2
* 14 PCI on board slot 3
*/
/*
* NOTE: the IRQ assignments below are arbitrary, but need to be consistent
* with the values in the irq swizzling tables above.
*/
static int __init
lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[19][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
{ 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */
{ -1, -1, -1, -1, -1}, /* IdSel 16, none */
{ 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */
{ 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */
{ 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */
{ 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */
{ -1, -1, -1, -1, -1}, /* IdSel 22, none */
/* The following are actually behind the PPB. */
{ -1, -1, -1, -1, -1}, /* IdSel 16 none */
{ 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */
{ -1, -1, -1, -1, -1}, /* IdSel 18 none */
{ -1, -1, -1, -1, -1}, /* IdSel 19 none */
{ -1, -1, -1, -1, -1}, /* IdSel 20 none */
{ -1, -1, -1, -1, -1}, /* IdSel 21 none */
{ 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */
{ 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */
{ 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */
{ 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */
};
const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
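/*
 * COMMON_TABLE_LOOKUP comes from pci_impl.h; in rough outline it is a
 * bounds-checked index into irq_tab using the local min_idsel,
 * max_idsel and irqs_per_slot values (a sketch, not a verbatim copy of
 * the macro):
 */
#if 0
long example_lookup(long slot, long pin)  /* pin 0 = none, 1-4 = INTA-D */
{
	if (slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot)
		return irq_tab[slot - min_idsel][pin];
	return -1;
}
#endif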
static u8 __init
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
if (dev->bus->number == 0) {
slot = PCI_SLOT(dev->devfn);
}
/* Check for the built-in bridge */
else if (PCI_SLOT(dev->bus->self->devfn) == 3) {
slot = PCI_SLOT(dev->devfn) + 11;
}
else
{
/* Must be a card-based bridge. */
do {
if (PCI_SLOT(dev->bus->self->devfn) == 3) {
slot = PCI_SLOT(dev->devfn) + 11;
break;
}
pin = pci_swizzle_interrupt_pin(dev, pin);
/* Move up the chain of bridges. */
dev = dev->bus->self;
/* Slot of the next bridge. */
slot = PCI_SLOT(dev->devfn);
} while (dev->bus->self);
}
*pinp = pin;
return slot;
}
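/*
 * pci_swizzle_interrupt_pin() applies the standard PCI-to-PCI bridge
 * swizzle: each bridge rotates INTA-INTD by the device's slot number.
 * Equivalent arithmetic (illustrative sketch):
 */
#if 0
static u8 example_bridge_swizzle(u8 pin, u8 slot)
{
	return (((pin - 1) + slot) % 4) + 1;	/* INTA..INTD are 1..4 */
}
#endif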
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */
/***********************************************************************/
/* GENERIC irq routines */
static inline void
sable_lynx_enable_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
spin_unlock(&sable_lynx_irq_lock);
#if 0
printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n",
__func__, mask, bit, irq);
#endif
}
static void
sable_lynx_disable_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
spin_unlock(&sable_lynx_irq_lock);
#if 0
printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n",
__func__, mask, bit, irq);
#endif
}
static void
sable_lynx_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
sable_lynx_irq_swizzle->ack_irq_hw(bit);
spin_unlock(&sable_lynx_irq_lock);
}
static struct irq_chip sable_lynx_irq_type = {
.name = "SABLE/LYNX",
.irq_unmask = sable_lynx_enable_irq,
.irq_mask = sable_lynx_disable_irq,
.irq_mask_ack = sable_lynx_mask_and_ack_irq,
};
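/*
 * All three handlers above share one pattern: a software shadow of the
 * hardware mask (set bit == IRQ disabled) is updated under the lock and
 * then written out in a single update_irq_hw() call.  Reduced to its
 * essentials (illustrative; the names below are hypothetical):
 */
#if 0
static unsigned long shadow;	/* 1 = masked off */

static void example_set_masked(unsigned long bit, int masked)
{
	if (masked)
		shadow |= 1UL << bit;
	else
		shadow &= ~(1UL << bit);
	update_hw(bit, shadow);	/* push the whole mask to the chip */
}
#endif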
static void
sable_lynx_srm_device_interrupt(unsigned long vector)
{
/* Note that the vector reported by the SRM PALcode corresponds
to the interrupt mask bits, but we have to manage via the
so-called legacy IRQs for many common devices. */
int bit, irq;
bit = (vector - 0x800) >> 4;
irq = sable_lynx_irq_swizzle->mask_to_irq[bit];
#if 0
printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
__func__, vector, bit, irq);
#endif
handle_irq(irq);
}
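/*
 * SRM hands out device vectors starting at 0x800 in steps of 0x10, so
 * (vector - 0x800) >> 4 recovers the mask bit number: 0x800 -> 0,
 * 0x810 -> 1, 0x820 -> 2, and so on (illustrative sketch):
 */
#if 0
static int example_vector_to_bit(unsigned long vector)
{
	return (vector - 0x800) >> 4;
}
#endif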
static void __init
sable_lynx_init_irq(int nr_of_irqs)
{
long i;
for (i = 0; i < nr_of_irqs; ++i) {
irq_set_chip_and_handler(i, &sable_lynx_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
}
static void __init
sable_lynx_init_pci(void)
{
common_init_pci();
}
/*****************************************************************/
/*
* The System Vectors
*
* In order that T2_HAE_ADDRESS should be a constant, we play
* these games with GAMMA_BIAS.
*/
#if defined(CONFIG_ALPHA_GENERIC) || \
(defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS 0
struct alpha_machine_vector sable_mv __initmv = {
.vector_name = "Sable",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_T2_IO,
.machine_check = t2_machine_check,
.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = sable_init_irq,
.init_rtc = common_init_rtc,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = sable_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .t2 = {
.gamma_bias = 0
} }
};
ALIAS_MV(sable)
#endif /* GENERIC || (SABLE && !GAMMA) */
#if defined(CONFIG_ALPHA_GENERIC) || \
(defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector sable_gamma_mv __initmv = {
.vector_name = "Sable-Gamma",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_T2_IO,
.machine_check = t2_machine_check,
.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = sable_init_irq,
.init_rtc = common_init_rtc,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = sable_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .t2 = {
.gamma_bias = _GAMMA_BIAS
} }
};
ALIAS_MV(sable_gamma)
#endif /* GENERIC || (SABLE && GAMMA) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector lynx_mv __initmv = {
.vector_name = "Lynx",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_T2_IO,
.machine_check = t2_machine_check,
.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 64,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = lynx_init_irq,
.init_rtc = common_init_rtc,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = lynx_map_irq,
.pci_swizzle = lynx_swizzle,
.sys = { .t2 = {
.gamma_bias = _GAMMA_BIAS
} }
};
ALIAS_MV(lynx)
#endif /* GENERIC || LYNX */

461
arch/alpha/kernel/sys_sio.c Normal file
View file

@ -0,0 +1,461 @@
/*
* linux/arch/alpha/kernel/sys_sio.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code for all boards that route the PCI interrupts through the SIO
* PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB),
* Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <asm/compiler.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "pc873xx.h"
#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Save LCA configuration data as the console had it set up. */
struct
{
unsigned int orig_route_tab; /* for SAVE/RESTORE */
} saved_config __attribute((common));
#endif
static void __init
sio_init_irq(void)
{
if (alpha_using_srm)
alpha_mv.device_interrupt = srm_device_interrupt;
init_i8259a_irqs();
common_init_isa_dma();
}
static inline void __init
alphabook1_init_arch(void)
{
/* The AlphaBook1 has LCD video fixed at 800x600,
37 rows and 100 cols. */
screen_info.orig_y = 37;
screen_info.orig_video_cols = 100;
screen_info.orig_video_lines = 37;
lca_init_arch();
}
/*
* sio_route_tab selects irq routing in PCI/ISA bridge so that:
* PIRQ0 -> irq 15
* PIRQ1 -> irq 9
* PIRQ2 -> irq 10
* PIRQ3 -> irq 11
*
* This probably ought to be configurable via MILO. For
* example, sound boards seem to like using IRQ 9.
*
* This is NOT how we should do it. PIRQ0-X should have
* their own IRQs, the way intel uses the IO-APIC IRQs.
*/
static void __init
sio_pci_route(void)
{
unsigned int orig_route_tab;
/* First, ALWAYS read and print the original setting. */
pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
&orig_route_tab);
printk("%s: PIRQ original 0x%x new 0x%x\n", __func__,
orig_route_tab, alpha_mv.sys.sio.route_tab);
#if defined(ALPHA_RESTORE_SRM_SETUP)
saved_config.orig_route_tab = orig_route_tab;
#endif
/* Now override with desired setting. */
pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
alpha_mv.sys.sio.route_tab);
}
static unsigned int __init
sio_collect_irq_levels(void)
{
unsigned int level_bits = 0;
struct pci_dev *dev = NULL;
/* Iterate through the devices, collecting IRQ levels. */
for_each_pci_dev(dev) {
if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
(dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
continue;
if (dev->irq)
level_bits |= (1 << dev->irq);
}
return level_bits;
}
static void __init
sio_fixup_irq_levels(unsigned int level_bits)
{
unsigned int old_level_bits;
/*
* Now, make all PCI interrupts level sensitive. Notice:
* these registers must be accessed byte-wise. inw()/outw()
* don't work.
*
* Make sure to turn off any level bits set for IRQs 9,10,11,15,
* so that the only bits getting set are for devices actually found.
* Note that we do preserve the remainder of the bits, which we hope
* will be set correctly by ARC/SRM.
*
* Note: we at least preserve any level-set bits on AlphaBook1
*/
old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
level_bits |= (old_level_bits & 0x71ff);
outb((level_bits >> 0) & 0xff, 0x4d0);
outb((level_bits >> 8) & 0xff, 0x4d1);
}
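/*
 * Ports 0x4d0/0x4d1 are the i8259 ELCR (edge/level control) registers;
 * bit N set makes ISA IRQ N level-triggered.  The 0x71ff mask above
 * drops the old bits for IRQs 9, 10, 11 and 15, so those lines only
 * end up level-triggered when a PCI device was actually found on them.
 * Setting a single line would look like this (illustrative sketch):
 */
#if 0
static void example_set_level(unsigned int isa_irq)
{
	unsigned int elcr = inb(0x4d0) | (inb(0x4d1) << 8);

	elcr |= 1U << isa_irq;
	outb(elcr & 0xff, 0x4d0);
	outb((elcr >> 8) & 0xff, 0x4d1);
}
#endif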
static inline int __init
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/*
* The Noname board has 5 PCI slots with each of the 4
* interrupt pins routed to different pins on the PCI/ISA
* bridge (PIRQ0-PIRQ3). The table below is based on
* information available at:
*
* http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt
*
* I have no information on the Avanti interrupt routing, but
* the routing seems to be identical to the Noname except
* that the Avanti has an additional slot whose routing I'm
* unsure of.
*
* pirq_tab[0] is a fake entry to deal with old PCI boards
* that have the interrupt pin number hardwired to 0 (meaning
* that they use the default INTA line, if they are interrupt
* driven at all).
*/
static char irq_tab[][5] __initdata = {
/*INT A B C D */
{ 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
{ 2, 2, -1, -1, -1}, /* idsel 8 (Hack: slot closest ISA) */
{-1, -1, -1, -1, -1}, /* idsel 9 (unused) */
{-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
{ 0, 0, 2, 1, 0}, /* idsel 11 KN25_PCI_SLOT0 */
{ 1, 1, 0, 2, 1}, /* idsel 12 KN25_PCI_SLOT1 */
{ 2, 2, 1, 0, 2}, /* idsel 13 KN25_PCI_SLOT2 */
{ 0, 0, 0, 0, 0}, /* idsel 14 AS255 TULIP */
};
const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
int irq = COMMON_TABLE_LOOKUP, tmp;
tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
return irq >= 0 ? tmp : -1;
}
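/*
 * route_tab packs one ISA IRQ per byte, byte N giving the IRQ that
 * PIRQN is routed to, and __kernel_extbl(route_tab, irq) extracts byte
 * "irq".  For example, 0x0b0a090f routes PIRQ0 -> 15, PIRQ1 -> 9,
 * PIRQ2 -> 10 and PIRQ3 -> 11, matching the sio_route_tab comment
 * above.  Equivalent extraction (illustrative sketch):
 */
#if 0
static int example_pirq_to_isa_irq(unsigned long route_tab, int pirq)
{
	return (route_tab >> (pirq * 8)) & 0xff;
}
#endif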
static inline int __init
p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[][5] __initdata = {
/*INT A B C D */
{ 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
{ 1, 1, 2, 3, 0}, /* idsel 8 (slot A) */
{ 2, 2, 3, 0, 1}, /* idsel 9 (slot B) */
{-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
{-1, -1, -1, -1, -1}, /* idsel 11 (unused) */
{ 3, 3, -1, -1, -1}, /* idsel 12 (CMD0646) */
};
const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
int irq = COMMON_TABLE_LOOKUP, tmp;
tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
return irq >= 0 ? tmp : -1;
}
static inline void __init
noname_init_pci(void)
{
common_init_pci();
sio_pci_route();
sio_fixup_irq_levels(sio_collect_irq_levels());
if (pc873xx_probe() == -1) {
printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
} else {
printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
pc873xx_get_model(), pc873xx_get_base());
/* Enabling things in the Super IO chip doesn't actually
* configure and enable things, the legacy drivers still
* need to do the actual configuration and enabling.
* This only unblocks them.
*/
#if !defined(CONFIG_ALPHA_AVANTI)
/* Don't bother on the Avanti family.
* None of them had on-board IDE.
*/
pc873xx_enable_ide();
#endif
pc873xx_enable_epp19();
}
}
static inline void __init
alphabook1_init_pci(void)
{
struct pci_dev *dev;
unsigned char orig, config;
common_init_pci();
sio_pci_route();
/*
* On the AlphaBook1, the PCMCIA chip (Cirrus 6729)
* is sensitive to PCI bus bursts, so we must DISABLE
* burst mode for the NCR 8xx SCSI... :-(
*
* Note that the NCR810 SCSI driver must preserve the
* setting of the bit in order for this to work. At the
* moment (2.0.29), ncr53c8xx.c does NOT do this, but
* 53c7,8xx.c DOES.
*/
dev = NULL;
while ((dev = pci_get_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
if (dev->device == PCI_DEVICE_ID_NCR_53C810
|| dev->device == PCI_DEVICE_ID_NCR_53C815
|| dev->device == PCI_DEVICE_ID_NCR_53C820
|| dev->device == PCI_DEVICE_ID_NCR_53C825) {
unsigned long io_port;
unsigned char ctest4;
io_port = dev->resource[0].start;
ctest4 = inb(io_port+0x21);
if (!(ctest4 & 0x80)) {
printk("AlphaBook1 NCR init: setting"
" burst disable\n");
outb(ctest4 | 0x80, io_port+0x21);
}
}
}
/* Do not set *ANY* level triggers for AlphaBook1. */
sio_fixup_irq_levels(0);
/* Make sure that register PR1 indicates 1Mb mem */
outb(0x0f, 0x3ce); orig = inb(0x3cf); /* read PR5 */
outb(0x0f, 0x3ce); outb(0x05, 0x3cf); /* unlock PR0-4 */
outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */
if ((config & 0xc0) != 0xc0) {
printk("AlphaBook1 VGA init: setting 1Mb memory\n");
config |= 0xc0;
outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */
}
outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */
}
void
sio_kill_arch(int mode)
{
#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Since we cannot read the PCI DMA Window CSRs, we
* cannot restore them here.
*
* However, we CAN read the PIRQ route register, so restore it
* now...
*/
pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
saved_config.orig_route_tab);
#endif
}
/*
* The System Vectors
*/
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1)
struct alpha_machine_vector alphabook1_mv __initmv = {
.vector_name = "AlphaBook1",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
.machine_check = lca_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 16,
.device_interrupt = isa_device_interrupt,
.init_arch = alphabook1_init_arch,
.init_irq = sio_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alphabook1_init_pci,
.kill_arch = sio_kill_arch,
.pci_map_irq = noname_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .sio = {
/* NCR810 SCSI is 14, PCMCIA controller is 15. */
.route_tab = 0x0e0f0a0a,
}}
};
ALIAS_MV(alphabook1)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI)
struct alpha_machine_vector avanti_mv __initmv = {
.vector_name = "Avanti",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = apecs_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 16,
.device_interrupt = isa_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = sio_init_irq,
.init_rtc = common_init_rtc,
.init_pci = noname_init_pci,
.kill_arch = sio_kill_arch,
.pci_map_irq = noname_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .sio = {
.route_tab = 0x0b0a050f, /* leave 14 for IDE, 9 for SND */
}}
};
ALIAS_MV(avanti)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME)
struct alpha_machine_vector noname_mv __initmv = {
.vector_name = "Noname",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
.machine_check = lca_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 16,
.device_interrupt = srm_device_interrupt,
.init_arch = lca_init_arch,
.init_irq = sio_init_irq,
.init_rtc = common_init_rtc,
.init_pci = noname_init_pci,
.kill_arch = sio_kill_arch,
.pci_map_irq = noname_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .sio = {
/* For UDB, the only available PCI slot must not map to IRQ 9,
since that's the builtin MSS sound chip. That PCI slot
will map to PIRQ1 (for INTA at least), so we give it IRQ 15
instead.
Unfortunately we have to do this for NONAME as well, since
they are co-indicated when the platform type "Noname" is
selected... :-( */
.route_tab = 0x0b0a0f0d,
}}
};
ALIAS_MV(noname)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K)
struct alpha_machine_vector p2k_mv __initmv = {
.vector_name = "Platform2000",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
.machine_check = lca_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 16,
.device_interrupt = srm_device_interrupt,
.init_arch = lca_init_arch,
.init_irq = sio_init_irq,
.init_rtc = common_init_rtc,
.init_pci = noname_init_pci,
.kill_arch = sio_kill_arch,
.pci_map_irq = p2k_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .sio = {
.route_tab = 0x0b0a090f,
}}
};
ALIAS_MV(p2k)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL)
struct alpha_machine_vector xl_mv __initmv = {
.vector_name = "XL",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = apecs_machine_check,
.max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = XL_DEFAULT_MEM_BASE,
.nr_irqs = 16,
.device_interrupt = isa_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = sio_init_irq,
.init_rtc = common_init_rtc,
.init_pci = noname_init_pci,
.kill_arch = sio_kill_arch,
.pci_map_irq = noname_map_irq,
.pci_swizzle = common_swizzle,
.sys = { .sio = {
.route_tab = 0x0b0a090f,
}}
};
ALIAS_MV(xl)
#endif

178
arch/alpha/kernel/sys_sx164.c Normal file
View file

@ -0,0 +1,178 @@
/*
* linux/arch/alpha/kernel/sys_sx164.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code supporting the SX164 (PCA56+PYXIS).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include <asm/special_insns.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
static void __init
sx164_init_irq(void)
{
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
if (alpha_using_srm)
alpha_mv.device_interrupt = srm_device_interrupt;
init_i8259a_irqs();
/* Not interested in the bogus interrupts (0,3,4,5,40-47),
NMI (1), or HALT (2). */
if (alpha_using_srm)
init_srm_irqs(40, 0x3f0000);
else
init_pyxis_irqs(0xff00003f0000UL);
setup_irq(16+6, &timer_cascade_irqaction);
}
/*
* PCI Fixup configuration.
*
* Summary @ PYXIS_INT_REQ:
* Bit Meaning
* 0 RSVD
* 1 NMI
* 2 Halt/Reset switch
* 3 MBZ
* 4 RAZ
* 5 RAZ
* 6 Interval timer (RTC)
* 7 PCI-ISA Bridge
* 8 Interrupt Line A from slot 3
* 9 Interrupt Line A from slot 2
*10 Interrupt Line A from slot 1
*11 Interrupt Line A from slot 0
*12 Interrupt Line B from slot 3
*13 Interrupt Line B from slot 2
*14 Interrupt Line B from slot 1
*15 Interrupt Line B from slot 0
*16 Interrupt Line C from slot 3
*17 Interrupt Line C from slot 2
*18 Interrupt Line C from slot 1
*19 Interrupt Line C from slot 0
*20 Interrupt Line D from slot 3
*21 Interrupt Line D from slot 2
*22 Interrupt Line D from slot 1
*23 Interrupt Line D from slot 0
*
* IdSel
* 5 32 bit PCI option slot 2
* 6 64 bit PCI option slot 0
* 7 64 bit PCI option slot 1
* 8 Cypress I/O
* 9 32 bit PCI option slot 3
*/
static int __init
sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
{ 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
{ 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */
{ -1, -1, -1, -1, -1}, /* IdSel 8 SIO */
{ 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */
};
const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
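/*
 * The 16+ offsets exist because ISA IRQs occupy 0-15; PYXIS summary
 * bit N becomes Linux IRQ 16+N, so e.g. 16+8 above is "Interrupt Line
 * A from slot 3" in the register summary.  Illustrative sketch:
 */
#if 0
static int example_pyxis_bit_to_irq(int bit)
{
	return 16 + bit;	/* bit 8 (slot 3 INTA) -> IRQ 24 */
}
#endif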
static void __init
sx164_init_pci(void)
{
cia_init_pci();
SMC669_Init(0);
}
static void __init
sx164_init_arch(void)
{
/*
* OSF palcode v1.23 forgets to enable PCA56 Motion Video
* Instructions. Let's enable it.
* We have to check palcode revision because CSERVE interface
* is subject to change without notice. For example, it
* has been changed completely since v1.16 (found in MILO
* distribution). -ink
*/
struct percpu_struct *cpu = (struct percpu_struct*)
((char*)hwrpb + hwrpb->processor_offset);
if (amask(AMASK_MAX) != 0
&& alpha_using_srm
&& (cpu->pal_revision & 0xffff) <= 0x117) {
__asm__ __volatile__(
"lda $16,8($31)\n"
"call_pal 9\n" /* Allow PALRES insns in kernel mode */
".long 0x64000118\n\n" /* hw_mfpr $0,icsr */
"ldah $16,(1<<(19-16))($31)\n"
"or $0,$16,$0\n" /* set MVE bit */
".long 0x74000118\n" /* hw_mtpr $0,icsr */
"lda $16,9($31)\n"
"call_pal 9" /* Disable PALRES insns */
: : : "$0", "$16");
printk("PCA56 MVI set enabled\n");
}
pyxis_init_arch();
}
/*
* The System Vector
*/
struct alpha_machine_vector sx164_mv __initmv = {
.vector_name = "SX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = PYXIS_DAC_OFFSET,
.nr_irqs = 48,
.device_interrupt = pyxis_device_interrupt,
.init_arch = sx164_init_arch,
.init_irq = sx164_init_irq,
.init_rtc = common_init_rtc,
.init_pci = sx164_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = sx164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(sx164)

288
arch/alpha/kernel/sys_takara.c Normal file
View file

@ -0,0 +1,288 @@
/*
* linux/arch/alpha/kernel/sys_takara.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the TAKARA.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "pc873xx.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask[2] = { -1, -1 };
static inline void
takara_update_irq_hw(unsigned long irq, unsigned long mask)
{
int regaddr;
mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
outl(mask & 0xffff0000UL, regaddr);
}
static inline void
takara_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
takara_update_irq_hw(irq, mask);
}
static void
takara_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
takara_update_irq_hw(irq, mask);
}
static struct irq_chip takara_irq_type = {
.name = "TAKARA",
.irq_unmask = takara_enable_irq,
.irq_mask = takara_disable_irq,
.irq_mask_ack = takara_disable_irq,
};
static void
takara_device_interrupt(unsigned long vector)
{
unsigned intstatus;
/*
* The PALcode will have passed us vectors 0x800 or 0x810,
* which are fairly arbitrary values and serve only to tell
* us whether an interrupt has come in on IRQ0 or IRQ1. If
* it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
* probably ISA, but PCI interrupts can come through IRQ0
* as well if the interrupt controller isn't in accelerated
* mode.
*
* OTOH, the accelerator thing doesn't seem to be working
* overly well, so what we'll do instead is try directly
* examining the Master Interrupt Register to see if it's a
* PCI interrupt, and if _not_ then we'll pass it on to the
* ISA handler.
*/
intstatus = inw(0x500) & 15;
if (intstatus) {
/*
* This is a PCI interrupt. Check each bit and
* despatch an interrupt if it's set.
*/
if (intstatus & 8) handle_irq(16+3);
if (intstatus & 4) handle_irq(16+2);
if (intstatus & 2) handle_irq(16+1);
if (intstatus & 1) handle_irq(16+0);
} else {
isa_device_interrupt (vector);
}
}
static void
takara_srm_device_interrupt(unsigned long vector)
{
int irq = (vector - 0x800) >> 4;
handle_irq(irq);
}
static void __init
takara_init_irq(void)
{
long i;
init_i8259a_irqs();
if (alpha_using_srm) {
alpha_mv.device_interrupt = takara_srm_device_interrupt;
} else {
unsigned int ctlreg = inl(0x500);
/* Return to non-accelerated mode. */
ctlreg &= ~0x8000;
outl(ctlreg, 0x500);
/* Enable the PCI interrupt register. */
ctlreg = 0x05107c00;
outl(ctlreg, 0x500);
}
for (i = 16; i < 128; i += 16)
takara_update_irq_hw(i, -1);
for (i = 16; i < 128; ++i) {
irq_set_chip_and_handler(i, &takara_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
}
/*
* The Takara has PCI devices 1, 2, and 3 configured to slots 20,
* 19, and 18 respectively, in the default configuration. They can
* also be jumpered to slots 8, 7, and 6 respectively, which is fun
* because the SIO ISA bridge can also be slot 7. However, the SIO
* doesn't explicitly generate PCI-type interrupts, so we can
* assign it whatever the hell IRQ we like and it doesn't matter.
*/
static int __init
takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[15][5] __initdata = {
{ 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
{ 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
{ -1, -1, -1, -1, -1}, /* slot 9 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 10 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 11 == nothing */
/* These are behind the bridges. */
{ 12, 12, 13, 14, 15}, /* slot 12 == nothing */
{ 8, 8, 9, 10, 11}, /* slot 13 == nothing */
{ 4, 4, 5, 6, 7}, /* slot 14 == nothing */
{ 0, 0, 1, 2, 3}, /* slot 15 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 16 == nothing */
{64+ 0, 64+0, 64+1, 64+2, 64+3}, /* slot 17= device 4 */
{48+ 0, 48+0, 48+1, 48+2, 48+3}, /* slot 18= device 3 */
{32+ 0, 32+0, 32+1, 32+2, 32+3}, /* slot 19= device 2 */
{16+ 0, 16+0, 16+1, 16+2, 16+3}, /* slot 20= device 1 */
};
const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
int irq = COMMON_TABLE_LOOKUP;
if (irq >= 0 && irq < 16) {
/* Guess that we are behind a bridge. */
unsigned int busslot = PCI_SLOT(dev->bus->self->devfn);
irq += irq_tab[busslot-min_idsel][0];
}
return irq;
}
static int __init
takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[15][5] __initdata = {
{ 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
{ 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
{ -1, -1, -1, -1, -1}, /* slot 9 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 10 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 11 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 12 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 13 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 14 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 15 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 16 == nothing */
{ -1, -1, -1, -1, -1}, /* slot 17 == nothing */
{ 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 18 == device 3 */
{ 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 19 == device 2 */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 20 == device 1 */
};
const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static u8 __init
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot = PCI_SLOT(dev->devfn);
int pin = *pinp;
unsigned int ctlreg = inl(0x500);
unsigned int busslot;
if (!dev->bus->self)
return slot;
busslot = PCI_SLOT(dev->bus->self->devfn);
/* Check for built-in bridges. */
if (dev->bus->number != 0
&& busslot > 16
&& ((1<<(36-busslot)) & ctlreg)) {
if (pin == 1)
pin += (20 - busslot);
else {
printk(KERN_WARNING "takara_swizzle: can only "
"handle cards with INTA IRQ pin.\n");
}
} else {
/* Must be a card-based bridge. */
printk(KERN_WARNING "takara_swizzle: cannot handle "
"card-bridge behind builtin bridge yet.\n");
}
*pinp = pin;
return slot;
}
static void __init
takara_init_pci(void)
{
if (alpha_using_srm)
alpha_mv.pci_map_irq = takara_map_irq_srm;
cia_init_pci();
if (pc873xx_probe() == -1) {
printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
} else {
printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
pc873xx_get_model(), pc873xx_get_base());
pc873xx_enable_ide();
}
}
/*
* The System Vector
*/
struct alpha_machine_vector takara_mv __initmv = {
.vector_name = "Takara",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 128,
.device_interrupt = takara_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = takara_init_irq,
.init_rtc = common_init_rtc,
.init_pci = takara_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = takara_map_irq,
.pci_swizzle = takara_swizzle,
};
ALIAS_MV(takara)

419
arch/alpha/kernel/sys_titan.c Normal file
View file

@ -0,0 +1,419 @@
/*
* linux/arch/alpha/kernel/sys_titan.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996, 1999 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
* Copyright (C) 1999, 2000 Jeff Wiedemeier
*
* Code supporting TITAN systems (EV6+TITAN), currently:
* Privateer
* Falcon
* Granite
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"
/*
* Titan generic
*/
/*
* Titan supports up to 4 CPUs
*/
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
/*
* Mask is set (1) if enabled
*/
static unsigned long titan_cached_irq_mask;
/*
* Need SMP-safe access to interrupt CSRs
*/
DEFINE_SPINLOCK(titan_irq_lock);
static void
titan_update_irq_hw(unsigned long mask)
{
register titan_cchip *cchip = TITAN_cchip;
unsigned long isa_enable = 1UL << 55;
register int bcpu = boot_cpuid;
#ifdef CONFIG_SMP
cpumask_t cpm;
volatile unsigned long *dim0, *dim1, *dim2, *dim3;
unsigned long mask0, mask1, mask2, mask3, dummy;
cpumask_copy(&cpm, cpu_present_mask);
mask &= ~isa_enable;
mask0 = mask & titan_cpu_irq_affinity[0];
mask1 = mask & titan_cpu_irq_affinity[1];
mask2 = mask & titan_cpu_irq_affinity[2];
mask3 = mask & titan_cpu_irq_affinity[3];
if (bcpu == 0) mask0 |= isa_enable;
else if (bcpu == 1) mask1 |= isa_enable;
else if (bcpu == 2) mask2 |= isa_enable;
else mask3 |= isa_enable;
dim0 = &cchip->dim0.csr;
dim1 = &cchip->dim1.csr;
dim2 = &cchip->dim2.csr;
dim3 = &cchip->dim3.csr;
if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;
*dim0 = mask0;
*dim1 = mask1;
*dim2 = mask2;
*dim3 = mask3;
mb();
*dim0;
*dim1;
*dim2;
*dim3;
#else
volatile unsigned long *dimB;
dimB = &cchip->dim0.csr;
if (bcpu == 1) dimB = &cchip->dim1.csr;
else if (bcpu == 2) dimB = &cchip->dim2.csr;
else if (bcpu == 3) dimB = &cchip->dim3.csr;
*dimB = mask | isa_enable;
mb();
*dimB;
#endif
}
static inline void
titan_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask |= 1UL << (irq - 16);
titan_update_irq_hw(titan_cached_irq_mask);
spin_unlock(&titan_irq_lock);
}
static inline void
titan_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask &= ~(1UL << (irq - 16));
titan_update_irq_hw(titan_cached_irq_mask);
spin_unlock(&titan_irq_lock);
}
static void
titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
int cpu;
for (cpu = 0; cpu < 4; cpu++) {
if (cpumask_test_cpu(cpu, &affinity))
titan_cpu_irq_affinity[cpu] |= 1UL << irq;
else
titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
}
}
static int
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cpu_set_irq_affinity(irq - 16, *affinity);
titan_update_irq_hw(titan_cached_irq_mask);
spin_unlock(&titan_irq_lock);
return 0;
}
static void
titan_device_interrupt(unsigned long vector)
{
printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}
static void
titan_srm_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
handle_irq(irq);
}
static void __init
init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
irq_set_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
static struct irq_chip titan_irq_type = {
.name = "TITAN",
.irq_unmask = titan_enable_irq,
.irq_mask = titan_disable_irq,
.irq_mask_ack = titan_disable_irq,
.irq_set_affinity = titan_set_irq_affinity,
};
static irqreturn_t
titan_intr_nop(int irq, void *dev_id)
{
/*
* This is a NOP interrupt handler for the purposes of
* event counting -- just return.
*/
return IRQ_HANDLED;
}
static void __init
titan_init_irq(void)
{
if (alpha_using_srm && !alpha_mv.device_interrupt)
alpha_mv.device_interrupt = titan_srm_device_interrupt;
if (!alpha_mv.device_interrupt)
alpha_mv.device_interrupt = titan_device_interrupt;
titan_update_irq_hw(0);
init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}
static void __init
titan_legacy_init_irq(void)
{
/* init the legacy dma controller */
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
/* init the legacy irq controller */
init_i8259a_irqs();
/* init the titan irqs */
titan_init_irq();
}
void
titan_dispatch_irqs(u64 mask)
{
unsigned long vector;
/*
* Mask down to those interrupts which are enabled on this processor
*/
mask &= titan_cpu_irq_affinity[smp_processor_id()];
/*
* Dispatch all requested interrupts
*/
while (mask) {
/* convert to SRM vector... priority is <63> -> <0> */
vector = 63 - __kernel_ctlz(mask);
mask &= ~(1UL << vector); /* clear it out */
vector = 0x900 + (vector << 4); /* convert to SRM vector */
/* dispatch it */
alpha_mv.device_interrupt(vector);
}
}
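/*
 * __kernel_ctlz counts leading zeros, so 63 - ctlz(mask) is the highest
 * set bit; pending interrupts are thus dispatched from bit 63 down to
 * bit 0, each converted to SRM vector 0x900 + 16*bit.  A portable
 * sketch of the same loop (illustrative; uses the GCC builtin instead
 * of the Alpha intrinsic, and a hypothetical sink):
 */
#if 0
static void example_dispatch(unsigned long mask)
{
	while (mask) {
		int bit = 63 - __builtin_clzl(mask);	/* highest pending */
		mask &= ~(1UL << bit);
		deliver_vector(0x900 + (bit << 4));	/* hypothetical */
	}
}
#endif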
/*
* Titan Family
*/
static void __init
titan_request_irq(unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id)
{
int err;
err = request_irq(irq, handler, irqflags, devname, dev_id);
if (err) {
printk("titan_request_irq for IRQ %d returned %d; ignoring\n",
irq, err);
}
}
static void __init
titan_late_init(void)
{
/*
* Enable the system error interrupts. These interrupts are
* all reported to the kernel as machine checks, so the handler
* is a nop so it can be called to count the individual events.
*/
titan_request_irq(63+16, titan_intr_nop, 0,
"CChip Error", NULL);
titan_request_irq(62+16, titan_intr_nop, 0,
"PChip 0 H_Error", NULL);
titan_request_irq(61+16, titan_intr_nop, 0,
"PChip 1 H_Error", NULL);
titan_request_irq(60+16, titan_intr_nop, 0,
"PChip 0 C_Error", NULL);
titan_request_irq(59+16, titan_intr_nop, 0,
"PChip 1 C_Error", NULL);
/*
* Register our error handlers.
*/
titan_register_error_handlers();
/*
* Check if the console left us any error logs.
*/
cdl_check_console_data_log();
}
static int
titan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u8 intline;
int irq;
/* Get the current intline. */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
irq = intline;
/* Is it explicitly routed through ISA? */
if ((irq & 0xF0) == 0xE0)
return irq;
/* Offset by 16 to make room for ISA interrupts 0 - 15. */
return irq + 16;
}
static void __init
titan_init_pci(void)
{
/*
* This isn't really the right place, but there's some init
* that needs to be done after everything is basically up.
*/
titan_late_init();
/* Indicate that we trust the console to configure things properly */
pci_set_flags(PCI_PROBE_ONLY);
common_init_pci();
SMC669_Init(0);
locate_and_init_vga(NULL);
}
/*
* Privateer
*/
static void __init
privateer_init_pci(void)
{
/*
* Hook a couple of extra err interrupts that the
* common titan code won't.
*/
titan_request_irq(53+16, titan_intr_nop, 0,
"NMI", NULL);
titan_request_irq(50+16, titan_intr_nop, 0,
"Temperature Warning", NULL);
/*
* Finish with the common version.
*/
return titan_init_pci();
}
/*
* The System Vectors.
*/
struct alpha_machine_vector titan_mv __initmv = {
.vector_name = "TITAN",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TITAN_IO,
.machine_check = titan_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TITAN_DAC_OFFSET,
.nr_irqs = 80, /* 64 + 16 */
/* device_interrupt will be filled in by titan_init_irq */
.agp_info = titan_agp_info,
.init_arch = titan_init_arch,
.init_irq = titan_legacy_init_irq,
.init_rtc = common_init_rtc,
.init_pci = titan_init_pci,
.kill_arch = titan_kill_arch,
.pci_map_irq = titan_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(titan)
struct alpha_machine_vector privateer_mv __initmv = {
.vector_name = "PRIVATEER",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_TITAN_IO,
.machine_check = privateer_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = TITAN_DAC_OFFSET,
.nr_irqs = 80, /* 64 + 16 */
/* device_interrupt will be filled in by titan_init_irq */
.agp_info = titan_agp_info,
.init_arch = titan_init_arch,
.init_irq = titan_legacy_init_irq,
.init_rtc = common_init_rtc,
.init_pci = privateer_init_pci,
.kill_arch = titan_kill_arch,
.pci_map_irq = titan_map_irq,
.pci_swizzle = common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
in unconditionally with titan; setup_arch knows how to cope. */

349
arch/alpha/kernel/sys_wildfire.c Normal file
View file

@ -0,0 +1,349 @@
/*
* linux/arch/alpha/kernel/sys_wildfire.c
*
* Wildfire support.
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_wildfire.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
static unsigned long cached_irq_mask[WILDFIRE_NR_IRQS/(sizeof(long)*8)];
DEFINE_SPINLOCK(wildfire_irq_lock);
static int doing_init_irq_hw = 0;
static void
wildfire_update_irq_hw(unsigned int irq)
{
int qbbno = (irq >> 8) & (WILDFIRE_MAX_QBB - 1);
int pcano = (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1);
wildfire_pca *pca;
volatile unsigned long * enable0;
if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
if (!doing_init_irq_hw) {
printk(KERN_ERR "wildfire_update_irq_hw:"
" got irq %d for non-existent PCA %d"
" on QBB %d.\n",
irq, pcano, qbbno);
}
return;
}
pca = WILDFIRE_pca(qbbno, pcano);
enable0 = (unsigned long *) &pca->pca_int[0].enable; /* ??? */
*enable0 = cached_irq_mask[qbbno * WILDFIRE_PCA_PER_QBB + pcano];
mb();
*enable0;
}
static void __init
wildfire_init_irq_hw(void)
{
#if 0
register wildfire_pca * pca = WILDFIRE_pca(0, 0);
volatile unsigned long * enable0, * enable1, * enable2, *enable3;
volatile unsigned long * target0, * target1, * target2, *target3;
enable0 = (unsigned long *) &pca->pca_int[0].enable;
enable1 = (unsigned long *) &pca->pca_int[1].enable;
enable2 = (unsigned long *) &pca->pca_int[2].enable;
enable3 = (unsigned long *) &pca->pca_int[3].enable;
target0 = (unsigned long *) &pca->pca_int[0].target;
target1 = (unsigned long *) &pca->pca_int[1].target;
target2 = (unsigned long *) &pca->pca_int[2].target;
target3 = (unsigned long *) &pca->pca_int[3].target;
*enable0 = *enable1 = *enable2 = *enable3 = 0;
*target0 = (1UL<<8) | WILDFIRE_QBB(0);
*target1 = *target2 = *target3 = 0;
mb();
*enable0; *enable1; *enable2; *enable3;
*target0; *target1; *target2; *target3;
#else
int i;
doing_init_irq_hw = 1;
/* Need to update only once for every possible PCA. */
for (i = 0; i < WILDFIRE_NR_IRQS; i+=WILDFIRE_IRQ_PER_PCA)
wildfire_update_irq_hw(i);
doing_init_irq_hw = 0;
#endif
}
static void
wildfire_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_enable_irq(d);
spin_lock(&wildfire_irq_lock);
set_bit(irq, &cached_irq_mask);
wildfire_update_irq_hw(irq);
spin_unlock(&wildfire_irq_lock);
}
static void
wildfire_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_disable_irq(d);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
wildfire_update_irq_hw(irq);
spin_unlock(&wildfire_irq_lock);
}
static void
wildfire_mask_and_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_mask_and_ack_irq(d);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
wildfire_update_irq_hw(irq);
spin_unlock(&wildfire_irq_lock);
}
static struct irq_chip wildfire_irq_type = {
.name = "WILDFIRE",
.irq_unmask = wildfire_enable_irq,
.irq_mask = wildfire_disable_irq,
.irq_mask_ack = wildfire_mask_and_ack_irq,
};
static void __init
wildfire_init_irq_per_pca(int qbbno, int pcano)
{
int i, irq_bias;
static struct irqaction isa_enable = {
.handler = no_action,
.name = "isa_enable",
};
irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
+ pcano * WILDFIRE_IRQ_PER_PCA;
#if 0
unsigned long io_bias;
/* Only need the following for first PCI bus per PCA. */
io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
outb(0, DMA1_RESET_REG + io_bias);
outb(0, DMA2_RESET_REG + io_bias);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
outb(0, DMA2_MASK_REG + io_bias);
#endif
#if 0
/* ??? Not sure how to do this, yet... */
init_i8259a_irqs(); /* ??? */
#endif
for (i = 0; i < 16; ++i) {
if (i == 2)
continue;
irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
for (i = 40; i < 64; ++i) {
irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
setup_irq(32+irq_bias, &isa_enable);
}
static void __init
wildfire_init_irq(void)
{
int qbbno, pcano;
#if 1
wildfire_init_irq_hw();
init_i8259a_irqs();
#endif
for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
if (WILDFIRE_QBB_EXISTS(qbbno)) {
for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
if (WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
wildfire_init_irq_per_pca(qbbno, pcano);
}
}
}
}
}
static void
wildfire_device_interrupt(unsigned long vector)
{
int irq;
irq = (vector - 0x800) >> 4;
/*
* bits 10-8: source QBB ID
* bits 7-6: PCA
* bits 5-0: irq in PCA
*/
handle_irq(irq);
return;
}
/*
* PCI Fixup configuration.
*
* Summary per PCA (2 PCI or HIPPI buses):
*
* Bit Meaning
* 0-15 ISA
*
*32 ISA summary
*33 SMI
*34 NMI
*36 builtin QLogic SCSI (or slot 0 if no IO module)
*40 Interrupt Line A from slot 2 PCI0
*41 Interrupt Line B from slot 2 PCI0
*42 Interrupt Line C from slot 2 PCI0
*43 Interrupt Line D from slot 2 PCI0
*44 Interrupt Line A from slot 3 PCI0
*45 Interrupt Line B from slot 3 PCI0
*46 Interrupt Line C from slot 3 PCI0
*47 Interrupt Line D from slot 3 PCI0
*
*48 Interrupt Line A from slot 4 PCI1
*49 Interrupt Line B from slot 4 PCI1
*50 Interrupt Line C from slot 4 PCI1
*51 Interrupt Line D from slot 4 PCI1
*52 Interrupt Line A from slot 5 PCI1
*53 Interrupt Line B from slot 5 PCI1
*54 Interrupt Line C from slot 5 PCI1
*55 Interrupt Line D from slot 5 PCI1
*56 Interrupt Line A from slot 6 PCI1
*57 Interrupt Line B from slot 6 PCI1
*58 Interrupt Line C from slot 6 PCI1
*59 Interrupt Line D from slot 6 PCI1
*60 Interrupt Line A from slot 7 PCI1
*61 Interrupt Line B from slot 7 PCI1
*62 Interrupt Line C from slot 7 PCI1
*63 Interrupt Line D from slot 7 PCI1
*
*
* IdSel
* 0 Cypress Bridge I/O (ISA summary interrupt)
* 1 64 bit PCI 0 option slot 1 (SCSI QLogic builtin)
* 2 64 bit PCI 0 option slot 2
* 3 64 bit PCI 0 option slot 3
* 4 64 bit PCI 1 option slot 4
* 5 64 bit PCI 1 option slot 5
* 6 64 bit PCI 1 option slot 6
* 7 64 bit PCI 1 option slot 7
*/
static int __init
wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[8][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
{ 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
{ 40, 40, 40+1, 40+2, 40+3}, /* IdSel 2 PCI 0 slot 2 */
{ 44, 44, 44+1, 44+2, 44+3}, /* IdSel 3 PCI 0 slot 3 */
{ 48, 48, 48+1, 48+2, 48+3}, /* IdSel 4 PCI 1 slot 4 */
{ 52, 52, 52+1, 52+2, 52+3}, /* IdSel 5 PCI 1 slot 5 */
{ 56, 56, 56+1, 56+2, 56+3}, /* IdSel 6 PCI 1 slot 6 */
{ 60, 60, 60+1, 60+2, 60+3}, /* IdSel 7 PCI 1 slot 7 */
};
long min_idsel = 0, max_idsel = 7, irqs_per_slot = 5;
struct pci_controller *hose = dev->sysdata;
int irq = COMMON_TABLE_LOOKUP;
if (irq > 0) {
int qbbno = hose->index >> 3;
int pcano = (hose->index >> 1) & 3;
irq += (qbbno << 8) + (pcano << 6);
}
return irq;
}
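/*
 * A Wildfire IRQ number is a packed field: bits 5-0 select the
 * interrupt within a PCA, bits 7-6 the PCA, and bits 10-8 the QBB.
 * That is why the table result above gets (qbbno << 8) + (pcano << 6)
 * added, and why wildfire_update_irq_hw() can take the same fields
 * back apart.  Illustrative encode/decode:
 */
#if 0
static int example_encode(int qbb, int pca, int local_irq)
{
	return (qbb << 8) | (pca << 6) | local_irq;
}

static void example_decode(int irq, int *qbb, int *pca, int *local_irq)
{
	*qbb = (irq >> 8) & 7;	/* bits 10-8 */
	*pca = (irq >> 6) & 3;	/* bits 7-6 */
	*local_irq = irq & 63;	/* bits 5-0 */
}
#endif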
/*
* The System Vectors
*/
struct alpha_machine_vector wildfire_mv __initmv = {
.vector_name = "WILDFIRE",
DO_EV6_MMU,
DO_DEFAULT_RTC,
DO_WILDFIRE_IO,
.machine_check = wildfire_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.nr_irqs = WILDFIRE_NR_IRQS,
.device_interrupt = wildfire_device_interrupt,
.init_arch = wildfire_init_arch,
.init_irq = wildfire_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = wildfire_kill_arch,
.pci_map_irq = wildfire_map_irq,
.pci_swizzle = common_swizzle,
.pa_to_nid = wildfire_pa_to_nid,
.cpuid_to_nid = wildfire_cpuid_to_nid,
.node_mem_start = wildfire_node_mem_start,
.node_mem_size = wildfire_node_mem_size,
};
ALIAS_MV(wildfire)

539
arch/alpha/kernel/systbls.S Normal file
View file

@ -0,0 +1,539 @@
/*
* arch/alpha/kernel/systbls.S
*
* The system call table.
*/
#include <asm/unistd.h>
.data
.align 3
.globl sys_call_table
sys_call_table:
.quad alpha_ni_syscall /* 0 */
.quad sys_exit
.quad alpha_fork
.quad sys_read
.quad sys_write
.quad alpha_ni_syscall /* 5 */
.quad sys_close
.quad sys_osf_wait4
.quad alpha_ni_syscall
.quad sys_link
.quad sys_unlink /* 10 */
.quad alpha_ni_syscall
.quad sys_chdir
.quad sys_fchdir
.quad sys_mknod
.quad sys_chmod /* 15 */
.quad sys_chown
.quad sys_osf_brk
.quad alpha_ni_syscall
.quad sys_lseek
.quad sys_getxpid /* 20 */
.quad sys_osf_mount
.quad sys_umount
.quad sys_setuid
.quad sys_getxuid
.quad alpha_ni_syscall /* 25 */
.quad sys_ptrace
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 30 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_access
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 35 */
.quad sys_sync
.quad sys_kill
.quad alpha_ni_syscall
.quad sys_setpgid
.quad alpha_ni_syscall /* 40 */
.quad sys_dup
.quad sys_alpha_pipe
.quad sys_osf_set_program_attributes
.quad alpha_ni_syscall
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_getxgid
.quad sys_osf_sigprocmask
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 50 */
.quad sys_acct
.quad sys_sigpending
.quad alpha_ni_syscall
.quad sys_ioctl
.quad alpha_ni_syscall /* 55 */
.quad alpha_ni_syscall
.quad sys_symlink
.quad sys_readlink
.quad sys_execve
.quad sys_umask /* 60 */
.quad sys_chroot
.quad alpha_ni_syscall
.quad sys_getpgrp
.quad sys_getpagesize
.quad alpha_ni_syscall /* 65 */
.quad alpha_vfork
.quad sys_newstat
.quad sys_newlstat
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 70 */
.quad sys_osf_mmap
.quad alpha_ni_syscall
.quad sys_munmap
.quad sys_mprotect
.quad sys_madvise /* 75 */
.quad sys_vhangup
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_getgroups
/* map BSD's setpgrp to sys_setpgid for binary compatibility: */
.quad sys_setgroups /* 80 */
.quad alpha_ni_syscall
.quad sys_setpgid
.quad sys_osf_setitimer
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 85 */
.quad sys_osf_getitimer
.quad sys_gethostname
.quad sys_sethostname
.quad sys_getdtablesize
.quad sys_dup2 /* 90 */
.quad sys_newfstat
.quad sys_fcntl
.quad sys_osf_select
.quad sys_poll
.quad sys_fsync /* 95 */
.quad sys_setpriority
.quad sys_socket
.quad sys_connect
.quad sys_accept
.quad sys_osf_getpriority /* 100 */
.quad sys_send
.quad sys_recv
.quad sys_sigreturn
.quad sys_bind
.quad sys_setsockopt /* 105 */
.quad sys_listen
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 110 */
.quad sys_sigsuspend
.quad sys_osf_sigstack
.quad sys_recvmsg
.quad sys_sendmsg
.quad alpha_ni_syscall /* 115 */
.quad sys_osf_gettimeofday
.quad sys_osf_getrusage
.quad sys_getsockopt
.quad alpha_ni_syscall
#ifdef CONFIG_OSF4_COMPAT
.quad sys_osf_readv /* 120 */
.quad sys_osf_writev
#else
.quad sys_readv /* 120 */
.quad sys_writev
#endif
.quad sys_osf_settimeofday
.quad sys_fchown
.quad sys_fchmod
.quad sys_recvfrom /* 125 */
.quad sys_setreuid
.quad sys_setregid
.quad sys_rename
.quad sys_truncate
.quad sys_ftruncate /* 130 */
.quad sys_flock
.quad sys_setgid
.quad sys_sendto
.quad sys_shutdown
.quad sys_socketpair /* 135 */
.quad sys_mkdir
.quad sys_rmdir
.quad sys_osf_utimes
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 140 */
.quad sys_getpeername
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_getrlimit
.quad sys_setrlimit /* 145 */
.quad alpha_ni_syscall
.quad sys_setsid
.quad sys_quotactl
.quad alpha_ni_syscall
.quad sys_getsockname /* 150 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 155 */
.quad sys_osf_sigaction
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_osf_getdirentries
.quad sys_osf_statfs /* 160 */
.quad sys_osf_fstatfs
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_osf_getdomainname /* 165 */
.quad sys_setdomainname
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 170 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 175 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 180 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 185 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 190 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 195 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
/* The OSF swapon has two extra arguments, but we ignore them. */
.quad sys_swapon
.quad sys_msgctl /* 200 */
.quad sys_msgget
.quad sys_msgrcv
.quad sys_msgsnd
.quad sys_semctl
.quad sys_semget /* 205 */
.quad sys_semop
.quad sys_osf_utsname
.quad sys_lchown
.quad sys_shmat
.quad sys_shmctl /* 210 */
.quad sys_shmdt
.quad sys_shmget
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 215 */
.quad alpha_ni_syscall
.quad sys_msync
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 220 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_osf_stat
.quad sys_osf_lstat /* 225 */
.quad sys_osf_fstat
.quad sys_osf_statfs64
.quad sys_osf_fstatfs64
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 230 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_getpgid
.quad sys_getsid
.quad sys_sigaltstack /* 235 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 240 */
.quad sys_osf_sysinfo
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_osf_proplist_syscall
.quad alpha_ni_syscall /* 245 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 250 */
.quad sys_osf_usleep_thread
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad sys_sysfs
.quad alpha_ni_syscall /* 255 */
.quad sys_osf_getsysinfo
.quad sys_osf_setsysinfo
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 260 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 265 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 270 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 275 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 280 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 285 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 290 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 295 */
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
/* linux-specific system calls start at 300 */
.quad sys_bdflush /* 300 */
.quad sys_sethae
.quad sys_mount
.quad sys_old_adjtimex
.quad sys_swapoff
.quad sys_getdents /* 305 */
.quad sys_ni_syscall /* 306: old create_module */
.quad sys_init_module
.quad sys_delete_module
.quad sys_ni_syscall /* 309: old get_kernel_syms */
.quad sys_syslog /* 310 */
.quad sys_reboot
.quad alpha_clone
.quad sys_uselib
.quad sys_mlock
.quad sys_munlock /* 315 */
.quad sys_mlockall
.quad sys_munlockall
.quad sys_sysinfo
.quad sys_sysctl
.quad sys_ni_syscall /* 320 */
.quad sys_oldumount
.quad sys_swapon
.quad sys_times
.quad sys_personality
.quad sys_setfsuid /* 325 */
.quad sys_setfsgid
.quad sys_ustat
.quad sys_statfs
.quad sys_fstatfs
.quad sys_sched_setparam /* 330 */
.quad sys_sched_getparam
.quad sys_sched_setscheduler
.quad sys_sched_getscheduler
.quad sys_sched_yield
.quad sys_sched_get_priority_max /* 335 */
.quad sys_sched_get_priority_min
.quad sys_sched_rr_get_interval
.quad sys_ni_syscall /* sys_afs_syscall */
.quad sys_newuname
.quad sys_nanosleep /* 340 */
.quad sys_mremap
.quad sys_ni_syscall /* old nfsservctl */
.quad sys_setresuid
.quad sys_getresuid
.quad sys_pciconfig_read /* 345 */
.quad sys_pciconfig_write
.quad sys_ni_syscall /* 347: old query_module */
.quad sys_prctl
.quad sys_pread64
.quad sys_pwrite64 /* 350 */
.quad sys_rt_sigreturn
.quad sys_rt_sigaction
.quad sys_rt_sigprocmask
.quad sys_rt_sigpending
.quad sys_rt_sigtimedwait /* 355 */
.quad sys_rt_sigqueueinfo
.quad sys_rt_sigsuspend
.quad sys_select
.quad sys_gettimeofday
.quad sys_settimeofday /* 360 */
.quad sys_getitimer
.quad sys_setitimer
.quad sys_utimes
.quad sys_getrusage
.quad sys_wait4 /* 365 */
.quad sys_adjtimex
.quad sys_getcwd
.quad sys_capget
.quad sys_capset
.quad sys_sendfile64 /* 370 */
.quad sys_setresgid
.quad sys_getresgid
.quad sys_ni_syscall /* sys_dipc */
.quad sys_pivot_root
.quad sys_mincore /* 375 */
.quad sys_pciconfig_iobase
.quad sys_getdents64
.quad sys_gettid
.quad sys_readahead
.quad sys_ni_syscall /* 380 */
.quad sys_tkill
.quad sys_setxattr
.quad sys_lsetxattr
.quad sys_fsetxattr
.quad sys_getxattr /* 385 */
.quad sys_lgetxattr
.quad sys_fgetxattr
.quad sys_listxattr
.quad sys_llistxattr
.quad sys_flistxattr /* 390 */
.quad sys_removexattr
.quad sys_lremovexattr
.quad sys_fremovexattr
.quad sys_futex
.quad sys_sched_setaffinity /* 395 */
.quad sys_sched_getaffinity
.quad sys_ni_syscall /* 397, tux */
.quad sys_io_setup
.quad sys_io_destroy
.quad sys_io_getevents /* 400 */
.quad sys_io_submit
.quad sys_io_cancel
.quad sys_ni_syscall /* 403, sys_alloc_hugepages */
.quad sys_ni_syscall /* 404, sys_free_hugepages */
.quad sys_exit_group /* 405 */
.quad sys_lookup_dcookie
.quad sys_epoll_create
.quad sys_epoll_ctl
.quad sys_epoll_wait
.quad sys_remap_file_pages /* 410 */
.quad sys_set_tid_address
.quad sys_restart_syscall
.quad sys_fadvise64
.quad sys_timer_create
.quad sys_timer_settime /* 415 */
.quad sys_timer_gettime
.quad sys_timer_getoverrun
.quad sys_timer_delete
.quad sys_clock_settime
.quad sys_clock_gettime /* 420 */
.quad sys_clock_getres
.quad sys_clock_nanosleep
.quad sys_semtimedop
.quad sys_tgkill
.quad sys_stat64 /* 425 */
.quad sys_lstat64
.quad sys_fstat64
.quad sys_ni_syscall /* sys_vserver */
.quad sys_ni_syscall /* sys_mbind */
.quad sys_ni_syscall /* sys_get_mempolicy */
.quad sys_ni_syscall /* sys_set_mempolicy */
.quad sys_mq_open
.quad sys_mq_unlink
.quad sys_mq_timedsend
.quad sys_mq_timedreceive /* 435 */
.quad sys_mq_notify
.quad sys_mq_getsetattr
.quad sys_waitid
.quad sys_add_key
.quad sys_request_key /* 440 */
.quad sys_keyctl
.quad sys_ioprio_set
.quad sys_ioprio_get
.quad sys_inotify_init
.quad sys_inotify_add_watch /* 445 */
.quad sys_inotify_rm_watch
.quad sys_fdatasync
.quad sys_kexec_load
.quad sys_migrate_pages
.quad sys_openat /* 450 */
.quad sys_mkdirat
.quad sys_mknodat
.quad sys_fchownat
.quad sys_futimesat
.quad sys_fstatat64 /* 455 */
.quad sys_unlinkat
.quad sys_renameat
.quad sys_linkat
.quad sys_symlinkat
.quad sys_readlinkat /* 460 */
.quad sys_fchmodat
.quad sys_faccessat
.quad sys_pselect6
.quad sys_ppoll
.quad sys_unshare /* 465 */
.quad sys_set_robust_list
.quad sys_get_robust_list
.quad sys_splice
.quad sys_sync_file_range
.quad sys_tee /* 470 */
.quad sys_vmsplice
.quad sys_move_pages
.quad sys_getcpu
.quad sys_epoll_pwait
.quad sys_utimensat /* 475 */
.quad sys_signalfd
.quad sys_ni_syscall /* sys_timerfd */
.quad sys_eventfd
.quad sys_recvmmsg
.quad sys_fallocate /* 480 */
.quad sys_timerfd_create
.quad sys_timerfd_settime
.quad sys_timerfd_gettime
.quad sys_signalfd4
.quad sys_eventfd2 /* 485 */
.quad sys_epoll_create1
.quad sys_dup3
.quad sys_pipe2
.quad sys_inotify_init1
.quad sys_preadv /* 490 */
.quad sys_pwritev
.quad sys_rt_tgsigqueueinfo
.quad sys_perf_event_open
.quad sys_fanotify_init
.quad sys_fanotify_mark /* 495 */
.quad sys_prlimit64
.quad sys_name_to_handle_at
.quad sys_open_by_handle_at
.quad sys_clock_adjtime
.quad sys_syncfs /* 500 */
.quad sys_setns
.quad sys_accept4
.quad sys_sendmmsg
.quad sys_process_vm_readv
.quad sys_process_vm_writev /* 505 */
.quad sys_kcmp
.quad sys_finit_module
.quad sys_sched_setattr
.quad sys_sched_getattr
.quad sys_renameat2 /* 510 */
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
/* Remember to update everything, kids. */
.ifne (. - sys_call_table) - (NR_SYSCALLS * 8)
.err
.endif
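
For orientation, a minimal userspace sketch of driving one of the slots above by raw number (an illustration only, assuming an alpha-linux toolchain; slot 378 maps to sys_gettid in the table above):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long tid = syscall(378);	/* 378 = sys_gettid in the Alpha table above */
	printf("gettid via slot 378: %ld\n", tid);
	return 0;
}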

465
arch/alpha/kernel/time.c Normal file
View file

@ -0,0 +1,465 @@
/*
* linux/arch/alpha/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds
*
* This file contains the clocksource time handling.
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
* 1997-01-09 Adrian Sun
* use interval timer if CONFIG_RTC=y
* 1997-10-29 John Bowman (bowman@math.ualberta.ca)
* fixed tick loss calculation in timer_interrupt
* (round system clock to nearest tick instead of truncating)
* fixed algorithm in time_init for getting time from CMOS clock
* 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net)
* fixed algorithm in do_gettimeofday() for calculating the precise time
* from processor cycle counter (now taking lost_ticks into account)
* 2003-06-03 R. Scott Bailey <scott.bailey@eds.com>
* Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include "proto.h"
#include "irq_impl.h"
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
unsigned long est_cycle_freq;
#ifdef CONFIG_IRQ_WORK
DEFINE_PER_CPU(u8, irq_work_pending);
#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
void arch_irq_work_raise(void)
{
set_irq_work_pending_flag();
}
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
#define clear_irq_work_pending()
#endif /* CONFIG_IRQ_WORK */
static inline __u32 rpcc(void)
{
return __builtin_alpha_rpcc();
}
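/*
 * Illustrative sketch, not part of the original file: timing a workload
 * in cycles with rpcc().  The counter is only 32 bits wide, so the
 * unsigned subtraction keeps wraparound harmless for intervals shorter
 * than 2^32 cycles.
 */
static inline __u32 rpcc_delta_demo(void (*workload)(void))
{
	__u32 t0 = rpcc();	/* snapshot the process cycle counter */
	workload();		/* hypothetical function under test */
	return rpcc() - t0;	/* elapsed cycles, modulo 2^32 */
}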
/*
* The RTC as a clock_event_device primitive.
*/
static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
int cpu = smp_processor_id();
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
/* Don't run the hook for UNUSED or SHUTDOWN. */
if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
ce->event_handler(ce);
if (test_irq_work_pending()) {
clear_irq_work_pending();
irq_work_run();
}
return IRQ_HANDLED;
}
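/*
 * Annotation, not original source: arch_irq_work_raise() above merely sets
 * a per-CPU flag, which this handler polls on the next RTC tick; pending
 * irq_work therefore runs within about one tick (1/CONFIG_HZ seconds).
 */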
static void
rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
{
/* The mode member of CE is updated in generic code.
Since we only support periodic events, nothing to do. */
}
static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
/* This hook is for oneshot mode, which we don't support. */
return -EINVAL;
}
static void __init
init_rtc_clockevent(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
*ce = (struct clock_event_device){
.name = "rtc",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 100,
.cpumask = cpumask_of(cpu),
.set_mode = rtc_ce_set_mode,
.set_next_event = rtc_ce_set_next_event,
};
clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}
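/*
 * Annotation, not original source: min_delta/max_delta are 0 because the
 * device advertises only CLOCK_EVT_FEAT_PERIODIC; the core drives it at
 * CONFIG_HZ and the -EINVAL set_next_event stub above is never exercised.
 */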
/*
* The QEMU clock as a clocksource primitive.
*/
static cycle_t
qemu_cs_read(struct clocksource *cs)
{
return qemu_get_vmtime();
}
static struct clocksource qemu_cs = {
.name = "qemu",
.rating = 400,
.read = qemu_cs_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.max_idle_ns = LONG_MAX
};
/*
* The QEMU alarm as a clock_event_device primitive.
*/
static void
qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
{
/* The mode member of CE is updated for us in generic code.
Just make sure that the event is disabled. */
qemu_set_alarm_abs(0);
}
static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
qemu_set_alarm_rel(evt);
return 0;
}
static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
int cpu = smp_processor_id();
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
ce->event_handler(ce);
return IRQ_HANDLED;
}
static void __init
init_qemu_clockevent(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
*ce = (struct clock_event_device){
.name = "qemu",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 400,
.cpumask = cpumask_of(cpu),
.set_mode = qemu_ce_set_mode,
.set_next_event = qemu_ce_set_next_event,
};
clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}
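/*
 * Annotation, not original source: registering with a "frequency" of
 * NSEC_PER_SEC makes one event-device tick equal one nanosecond, which is
 * why qemu_ce_set_next_event() can hand evt straight to qemu_set_alarm_rel();
 * the 1000 and LONG_MAX arguments bound the programmable delta to roughly
 * 1 us through ~292 years.
 */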
void __init
common_init_rtc(void)
{
unsigned char x, sel = 0;
/* Reset periodic interrupt frequency. */
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
/* Test includes known working values on various platforms
where 0x26 is wrong; we refuse to change those. */
if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
sel = RTC_REF_CLCK_32KHZ + 6;
}
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
if (sel) {
printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
CONFIG_HZ, sel);
CMOS_WRITE(sel, RTC_FREQ_SELECT);
}
/* Turn on periodic interrupts. */
x = CMOS_READ(RTC_CONTROL);
if (!(x & RTC_PIE)) {
printk("Turning on RTC interrupts.\n");
x |= RTC_PIE;
x &= ~(RTC_AIE | RTC_UIE);
CMOS_WRITE(x, RTC_CONTROL);
}
(void) CMOS_READ(RTC_INTR_FLAGS);
outb(0x36, 0x43); /* pit counter 0: system timer */
outb(0x00, 0x40);
outb(0x00, 0x40);
outb(0xb6, 0x43); /* pit counter 2: speaker */
outb(0x31, 0x42);
outb(0x13, 0x42);
init_rtc_irq();
}
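/*
 * Annotation, not original source: the outb sequence above programs 8254
 * counter 0 in mode 3 with divisor 0x0000 (= 65536, ~18.2 Hz) and counter 2
 * in mode 3 with divisor 0x1331 (= 4913, ~243 Hz speaker tone); on Alpha
 * the RTC, not the PIT, drives the actual system tick.
 */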
#ifndef CONFIG_ALPHA_WTINT
/*
* The RPCC as a clocksource primitive.
*
* While we have free-running timecounters running on all CPUs, and we make
* a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
* with the wall clock, that initialization isn't kept up-to-date across
* different time counters in SMP mode. Therefore we can only use this
* method when there's only one CPU enabled.
*
* When using the WTINT PALcall, the RPCC may shift to a lower frequency,
* or stop altogether, while waiting for the interrupt. Therefore we cannot
* use this method when WTINT is in use.
*/
static cycle_t read_rpcc(struct clocksource *cs)
{
return rpcc();
}
static struct clocksource clocksource_rpcc = {
.name = "rpcc",
.rating = 300,
.read = read_rpcc,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* ALPHA_WTINT */
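/*
 * For reference, a sketch (not code from this file) of what the clocksource
 * core does with the registered rpcc: raw 32-bit deltas become nanoseconds
 * via a fixed-point multiply, with mult/shift derived from the cycle_freq
 * passed to clocksource_register_hz() in time_init() below.
 */
static inline u64 rpcc_cyc2ns_demo(u32 now, u32 last, u32 mult, u32 shift)
{
	return ((u64)(now - last) * mult) >> shift;
}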
/* Validate a computed cycle counter result against the known bounds for
the given processor core. There's too much brokenness in the way of
timing hardware for any one method to work everywhere. :-(
Return 0 if the result cannot be trusted, otherwise return the argument. */
static unsigned long __init
validate_cc_value(unsigned long cc)
{
static struct bounds {
unsigned int min, max;
} cpu_hz[] __initdata = {
[EV3_CPU] = { 50000000, 200000000 }, /* guess */
[EV4_CPU] = { 100000000, 300000000 },
[LCA4_CPU] = { 100000000, 300000000 }, /* guess */
[EV45_CPU] = { 200000000, 300000000 },
[EV5_CPU] = { 250000000, 433000000 },
[EV56_CPU] = { 333000000, 667000000 },
[PCA56_CPU] = { 400000000, 600000000 }, /* guess */
[PCA57_CPU] = { 500000000, 600000000 }, /* guess */
[EV6_CPU] = { 466000000, 600000000 },
[EV67_CPU] = { 600000000, 750000000 },
[EV68AL_CPU] = { 750000000, 940000000 },
[EV68CB_CPU] = { 1000000000, 1333333333 },
/* None of the following are shipping as of 2001-11-01. */
[EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */
[EV69_CPU] = { 1000000000, 1700000000 }, /* guess */
[EV7_CPU] = { 800000000, 1400000000 }, /* guess */
[EV79_CPU] = { 1000000000, 2000000000 }, /* guess */
};
/* Allow for some drift in the crystal. 10MHz is more than enough. */
const unsigned int deviation = 10000000;
struct percpu_struct *cpu;
unsigned int index;
cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
index = cpu->type & 0xffffffff;
/* If index out of bounds, no way to validate. */
if (index >= ARRAY_SIZE(cpu_hz))
return cc;
/* If index contains no data, no way to validate. */
if (cpu_hz[index].max == 0)
return cc;
if (cc < cpu_hz[index].min - deviation
|| cc > cpu_hz[index].max + deviation)
return 0;
return cc;
}
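/*
 * Worked example, not original source: on an EV56 the table allows
 * 333-667 MHz.  With the 10 MHz deviation allowance, a measured 330 MHz
 * passes (>= 323 MHz), while 300 MHz returns 0 and calibration falls
 * through to the next method.
 */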
/*
* Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
* arch/i386/time.c.
*/
#define CALIBRATE_LATCH 0xffff
#define TIMEOUT_COUNT 0x100000
static unsigned long __init
calibrate_cc_with_pit(void)
{
int cc, count = 0;
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
/*
* Now let's take care of CTC channel 2
*
* Set the Gate high, program CTC channel 2 for mode 0,
* (interrupt on terminal count mode), binary count,
* load 5 * LATCH count, (LSB and MSB) to begin countdown.
*/
outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
cc = rpcc();
do {
count++;
} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
cc = rpcc() - cc;
/* Error: ECTCNEVERSET or ECPUTOOFAST. */
if (count <= 1 || count == TIMEOUT_COUNT)
return 0;
return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}
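/*
 * Worked example, not original source, assuming the standard 1193182 Hz
 * PIT clock: the latch is 0xffff, so channel 2 counts 0x10000 ticks before
 * OUT rises, a gate time of 65536 / 1193182 ~= 54.9 ms.  If rpcc advanced
 * by cc cycles in that window, cc * PIT_TICK_RATE / (CALIBRATE_LATCH + 1)
 * recovers the CPU frequency; e.g. cc = 27500000 yields ~500 MHz.
 */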
/* The Linux interpretation of the CMOS clock register contents:
When the Update-In-Progress (UIP) flag goes from 1 to 0, the
RTC registers show the second which has precisely just started.
Let's hope other operating systems interpret the RTC the same way. */
static unsigned long __init
rpcc_after_update_in_progress(void)
{
do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
return rpcc();
}
void __init
time_init(void)
{
unsigned int cc1, cc2;
unsigned long cycle_freq, tolerance;
long diff;
if (alpha_using_qemu) {
clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
init_qemu_clockevent();
timer_irqaction.handler = qemu_timer_interrupt;
init_rtc_irq();
return;
}
/* Calibrate CPU clock -- attempt #1. */
if (!est_cycle_freq)
est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
cc1 = rpcc();
/* Calibrate CPU clock -- attempt #2. */
if (!est_cycle_freq) {
cc1 = rpcc_after_update_in_progress();
cc2 = rpcc_after_update_in_progress();
est_cycle_freq = validate_cc_value(cc2 - cc1);
cc1 = cc2;
}
cycle_freq = hwrpb->cycle_freq;
if (est_cycle_freq) {
/* If the given value is within 250 PPM of what we calculated,
accept it. Otherwise, use what we found. */
tolerance = cycle_freq / 4000;
diff = cycle_freq - est_cycle_freq;
if (diff < 0)
diff = -diff;
if ((unsigned long)diff > tolerance) {
cycle_freq = est_cycle_freq;
printk("HWRPB cycle frequency bogus. "
"Estimated %lu Hz\n", cycle_freq);
} else {
est_cycle_freq = 0;
}
} else if (! validate_cc_value (cycle_freq)) {
printk("HWRPB cycle frequency bogus, "
"and unable to estimate a proper value!\n");
}
/* See above for restrictions on using clocksource_rpcc. */
#ifndef CONFIG_ALPHA_WTINT
if (hwrpb->nr_processors == 1)
clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif
/* Startup the timer source. */
alpha_mv.init_rtc();
init_rtc_clockevent();
}
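/*
 * Worked example, not original source: 250 PPM is 1/4000, so for a
 * HWRPB-reported cycle_freq of 500 MHz the tolerance above is
 * 500000000 / 4000 = 125000 Hz; an estimate more than 125 kHz away
 * replaces the HWRPB value.
 */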
/* Initialize the clock_event_device for secondary cpus. */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
if (alpha_using_qemu)
init_qemu_clockevent();
else
init_rtc_clockevent();
}
#endif

1093
arch/alpha/kernel/traps.c Normal file
File diff suppressed because it is too large

77
arch/alpha/kernel/vmlinux.lds.S Normal file
View file

@ -0,0 +1,77 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/setup.h>
OUTPUT_FORMAT("elf64-alpha")
OUTPUT_ARCH(alpha)
ENTRY(__start)
PHDRS { kernel PT_LOAD; note PT_NOTE; }
jiffies = jiffies_64;
SECTIONS
{
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
. = 0xfffffc0000310000;
#else
. = 0xfffffc0001010000;
#endif
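	/* Annotation, not original source: both candidate start addresses sit
	   in Alpha's KSEG direct-mapped region (0xfffffc0000000000 plus the
	   physical load offset); the legacy, lower address is kept for older
	   boot environments that cannot load the kernel at the higher one. */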
_text = .; /* Text and read-only data */
.text : {
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
} :kernel
swapper_pg_dir = SWAPPER_PGD;
_etext = .; /* End of text section */
NOTES :kernel :note
.dummy : {
*(.dummy)
} :kernel
RODATA
EXCEPTION_TABLE(16)
/* Will be freed after init */
__init_begin = ALIGN(PAGE_SIZE);
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
PERCPU_SECTION(L1_CACHE_BYTES)
/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
needed for the THREAD_SIZE aligned init_task gets freed after init */
. = ALIGN(THREAD_SIZE);
__init_end = .;
/* Freed after init ends here */
_sdata = .; /* Start of rw data section */
_data = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.got : {
*(.got)
}
.sdata : {
*(.sdata)
}
_edata = .; /* End of data section */
BSS_SECTION(0, 0, 0)
_end = .;
.mdebug 0 : {
*(.mdebug)
}
.note 0 : {
*(.note)
}
STABS_DEBUG
DWARF_DEBUG
DISCARDS
}