Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,21 @@
# MIPS headers
generic-y += cputime.h
generic-y += current.h
generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mutex.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += user.h
generic-y += xor.h

View file

@ -0,0 +1,25 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005, 06 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2005 MIPS Technologies, Inc.
*/
#ifndef _ASM_ABI_H
#define _ASM_ABI_H
#include <asm/signal.h>
#include <asm/siginfo.h>
struct mips_abi {
int (* const setup_frame)(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set);
const unsigned long signal_return_offset;
int (* const setup_rt_frame)(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set);
const unsigned long rt_signal_return_offset;
const unsigned long restart;
};
#endif /* _ASM_ABI_H */

View file

@ -0,0 +1,154 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 99 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
*/
#ifndef _ASM_ADDRSPACE_H
#define _ASM_ADDRSPACE_H
#include <spaces.h>
/*
* Configure language
*/
#ifdef __ASSEMBLY__
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ __s64
#ifdef CONFIG_64BIT
#define _CONST64_(x) x ## L
#else
#define _CONST64_(x) x ## LL
#endif
#endif
/*
* 32-bit MIPS address spaces
*/
#ifdef __ASSEMBLY__
#define _ACAST32_
#define _ACAST64_
#else
#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */
#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */
#endif
/*
* Returns the kernel segment base of a given address
*/
#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
/*
* Returns the physical address of a CKSEGx / XKPHYS address
*/
#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define XPHYSADDR(a) ((_ACAST64_(a)) & \
_CONST64_(0x0000ffffffffffff))
#ifdef CONFIG_64BIT
/*
* Memory segments (64bit kernel mode addresses)
* The compatibility segments use the full 64-bit sign extended value. Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _CONST64_(0x0000000000000000)
#define XKSSEG _CONST64_(0x4000000000000000)
#define XKPHYS _CONST64_(0x8000000000000000)
#define XKSEG _CONST64_(0xc000000000000000)
#define CKSEG0 _CONST64_(0xffffffff80000000)
#define CKSEG1 _CONST64_(0xffffffffa0000000)
#define CKSSEG _CONST64_(0xffffffffc0000000)
#define CKSEG3 _CONST64_(0xffffffffe0000000)
#define CKSEG0ADDR(a) (CPHYSADDR(a) | CKSEG0)
#define CKSEG1ADDR(a) (CPHYSADDR(a) | CKSEG1)
#define CKSEG2ADDR(a) (CPHYSADDR(a) | CKSEG2)
#define CKSEG3ADDR(a) (CPHYSADDR(a) | CKSEG3)
#else
#define CKSEG0ADDR(a) (CPHYSADDR(a) | KSEG0)
#define CKSEG1ADDR(a) (CPHYSADDR(a) | KSEG1)
#define CKSEG2ADDR(a) (CPHYSADDR(a) | KSEG2)
#define CKSEG3ADDR(a) (CPHYSADDR(a) | KSEG3)
/*
* Map an address to a certain kernel segment
*/
#define KSEG0ADDR(a) (CPHYSADDR(a) | KSEG0)
#define KSEG1ADDR(a) (CPHYSADDR(a) | KSEG1)
#define KSEG2ADDR(a) (CPHYSADDR(a) | KSEG2)
#define KSEG3ADDR(a) (CPHYSADDR(a) | KSEG3)
/*
* Memory segments (32bit kernel mode addresses)
* These are the traditional names used in the 32-bit universe.
*/
#define KUSEG 0x00000000
#define KSEG0 0x80000000
#define KSEG1 0xa0000000
#define KSEG2 0xc0000000
#define KSEG3 0xe0000000
#define CKUSEG 0x00000000
#define CKSEG0 0x80000000
#define CKSEG1 0xa0000000
#define CKSEG2 0xc0000000
#define CKSEG3 0xe0000000
#endif
/*
* Cache modes for XKPHYS address conversion macros
*/
#define K_CALG_COH_EXCL1_NOL2 0
#define K_CALG_COH_SHRL1_NOL2 1
#define K_CALG_UNCACHED 2
#define K_CALG_NONCOHERENT 3
#define K_CALG_COH_EXCL 4
#define K_CALG_COH_SHAREABLE 5
#define K_CALG_NOTUSED 6
#define K_CALG_UNCACHED_ACCEL 7
/*
* 64-bit address conversions
*/
#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \
(_CONST64_(cm) << 59) | (a))
/*
* The ultimate limit of the 64-bit MIPS architecture: 2 bits for selecting
* the region, 3 bits for the CCA mode. This leaves 59 bits of which the
* R8000 implements most with its 48-bit physical address space.
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^59 - 1 */
#ifndef CONFIG_CPU_R8000
/*
* The R8000 doesn't have the 32-bit compat spaces so we don't define them
* in order to catch bugs in the source code.
*/
#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000)
#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
#endif
#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK)
#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE)
#endif /* _ASM_ADDRSPACE_H */
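The conversion macros above are easiest to see with a concrete value. The sketch below is not part of this commit; the helper name and the 0x1fc00000 physical address are made up for illustration. It maps an example physical address into the cached and uncached unmapped windows and then recovers it with CPHYSADDR():

#include <linux/kernel.h>
#include <asm/addrspace.h>

/* Hypothetical example, not from this commit. */
static unsigned long example_phys_roundtrip(void)
{
	unsigned long phys = 0x1fc00000;		/* example physical address */
	unsigned long cached   = CKSEG0ADDR(phys);	/* cached, unmapped window */
	unsigned long uncached = CKSEG1ADDR(phys);	/* uncached, unmapped window */

	pr_info("phys %lx -> CKSEG0 %lx, CKSEG1 %lx\n", phys, cached, uncached);

	/* CPHYSADDR() masks the segment bits off and recovers phys. */
	return CPHYSADDR(cached);
}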

View file

@ -0,0 +1,12 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Imagination Technologies Ltd.
*
* Arbitrary Monitor Support (AMON)
*/
int amon_cpu_avail(int cpu);
int amon_cpu_start(int cpu, unsigned long pc, unsigned long sp,
unsigned long gp, unsigned long a0);
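As a rough illustration of how these hooks are meant to be used (not taken from this commit; the function name, the choice of a0 and the error code are assumptions), a platform's SMP bring-up path could release a secondary core like this:

#include <linux/errno.h>

/* Hypothetical sketch: release secondary CPU 'cpu' at entry point 'pc'. */
static int example_amon_boot_secondary(int cpu, unsigned long pc,
				       unsigned long sp, unsigned long gp)
{
	if (!amon_cpu_avail(cpu))	/* core not held by the monitor */
		return -ENODEV;

	/* Passing the CPU number in a0 is just this sketch's convention. */
	return amon_cpu_start(cpu, pc, sp, gp, cpu);
}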

View file

@ -0,0 +1,38 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#ifndef _ASM_ARCH_HWEIGHT_H
#define _ASM_ARCH_HWEIGHT_H
#ifdef ARCH_HAS_USABLE_BUILTIN_POPCOUNT
#include <asm/types.h>
static inline unsigned int __arch_hweight32(unsigned int w)
{
return __builtin_popcount(w);
}
static inline unsigned int __arch_hweight16(unsigned int w)
{
return __builtin_popcount(w & 0xffff);
}
static inline unsigned int __arch_hweight8(unsigned int w)
{
return __builtin_popcount(w & 0xff);
}
static inline unsigned long __arch_hweight64(__u64 w)
{
return __builtin_popcountll(w);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif
#endif /* _ASM_ARCH_HWEIGHT_H */
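Kernel code does not normally call __arch_hweight32() directly; it uses the generic hweight*() helpers from <linux/bitops.h>, which end up in the builtin-based versions above when ARCH_HAS_USABLE_BUILTIN_POPCOUNT is defined. A minimal, hypothetical sketch (the function name and status-word example are made up):

#include <linux/types.h>
#include <linux/bitops.h>

/* Hypothetical example: count asserted bits in an interrupt status word. */
static unsigned int example_count_pending(u32 status)
{
	return hweight32(status);	/* e.g. hweight32(0xf0) == 4 */
}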

View file

@ -0,0 +1,184 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014 Imagination Technologies Ltd.
*
*/
#ifndef __ASM_ASM_EVA_H
#define __ASM_ASM_EVA_H
#ifndef __ASSEMBLY__
/* Kernel variants */
#define kernel_cache(op, base) "cache " op ", " base "\n"
#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
#ifdef CONFIG_32BIT
/*
* No 'sd' or 'ld' instructions on 32-bit kernels, but the code will
* do the correct thing
*/
#define kernel_sd(reg, addr) user_sw(reg, addr)
#define kernel_ld(reg, addr) user_lw(reg, addr)
#else
#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
#endif /* CONFIG_32BIT */
#ifdef CONFIG_EVA
#define __BUILD_EVA_INSN(insn, reg, addr) \
" .set push\n" \
" .set mips0\n" \
" .set eva\n" \
" "insn" "reg", "addr "\n" \
" .set pop\n"
#define user_cache(op, base) __BUILD_EVA_INSN("cachee", op, base)
#define user_ll(reg, addr) __BUILD_EVA_INSN("lle", reg, addr)
#define user_sc(reg, addr) __BUILD_EVA_INSN("sce", reg, addr)
#define user_lw(reg, addr) __BUILD_EVA_INSN("lwe", reg, addr)
#define user_lwl(reg, addr) __BUILD_EVA_INSN("lwle", reg, addr)
#define user_lwr(reg, addr) __BUILD_EVA_INSN("lwre", reg, addr)
#define user_lh(reg, addr) __BUILD_EVA_INSN("lhe", reg, addr)
#define user_lb(reg, addr) __BUILD_EVA_INSN("lbe", reg, addr)
#define user_lbu(reg, addr) __BUILD_EVA_INSN("lbue", reg, addr)
/* No 64-bit EVA instruction for loading double words */
#define user_ld(reg, addr) user_lw(reg, addr)
#define user_sw(reg, addr) __BUILD_EVA_INSN("swe", reg, addr)
#define user_swl(reg, addr) __BUILD_EVA_INSN("swle", reg, addr)
#define user_swr(reg, addr) __BUILD_EVA_INSN("swre", reg, addr)
#define user_sh(reg, addr) __BUILD_EVA_INSN("she", reg, addr)
#define user_sb(reg, addr) __BUILD_EVA_INSN("sbe", reg, addr)
/* No 64-bit EVA instruction for storing double words */
#define user_sd(reg, addr) user_sw(reg, addr)
#else
#define user_cache(op, base) kernel_cache(op, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
#define user_lwl(reg, addr) kernel_lwl(reg, addr)
#define user_lwr(reg, addr) kernel_lwr(reg, addr)
#define user_lh(reg, addr) kernel_lh(reg, addr)
#define user_lb(reg, addr) kernel_lb(reg, addr)
#define user_lbu(reg, addr) kernel_lbu(reg, addr)
#define user_sw(reg, addr) kernel_sw(reg, addr)
#define user_swl(reg, addr) kernel_swl(reg, addr)
#define user_swr(reg, addr) kernel_swr(reg, addr)
#define user_sh(reg, addr) kernel_sh(reg, addr)
#define user_sb(reg, addr) kernel_sb(reg, addr)
#ifdef CONFIG_32BIT
#define user_sd(reg, addr) kernel_sw(reg, addr)
#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
#define user_sd(reg, addr) kernel_sd(reg, addr)
#define user_ld(reg, addr) kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */
#endif /* CONFIG_EVA */
#else /* __ASSEMBLY__ */
#define kernel_cache(op, base) cache op, base
#define kernel_ll(reg, addr) ll reg, addr
#define kernel_sc(reg, addr) sc reg, addr
#define kernel_lw(reg, addr) lw reg, addr
#define kernel_lwl(reg, addr) lwl reg, addr
#define kernel_lwr(reg, addr) lwr reg, addr
#define kernel_lh(reg, addr) lh reg, addr
#define kernel_lb(reg, addr) lb reg, addr
#define kernel_lbu(reg, addr) lbu reg, addr
#define kernel_sw(reg, addr) sw reg, addr
#define kernel_swl(reg, addr) swl reg, addr
#define kernel_swr(reg, addr) swr reg, addr
#define kernel_sh(reg, addr) sh reg, addr
#define kernel_sb(reg, addr) sb reg, addr
#ifdef CONFIG_32BIT
/*
* No 'sd' or 'ld' instructions on 32-bit kernels, but the code will
* do the correct thing
*/
#define kernel_sd(reg, addr) user_sw(reg, addr)
#define kernel_ld(reg, addr) user_lw(reg, addr)
#else
#define kernel_sd(reg, addr) sd reg, addr
#define kernel_ld(reg, addr) ld reg, addr
#endif /* CONFIG_32BIT */
#ifdef CONFIG_EVA
#define __BUILD_EVA_INSN(insn, reg, addr) \
.set push; \
.set mips0; \
.set eva; \
insn reg, addr; \
.set pop;
#define user_cache(op, base) __BUILD_EVA_INSN(cachee, op, base)
#define user_ll(reg, addr) __BUILD_EVA_INSN(lle, reg, addr)
#define user_sc(reg, addr) __BUILD_EVA_INSN(sce, reg, addr)
#define user_lw(reg, addr) __BUILD_EVA_INSN(lwe, reg, addr)
#define user_lwl(reg, addr) __BUILD_EVA_INSN(lwle, reg, addr)
#define user_lwr(reg, addr) __BUILD_EVA_INSN(lwre, reg, addr)
#define user_lh(reg, addr) __BUILD_EVA_INSN(lhe, reg, addr)
#define user_lb(reg, addr) __BUILD_EVA_INSN(lbe, reg, addr)
#define user_lbu(reg, addr) __BUILD_EVA_INSN(lbue, reg, addr)
/* No 64-bit EVA instruction for loading double words */
#define user_ld(reg, addr) user_lw(reg, addr)
#define user_sw(reg, addr) __BUILD_EVA_INSN(swe, reg, addr)
#define user_swl(reg, addr) __BUILD_EVA_INSN(swle, reg, addr)
#define user_swr(reg, addr) __BUILD_EVA_INSN(swre, reg, addr)
#define user_sh(reg, addr) __BUILD_EVA_INSN(she, reg, addr)
#define user_sb(reg, addr) __BUILD_EVA_INSN(sbe, reg, addr)
/* No 64-bit EVA instruction for storing double words */
#define user_sd(reg, addr) user_sw(reg, addr)
#else
#define user_cache(op, base) kernel_cache(op, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
#define user_lwl(reg, addr) kernel_lwl(reg, addr)
#define user_lwr(reg, addr) kernel_lwr(reg, addr)
#define user_lh(reg, addr) kernel_lh(reg, addr)
#define user_lb(reg, addr) kernel_lb(reg, addr)
#define user_lbu(reg, addr) kernel_lbu(reg, addr)
#define user_sw(reg, addr) kernel_sw(reg, addr)
#define user_swl(reg, addr) kernel_swl(reg, addr)
#define user_swr(reg, addr) kernel_swr(reg, addr)
#define user_sh(reg, addr) kernel_sh(reg, addr)
#define user_sb(reg, addr) kernel_sb(reg, addr)
#ifdef CONFIG_32BIT
#define user_sd(reg, addr) kernel_sw(reg, addr)
#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
#define user_sd(reg, addr) kernel_sd(reg, addr)
#define user_ld(reg, addr) kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */
#endif /* CONFIG_EVA */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ASM_EVA_H */
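The C-string variants above are meant to be pasted into inline assembly so the same accessor reads user memory with the EVA instruction (e.g. lwe) when CONFIG_EVA is set and falls back to the normal load otherwise. The sketch below is deliberately simplified and hypothetical: unlike the real uaccess helpers it sets up no exception-table entry for faulting addresses, and the function name is invented.

#include <asm/asm-eva.h>

/* Simplified illustration only: no fault handling is wired up here. */
static inline unsigned int example_load_user_word(const unsigned int *addr)
{
	unsigned int val;

	__asm__ __volatile__(
		user_lw("%0", "%1")		/* expands to "lw" or "lwe" */
		: "=r" (val)
		: "m" (*addr));

	return val;
}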

View file

@ -0,0 +1 @@
#include <generated/asm-offsets.h>

arch/mips/include/asm/asm.h
View file

@ -0,0 +1,420 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
* Copyright (C) 1999 by Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2002 Maciej W. Rozycki
*
* Some useful macros for MIPS assembler code
*
* Some of the routines below contain useless nops that will be optimized
* away by gas in -O mode. These nops are however required to fill delay
* slots in noreorder mode.
*/
#ifndef __ASM_ASM_H
#define __ASM_ASM_H
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
#ifndef CAT
#ifdef __STDC__
#define __CAT(str1, str2) str1##str2
#else
#define __CAT(str1, str2) str1/**/str2
#endif
#define CAT(str1, str2) __CAT(str1, str2)
#endif
/*
* PIC specific declarations
* Not used by the kernel, but this seems to be the right place for it.
*/
#ifdef __PIC__
#define CPRESTORE(register) \
.cprestore register
#define CPADD(register) \
.cpadd register
#define CPLOAD(register) \
.cpload register
#else
#define CPRESTORE(register)
#define CPADD(register)
#define CPLOAD(register)
#endif
/*
* LEAF - declare leaf routine
*/
#define LEAF(symbol) \
.globl symbol; \
.align 2; \
.type symbol, @function; \
.ent symbol, 0; \
symbol: .frame sp, 0, ra
/*
* NESTED - declare nested routine entry point
*/
#define NESTED(symbol, framesize, rpc) \
.globl symbol; \
.align 2; \
.type symbol, @function; \
.ent symbol, 0; \
symbol: .frame sp, framesize, rpc
/*
* END - mark end of function
*/
#define END(function) \
.end function; \
.size function, .-function
/*
* EXPORT - export definition of symbol
*/
#define EXPORT(symbol) \
.globl symbol; \
symbol:
/*
* FEXPORT - export definition of a function symbol
*/
#define FEXPORT(symbol) \
.globl symbol; \
.type symbol, @function; \
symbol:
/*
* ABS - export absolute symbol
*/
#define ABS(symbol,value) \
.globl symbol; \
symbol = value
#define PANIC(msg) \
.set push; \
.set reorder; \
PTR_LA a0, 8f; \
jal panic; \
9: b 9b; \
.set pop; \
TEXT(msg)
/*
* Print formatted string
*/
#ifdef CONFIG_PRINTK
#define PRINT(string) \
.set push; \
.set reorder; \
PTR_LA a0, 8f; \
jal printk; \
.set pop; \
TEXT(string)
#else
#define PRINT(string)
#endif
#define TEXT(msg) \
.pushsection .data; \
8: .asciiz msg; \
.popsection;
/*
* Build text tables
*/
#define TTABLE(string) \
.pushsection .text; \
.word 1f; \
.popsection \
.pushsection .data; \
1: .asciiz string; \
.popsection
/*
* MIPS IV pref instruction.
* Use with .set noreorder only!
*
* MIPS IV implementations are free to treat this as a nop. The R5000
* is one of them. So we should have an option not to use this instruction.
*/
#ifdef CONFIG_CPU_HAS_PREFETCH
#define PREF(hint,addr) \
.set push; \
.set arch=r5000; \
pref hint, addr; \
.set pop
#define PREFE(hint, addr) \
.set push; \
.set mips0; \
.set eva; \
prefe hint, addr; \
.set pop
#define PREFX(hint,addr) \
.set push; \
.set arch=r5000; \
prefx hint, addr; \
.set pop
#else /* !CONFIG_CPU_HAS_PREFETCH */
#define PREF(hint, addr)
#define PREFE(hint, addr)
#define PREFX(hint, addr)
#endif /* !CONFIG_CPU_HAS_PREFETCH */
/*
* MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
*/
#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
#define MOVN(rd, rs, rt) \
.set push; \
.set reorder; \
beqz rt, 9f; \
move rd, rs; \
.set pop; \
9:
#define MOVZ(rd, rs, rt) \
.set push; \
.set reorder; \
bnez rt, 9f; \
move rd, rs; \
.set pop; \
9:
#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
#define MOVN(rd, rs, rt) \
.set push; \
.set noreorder; \
bnezl rt, 9f; \
move rd, rs; \
.set pop; \
9:
#define MOVZ(rd, rs, rt) \
.set push; \
.set noreorder; \
beqzl rt, 9f; \
move rd, rs; \
.set pop; \
9:
#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
(_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define MOVN(rd, rs, rt) \
movn rd, rs, rt
#define MOVZ(rd, rs, rt) \
movz rd, rs, rt
#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
/*
* Stack alignment
*/
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
#define ALSZ 7
#define ALMASK ~7
#endif
#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
#define ALSZ 15
#define ALMASK ~15
#endif
/*
* Macros to handle different pointer/register sizes for 32/64-bit code
*/
/*
* Size of a register
*/
#ifdef __mips64
#define SZREG 8
#else
#define SZREG 4
#endif
/*
* Use the following macros in assembler code to load/store registers,
* pointers etc.
*/
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
#define REG_S sw
#define REG_L lw
#define REG_SUBU subu
#define REG_ADDU addu
#endif
#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
#define REG_S sd
#define REG_L ld
#define REG_SUBU dsubu
#define REG_ADDU daddu
#endif
/*
* How to add/sub/load/store/shift C int variables.
*/
#if (_MIPS_SZINT == 32)
#define INT_ADD add
#define INT_ADDU addu
#define INT_ADDI addi
#define INT_ADDIU addiu
#define INT_SUB sub
#define INT_SUBU subu
#define INT_L lw
#define INT_S sw
#define INT_SLL sll
#define INT_SLLV sllv
#define INT_SRL srl
#define INT_SRLV srlv
#define INT_SRA sra
#define INT_SRAV srav
#endif
#if (_MIPS_SZINT == 64)
#define INT_ADD dadd
#define INT_ADDU daddu
#define INT_ADDI daddi
#define INT_ADDIU daddiu
#define INT_SUB dsub
#define INT_SUBU dsubu
#define INT_L ld
#define INT_S sd
#define INT_SLL dsll
#define INT_SLLV dsllv
#define INT_SRL dsrl
#define INT_SRLV dsrlv
#define INT_SRA dsra
#define INT_SRAV dsrav
#endif
/*
* How to add/sub/load/store/shift C long variables.
*/
#if (_MIPS_SZLONG == 32)
#define LONG_ADD add
#define LONG_ADDU addu
#define LONG_ADDI addi
#define LONG_ADDIU addiu
#define LONG_SUB sub
#define LONG_SUBU subu
#define LONG_L lw
#define LONG_S sw
#define LONG_SP swp
#define LONG_SLL sll
#define LONG_SLLV sllv
#define LONG_SRL srl
#define LONG_SRLV srlv
#define LONG_SRA sra
#define LONG_SRAV srav
#define LONG .word
#define LONGSIZE 4
#define LONGMASK 3
#define LONGLOG 2
#endif
#if (_MIPS_SZLONG == 64)
#define LONG_ADD dadd
#define LONG_ADDU daddu
#define LONG_ADDI daddi
#define LONG_ADDIU daddiu
#define LONG_SUB dsub
#define LONG_SUBU dsubu
#define LONG_L ld
#define LONG_S sd
#define LONG_SP sdp
#define LONG_SLL dsll
#define LONG_SLLV dsllv
#define LONG_SRL dsrl
#define LONG_SRLV dsrlv
#define LONG_SRA dsra
#define LONG_SRAV dsrav
#define LONG .dword
#define LONGSIZE 8
#define LONGMASK 7
#define LONGLOG 3
#endif
/*
* How to add/sub/load/store/shift pointers.
*/
#if (_MIPS_SZPTR == 32)
#define PTR_ADD add
#define PTR_ADDU addu
#define PTR_ADDI addi
#define PTR_ADDIU addiu
#define PTR_SUB sub
#define PTR_SUBU subu
#define PTR_L lw
#define PTR_S sw
#define PTR_LA la
#define PTR_LI li
#define PTR_SLL sll
#define PTR_SLLV sllv
#define PTR_SRL srl
#define PTR_SRLV srlv
#define PTR_SRA sra
#define PTR_SRAV srav
#define PTR_SCALESHIFT 2
#define PTR .word
#define PTRSIZE 4
#define PTRLOG 2
#endif
#if (_MIPS_SZPTR == 64)
#define PTR_ADD dadd
#define PTR_ADDU daddu
#define PTR_ADDI daddi
#define PTR_ADDIU daddiu
#define PTR_SUB dsub
#define PTR_SUBU dsubu
#define PTR_L ld
#define PTR_S sd
#define PTR_LA dla
#define PTR_LI dli
#define PTR_SLL dsll
#define PTR_SLLV dsllv
#define PTR_SRL dsrl
#define PTR_SRLV dsrlv
#define PTR_SRA dsra
#define PTR_SRAV dsrav
#define PTR_SCALESHIFT 3
#define PTR .dword
#define PTRSIZE 8
#define PTRLOG 3
#endif
/*
* Some cp0 registers were extended to 64bit for MIPS III.
*/
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
#define MFC0 mfc0
#define MTC0 mtc0
#endif
#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
#define MFC0 dmfc0
#define MTC0 dmtc0
#endif
#define SSNOP sll zero, zero, 1
#ifdef CONFIG_SGI_IP28
/* Inhibit speculative stores to volatile (e.g.DMA) or invalid addresses. */
#include <asm/cacheops.h>
#define R10KCBARRIER(addr) cache Cache_Barrier, addr;
#else
#define R10KCBARRIER(addr)
#endif
#endif /* __ASM_ASM_H */

View file

@ -0,0 +1,122 @@
/*
* asmmacro.h: Assembler macros to make things easier to read.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998, 1999, 2003 Ralf Baechle
*/
#ifndef _ASM_ASMMACRO_32_H
#define _ASM_ASMMACRO_32_H
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
.macro fpu_save_single thread tmp=t0
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
swc1 $f0, THREAD_FPR0_LS64(\thread)
swc1 $f1, THREAD_FPR1_LS64(\thread)
swc1 $f2, THREAD_FPR2_LS64(\thread)
swc1 $f3, THREAD_FPR3_LS64(\thread)
swc1 $f4, THREAD_FPR4_LS64(\thread)
swc1 $f5, THREAD_FPR5_LS64(\thread)
swc1 $f6, THREAD_FPR6_LS64(\thread)
swc1 $f7, THREAD_FPR7_LS64(\thread)
swc1 $f8, THREAD_FPR8_LS64(\thread)
swc1 $f9, THREAD_FPR9_LS64(\thread)
swc1 $f10, THREAD_FPR10_LS64(\thread)
swc1 $f11, THREAD_FPR11_LS64(\thread)
swc1 $f12, THREAD_FPR12_LS64(\thread)
swc1 $f13, THREAD_FPR13_LS64(\thread)
swc1 $f14, THREAD_FPR14_LS64(\thread)
swc1 $f15, THREAD_FPR15_LS64(\thread)
swc1 $f16, THREAD_FPR16_LS64(\thread)
swc1 $f17, THREAD_FPR17_LS64(\thread)
swc1 $f18, THREAD_FPR18_LS64(\thread)
swc1 $f19, THREAD_FPR19_LS64(\thread)
swc1 $f20, THREAD_FPR20_LS64(\thread)
swc1 $f21, THREAD_FPR21_LS64(\thread)
swc1 $f22, THREAD_FPR22_LS64(\thread)
swc1 $f23, THREAD_FPR23_LS64(\thread)
swc1 $f24, THREAD_FPR24_LS64(\thread)
swc1 $f25, THREAD_FPR25_LS64(\thread)
swc1 $f26, THREAD_FPR26_LS64(\thread)
swc1 $f27, THREAD_FPR27_LS64(\thread)
swc1 $f28, THREAD_FPR28_LS64(\thread)
swc1 $f29, THREAD_FPR29_LS64(\thread)
swc1 $f30, THREAD_FPR30_LS64(\thread)
swc1 $f31, THREAD_FPR31_LS64(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
.macro fpu_restore_single thread tmp=t0
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
lwc1 $f0, THREAD_FPR0_LS64(\thread)
lwc1 $f1, THREAD_FPR1_LS64(\thread)
lwc1 $f2, THREAD_FPR2_LS64(\thread)
lwc1 $f3, THREAD_FPR3_LS64(\thread)
lwc1 $f4, THREAD_FPR4_LS64(\thread)
lwc1 $f5, THREAD_FPR5_LS64(\thread)
lwc1 $f6, THREAD_FPR6_LS64(\thread)
lwc1 $f7, THREAD_FPR7_LS64(\thread)
lwc1 $f8, THREAD_FPR8_LS64(\thread)
lwc1 $f9, THREAD_FPR9_LS64(\thread)
lwc1 $f10, THREAD_FPR10_LS64(\thread)
lwc1 $f11, THREAD_FPR11_LS64(\thread)
lwc1 $f12, THREAD_FPR12_LS64(\thread)
lwc1 $f13, THREAD_FPR13_LS64(\thread)
lwc1 $f14, THREAD_FPR14_LS64(\thread)
lwc1 $f15, THREAD_FPR15_LS64(\thread)
lwc1 $f16, THREAD_FPR16_LS64(\thread)
lwc1 $f17, THREAD_FPR17_LS64(\thread)
lwc1 $f18, THREAD_FPR18_LS64(\thread)
lwc1 $f19, THREAD_FPR19_LS64(\thread)
lwc1 $f20, THREAD_FPR20_LS64(\thread)
lwc1 $f21, THREAD_FPR21_LS64(\thread)
lwc1 $f22, THREAD_FPR22_LS64(\thread)
lwc1 $f23, THREAD_FPR23_LS64(\thread)
lwc1 $f24, THREAD_FPR24_LS64(\thread)
lwc1 $f25, THREAD_FPR25_LS64(\thread)
lwc1 $f26, THREAD_FPR26_LS64(\thread)
lwc1 $f27, THREAD_FPR27_LS64(\thread)
lwc1 $f28, THREAD_FPR28_LS64(\thread)
lwc1 $f29, THREAD_FPR29_LS64(\thread)
lwc1 $f30, THREAD_FPR30_LS64(\thread)
lwc1 $f31, THREAD_FPR31_LS64(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
.macro cpu_save_nonscratch thread
LONG_S s0, THREAD_REG16(\thread)
LONG_S s1, THREAD_REG17(\thread)
LONG_S s2, THREAD_REG18(\thread)
LONG_S s3, THREAD_REG19(\thread)
LONG_S s4, THREAD_REG20(\thread)
LONG_S s5, THREAD_REG21(\thread)
LONG_S s6, THREAD_REG22(\thread)
LONG_S s7, THREAD_REG23(\thread)
LONG_S sp, THREAD_REG29(\thread)
LONG_S fp, THREAD_REG30(\thread)
.endm
.macro cpu_restore_nonscratch thread
LONG_L s0, THREAD_REG16(\thread)
LONG_L s1, THREAD_REG17(\thread)
LONG_L s2, THREAD_REG18(\thread)
LONG_L s3, THREAD_REG19(\thread)
LONG_L s4, THREAD_REG20(\thread)
LONG_L s5, THREAD_REG21(\thread)
LONG_L s6, THREAD_REG22(\thread)
LONG_L s7, THREAD_REG23(\thread)
LONG_L sp, THREAD_REG29(\thread)
LONG_L fp, THREAD_REG30(\thread)
LONG_L ra, THREAD_REG31(\thread)
.endm
#endif /* _ASM_ASMMACRO_32_H */

View file

@ -0,0 +1,43 @@
/*
* asmmacro.h: Assembler macros to make things easier to read.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998, 1999 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_ASMMACRO_64_H
#define _ASM_ASMMACRO_64_H
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
.macro cpu_save_nonscratch thread
LONG_S s0, THREAD_REG16(\thread)
LONG_S s1, THREAD_REG17(\thread)
LONG_S s2, THREAD_REG18(\thread)
LONG_S s3, THREAD_REG19(\thread)
LONG_S s4, THREAD_REG20(\thread)
LONG_S s5, THREAD_REG21(\thread)
LONG_S s6, THREAD_REG22(\thread)
LONG_S s7, THREAD_REG23(\thread)
LONG_S sp, THREAD_REG29(\thread)
LONG_S fp, THREAD_REG30(\thread)
.endm
.macro cpu_restore_nonscratch thread
LONG_L s0, THREAD_REG16(\thread)
LONG_L s1, THREAD_REG17(\thread)
LONG_L s2, THREAD_REG18(\thread)
LONG_L s3, THREAD_REG19(\thread)
LONG_L s4, THREAD_REG20(\thread)
LONG_L s5, THREAD_REG21(\thread)
LONG_L s6, THREAD_REG22(\thread)
LONG_L s7, THREAD_REG23(\thread)
LONG_L sp, THREAD_REG29(\thread)
LONG_L fp, THREAD_REG30(\thread)
LONG_L ra, THREAD_REG31(\thread)
.endm
#endif /* _ASM_ASMMACRO_64_H */

View file

@ -0,0 +1,467 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Ralf Baechle
*/
#ifndef _ASM_ASMMACRO_H
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
#include <asm/asm-offsets.h>
#include <asm/msa.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/asmmacro-64.h>
#endif
#ifdef CONFIG_CPU_MIPSR2
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
.endm
.macro local_irq_disable reg=t0
di
irq_disable_hazard
.endm
#else
.macro local_irq_enable reg=t0
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_enable_hazard
.endm
.macro local_irq_disable reg=t0
#ifdef CONFIG_PREEMPT
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, 1
sw \reg, TI_PRE_COUNT($28)
#endif
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
#ifdef CONFIG_PREEMPT
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, -1
sw \reg, TI_PRE_COUNT($28)
#endif
.endm
#endif /* CONFIG_CPU_MIPSR2 */
.macro fpu_save_16even thread tmp=t0
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
sdc1 $f0, THREAD_FPR0_LS64(\thread)
sdc1 $f2, THREAD_FPR2_LS64(\thread)
sdc1 $f4, THREAD_FPR4_LS64(\thread)
sdc1 $f6, THREAD_FPR6_LS64(\thread)
sdc1 $f8, THREAD_FPR8_LS64(\thread)
sdc1 $f10, THREAD_FPR10_LS64(\thread)
sdc1 $f12, THREAD_FPR12_LS64(\thread)
sdc1 $f14, THREAD_FPR14_LS64(\thread)
sdc1 $f16, THREAD_FPR16_LS64(\thread)
sdc1 $f18, THREAD_FPR18_LS64(\thread)
sdc1 $f20, THREAD_FPR20_LS64(\thread)
sdc1 $f22, THREAD_FPR22_LS64(\thread)
sdc1 $f24, THREAD_FPR24_LS64(\thread)
sdc1 $f26, THREAD_FPR26_LS64(\thread)
sdc1 $f28, THREAD_FPR28_LS64(\thread)
sdc1 $f30, THREAD_FPR30_LS64(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
.macro fpu_save_16odd thread
.set push
.set mips64r2
SET_HARDFLOAT
sdc1 $f1, THREAD_FPR1_LS64(\thread)
sdc1 $f3, THREAD_FPR3_LS64(\thread)
sdc1 $f5, THREAD_FPR5_LS64(\thread)
sdc1 $f7, THREAD_FPR7_LS64(\thread)
sdc1 $f9, THREAD_FPR9_LS64(\thread)
sdc1 $f11, THREAD_FPR11_LS64(\thread)
sdc1 $f13, THREAD_FPR13_LS64(\thread)
sdc1 $f15, THREAD_FPR15_LS64(\thread)
sdc1 $f17, THREAD_FPR17_LS64(\thread)
sdc1 $f19, THREAD_FPR19_LS64(\thread)
sdc1 $f21, THREAD_FPR21_LS64(\thread)
sdc1 $f23, THREAD_FPR23_LS64(\thread)
sdc1 $f25, THREAD_FPR25_LS64(\thread)
sdc1 $f27, THREAD_FPR27_LS64(\thread)
sdc1 $f29, THREAD_FPR29_LS64(\thread)
sdc1 $f31, THREAD_FPR31_LS64(\thread)
.set pop
.endm
.macro fpu_save_double thread status tmp
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
10:
#endif
fpu_save_16even \thread \tmp
.endm
.macro fpu_restore_16even thread tmp=t0
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
ldc1 $f0, THREAD_FPR0_LS64(\thread)
ldc1 $f2, THREAD_FPR2_LS64(\thread)
ldc1 $f4, THREAD_FPR4_LS64(\thread)
ldc1 $f6, THREAD_FPR6_LS64(\thread)
ldc1 $f8, THREAD_FPR8_LS64(\thread)
ldc1 $f10, THREAD_FPR10_LS64(\thread)
ldc1 $f12, THREAD_FPR12_LS64(\thread)
ldc1 $f14, THREAD_FPR14_LS64(\thread)
ldc1 $f16, THREAD_FPR16_LS64(\thread)
ldc1 $f18, THREAD_FPR18_LS64(\thread)
ldc1 $f20, THREAD_FPR20_LS64(\thread)
ldc1 $f22, THREAD_FPR22_LS64(\thread)
ldc1 $f24, THREAD_FPR24_LS64(\thread)
ldc1 $f26, THREAD_FPR26_LS64(\thread)
ldc1 $f28, THREAD_FPR28_LS64(\thread)
ldc1 $f30, THREAD_FPR30_LS64(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
.macro fpu_restore_16odd thread
.set push
.set mips64r2
SET_HARDFLOAT
ldc1 $f1, THREAD_FPR1_LS64(\thread)
ldc1 $f3, THREAD_FPR3_LS64(\thread)
ldc1 $f5, THREAD_FPR5_LS64(\thread)
ldc1 $f7, THREAD_FPR7_LS64(\thread)
ldc1 $f9, THREAD_FPR9_LS64(\thread)
ldc1 $f11, THREAD_FPR11_LS64(\thread)
ldc1 $f13, THREAD_FPR13_LS64(\thread)
ldc1 $f15, THREAD_FPR15_LS64(\thread)
ldc1 $f17, THREAD_FPR17_LS64(\thread)
ldc1 $f19, THREAD_FPR19_LS64(\thread)
ldc1 $f21, THREAD_FPR21_LS64(\thread)
ldc1 $f23, THREAD_FPR23_LS64(\thread)
ldc1 $f25, THREAD_FPR25_LS64(\thread)
ldc1 $f27, THREAD_FPR27_LS64(\thread)
ldc1 $f29, THREAD_FPR29_LS64(\thread)
ldc1 $f31, THREAD_FPR31_LS64(\thread)
.set pop
.endm
.macro fpu_restore_double thread status tmp
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
fpu_restore_16odd \thread
10:
#endif
fpu_restore_16even \thread \tmp
.endm
#ifdef CONFIG_CPU_MIPSR2
.macro _EXT rd, rs, p, s
ext \rd, \rs, \p, \s
.endm
#else /* !CONFIG_CPU_MIPSR2 */
.macro _EXT rd, rs, p, s
srl \rd, \rs, \p
andi \rd, \rd, (1 << \s) - 1
.endm
#endif /* !CONFIG_CPU_MIPSR2 */
/*
* Temporary until all versions of gas have MT ASE support
*/
.macro DMT reg=0
.word 0x41600bc1 | (\reg << 16)
.endm
.macro EMT reg=0
.word 0x41600be1 | (\reg << 16)
.endm
.macro DVPE reg=0
.word 0x41600001 | (\reg << 16)
.endm
.macro EVPE reg=0
.word 0x41600021 | (\reg << 16)
.endm
.macro MFTR rt=0, rd=0, u=0, sel=0
.word 0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
.endm
.macro MTTR rt=0, rd=0, u=0, sel=0
.word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
.endm
#ifdef TOOLCHAIN_SUPPORTS_MSA
.macro ld_d wd, off, base
.set push
.set mips32r2
.set msa
ld.d $w\wd, \off(\base)
.set pop
.endm
.macro st_d wd, off, base
.set push
.set mips32r2
.set msa
st.d $w\wd, \off(\base)
.set pop
.endm
.macro copy_u_w rd, ws, n
.set push
.set mips32r2
.set msa
copy_u.w \rd, $w\ws[\n]
.set pop
.endm
.macro copy_u_d rd, ws, n
.set push
.set mips64r2
.set msa
copy_u.d \rd, $w\ws[\n]
.set pop
.endm
.macro insert_w wd, n, rs
.set push
.set mips32r2
.set msa
insert.w $w\wd[\n], \rs
.set pop
.endm
.macro insert_d wd, n, rs
.set push
.set mips64r2
.set msa
insert.d $w\wd[\n], \rs
.set pop
.endm
#else
#ifdef CONFIG_CPU_MICROMIPS
#define CFC_MSA_INSN 0x587e0056
#define CTC_MSA_INSN 0x583e0816
#define LDD_MSA_INSN 0x58000837
#define STD_MSA_INSN 0x5800083f
#define COPY_UW_MSA_INSN 0x58f00056
#define COPY_UD_MSA_INSN 0x58f80056
#define INSERT_W_MSA_INSN 0x59300816
#define INSERT_D_MSA_INSN 0x59380816
#else
#define CFC_MSA_INSN 0x787e0059
#define CTC_MSA_INSN 0x783e0819
#define LDD_MSA_INSN 0x78000823
#define STD_MSA_INSN 0x78000827
#define COPY_UW_MSA_INSN 0x78f00059
#define COPY_UD_MSA_INSN 0x78f80059
#define INSERT_W_MSA_INSN 0x79300819
#define INSERT_D_MSA_INSN 0x79380819
#endif
/*
* Temporary until all toolchains in use include MSA support.
*/
.macro cfcmsa rd, cs
.set push
.set noat
SET_HARDFLOAT
.insn
.word CFC_MSA_INSN | (\cs << 11)
move \rd, $1
.set pop
.endm
.macro ctcmsa cd, rs
.set push
.set noat
SET_HARDFLOAT
move $1, \rs
.word CTC_MSA_INSN | (\cd << 6)
.set pop
.endm
.macro ld_d wd, off, base
.set push
.set noat
SET_HARDFLOAT
addu $1, \base, \off
.word LDD_MSA_INSN | (\wd << 6)
.set pop
.endm
.macro st_d wd, off, base
.set push
.set noat
SET_HARDFLOAT
addu $1, \base, \off
.word STD_MSA_INSN | (\wd << 6)
.set pop
.endm
.macro copy_u_w rd, ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop
.endm
.macro copy_u_d rd, ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop
.endm
.macro insert_w wd, n, rs
.set push
.set noat
SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
.macro insert_d wd, n, rs
.set push
.set noat
SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
#endif
.macro msa_save_all thread
st_d 0, THREAD_FPR0, \thread
st_d 1, THREAD_FPR1, \thread
st_d 2, THREAD_FPR2, \thread
st_d 3, THREAD_FPR3, \thread
st_d 4, THREAD_FPR4, \thread
st_d 5, THREAD_FPR5, \thread
st_d 6, THREAD_FPR6, \thread
st_d 7, THREAD_FPR7, \thread
st_d 8, THREAD_FPR8, \thread
st_d 9, THREAD_FPR9, \thread
st_d 10, THREAD_FPR10, \thread
st_d 11, THREAD_FPR11, \thread
st_d 12, THREAD_FPR12, \thread
st_d 13, THREAD_FPR13, \thread
st_d 14, THREAD_FPR14, \thread
st_d 15, THREAD_FPR15, \thread
st_d 16, THREAD_FPR16, \thread
st_d 17, THREAD_FPR17, \thread
st_d 18, THREAD_FPR18, \thread
st_d 19, THREAD_FPR19, \thread
st_d 20, THREAD_FPR20, \thread
st_d 21, THREAD_FPR21, \thread
st_d 22, THREAD_FPR22, \thread
st_d 23, THREAD_FPR23, \thread
st_d 24, THREAD_FPR24, \thread
st_d 25, THREAD_FPR25, \thread
st_d 26, THREAD_FPR26, \thread
st_d 27, THREAD_FPR27, \thread
st_d 28, THREAD_FPR28, \thread
st_d 29, THREAD_FPR29, \thread
st_d 30, THREAD_FPR30, \thread
st_d 31, THREAD_FPR31, \thread
.set push
.set noat
SET_HARDFLOAT
cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
.set pop
.endm
.macro msa_restore_all thread
.set push
.set noat
SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
ctcmsa MSA_CSR, $1
.set pop
ld_d 0, THREAD_FPR0, \thread
ld_d 1, THREAD_FPR1, \thread
ld_d 2, THREAD_FPR2, \thread
ld_d 3, THREAD_FPR3, \thread
ld_d 4, THREAD_FPR4, \thread
ld_d 5, THREAD_FPR5, \thread
ld_d 6, THREAD_FPR6, \thread
ld_d 7, THREAD_FPR7, \thread
ld_d 8, THREAD_FPR8, \thread
ld_d 9, THREAD_FPR9, \thread
ld_d 10, THREAD_FPR10, \thread
ld_d 11, THREAD_FPR11, \thread
ld_d 12, THREAD_FPR12, \thread
ld_d 13, THREAD_FPR13, \thread
ld_d 14, THREAD_FPR14, \thread
ld_d 15, THREAD_FPR15, \thread
ld_d 16, THREAD_FPR16, \thread
ld_d 17, THREAD_FPR17, \thread
ld_d 18, THREAD_FPR18, \thread
ld_d 19, THREAD_FPR19, \thread
ld_d 20, THREAD_FPR20, \thread
ld_d 21, THREAD_FPR21, \thread
ld_d 22, THREAD_FPR22, \thread
ld_d 23, THREAD_FPR23, \thread
ld_d 24, THREAD_FPR24, \thread
ld_d 25, THREAD_FPR25, \thread
ld_d 26, THREAD_FPR26, \thread
ld_d 27, THREAD_FPR27, \thread
ld_d 28, THREAD_FPR28, \thread
ld_d 29, THREAD_FPR29, \thread
ld_d 30, THREAD_FPR30, \thread
ld_d 31, THREAD_FPR31, \thread
.endm
.macro msa_init_upper wd
#ifdef CONFIG_64BIT
insert_d \wd, 1
#else
insert_w \wd, 2
insert_w \wd, 3
#endif
.if 31-\wd
msa_init_upper (\wd+1)
.endif
.endm
.macro msa_init_all_upper
.set push
.set noat
SET_HARDFLOAT
not $1, zero
msa_init_upper 0
.set pop
.endm
#endif /* _ASM_ASMMACRO_H */

View file

@ -0,0 +1,581 @@
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*
* But use these as seldom as possible since they are much slower
* than regular operations.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
*/
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>
#define ATOMIC_INIT(i) { (i) }
/*
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ACCESS_ONCE((v)->counter)
/*
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) ((v)->counter = (i))
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
__asm__ __volatile__( \
" .set arch=r4000 \n" \
"1: ll %0, %1 # atomic_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" beqzl %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
\
do { \
__asm__ __volatile__( \
" .set arch=r4000 \n" \
" ll %0, %1 # atomic_" #op "\n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
raw_local_irq_save(flags); \
v->counter c_op i; \
raw_local_irq_restore(flags); \
} \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
{ \
int result; \
\
smp_mb__before_llsc(); \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
__asm__ __volatile__( \
" .set arch=r4000 \n" \
"1: ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
" beqzl %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
\
do { \
__asm__ __volatile__( \
" .set arch=r4000 \n" \
" ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} while (unlikely(!result)); \
\
result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
raw_local_irq_save(flags); \
result = v->counter; \
result c_op i; \
v->counter = result; \
raw_local_irq_restore(flags); \
} \
\
smp_llsc_mb(); \
\
return result; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically test @v and subtract @i if @v is greater than or equal to @i.
* The function returns the old value of @v minus @i.
*/
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
int result;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
int temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
int temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
raw_local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
raw_local_irq_restore(flags);
}
smp_llsc_mb();
return result;
}
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c;
}
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
/*
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
/*
* atomic_dec_and_test - decrement by 1 and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
/*
* atomic_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*/
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
/*
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
#define atomic_inc(v) atomic_add(1, (v))
/*
* atomic_dec - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
#define atomic_dec(v) atomic_sub(1, (v))
/*
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
#ifdef CONFIG_64BIT
#define ATOMIC64_INIT(i) { (i) }
/*
* atomic64_read - read atomic variable
* @v: pointer of type atomic64_t
*
*/
#define atomic64_read(v) ACCESS_ONCE((v)->counter)
/*
* atomic64_set - set atomic variable
* @v: pointer of type atomic64_t
* @i: required value
*/
#define atomic64_set(v, i) ((v)->counter = (i))
#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
__asm__ __volatile__( \
" .set arch=r4000 \n" \
"1: lld %0, %1 # atomic64_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
" beqzl %0, 1b \n" \
" .set mips0 \n" \
: "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
\
do { \
__asm__ __volatile__( \
" .set arch=r4000 \n" \
" lld %0, %1 # atomic64_" #op "\n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
" .set mips0 \n" \
: "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
unsigned long flags; \
\
raw_local_irq_save(flags); \
v->counter c_op i; \
raw_local_irq_restore(flags); \
} \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
long result; \
\
smp_mb__before_llsc(); \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
__asm__ __volatile__( \
" .set arch=r4000 \n" \
"1: lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
" beqzl %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
\
do { \
__asm__ __volatile__( \
" .set arch=r4000 \n" \
" lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), "=m" (v->counter) \
: "Ir" (i), "m" (v->counter) \
: "memory"); \
} while (unlikely(!result)); \
\
result = temp; result c_op i; \
} else { \
unsigned long flags; \
\
raw_local_irq_save(flags); \
result = v->counter; \
result c_op i; \
v->counter = result; \
raw_local_irq_restore(flags); \
} \
\
smp_llsc_mb(); \
\
return result; \
}
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
ATOMIC64_OP_RETURN(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
/*
* atomic64_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic64_t
*
* Atomically test @v and subtract @i if @v is greater than or equal to @i.
* The function returns the old value of @v minus @i.
*/
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
long result;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
" .set noreorder \n"
" beqzl %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
" .set noreorder \n"
" beqz %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
raw_local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
raw_local_irq_restore(flags);
}
smp_llsc_mb();
return result;
}
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns true if the addition was performed (i.e. @v was not @u).
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
/*
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
/*
* atomic64_inc_and_test - increment and test
* @v: pointer of type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
/*
* atomic64_dec_and_test - decrement by 1 and test
* @v: pointer of type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*/
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
/*
* atomic64_inc - increment atomic variable
* @v: pointer of type atomic64_t
*
* Atomically increments @v by 1.
*/
#define atomic64_inc(v) atomic64_add(1, (v))
/*
* atomic64_dec - decrement and test
* @v: pointer of type atomic64_t
*
* Atomically decrements @v by 1.
*/
#define atomic64_dec(v) atomic64_sub(1, (v))
/*
* atomic64_add_negative - add and test if negative
* @v: pointer of type atomic64_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
#endif /* CONFIG_64BIT */
#endif /* _ASM_ATOMIC_H */
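A small, hypothetical reference-counting sketch (not from this commit; the struct and function names are placeholders) showing how a few of the operations above compose:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/slab.h>

/* Hypothetical object with an atomic reference count, initialised to 1. */
struct example_obj {
	atomic_t refcnt;
};

static void example_get(struct example_obj *obj)
{
	/* Refuse to resurrect an object whose count already hit zero. */
	WARN_ON(__atomic_add_unless(&obj->refcnt, 1, 0) == 0);
}

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))	/* last reference dropped */
		kfree(obj);
}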

View file

@ -0,0 +1,201 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
*/
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#include <asm/addrspace.h>
/*
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
".set mips2\n\t" \
"sync\n\t" \
".set pop" \
: /* no output */ \
: /* no input */ \
: "memory")
#else
#define __sync() do { } while(0)
#endif
#define __fast_iob() \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
"lw $0,%0\n\t" \
"nop\n\t" \
".set pop" \
: /* no output */ \
: "m" (*(int *)CKSEG1) \
: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
# define fast_wmb() __syncw()
# define fast_rmb() barrier()
# define fast_mb() __sync()
# define fast_iob() do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb() __sync()
# define fast_rmb() __sync()
# define fast_mb() __sync()
# ifdef CONFIG_SGI_IP28
# define fast_iob() \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
"lw $0,%0\n\t" \
"sync\n\t" \
"lw $0,%0\n\t" \
".set pop" \
: /* no output */ \
: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
: "memory")
# else
# define fast_iob() \
do { \
__sync(); \
__fast_iob(); \
} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
#ifdef CONFIG_CPU_HAS_WB
#include <asm/wbflush.h>
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define mb() wbflush()
#define iob() wbflush()
#else /* !CONFIG_CPU_HAS_WB */
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define mb() fast_mb()
#define iob() fast_iob()
#endif /* !CONFIG_CPU_HAS_WB */
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
# define smp_mb() __sync()
# define smp_rmb() barrier()
# define smp_wmb() __syncw()
# else
# define smp_mb() __asm__ __volatile__("sync" : : :"memory")
# define smp_rmb() __asm__ __volatile__("sync" : : :"memory")
# define smp_wmb() __asm__ __volatile__("sync" : : :"memory")
# endif
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB " sync \n"
#else
#define __WEAK_LLSC_MB " \n"
#endif
#define set_mb(var, value) \
do { var = value; smp_mb(); } while (0)
#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t" \
".set arch=octeon\n\t" \
"syncw\n\t" \
".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
})
#define smp_mb__before_atomic() smp_mb__before_llsc()
#define smp_mb__after_atomic() smp_llsc_mb()
#endif /* __ASM_BARRIER_H */
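To illustrate the acquire/release pair defined above, here is a minimal, hypothetical message-passing sketch (the variable and function names are invented): a consumer that observes the flag set by smp_store_release() is guaranteed to also observe the payload written before it.

/* Hypothetical one-shot message passing between two CPUs. */
static int example_payload;
static int example_ready;

static void example_publish(int value)
{
	example_payload = value;
	/* Order the payload store before the flag becomes visible. */
	smp_store_release(&example_ready, 1);
}

static int example_consume(int *value)
{
	/* Pairs with the release above: seeing the flag implies seeing the payload. */
	if (!smp_load_acquire(&example_ready))
		return 0;		/* nothing published yet */

	*value = example_payload;
	return 1;
}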

View file

@ -0,0 +1,60 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1997, 1999 by Ralf Baechle
* Copyright (c) 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_BCACHE_H
#define _ASM_BCACHE_H
/* Some R4000 / R4400 / R4600 / R5000 machines may have non-DMA-coherent,
chipset-implemented caches. On machines with other CPUs the CPU does the
cache thing itself. */
struct bcache_ops {
void (*bc_enable)(void);
void (*bc_disable)(void);
void (*bc_wback_inv)(unsigned long page, unsigned long size);
void (*bc_inv)(unsigned long page, unsigned long size);
};
extern void indy_sc_init(void);
#ifdef CONFIG_BOARD_SCACHE
extern struct bcache_ops *bcops;
static inline void bc_enable(void)
{
bcops->bc_enable();
}
static inline void bc_disable(void)
{
bcops->bc_disable();
}
static inline void bc_wback_inv(unsigned long page, unsigned long size)
{
bcops->bc_wback_inv(page, size);
}
static inline void bc_inv(unsigned long page, unsigned long size)
{
bcops->bc_inv(page, size);
}
#else /* !defined(CONFIG_BOARD_SCACHE) */
/* Not R4000 / R4400 / R4600 / R5000. */
#define bc_enable() do { } while (0)
#define bc_disable() do { } while (0)
#define bc_wback_inv(page, size) do { } while (0)
#define bc_inv(page, size) do { } while (0)
#endif /* !defined(CONFIG_BOARD_SCACHE) */
#endif /* _ASM_BCACHE_H */
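As a rough illustration of the hooks above (not from this commit; the function names and the simplified direction handling are assumptions), a platform's DMA path could keep the board cache coherent like this:

/* Hypothetical helpers keeping the external (board) cache coherent around DMA. */
static void example_dma_map(unsigned long addr, unsigned long size)
{
	/* Push dirty lines to memory and drop them before the device accesses the buffer. */
	bc_wback_inv(addr, size);
}

static void example_dma_unmap(unsigned long addr, unsigned long size)
{
	/* Discard stale lines so the CPU sees the freshly DMA'd data. */
	bc_inv(addr, size);
}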

View file

@ -0,0 +1,632 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define __INS "ins "
#define __EXT "ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define __INS "dins "
#define __EXT "dext "
#endif
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
volatile unsigned long *addr);
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
" " __LL "%0, %1 # set_bit \n"
" " __INS "%0, %3, %2, 1 \n"
" " __SC "%0, %1 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (bit), "r" (~0));
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else
__mips_set_bit(nr, addr);
}
/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
" " __LL "%0, %1 # clear_bit \n"
" " __INS "%0, $0, %2, 1 \n"
" " __SC "%0, %1 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (bit));
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else
__mips_clear_bit(nr, addr);
}
/*
* clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
 * clear_bit_unlock() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
smp_mb__before_atomic();
clear_bit(nr, addr);
}
/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else
__mips_change_bit(nr, addr);
}
/*
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
res = temp & (1UL << bit);
} else
res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
return res != 0;
}
/*
* test_and_set_bit_lock - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and implies acquire ordering semantics
* after the memory operation.
*/
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
res = temp & (1UL << bit);
} else
res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
return res != 0;
}
/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
" " __LL "%0, %1 # test_and_clear_bit \n"
" " __EXT "%2, %0, %3, 1 \n"
" " __INS "%0, $0, %3, 1 \n"
" " __SC "%0, %1 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "ir" (bit)
: "memory");
} while (unlikely(!temp));
#endif
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
res = temp & (1UL << bit);
} else
res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
return res != 0;
}
/*
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
res = temp & (1UL << bit);
} else
res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();
return res != 0;
}
#include <asm-generic/bitops/non-atomic.h>
/*
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
 * __clear_bit_unlock() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
*/
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
smp_mb();
__clear_bit(nr, addr);
}
/*
* Return the bit position (0..63) of the most significant 1 bit in a word
* Returns -1 if no 1 bit exists
*/
static inline unsigned long __fls(unsigned long word)
{
int num;
if (BITS_PER_LONG == 32 &&
__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
" .set mips32 \n"
" clz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
return 31 - num;
}
if (BITS_PER_LONG == 64 &&
__builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
__asm__(
" .set push \n"
" .set mips64 \n"
" dclz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
return 63 - num;
}
num = BITS_PER_LONG - 1;
#if BITS_PER_LONG == 64
if (!(word & (~0ul << 32))) {
num -= 32;
word <<= 32;
}
#endif
if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (BITS_PER_LONG-1))))
num -= 1;
return num;
}
/*
* __ffs - find first bit in word.
* @word: The word to search
*
* Returns 0..SZLONG-1
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
return __fls(word & -word);
}
/*
* fls - find last bit set.
* @word: The word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline int fls(int x)
{
int r;
if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
" .set mips32 \n"
" clz %0, %1 \n"
" .set pop \n"
: "=r" (x)
: "r" (x));
return 32 - x;
}
r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
#include <asm-generic/bitops/fls64.h>
/*
* ffs - find first bit set.
* @word: The word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline int ffs(int word)
{
if (!word)
return 0;
return fls(word & -word);
}
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_BITOPS_H */
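
A short usage sketch with made-up flag names, showing how the atomic helpers above are typically combined on a small bitmap: test_and_set_bit() doubles as a simple try-lock and clear_bit_unlock() releases it with the documented release semantics.

/* Hypothetical flag bitmap exercised with the atomic bit operations above. */
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_FLAG_BUSY	0
#define MY_FLAG_DIRTY	1

static DECLARE_BITMAP(my_flags, 8);

static int my_try_claim(void)
{
	/* returns 0 if this caller set the bit, non-zero if it was already set */
	return test_and_set_bit(MY_FLAG_BUSY, my_flags);
}

static void my_release(void)
{
	set_bit(MY_FLAG_DIRTY, my_flags);		/* atomic, no barrier implied */
	clear_bit_unlock(MY_FLAG_BUSY, my_flags);	/* release the "lock" bit */
}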

View file

@@ -0,0 +1,126 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
*
* Definitions for BMIPS processors
*/
#ifndef _ASM_BMIPS_H
#define _ASM_BMIPS_H
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/addrspace.h>
#include <asm/mipsregs.h>
#include <asm/hazards.h>
/* NOTE: the CBR register returns a PA, and it can be above 0xff00_0000 */
#define BMIPS_GET_CBR() ((void __iomem *)(CKSEG1 | \
(unsigned long) \
((read_c0_brcm_cbr() >> 18) << 18)))
#define BMIPS_RAC_CONFIG 0x00000000
#define BMIPS_RAC_ADDRESS_RANGE 0x00000004
#define BMIPS_RAC_CONFIG_1 0x00000008
#define BMIPS_L2_CONFIG 0x0000000c
#define BMIPS_LMB_CONTROL 0x0000001c
#define BMIPS_SYSTEM_BASE 0x00000020
#define BMIPS_PERF_GLOBAL_CONTROL 0x00020000
#define BMIPS_PERF_CONTROL_0 0x00020004
#define BMIPS_PERF_CONTROL_1 0x00020008
#define BMIPS_PERF_COUNTER_0 0x00020010
#define BMIPS_PERF_COUNTER_1 0x00020014
#define BMIPS_PERF_COUNTER_2 0x00020018
#define BMIPS_PERF_COUNTER_3 0x0002001c
#define BMIPS_RELO_VECTOR_CONTROL_0 0x00030000
#define BMIPS_RELO_VECTOR_CONTROL_1 0x00038000
#define BMIPS_NMI_RESET_VEC 0x80000000
#define BMIPS_WARM_RESTART_VEC 0x80000380
#define ZSCM_REG_BASE 0x97000000
#if !defined(__ASSEMBLY__)
#include <linux/cpumask.h>
#include <asm/r4kcache.h>
#include <asm/smp-ops.h>
extern struct plat_smp_ops bmips43xx_smp_ops;
extern struct plat_smp_ops bmips5000_smp_ops;
static inline int register_bmips_smp_ops(void)
{
#if IS_ENABLED(CONFIG_CPU_BMIPS) && IS_ENABLED(CONFIG_SMP)
switch (current_cpu_type()) {
case CPU_BMIPS32:
case CPU_BMIPS3300:
return register_up_smp_ops();
case CPU_BMIPS4350:
case CPU_BMIPS4380:
register_smp_ops(&bmips43xx_smp_ops);
break;
case CPU_BMIPS5000:
register_smp_ops(&bmips5000_smp_ops);
break;
default:
return -ENODEV;
}
return 0;
#else
return -ENODEV;
#endif
}
extern char bmips_reset_nmi_vec;
extern char bmips_reset_nmi_vec_end;
extern char bmips_smp_movevec;
extern char bmips_smp_int_vec;
extern char bmips_smp_int_vec_end;
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;
extern cpumask_t bmips_booted_mask;
extern void bmips_ebase_setup(void);
extern asmlinkage void plat_wired_tlb_setup(void);
static inline unsigned long bmips_read_zscm_reg(unsigned int offset)
{
unsigned long ret;
barrier();
cache_op(Index_Load_Tag_S, ZSCM_REG_BASE + offset);
__sync();
_ssnop();
_ssnop();
_ssnop();
_ssnop();
_ssnop();
_ssnop();
_ssnop();
ret = read_c0_ddatalo();
_ssnop();
return ret;
}
static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
{
write_c0_ddatalo(data);
_ssnop();
_ssnop();
_ssnop();
cache_op(Index_Store_Tag_S, ZSCM_REG_BASE + offset);
_ssnop();
_ssnop();
_ssnop();
barrier();
}
#endif /* !defined(__ASSEMBLY__) */
#endif /* _ASM_BMIPS_H */
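
Sketch only: a hypothetical BMIPS board would call register_bmips_smp_ops() once during early platform setup; the function name and message below are illustrative, not taken from an actual board file.

/* Hypothetical early platform setup calling the helper above. */
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/bmips.h>

void __init my_plat_smp_setup(void)
{
	/* picks UP ops or the matching bmips43xx/bmips5000 SMP ops */
	if (register_bmips_smp_ops())
		pr_warn("BMIPS SMP ops unavailable, booting uniprocessor\n");
}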

View file

@@ -0,0 +1,150 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 2003 by Ralf Baechle
* Copyright (C) 1995, 1996 Andreas Busse
* Copyright (C) 1995, 1996 Stoned Elipot
* Copyright (C) 1995, 1996 Paul M. Antoine.
* Copyright (C) 2009 Zhang Le
*/
#ifndef _ASM_BOOTINFO_H
#define _ASM_BOOTINFO_H
#include <linux/types.h>
#include <asm/setup.h>
/*
* The MACH_ IDs are sort of equivalent to PCI product IDs. As such the
* numbers do not necessarily reflect technical relations or similarities
* between systems.
*/
/*
* Valid machtype values for group unknown
*/
#define MACH_UNKNOWN 0 /* whatever... */
/*
* Valid machtype for group DEC
*/
#define MACH_DSUNKNOWN 0
#define MACH_DS23100 1 /* DECstation 2100 or 3100 */
#define MACH_DS5100 2 /* DECsystem 5100 */
#define MACH_DS5000_200 3 /* DECstation 5000/200 */
#define MACH_DS5000_1XX 4 /* DECstation 5000/120, 125, 133, 150 */
#define MACH_DS5000_XX 5 /* DECstation 5000/20, 25, 33, 50 */
#define MACH_DS5000_2X0 6 /* DECstation 5000/240, 260 */
#define MACH_DS5400 7 /* DECsystem 5400 */
#define MACH_DS5500 8 /* DECsystem 5500 */
#define MACH_DS5800 9 /* DECsystem 5800 */
#define MACH_DS5900 10 /* DECsystem 5900 */
/*
* Valid machtype for group PMC-MSP
*/
#define MACH_MSP4200_EVAL 0 /* PMC-Sierra MSP4200 Evaluation */
#define MACH_MSP4200_GW 1 /* PMC-Sierra MSP4200 Gateway demo */
#define MACH_MSP4200_FPGA 2 /* PMC-Sierra MSP4200 Emulation */
#define MACH_MSP7120_EVAL 3 /* PMC-Sierra MSP7120 Evaluation */
#define MACH_MSP7120_GW 4 /* PMC-Sierra MSP7120 Residential GW */
#define MACH_MSP7120_FPGA 5 /* PMC-Sierra MSP7120 Emulation */
#define MACH_MSP_OTHER 255 /* PMC-Sierra unknown board type */
/*
* Valid machtype for group Mikrotik
*/
#define MACH_MIKROTIK_RB532 0 /* Mikrotik RouterBoard 532 */
#define MACH_MIKROTIK_RB532A 1 /* Mikrotik RouterBoard 532A */
/*
* Valid machtype for Loongson family
*/
enum loongson_machine_type {
MACH_LOONGSON_UNKNOWN,
MACH_LEMOTE_FL2E,
MACH_LEMOTE_FL2F,
MACH_LEMOTE_ML2F7,
MACH_LEMOTE_YL2F89,
MACH_DEXXON_GDIUM2F10,
MACH_LEMOTE_NAS,
MACH_LEMOTE_LL2F,
MACH_LEMOTE_A1004,
MACH_LEMOTE_A1101,
MACH_LEMOTE_A1201,
MACH_LEMOTE_A1205,
MACH_LOONGSON_END
};
/*
* Valid machtype for group INGENIC
*/
#define MACH_INGENIC_JZ4730 0 /* JZ4730 SOC */
#define MACH_INGENIC_JZ4740 1 /* JZ4740 SOC */
extern char *system_type;
const char *get_system_type(void);
extern unsigned long mips_machtype;
#define BOOT_MEM_MAP_MAX 32
#define BOOT_MEM_RAM 1
#define BOOT_MEM_ROM_DATA 2
#define BOOT_MEM_RESERVED 3
#define BOOT_MEM_INIT_RAM 4
/*
* A memory map that's built upon what was determined
* or specified on the command line.
*/
struct boot_mem_map {
int nr_map;
struct boot_mem_map_entry {
phys_t addr; /* start of memory segment */
phys_t size; /* size of memory segment */
long type; /* type of memory segment */
} map[BOOT_MEM_MAP_MAX];
};
extern struct boot_mem_map boot_mem_map;
extern void add_memory_region(phys_t start, phys_t size, long type);
extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max);
extern void prom_init(void);
extern void prom_free_prom_memory(void);
extern void free_init_pages(const char *what,
unsigned long begin, unsigned long end);
extern void (*free_init_pages_eva)(void *begin, void *end);
/*
* Initial kernel command line, usually setup by prom_init()
*/
extern char arcs_cmdline[COMMAND_LINE_SIZE];
/*
* Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware
*/
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
/*
* Platform memory detection hook called by setup_arch
*/
extern void plat_mem_setup(void);
#ifdef CONFIG_SWIOTLB
/*
* Optional platform hook to call swiotlb_setup().
*/
extern void plat_swiotlb_setup(void);
#else
static inline void plat_swiotlb_setup(void) {}
#endif /* CONFIG_SWIOTLB */
#endif /* _ASM_BOOTINFO_H */
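
As a hedged sketch with made-up addresses and sizes: a platform's plat_mem_setup() typically feeds the firmware-provided memory layout into boot_mem_map through add_memory_region().

/* Hypothetical plat_mem_setup() registering a board's memory layout. */
#include <linux/init.h>
#include <asm/bootinfo.h>

void __init plat_mem_setup(void)
{
	/* 128 MB of RAM starting at physical 0 (illustrative numbers) */
	add_memory_region(0x00000000, 0x08000000, BOOT_MEM_RAM);
	/* 4 MB boot ROM window at the usual MIPS reset address */
	add_memory_region(0x1fc00000, 0x00400000, BOOT_MEM_ROM_DATA);
}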

View file

@@ -0,0 +1,103 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1997, 1998, 2001 by Ralf Baechle
*/
#ifndef _ASM_BRANCH_H
#define _ASM_BRANCH_H
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <asm/inst.h>
extern int __isa_exception_epc(struct pt_regs *regs);
extern int __compute_return_epc(struct pt_regs *regs);
extern int __compute_return_epc_for_insn(struct pt_regs *regs,
union mips_instruction insn);
extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
/*
* microMIPS bitfields
*/
#define MM_POOL32A_MINOR_MASK 0x3f
#define MM_POOL32A_MINOR_SHIFT 0x6
#define MM_MIPS32_COND_FC 0x30
extern int __mm_isBranchInstr(struct pt_regs *regs,
struct mm_decoded_insn dec_insn, unsigned long *contpc);
static inline int mm_isBranchInstr(struct pt_regs *regs,
struct mm_decoded_insn dec_insn, unsigned long *contpc)
{
if (!cpu_has_mmips)
return 0;
return __mm_isBranchInstr(regs, dec_insn, contpc);
}
static inline int delay_slot(struct pt_regs *regs)
{
return regs->cp0_cause & CAUSEF_BD;
}
static inline void clear_delay_slot(struct pt_regs *regs)
{
regs->cp0_cause &= ~CAUSEF_BD;
}
static inline void set_delay_slot(struct pt_regs *regs)
{
regs->cp0_cause |= CAUSEF_BD;
}
static inline unsigned long exception_epc(struct pt_regs *regs)
{
if (likely(!delay_slot(regs)))
return regs->cp0_epc;
if (get_isa16_mode(regs->cp0_epc))
return __isa_exception_epc(regs);
return regs->cp0_epc + 4;
}
#define BRANCH_LIKELY_TAKEN 0x0001
static inline int compute_return_epc(struct pt_regs *regs)
{
if (get_isa16_mode(regs->cp0_epc)) {
if (cpu_has_mmips)
return __microMIPS_compute_return_epc(regs);
if (cpu_has_mips16)
return __MIPS16e_compute_return_epc(regs);
return regs->cp0_epc;
}
if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
}
return __compute_return_epc(regs);
}
static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
union mips16e_instruction *inst)
{
if (likely(!delay_slot(regs))) {
if (inst->ri.opcode == MIPS16e_extend_op) {
regs->cp0_epc += 4;
return 0;
}
regs->cp0_epc += 2;
return 0;
}
return __MIPS16e_compute_return_epc(regs);
}
#endif /* _ASM_BRANCH_H */
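
A small sketch (hypothetical helper name) of the usual calling pattern: after emulating or skipping an instruction, compute_return_epc() advances cp0_epc, following the branch target when the instruction sat in a delay slot, and returns a negative value if the EPC could not be updated.

/* Hypothetical caller advancing the EPC with the helper above. */
#include <linux/printk.h>
#include <asm/branch.h>
#include <asm/ptrace.h>

static void my_skip_insn(struct pt_regs *regs)
{
	if (compute_return_epc(regs) < 0)
		pr_err("my_skip_insn: could not compute return EPC\n");
}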

View file

@@ -0,0 +1,26 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 2003 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#ifndef __ASM_BREAK_H
#define __ASM_BREAK_H
#ifdef __UAPI_ASM_BREAK_H
#error "Error: Do not directly include <uapi/asm/break.h>"
#endif
#include <uapi/asm/break.h>
/*
* Break codes used internally to the kernel.
*/
#define BRK_KDB 513 /* Used in KDB_ENTER() */
#define BRK_MEMU 514 /* Used by FPU emulator */
#define BRK_KPROBE_BP 515 /* Kprobe break */
#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
#define BRK_MULOVF 1023 /* Multiply overflow */
#endif /* __ASM_BREAK_H */

View file

@@ -0,0 +1,43 @@
#ifndef __ASM_BUG_H
#define __ASM_BUG_H
#include <linux/compiler.h>
#include <asm/sgidefs.h>
#ifdef CONFIG_BUG
#include <asm/break.h>
static inline void __noreturn BUG(void)
{
__asm__ __volatile__("break %0" : : "i" (BRK_BUG));
unreachable();
}
#define HAVE_ARCH_BUG
#if (_MIPS_ISA > _MIPS_ISA_MIPS1)
static inline void __BUG_ON(unsigned long condition)
{
if (__builtin_constant_p(condition)) {
if (condition)
BUG();
else
return;
}
__asm__ __volatile__("tne $0, %0, %1"
: : "r" (condition), "i" (BRK_BUG));
}
#define BUG_ON(C) __BUG_ON((unsigned long)(C))
#define HAVE_ARCH_BUG_ON
#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */
#endif
#include <asm-generic/bug.h>
#endif /* __ASM_BUG_H */
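
A one-line usage sketch (hypothetical function): on MIPS II and later the BUG_ON() defined above compiles down to a single conditional trap (tne) rather than a branch around a break.

/* Hypothetical sanity check built on the BUG_ON() defined above. */
#include <linux/bug.h>

static inline void my_check_aligned(unsigned long addr)
{
	BUG_ON(addr & (sizeof(long) - 1));	/* trap if addr is not long-aligned */
}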

View file

@@ -0,0 +1,54 @@
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Copyright (C) 2007 Maciej W. Rozycki
*
* Needs:
* void check_bugs(void);
*/
#ifndef _ASM_BUGS_H
#define _ASM_BUGS_H
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
extern int daddiu_bug;
extern void check_bugs64_early(void);
extern void check_bugs32(void);
extern void check_bugs64(void);
static inline void check_bugs_early(void)
{
#ifdef CONFIG_64BIT
check_bugs64_early();
#endif
}
static inline void check_bugs(void)
{
unsigned int cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
check_bugs32();
#ifdef CONFIG_64BIT
check_bugs64();
#endif
}
static inline int r4k_daddiu_bug(void)
{
#ifdef CONFIG_64BIT
WARN_ON(daddiu_bug < 0);
return daddiu_bug != 0;
#else
return 0;
#endif
}
#endif /* _ASM_BUGS_H */

View file

@@ -0,0 +1,22 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997, 98, 99, 2000, 2003 Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
#include <kmalloc.h>
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _ASM_CACHE_H */

View file

@@ -0,0 +1,147 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
* Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
*/
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <asm/cpu-features.h>
/* Cache flushing:
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
 * - flush_cache_dup_mm(mm) handles cache flushing when forking
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
* - flush_icache_range(start, end) flush a range of instructions
 * - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
*
* MIPS specific flush operations:
*
* - flush_cache_sigtramp() flush signal trampoline
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
*/
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
__flush_dcache_page(page);
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vmaddr)
{
if (cpu_has_dc_aliases && PageAnon(page))
__flush_anon_page(page, vmaddr);
}
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_cache_vmap)(void);
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
if (cpu_has_dc_aliases)
__flush_cache_vmap();
}
extern void (*__flush_cache_vunmap)(void);
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (cpu_has_dc_aliases)
__flush_cache_vunmap();
}
extern void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
#define Page_dcache_dirty(page) \
test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page) \
set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page) \
clear_bit(PG_dcache_dirty, &(page)->flags)
/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);
static inline void kunmap_noncoherent(void)
{
kunmap_coherent();
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
}
/*
* For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
* cache writeback and invalidate operation.
*/
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
if (cpu_has_dc_aliases)
__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
if (cpu_has_dc_aliases)
__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
#endif /* _ASM_CACHEFLUSH_H */
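
Sketch, with invented names: code that writes instructions into memory (for example a trampoline) must call flush_icache_range() afterwards so the new instructions become visible to the instruction cache; the call goes through the indirect function pointer declared above.

/* Hypothetical trampoline installer using the cache maintenance hooks above. */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void my_install_trampoline(u32 *dst, const u32 *src, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++)
		dst[i] = src[i];		/* copy the new instructions */

	/* write back the D-cache and invalidate the I-cache for that range */
	flush_icache_range((unsigned long)dst, (unsigned long)(dst + words));
}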

View file

@@ -0,0 +1,88 @@
/*
* Cache operations for the cache instruction.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle
* (C) Copyright 1999 Silicon Graphics, Inc.
*/
#ifndef __ASM_CACHEOPS_H
#define __ASM_CACHEOPS_H
/*
* Cache Operations available on all MIPS processors with R4000-style caches
*/
#define Index_Invalidate_I 0x00
#define Index_Writeback_Inv_D 0x01
#define Index_Load_Tag_I 0x04
#define Index_Load_Tag_D 0x05
#define Index_Store_Tag_I 0x08
#define Index_Store_Tag_D 0x09
#define Hit_Invalidate_I 0x10
#define Hit_Invalidate_D 0x11
#define Hit_Writeback_Inv_D 0x15
/*
* R4000-specific cacheops
*/
#define Create_Dirty_Excl_D 0x0d
#define Fill 0x14
#define Hit_Writeback_I 0x18
#define Hit_Writeback_D 0x19
/*
* R4000SC and R4400SC-specific cacheops
*/
#define Index_Invalidate_SI 0x02
#define Index_Writeback_Inv_SD 0x03
#define Index_Load_Tag_SI 0x06
#define Index_Load_Tag_SD 0x07
#define Index_Store_Tag_SI 0x0A
#define Index_Store_Tag_SD 0x0B
#define Create_Dirty_Excl_SD 0x0f
#define Hit_Invalidate_SI 0x12
#define Hit_Invalidate_SD 0x13
#define Hit_Writeback_Inv_SD 0x17
#define Hit_Writeback_SD 0x1b
#define Hit_Set_Virtual_SI 0x1e
#define Hit_Set_Virtual_SD 0x1f
/*
* R5000-specific cacheops
*/
#define R5K_Page_Invalidate_S 0x17
/*
* RM7000-specific cacheops
*/
#define Page_Invalidate_T 0x16
#define Index_Store_Tag_T 0x0a
#define Index_Load_Tag_T 0x06
/*
* R10000-specific cacheops
*
* Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
* Most of the _S cacheops are identical to the R4000SC _SD cacheops.
*/
#define Index_Writeback_Inv_S 0x03
#define Index_Load_Tag_S 0x07
#define Index_Store_Tag_S 0x0B
#define Hit_Invalidate_S 0x13
#define Cache_Barrier 0x14
#define Hit_Writeback_Inv_S 0x17
#define Index_Load_Data_I 0x18
#define Index_Load_Data_D 0x19
#define Index_Load_Data_S 0x1b
#define Index_Store_Data_I 0x1c
#define Index_Store_Data_D 0x1d
#define Index_Store_Data_S 0x1f
/*
* Loongson2-specific cacheops
*/
#define Hit_Invalidate_I_Loongson2 0x00
#endif /* __ASM_CACHEOPS_H */
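
As a hedged illustration, assuming the cache_op() helper from <asm/r4kcache.h> (the same helper used by the BMIPS header earlier in this series): one of the opcodes above is issued against a single virtual address like this.

/* Hypothetical single-line writeback+invalidate using an opcode from above. */
#include <asm/cacheops.h>
#include <asm/r4kcache.h>

static void my_wback_inv_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);	/* write back + invalidate one D-cache line */
}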

View file

@@ -0,0 +1,49 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Kevin D. Kissell
*/
/*
* Definitions used for common event timer implementation
* for MIPS 4K-type processors and their MIPS MT variants.
* Avoids unsightly extern declarations in C files.
*/
#ifndef __ASM_CEVT_R4K_H
#define __ASM_CEVT_R4K_H
#include <linux/clockchips.h>
#include <asm/time.h>
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
void mips_event_handler(struct clock_event_device *dev);
int c0_compare_int_usable(void);
void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
irqreturn_t c0_compare_interrupt(int, void *);
extern struct irqaction c0_compare_irqaction;
extern int cp0_timer_irq_installed;
/*
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked
*/
static inline int handle_perf_irq(int r2)
{
/*
* The performance counter overflow interrupt may be shared with the
* timer interrupt (cp0_perfcount_irq < 0). If it is and a
* performance counter has overflowed (perf_irq() == IRQ_HANDLED)
* and we can't reliably determine if a counter interrupt has also
* happened (!r2) then don't check for a timer interrupt.
*/
return (cp0_perfcount_irq < 0) &&
perf_irq() == IRQ_HANDLED &&
!r2;
}
#endif /* __ASM_CEVT_R4K_H */
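
Sketch only (hypothetical handler, simplified relative to the real c0_compare_interrupt()): a timer interrupt handler first lets handle_perf_irq() swallow pure performance-counter interrupts, then delivers the clockevent.

/* Hypothetical, simplified timer interrupt handler using the helper above. */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <asm/cevt-r4k.h>

static irqreturn_t my_c0_compare_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *cd = dev_id;

	if (handle_perf_irq(0))
		return IRQ_HANDLED;	/* only the perf counter overflowed */

	cd->event_handler(cd);		/* deliver the timer tick */
	return IRQ_HANDLED;
}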

View file

@@ -0,0 +1,290 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2001 Thiemo Seufer.
* Copyright (C) 2002 Maciej W. Rozycki
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
#include <linux/in6.h>
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
__wsum __csum_partial_copy_kernel(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_from_user(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_to_user(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
/*
 * this is a new version of the above that records errors it finds in *err_ptr,
* but continues and zeros the rest of the buffer.
*/
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
if (segment_eq(get_fs(), get_ds()))
return __csum_partial_copy_kernel((__force void *)src, dst,
len, sum, err_ptr);
else
return __csum_partial_copy_from_user((__force void *)src, dst,
len, sum, err_ptr);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(VERIFY_READ, src, len))
return csum_partial_copy_from_user(src, dst, len, sum,
err_ptr);
if (len)
*err_ptr = -EFAULT;
return sum;
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
if (access_ok(VERIFY_WRITE, dst, len)) {
if (segment_eq(get_fs(), get_ds()))
return __csum_partial_copy_kernel(src,
(__force void *)dst,
len, sum, err_ptr);
else
return __csum_partial_copy_to_user(src,
(__force void *)dst,
len, sum, err_ptr);
}
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
/*
* the same as csum_partial, but copies from user space (but on MIPS
* we have just one address space, so this is identical to the above)
*/
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/*
* Fold a partial checksum without adding pseudo headers
*/
static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
" .set push # csum_fold\n"
" .set noat \n"
" sll $1, %0, 16 \n"
" addu %0, $1 \n"
" sltu $1, %0, $1 \n"
" srl %0, %0, 16 \n"
" addu %0, $1 \n"
" xori %0, 0xffff \n"
" .set pop"
: "=r" (sum)
: "0" (sum));
return (__force __sum16)sum;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
const unsigned int *word = iph;
const unsigned int *stop = word + ihl;
unsigned int csum;
int carry;
csum = word[0];
csum += word[1];
carry = (csum < word[1]);
csum += carry;
csum += word[2];
carry = (csum < word[2]);
csum += carry;
csum += word[3];
carry = (csum < word[3]);
csum += carry;
word += 4;
do {
csum += *word;
carry = (csum < *word);
csum += carry;
word++;
} while (word != stop);
return csum_fold(csum);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr,
__be32 daddr, unsigned short len, unsigned short proto,
__wsum sum)
{
__asm__(
" .set push # csum_tcpudp_nofold\n"
" .set noat \n"
#ifdef CONFIG_32BIT
" addu %0, %2 \n"
" sltu $1, %0, %2 \n"
" addu %0, $1 \n"
" addu %0, %3 \n"
" sltu $1, %0, %3 \n"
" addu %0, $1 \n"
" addu %0, %4 \n"
" sltu $1, %0, %4 \n"
" addu %0, $1 \n"
#endif
#ifdef CONFIG_64BIT
" daddu %0, %2 \n"
" daddu %0, %3 \n"
" daddu %0, %4 \n"
" dsll32 $1, %0, 0 \n"
" daddu %0, $1 \n"
" dsra32 %0, %0, 0 \n"
#endif
" .set pop"
: "=r" (sum)
: "0" ((__force unsigned long)daddr),
"r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
"r" ((proto + len) << 8),
#else
"r" (proto + len),
#endif
"r" ((__force unsigned long)sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__(
" .set push # csum_ipv6_magic\n"
" .set noreorder \n"
" .set noat \n"
" addu %0, %5 # proto (long in network byte order)\n"
" sltu $1, %0, %5 \n"
" addu %0, $1 \n"
" addu %0, %6 # csum\n"
" sltu $1, %0, %6 \n"
" lw %1, 0(%2) # four words source address\n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 4(%2) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 8(%2) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 12(%2) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 0(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 4(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 8(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 12(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" addu %0, $1 # Add final carry\n"
" .set pop"
: "=r" (sum), "=r" (proto)
: "r" (saddr), "r" (daddr),
"0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
return csum_fold(sum);
}
#endif /* _ASM_CHECKSUM_H */
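
A short usage sketch with invented names: the Internet checksum of a UDP datagram is the partial sum over header plus payload folded together with the pseudo header; IPPROTO_UDP is spelled out as 17 to keep the example self-contained.

/* Hypothetical UDP checksum computation using the helpers above. */
#include <linux/types.h>
#include <asm/checksum.h>

static __sum16 my_udp_checksum(__be32 saddr, __be32 daddr,
			       const void *udp_hdr_and_payload, int len)
{
	__wsum sum = csum_partial(udp_hdr_and_payload, len, 0);

	/* fold in the pseudo header: addresses, length and protocol (UDP = 17) */
	return csum_tcpudp_magic(saddr, daddr, len, 17, sum);
}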

View file

@@ -0,0 +1,27 @@
/*
* based on arch/arm/include/asm/clkdev.h
*
* Copyright (C) 2008 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __ASM_CLKDEV_H
#define __ASM_CLKDEV_H
#include <linux/slab.h>
#ifndef CONFIG_COMMON_CLK
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
#endif
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
return kzalloc(size, GFP_KERNEL);
}
#endif

View file

@@ -0,0 +1,51 @@
#ifndef __ASM_MIPS_CLOCK_H
#define __ASM_MIPS_CLOCK_H
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
struct clk;
struct clk_ops {
void (*init) (struct clk *clk);
void (*enable) (struct clk *clk);
void (*disable) (struct clk *clk);
void (*recalc) (struct clk *clk);
int (*set_rate) (struct clk *clk, unsigned long rate, int algo_id);
long (*round_rate) (struct clk *clk, unsigned long rate);
};
struct clk {
struct list_head node;
const char *name;
int id;
struct module *owner;
struct clk *parent;
struct clk_ops *ops;
struct kref kref;
unsigned long rate;
unsigned long flags;
};
#define CLK_ALWAYS_ENABLED (1 << 0)
#define CLK_RATE_PROPAGATES (1 << 1)
/* Should be defined by processor-specific code */
void arch_init_clk_ops(struct clk_ops **, int type);
int clk_init(void);
int __clk_enable(struct clk *);
void __clk_disable(struct clk *);
void clk_recalc_rate(struct clk *);
int clk_register(struct clk *);
void clk_unregister(struct clk *);
#endif /* __ASM_MIPS_CLOCK_H */
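
Sketch, with a made-up clock: platform code describes each clock with a struct clk and hands it to clk_register(); the rate and flags below are purely illustrative.

/* Hypothetical platform clock registered with the API above. */
#include <linux/init.h>
#include <asm/clock.h>

static struct clk my_cpu_clk = {
	.name	= "my_cpu_clk",
	.rate	= 200000000,	/* 200 MHz, illustrative */
	.flags	= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static int __init my_clk_setup(void)
{
	return clk_register(&my_cpu_clk);
}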

View file

@@ -0,0 +1,17 @@
#ifndef _ASM_CMP_H
#define _ASM_CMP_H
/*
* Definitions for CMP multitasking on MIPS cores
*/
struct task_struct;
extern void cmp_smp_setup(void);
extern void cmp_smp_finish(void);
extern void cmp_boot_secondary(int cpu, struct task_struct *t);
extern void cmp_init_secondary(void);
extern void cmp_prepare_cpus(unsigned int max_cpus);
/* This is platform specific */
extern void cmp_send_ipi(int cpu, unsigned int action);
#endif /* _ASM_CMP_H */

View file

@@ -0,0 +1,246 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
*/
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/war.h>
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long dummy;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set arch=r4000 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set arch=r4000 \n"
" sc %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
unsigned long flags;
raw_local_irq_save(flags);
retval = *m;
*m = val;
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_llsc_mb();
return retval;
}
#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
__u64 retval;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long dummy;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
do {
__asm__ __volatile__(
" .set arch=r4000 \n"
" lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
unsigned long flags;
raw_local_irq_save(flags);
retval = *m;
*m = val;
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_llsc_mb();
return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return __xchg_u32(ptr, x);
case 8:
return __xchg_u64(ptr, x);
}
return x;
}
#define xchg(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
\
((__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
#define __HAVE_ARCH_CMPXCHG 1
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
\
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set arch=r4000 \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set arch=r4000 \n" \
" " st " $1, %1 \n" \
" beqzl $1, 1b \n" \
"2: \n" \
" .set pop \n" \
: "=&r" (__ret), "=R" (*m) \
: "R" (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
} else if (kernel_uses_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set arch=r4000 \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set arch=r4000 \n" \
" " st " $1, %1 \n" \
" beqz $1, 1b \n" \
" .set pop \n" \
"2: \n" \
: "=&r" (__ret), "=R" (*m) \
: "R" (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
} else { \
unsigned long __flags; \
\
raw_local_irq_save(__flags); \
__ret = *m; \
if (__ret == old) \
*m = new; \
raw_local_irq_restore(__flags); \
} \
\
__ret; \
})
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern void __cmpxchg_called_with_bad_pointer(void);
#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __res = 0; \
\
pre_barrier; \
\
switch (sizeof(*(__ptr))) { \
case 4: \
__res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
break; \
case 8: \
if (sizeof(long) == 8) { \
__res = __cmpxchg_asm("lld", "scd", __ptr, \
__old, __new); \
break; \
} \
default: \
__cmpxchg_called_with_bad_pointer(); \
break; \
} \
\
post_barrier; \
\
__res; \
})
#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, , )
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#endif /* __ASM_CMPXCHG_H */
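
A classic usage sketch (hypothetical helper): a bounded, lock-free increment retries cmpxchg() until the compare succeeds or the limit is reached; this is the retry pattern the macros above are designed for.

/* Hypothetical lock-free bounded increment built on cmpxchg() from above. */
#include <linux/compiler.h>
#include <asm/cmpxchg.h>

static int my_inc_below(int *counter, int limit)
{
	int old;

	do {
		old = ACCESS_ONCE(*counter);
		if (old >= limit)
			return 0;		/* already at the limit, give up */
	} while (cmpxchg(counter, old, old + 1) != old);

	return 1;			/* successfully incremented */
}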

View file

@@ -0,0 +1,57 @@
#ifndef __ASM_COMPAT_SIGNAL_H
#define __ASM_COMPAT_SIGNAL_H
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <asm/signal.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d,
const sigset_t *s)
{
int err;
BUG_ON(sizeof(*d) != sizeof(*s));
BUG_ON(_NSIG_WORDS != 2);
err = __put_user(s->sig[0], &d->sig[0]);
err |= __put_user(s->sig[0] >> 32, &d->sig[1]);
err |= __put_user(s->sig[1], &d->sig[2]);
err |= __put_user(s->sig[1] >> 32, &d->sig[3]);
return err;
}
static inline int __copy_conv_sigset_from_user(sigset_t *d,
const compat_sigset_t __user *s)
{
int err;
union sigset_u {
sigset_t s;
compat_sigset_t c;
} *u = (union sigset_u *) d;
BUG_ON(sizeof(*d) != sizeof(*s));
BUG_ON(_NSIG_WORDS != 2);
#ifdef CONFIG_CPU_BIG_ENDIAN
err = __get_user(u->c.sig[1], &s->sig[0]);
err |= __get_user(u->c.sig[0], &s->sig[1]);
err |= __get_user(u->c.sig[3], &s->sig[2]);
err |= __get_user(u->c.sig[2], &s->sig[3]);
#endif
#ifdef CONFIG_CPU_LITTLE_ENDIAN
err = __get_user(u->c.sig[0], &s->sig[0]);
err |= __get_user(u->c.sig[1], &s->sig[1]);
err |= __get_user(u->c.sig[2], &s->sig[2]);
err |= __get_user(u->c.sig[3], &s->sig[3]);
#endif
return err;
}
#endif /* __ASM_COMPAT_SIGNAL_H */

View file

@@ -0,0 +1,304 @@
#ifndef _ASM_COMPAT_H
#define _ASM_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/thread_info.h>
#include <linux/types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "mips\0\0\0"
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_suseconds_t;
typedef s32 compat_pid_t;
typedef s32 __compat_uid_t;
typedef s32 __compat_gid_t;
typedef __compat_uid_t __compat_uid32_t;
typedef __compat_gid_t __compat_gid32_t;
typedef u32 compat_mode_t;
typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u32 compat_nlink_t;
typedef s32 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef s32 compat_caddr_t;
typedef struct {
s32 val[2];
} compat_fsid_t;
typedef s32 compat_timer_t;
typedef s32 compat_key_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
s32 st_pad1[3];
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
s32 st_pad2[2];
compat_off_t st_size;
s32 st_pad3;
compat_time_t st_atime;
s32 st_atime_nsec;
compat_time_t st_mtime;
s32 st_mtime_nsec;
compat_time_t st_ctime;
s32 st_ctime_nsec;
s32 st_blksize;
s32 st_blocks;
s32 st_pad4[14];
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
s32 l_sysid;
compat_pid_t l_pid;
short __unused;
s32 pad[4];
};
#define F_GETLK64 33
#define F_SETLK64 34
#define F_SETLKW64 35
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};
struct compat_statfs {
int f_type;
int f_bsize;
int f_frsize;
int f_blocks;
int f_bfree;
int f_files;
int f_ffree;
int f_bavail;
compat_fsid_t f_fsid;
int f_namelen;
int f_flags;
int f_spare[5];
};
#define COMPAT_RLIM_INFINITY 0x7fffffffUL
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
} compat_sigval_t;
#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
typedef struct compat_siginfo {
int si_signo;
int si_code;
int si_errno;
union {
int _pad[SI_PAD_SIZE32];
/* kill() */
struct {
compat_pid_t _pid; /* sender's pid */
__compat_uid_t _uid; /* sender's uid */
} _kill;
/* SIGCHLD */
struct {
compat_pid_t _pid; /* which child */
__compat_uid_t _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* IRIX SIGCHLD */
struct {
compat_pid_t _pid; /* which child */
compat_clock_t _utime;
int _status; /* exit code */
compat_clock_t _stime;
} _irix_sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
s32 _addr; /* faulting insn/memory ref. */
} _sigfault;
/* SIGPOLL, SIGXFSZ (To do ...) */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
/* POSIX.1b timers */
struct {
timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval;/* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
compat_pid_t _pid; /* sender's pid */
__compat_uid_t _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
} _sifields;
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
/* cast to a __user pointer via "unsigned long" makes sparse happy */
return (void __user *)(unsigned long)(long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = (struct pt_regs *)
((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
return (void __user *) (regs->regs[29] - len);
}
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
compat_mode_t mode;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t __unused1;
compat_ulong_t __unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_time_t sem_ctime;
compat_ulong_t sem_nsems;
compat_ulong_t __unused1;
compat_ulong_t __unused2;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
compat_ulong_t __unused1;
#endif
compat_time_t msg_stime;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
compat_ulong_t __unused1;
#endif
#ifndef CONFIG_CPU_LITTLE_ENDIAN
compat_ulong_t __unused2;
#endif
compat_time_t msg_rtime;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
compat_ulong_t __unused2;
#endif
#ifndef CONFIG_CPU_LITTLE_ENDIAN
compat_ulong_t __unused3;
#endif
compat_time_t msg_ctime;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
compat_ulong_t __unused3;
#endif
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_time_t shm_dtime;
compat_time_t shm_ctime;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused1;
compat_ulong_t __unused2;
};
/* MIPS has unusual order of fields in stack_t */
typedef struct compat_sigaltstack {
compat_uptr_t ss_sp;
compat_size_t ss_size;
int ss_flags;
} compat_stack_t;
#define compat_sigaltstack compat_sigaltstack
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */
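
Sketch, with invented names: 32-bit user pointers arrive in 64-bit kernel code as compat_uptr_t and are widened with compat_ptr() before any uaccess call, as in this hypothetical compat helper.

/* Hypothetical compat handler converting a 32-bit user pointer. */
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/uaccess.h>

static long my_compat_get_value(compat_uptr_t arg32)
{
	void __user *argp = compat_ptr(arg32);	/* widen the 32-bit pointer */
	u32 value = 123;			/* illustrative payload */

	if (copy_to_user(argp, &value, sizeof(value)))
		return -EFAULT;
	return 0;
}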

View file

@@ -0,0 +1,19 @@
/*
* Copyright (C) 2004, 2007 Maciej W. Rozycki
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_COMPILER_H
#define _ASM_COMPILER_H
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
#define GCC_IMM_ASM() "n"
#define GCC_REG_ACCUM "$0"
#else
#define GCC_IMM_ASM() "rn"
#define GCC_REG_ACCUM "accum"
#endif
#endif /* _ASM_COMPILER_H */

View file

@@ -0,0 +1,72 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#ifndef __ASM_COP2_H
#define __ASM_COP2_H
#include <linux/notifier.h>
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
extern void octeon_cop2_save(struct octeon_cop2_state *);
extern void octeon_cop2_restore(struct octeon_cop2_state *);
#define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2)
#define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2)
#define cop2_present 1
#define cop2_lazy_restore 1
#elif defined(CONFIG_CPU_XLP)
extern void nlm_cop2_save(struct nlm_cop2_state *);
extern void nlm_cop2_restore(struct nlm_cop2_state *);
#define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2)
#define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2)
#define cop2_present 1
#define cop2_lazy_restore 0
#elif defined(CONFIG_CPU_LOONGSON3)
#define cop2_present 1
#define cop2_lazy_restore 1
#define cop2_save(r) do { (void)(r); } while (0)
#define cop2_restore(r) do { (void)(r); } while (0)
#else
#define cop2_present 0
#define cop2_lazy_restore 0
#define cop2_save(r) do { (void)(r); } while (0)
#define cop2_restore(r) do { (void)(r); } while (0)
#endif
enum cu2_ops {
CU2_EXCEPTION,
CU2_LWC2_OP,
CU2_LDC2_OP,
CU2_SWC2_OP,
CU2_SDC2_OP,
};
extern int register_cu2_notifier(struct notifier_block *nb);
extern int cu2_notifier_call_chain(unsigned long val, void *v);
#define cu2_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
\
register_cu2_notifier(&fn##_nb); \
})
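/*
* Illustrative sketch (hypothetical board code, not part of this header):
* a platform that owns coprocessor 2 can catch CU2 exceptions and claim
* them before the generic code raises SIGILL.  my_cu2_call is a made-up
* name used purely for illustration:
*
*	static int my_cu2_call(struct notifier_block *nfb,
*			       unsigned long action, void *data)
*	{
*		if (action != CU2_EXCEPTION)
*			return NOTIFY_OK;
*		-- enable CP2 for current and restore its saved context here
*		return NOTIFY_STOP;	-- handled, stop the notifier chain
*	}
*
*	cu2_notifier(my_cu2_call, 0);
*/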
#endif /* __ASM_COP2_H */

View file

@ -0,0 +1,347 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 2004 Ralf Baechle
* Copyright (C) 2004 Maciej W. Rozycki
*/
#ifndef __ASM_CPU_FEATURES_H
#define __ASM_CPU_FEATURES_H
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <cpu-feature-overrides.h>
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
*/
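/*
* Illustrative sketch (hypothetical platform header, not part of this file):
* every cpu_has_* below is only a default.  A platform's
* <cpu-feature-overrides.h>, included above, may pin features to
* compile-time constants first so the run-time cpu_data[0] test is
* optimised away, e.g.:
*
*	#define cpu_has_tlb	1
*	#define cpu_has_fpu	0
*	#define cpu_has_mips16	0
*
* Anything it leaves undefined falls through to the probes below.
*/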
#ifndef cpu_has_tlb
#define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB)
#endif
#ifndef cpu_has_tlbinv
#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
#endif
#ifndef cpu_has_segments
#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
#endif
#ifndef cpu_has_eva
#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
#endif
#ifndef cpu_has_htw
#define cpu_has_htw (cpu_data[0].options & MIPS_CPU_HTW)
#endif
#ifndef cpu_has_rixiex
#define cpu_has_rixiex (cpu_data[0].options & MIPS_CPU_RIXIEX)
#endif
#ifndef cpu_has_maar
#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR)
#endif
/*
* For the moment we don't consider R6000 and R8000 so we can assume that
* anything that doesn't support R4000-style exceptions and interrupts is
* R3000-like. Users should still treat these two macro definitions as
* opaque.
*/
#ifndef cpu_has_3kex
#define cpu_has_3kex (!cpu_has_4kex)
#endif
#ifndef cpu_has_4kex
#define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX)
#endif
#ifndef cpu_has_3k_cache
#define cpu_has_3k_cache (cpu_data[0].options & MIPS_CPU_3K_CACHE)
#endif
#define cpu_has_6k_cache 0
#define cpu_has_8k_cache 0
#ifndef cpu_has_4k_cache
#define cpu_has_4k_cache (cpu_data[0].options & MIPS_CPU_4K_CACHE)
#endif
#ifndef cpu_has_tx39_cache
#define cpu_has_tx39_cache (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
#endif
#ifndef cpu_has_fpu
#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
#else
#define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
#define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR)
#endif
#ifndef cpu_has_counter
#define cpu_has_counter (cpu_data[0].options & MIPS_CPU_COUNTER)
#endif
#ifndef cpu_has_watch
#define cpu_has_watch (cpu_data[0].options & MIPS_CPU_WATCH)
#endif
#ifndef cpu_has_divec
#define cpu_has_divec (cpu_data[0].options & MIPS_CPU_DIVEC)
#endif
#ifndef cpu_has_vce
#define cpu_has_vce (cpu_data[0].options & MIPS_CPU_VCE)
#endif
#ifndef cpu_has_cache_cdex_p
#define cpu_has_cache_cdex_p (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_P)
#endif
#ifndef cpu_has_cache_cdex_s
#define cpu_has_cache_cdex_s (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
#endif
#ifndef cpu_has_prefetch
#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
#endif
#ifndef cpu_has_mcheck
#define cpu_has_mcheck (cpu_data[0].options & MIPS_CPU_MCHECK)
#endif
#ifndef cpu_has_ejtag
#define cpu_has_ejtag (cpu_data[0].options & MIPS_CPU_EJTAG)
#endif
#ifndef cpu_has_llsc
#define cpu_has_llsc (cpu_data[0].options & MIPS_CPU_LLSC)
#endif
#ifndef kernel_uses_llsc
#define kernel_uses_llsc cpu_has_llsc
#endif
#ifndef cpu_has_mips16
#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mdmx
#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
#endif
#ifndef cpu_has_mips3d
#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
#endif
#ifndef cpu_has_smartmips
#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
#endif
#ifndef cpu_has_rixi
# ifdef CONFIG_64BIT
# define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
# else /* CONFIG_32BIT */
# define cpu_has_rixi ((cpu_data[0].options & MIPS_CPU_RIXI) && !cpu_has_64bits)
# endif
#endif
#ifndef cpu_has_mmips
# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
# else
# define cpu_has_mmips 0
# endif
#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
#ifndef cpu_has_dc_aliases
#define cpu_has_dc_aliases (cpu_data[0].dcache.flags & MIPS_CACHE_ALIASES)
#endif
#ifndef cpu_has_ic_fills_f_dc
#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
#endif
#ifndef cpu_has_pindexed_dcache
#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
#endif
#ifndef cpu_has_local_ebase
#define cpu_has_local_ebase 1
#endif
/*
* I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
* such as the R10000 have I-Caches that snoop local stores; the embedded ones
* don't. For maintaining I-cache coherency this means we need to flush the
* D-cache all the way back to wherever the I-cache does refills from, so the
* I-cache has a chance to see the new data at all. Then we have to flush the
* I-cache also.
* Note we may have been rescheduled and may no longer be running on the CPU
* that did the store so we can't optimize this into only doing the flush on
* the local CPU.
*/
#ifndef cpu_icache_snoops_remote_store
#ifdef CONFIG_SMP
#define cpu_icache_snoops_remote_store (cpu_data[0].icache.flags & MIPS_IC_SNOOPS_REMOTE)
#else
#define cpu_icache_snoops_remote_store 1
#endif
#endif
#ifndef cpu_has_mips_2
# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
#endif
#ifndef cpu_has_mips_3
# define cpu_has_mips_3 (cpu_data[0].isa_level & MIPS_CPU_ISA_III)
#endif
#ifndef cpu_has_mips_4
# define cpu_has_mips_4 (cpu_data[0].isa_level & MIPS_CPU_ISA_IV)
#endif
#ifndef cpu_has_mips_5
# define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V)
#endif
#ifndef cpu_has_mips32r1
# define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1)
#endif
#ifndef cpu_has_mips32r2
# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
#endif
#ifndef cpu_has_mips64r1
# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
#endif
#ifndef cpu_has_mips64r2
# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
#endif
/*
* Shortcuts ...
*/
#define cpu_has_mips_2_3_4_5 (cpu_has_mips_2 | cpu_has_mips_3_4_5)
#define cpu_has_mips_3_4_5 (cpu_has_mips_3 | cpu_has_mips_4_5)
#define cpu_has_mips_4_5 (cpu_has_mips_4 | cpu_has_mips_5)
#define cpu_has_mips_2_3_4_5_r (cpu_has_mips_2 | cpu_has_mips_3_4_5_r)
#define cpu_has_mips_3_4_5_r (cpu_has_mips_3 | cpu_has_mips_4_5_r)
#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2)
#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
cpu_has_mips64r1 | cpu_has_mips64r2)
#ifndef cpu_has_mips_r2_exec_hazard
#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
#endif
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
* pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
#ifndef cpu_has_clo_clz
#define cpu_has_clo_clz cpu_has_mips_r
#endif
/*
* MIPS32 R2, MIPS64 R2, Loongson 3A and Octeon have WSBH.
* MIPS64 R2, Loongson 3A and Octeon have WSBH, DSBH and DSHD.
* This indicates the availability of WSBH and in case of 64 bit CPUs also
* DSBH and DSHD.
*/
#ifndef cpu_has_wsbh
#define cpu_has_wsbh cpu_has_mips_r2
#endif
#ifndef cpu_has_dsp
#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
#endif
#ifndef cpu_has_dsp2
#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
#endif
#ifndef cpu_has_mipsmt
#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_userlocal
#define cpu_has_userlocal (cpu_data[0].options & MIPS_CPU_ULRI)
#endif
#ifdef CONFIG_32BIT
# ifndef cpu_has_nofpuex
# define cpu_has_nofpuex (cpu_data[0].options & MIPS_CPU_NOFPUEX)
# endif
# ifndef cpu_has_64bits
# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
# endif
# ifndef cpu_has_64bit_zero_reg
# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
# endif
# ifndef cpu_has_64bit_gp_regs
# define cpu_has_64bit_gp_regs 0
# endif
# ifndef cpu_has_64bit_addresses
# define cpu_has_64bit_addresses 0
# endif
# ifndef cpu_vmbits
# define cpu_vmbits 31
# endif
#endif
#ifdef CONFIG_64BIT
# ifndef cpu_has_nofpuex
# define cpu_has_nofpuex 0
# endif
# ifndef cpu_has_64bits
# define cpu_has_64bits 1
# endif
# ifndef cpu_has_64bit_zero_reg
# define cpu_has_64bit_zero_reg 1
# endif
# ifndef cpu_has_64bit_gp_regs
# define cpu_has_64bit_gp_regs 1
# endif
# ifndef cpu_has_64bit_addresses
# define cpu_has_64bit_addresses 1
# endif
# ifndef cpu_vmbits
# define cpu_vmbits cpu_data[0].vmbits
# define __NEED_VMBITS_PROBE
# endif
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
# define cpu_has_vint (cpu_data[0].options & MIPS_CPU_VINT)
#elif !defined(cpu_has_vint)
# define cpu_has_vint 0
#endif
#if defined(CONFIG_CPU_MIPSR2_IRQ_EI) && !defined(cpu_has_veic)
# define cpu_has_veic (cpu_data[0].options & MIPS_CPU_VEIC)
#elif !defined(cpu_has_veic)
# define cpu_has_veic 0
#endif
#ifndef cpu_has_inclusive_pcaches
#define cpu_has_inclusive_pcaches (cpu_data[0].options & MIPS_CPU_INCLUSIVE_CACHES)
#endif
#ifndef cpu_dcache_line_size
#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
#endif
#ifndef cpu_icache_line_size
#define cpu_icache_line_size() cpu_data[0].icache.linesz
#endif
#ifndef cpu_scache_line_size
#define cpu_scache_line_size() cpu_data[0].scache.linesz
#endif
#ifndef cpu_hwrena_impl_bits
#define cpu_hwrena_impl_bits 0
#endif
#ifndef cpu_has_perf_cntr_intr_bit
#define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI)
#endif
#ifndef cpu_has_vz
#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
#endif
#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
#elif !defined(cpu_has_msa)
# define cpu_has_msa 0
#endif
#endif /* __ASM_CPU_FEATURES_H */

View file

@ -0,0 +1,132 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 Waldorf GMBH
* Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
* Copyright (C) 1996 Paul M. Antoine
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Maciej W. Rozycki
*/
#ifndef __ASM_CPU_INFO_H
#define __ASM_CPU_INFO_H
#include <linux/types.h>
#include <asm/cache.h>
/*
* Descriptor for a cache
*/
struct cache_desc {
unsigned int waysize; /* Bytes per way */
unsigned short sets; /* Number of lines per set */
unsigned char ways; /* Number of ways */
unsigned char linesz; /* Size of line in bytes */
unsigned char waybit; /* Bits to select in a cache set */
unsigned char flags; /* Flags describing cache properties */
};
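/*
* Illustrative sketch (not part of the original header): total cache sizes
* are derived from the fields above rather than stored, e.g.:
*
*	struct cache_desc *d = &current_cpu_data.dcache;
*	unsigned int dcache_size = d->ways * d->waysize;
*						-- waysize == sets * linesz
*/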
/*
* Flag definitions
*/
#define MIPS_CACHE_NOT_PRESENT 0x00000001
#define MIPS_CACHE_VTAG 0x00000002 /* Virtually tagged cache */
#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */
#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */
#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */
#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
struct cpuinfo_mips {
unsigned long asid_cache;
/*
* Capability and feature descriptor structure for MIPS CPU
*/
unsigned long ases;
unsigned long long options;
unsigned int udelay_val;
unsigned int processor_id;
unsigned int fpu_id;
unsigned int msa_id;
unsigned int cputype;
int isa_level;
int tlbsize;
int tlbsizevtlb;
int tlbsizeftlbsets;
int tlbsizeftlbways;
struct cache_desc icache; /* Primary I-cache */
struct cache_desc dcache; /* Primary D or combined I/D cache */
struct cache_desc scache; /* Secondary cache */
struct cache_desc tcache; /* Tertiary/split secondary cache */
int srsets; /* Shadow register sets */
int package; /* physical package number */
int core; /* physical core number */
#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
#endif
#ifdef CONFIG_MIPS_MT_SMP
/*
* There is not necessarily a 1:1 mapping of VPE num to CPU number
* in particular on multi-core systems.
*/
int vpe_id; /* Virtual Processor number */
#endif
void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
unsigned int watch_reg_use_cnt; /* Usable by ptrace */
#define NUM_WATCH_REGS 4
u16 watch_reg_masks[NUM_WATCH_REGS];
unsigned int kscratch_mask; /* Usable KScratch mask. */
/*
* Cache Coherency attribute for write-combine memory writes.
* (shifted by _CACHE_SHIFT)
*/
unsigned int writecombine;
/*
* Simple counter to prevent enabling HTW in nested
* htw_start/htw_stop calls
*/
unsigned int htw_seq;
} __attribute__((aligned(SMP_CACHE_BYTES)));
extern struct cpuinfo_mips cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
#define boot_cpu_data cpu_data[0]
extern void cpu_probe(void);
extern void cpu_report(void);
extern const char *__cpu_name[];
#define cpu_name_string() __cpu_name[smp_processor_id()]
struct seq_file;
struct notifier_block;
extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
#define proc_cpuinfo_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
\
register_proc_cpuinfo_notifier(&fn##_nb); \
})
struct proc_cpuinfo_notifier_args {
struct seq_file *m;
unsigned long n;
};
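/*
* Illustrative sketch (hypothetical platform hook, not part of this header):
* appending a board-specific line to /proc/cpuinfo through the notifier
* chain above.  my_cpuinfo_show and "MyBoard" are made-up names; the caller
* would also include <linux/seq_file.h> and <linux/notifier.h>:
*
*	static int my_cpuinfo_show(struct notifier_block *nb,
*				   unsigned long action, void *data)
*	{
*		struct proc_cpuinfo_notifier_args *a = data;
*
*		seq_printf(a->m, "board\t\t\t: MyBoard, CPU %lu\n", a->n);
*		return NOTIFY_OK;
*	}
*
*	proc_cpuinfo_notifier(my_cpuinfo_show, 0);
*/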
#ifdef CONFIG_MIPS_MT_SMP
# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
#else
# define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; })
#endif
#endif /* __ASM_CPU_INFO_H */

View file

@ -0,0 +1,216 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 2004 Ralf Baechle
* Copyright (C) 2004 Maciej W. Rozycki
*/
#ifndef __ASM_CPU_TYPE_H
#define __ASM_CPU_TYPE_H
#include <linux/smp.h>
#include <linux/compiler.h>
static inline int __pure __get_cpu_type(const int cpu_type)
{
switch (cpu_type) {
#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
case CPU_LOONGSON2:
#endif
#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
case CPU_LOONGSON3:
#endif
#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
case CPU_LOONGSON1:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
case CPU_4KC:
case CPU_ALCHEMY:
case CPU_PR4450:
case CPU_JZRISC:
#endif
#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \
defined(CONFIG_SYS_HAS_CPU_MIPS32_R2)
case CPU_4KEC:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2
case CPU_4KSC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
case CPU_74K:
case CPU_M14KC:
case CPU_M14KEC:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_M5150:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
case CPU_5KC:
case CPU_5KE:
case CPU_20KC:
case CPU_25KF:
case CPU_SB1:
case CPU_SB1A:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R2
/*
* All MIPS64 R2 processors have their own special symbols. That is,
* there currently is no pure R2 core
*/
#endif
#ifdef CONFIG_SYS_HAS_CPU_R3000
case CPU_R2000:
case CPU_R3000:
case CPU_R3000A:
case CPU_R3041:
case CPU_R3051:
case CPU_R3052:
case CPU_R3081:
case CPU_R3081E:
#endif
#ifdef CONFIG_SYS_HAS_CPU_TX39XX
case CPU_TX3912:
case CPU_TX3922:
case CPU_TX3927:
#endif
#ifdef CONFIG_SYS_HAS_CPU_VR41XX
case CPU_VR41XX:
case CPU_VR4111:
case CPU_VR4121:
case CPU_VR4122:
case CPU_VR4131:
case CPU_VR4133:
case CPU_VR4181:
case CPU_VR4181A:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R4300
case CPU_R4300:
case CPU_R4310:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R4X00
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4200:
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
case CPU_R4600:
case CPU_R4700:
case CPU_R4640:
case CPU_R4650:
#endif
#ifdef CONFIG_SYS_HAS_CPU_TX49XX
case CPU_TX49XX:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R5000
case CPU_R5000:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R5432
case CPU_R5432:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R5500
case CPU_R5500:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R6000
case CPU_R6000:
case CPU_R6000A:
#endif
#ifdef CONFIG_SYS_HAS_CPU_NEVADA
case CPU_NEVADA:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R8000
case CPU_R8000:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R10000
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
#endif
#ifdef CONFIG_SYS_HAS_CPU_RM7000
case CPU_RM7000:
case CPU_SR71000:
#endif
#ifdef CONFIG_SYS_HAS_CPU_SB1
case CPU_SB1:
case CPU_SB1A:
#endif
#ifdef CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
#endif
#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
defined (CONFIG_SYS_HAS_CPU_MIPS32_R1)
case CPU_BMIPS32:
case CPU_BMIPS3300:
#endif
#ifdef CONFIG_SYS_HAS_CPU_BMIPS4350
case CPU_BMIPS4350:
#endif
#ifdef CONFIG_SYS_HAS_CPU_BMIPS4380
case CPU_BMIPS4380:
#endif
#ifdef CONFIG_SYS_HAS_CPU_BMIPS5000
case CPU_BMIPS5000:
#endif
#ifdef CONFIG_SYS_HAS_CPU_XLP
case CPU_XLP:
#endif
#ifdef CONFIG_SYS_HAS_CPU_XLR
case CPU_XLR:
#endif
break;
default:
unreachable();
}
return cpu_type;
}
static inline int __pure current_cpu_type(void)
{
const int cpu_type = current_cpu_data.cputype;
return __get_cpu_type(cpu_type);
}
static inline int __pure boot_cpu_type(void)
{
const int cpu_type = cpu_data[0].cputype;
return __get_cpu_type(cpu_type);
}
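/*
* Illustrative sketch (hypothetical caller, not part of this header):
* because __get_cpu_type() folds configured-out CPU types into
* unreachable(), a switch on current_cpu_type() keeps only the cases this
* kernel can actually run on:
*
*	switch (current_cpu_type()) {
*	case CPU_CAVIUM_OCTEON:
*		-- dead code unless CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON is set
*		break;
*	default:
*		break;
*	}
*/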
#endif /* __ASM_CPU_TYPE_H */

arch/mips/include/asm/cpu.h Normal file
View file

@ -0,0 +1,385 @@
/*
* cpu.h: Values of the PRId register used to match up
* various MIPS cpu types.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2004, 2013 Maciej W. Rozycki
*/
#ifndef _ASM_CPU_H
#define _ASM_CPU_H
/*
As of the MIPS32 and MIPS64 specs from MTI, the PRId register (CP0
register 15, select 0) is defined in this (backwards compatible) way:
+----------------+----------------+----------------+----------------+
| Company Options| Company ID     | Processor ID   | Revision       |
+----------------+----------------+----------------+----------------+
 31            24 23            16 15             8 7              0
I don't have docs for all the previous processors, but my impression is
that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64
spec.
*/
#define PRID_OPT_MASK 0xff000000
/*
* Assigned Company values for bits 23:16 of the PRId register.
*/
#define PRID_COMP_MASK 0xff0000
#define PRID_COMP_LEGACY 0x000000
#define PRID_COMP_MIPS 0x010000
#define PRID_COMP_BROADCOM 0x020000
#define PRID_COMP_ALCHEMY 0x030000
#define PRID_COMP_SIBYTE 0x040000
#define PRID_COMP_SANDCRAFT 0x050000
#define PRID_COMP_NXP 0x060000
#define PRID_COMP_TOSHIBA 0x070000
#define PRID_COMP_LSI 0x080000
#define PRID_COMP_LEXRA 0x0b0000
#define PRID_COMP_NETLOGIC 0x0c0000
#define PRID_COMP_CAVIUM 0x0d0000
#define PRID_COMP_INGENIC 0xd00000
/*
* Assigned Processor ID (implementation) values for bits 15:8 of the PRId
* register.  To detect a certain CPU type exactly, additional registers may
* eventually need to be examined.
*/
#define PRID_IMP_MASK 0xff00
/*
* These are valid when 23:16 == PRID_COMP_LEGACY
*/
#define PRID_IMP_R2000 0x0100
#define PRID_IMP_AU1_REV1 0x0100
#define PRID_IMP_AU1_REV2 0x0200
#define PRID_IMP_R3000 0x0200 /* Same as R2000A */
#define PRID_IMP_R6000 0x0300 /* Same as R3000A */
#define PRID_IMP_R4000 0x0400
#define PRID_IMP_R6000A 0x0600
#define PRID_IMP_R10000 0x0900
#define PRID_IMP_R4300 0x0b00
#define PRID_IMP_VR41XX 0x0c00
#define PRID_IMP_R12000 0x0e00
#define PRID_IMP_R14000 0x0f00
#define PRID_IMP_R8000 0x1000
#define PRID_IMP_PR4450 0x1200
#define PRID_IMP_R4600 0x2000
#define PRID_IMP_R4700 0x2100
#define PRID_IMP_TX39 0x2200
#define PRID_IMP_R4640 0x2200
#define PRID_IMP_R4650 0x2200 /* Same as R4640 */
#define PRID_IMP_R5000 0x2300
#define PRID_IMP_TX49 0x2d00
#define PRID_IMP_SONIC 0x2400
#define PRID_IMP_MAGIC 0x2500
#define PRID_IMP_RM7000 0x2700
#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */
#define PRID_IMP_RM9000 0x3400
#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson-1 */
#define PRID_IMP_R5432 0x5400
#define PRID_IMP_R5500 0x5500
#define PRID_IMP_LOONGSON_64 0x6300 /* Loongson-2/3 */
#define PRID_IMP_UNKNOWN 0xff00
/*
* These are the PRID's for when 23:16 == PRID_COMP_MIPS
*/
#define PRID_IMP_4KC 0x8000
#define PRID_IMP_5KC 0x8100
#define PRID_IMP_20KC 0x8200
#define PRID_IMP_4KEC 0x8400
#define PRID_IMP_4KSC 0x8600
#define PRID_IMP_25KF 0x8800
#define PRID_IMP_5KE 0x8900
#define PRID_IMP_4KECR2 0x9000
#define PRID_IMP_4KEMPR2 0x9100
#define PRID_IMP_4KSD 0x9200
#define PRID_IMP_24K 0x9300
#define PRID_IMP_34K 0x9500
#define PRID_IMP_24KE 0x9600
#define PRID_IMP_74K 0x9700
#define PRID_IMP_1004K 0x9900
#define PRID_IMP_1074K 0x9a00
#define PRID_IMP_M14KC 0x9c00
#define PRID_IMP_M14KEC 0x9e00
#define PRID_IMP_INTERAPTIV_UP 0xa000
#define PRID_IMP_INTERAPTIV_MP 0xa100
#define PRID_IMP_PROAPTIV_UP 0xa200
#define PRID_IMP_PROAPTIV_MP 0xa300
#define PRID_IMP_M5150 0xa700
#define PRID_IMP_P5600 0xa800
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
*/
#define PRID_IMP_SB1 0x0100
#define PRID_IMP_SB1A 0x1100
/*
* These are the PRID's for when 23:16 == PRID_COMP_SANDCRAFT
*/
#define PRID_IMP_SR71000 0x0400
/*
* These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
*/
#define PRID_IMP_BMIPS32_REV4 0x4000
#define PRID_IMP_BMIPS32_REV8 0x8000
#define PRID_IMP_BMIPS3300 0x9000
#define PRID_IMP_BMIPS3300_ALT 0x9100
#define PRID_IMP_BMIPS3300_BUG 0x0000
#define PRID_IMP_BMIPS43XX 0xa000
#define PRID_IMP_BMIPS5000 0x5a00
#define PRID_REV_BMIPS4380_LO 0x0040
#define PRID_REV_BMIPS4380_HI 0x006f
/*
* These are the PRID's for when 23:16 == PRID_COMP_CAVIUM
*/
#define PRID_IMP_CAVIUM_CN38XX 0x0000
#define PRID_IMP_CAVIUM_CN31XX 0x0100
#define PRID_IMP_CAVIUM_CN30XX 0x0200
#define PRID_IMP_CAVIUM_CN58XX 0x0300
#define PRID_IMP_CAVIUM_CN56XX 0x0400
#define PRID_IMP_CAVIUM_CN50XX 0x0600
#define PRID_IMP_CAVIUM_CN52XX 0x0700
#define PRID_IMP_CAVIUM_CN63XX 0x9000
#define PRID_IMP_CAVIUM_CN68XX 0x9100
#define PRID_IMP_CAVIUM_CN66XX 0x9200
#define PRID_IMP_CAVIUM_CN61XX 0x9300
#define PRID_IMP_CAVIUM_CNF71XX 0x9400
#define PRID_IMP_CAVIUM_CN78XX 0x9500
#define PRID_IMP_CAVIUM_CN70XX 0x9600
/*
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC
*/
#define PRID_IMP_JZRISC 0x0200
/*
* These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
*/
#define PRID_IMP_NETLOGIC_XLR732 0x0000
#define PRID_IMP_NETLOGIC_XLR716 0x0200
#define PRID_IMP_NETLOGIC_XLR532 0x0900
#define PRID_IMP_NETLOGIC_XLR308 0x0600
#define PRID_IMP_NETLOGIC_XLR532C 0x0800
#define PRID_IMP_NETLOGIC_XLR516C 0x0a00
#define PRID_IMP_NETLOGIC_XLR508C 0x0b00
#define PRID_IMP_NETLOGIC_XLR308C 0x0f00
#define PRID_IMP_NETLOGIC_XLS608 0x8000
#define PRID_IMP_NETLOGIC_XLS408 0x8800
#define PRID_IMP_NETLOGIC_XLS404 0x8c00
#define PRID_IMP_NETLOGIC_XLS208 0x8e00
#define PRID_IMP_NETLOGIC_XLS204 0x8f00
#define PRID_IMP_NETLOGIC_XLS108 0xce00
#define PRID_IMP_NETLOGIC_XLS104 0xcf00
#define PRID_IMP_NETLOGIC_XLS616B 0x4000
#define PRID_IMP_NETLOGIC_XLS608B 0x4a00
#define PRID_IMP_NETLOGIC_XLS416B 0x4400
#define PRID_IMP_NETLOGIC_XLS412B 0x4c00
#define PRID_IMP_NETLOGIC_XLS408B 0x4e00
#define PRID_IMP_NETLOGIC_XLS404B 0x4f00
#define PRID_IMP_NETLOGIC_AU13XX 0x8000
#define PRID_IMP_NETLOGIC_XLP8XX 0x1000
#define PRID_IMP_NETLOGIC_XLP3XX 0x1100
#define PRID_IMP_NETLOGIC_XLP2XX 0x1200
#define PRID_IMP_NETLOGIC_XLP9XX 0x1500
#define PRID_IMP_NETLOGIC_XLP5XX 0x1300
/*
* Particular Revision values for bits 7:0 of the PRId register.
*/
#define PRID_REV_MASK 0x00ff
/*
* Definitions for 7:0 on legacy processors
*/
#define PRID_REV_TX4927 0x0022
#define PRID_REV_TX4937 0x0030
#define PRID_REV_R4400 0x0040
#define PRID_REV_R3000A 0x0030
#define PRID_REV_R3000 0x0020
#define PRID_REV_R2000A 0x0010
#define PRID_REV_TX3912 0x0010
#define PRID_REV_TX3922 0x0030
#define PRID_REV_TX3927 0x0040
#define PRID_REV_VR4111 0x0050
#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
#define PRID_REV_VR4121 0x0060
#define PRID_REV_VR4122 0x0070
#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
#define PRID_REV_VR4130 0x0080
#define PRID_REV_34K_V1_0_2 0x0022
#define PRID_REV_LOONGSON1B 0x0020
#define PRID_REV_LOONGSON2E 0x0002
#define PRID_REV_LOONGSON2F 0x0003
#define PRID_REV_LOONGSON3A 0x0005
#define PRID_REV_LOONGSON3B_R1 0x0006
#define PRID_REV_LOONGSON3B_R2 0x0007
/*
* Older processors used to encode processor version and revision in two
* 4-bit bitfields, the 4K seems to simply count up, and even newer MTI cores
* have switched to using the 8 bits as a 3:3:2 bitfield with the last field
* as the patch number.  *ARGH*
*/
#define PRID_REV_ENCODE_44(ver, rev) \
((ver) << 4 | (rev))
#define PRID_REV_ENCODE_332(ver, rev, patch) \
((ver) << 5 | (rev) << 2 | (patch))
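/*
* Illustrative sketch (hypothetical caller, not part of this header):
* taking a PRId value apart with the masks above; read_c0_prid() is
* provided by <asm/mipsregs.h>:
*
*	unsigned int prid = read_c0_prid();
*
*	if ((prid & PRID_COMP_MASK) == PRID_COMP_MIPS &&
*	    (prid & PRID_IMP_MASK) == PRID_IMP_24K &&
*	    (prid & PRID_REV_MASK) >= PRID_REV_ENCODE_332(2, 2, 0))
*		-- a 24K core, revision 2.2.0 or later
*/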
/*
* FPU implementation/revision register (CP1 control register 0).
*
* +---------------------------------+----------------+----------------+
* | 0                               | Implementation | Revision       |
* +---------------------------------+----------------+----------------+
*  31                             16 15             8 7              0
*/
#define FPIR_IMP_MASK 0xff00
#define FPIR_IMP_NONE 0x0000
#if !defined(__ASSEMBLY__)
enum cpu_type_enum {
CPU_UNKNOWN,
/*
* R2000 class processors
*/
CPU_R2000, CPU_R3000, CPU_R3000A, CPU_R3041, CPU_R3051, CPU_R3052,
CPU_R3081, CPU_R3081E,
/*
* R6000 class processors
*/
CPU_R6000, CPU_R6000A,
/*
* R4000 class processors
*/
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_SR71000, CPU_TX49XX,
/*
* R8000 class processors
*/
CPU_R8000,
/*
* TX3900 class processors
*/
CPU_TX3912, CPU_TX3922, CPU_TX3927,
/*
* MIPS32 class processors
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
/*
* MIPS64 class processors
*/
CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
CPU_LAST
};
#endif /* !__ASSEMBLY */
/*
* ISA Level encodings
*
*/
#define MIPS_CPU_ISA_II 0x00000001
#define MIPS_CPU_ISA_III 0x00000002
#define MIPS_CPU_ISA_IV 0x00000004
#define MIPS_CPU_ISA_V 0x00000008
#define MIPS_CPU_ISA_M32R1 0x00000010
#define MIPS_CPU_ISA_M32R2 0x00000020
#define MIPS_CPU_ISA_M64R1 0x00000040
#define MIPS_CPU_ISA_M64R2 0x00000080
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
MIPS_CPU_ISA_M32R2)
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
/*
* CPU Option encodings
*/
#define MIPS_CPU_TLB 0x00000001ull /* CPU has TLB */
#define MIPS_CPU_4KEX 0x00000002ull /* "R4K" exception model */
#define MIPS_CPU_3K_CACHE 0x00000004ull /* R3000-style caches */
#define MIPS_CPU_4K_CACHE 0x00000008ull /* R4000-style caches */
#define MIPS_CPU_TX39_CACHE 0x00000010ull /* TX3900-style caches */
#define MIPS_CPU_FPU 0x00000020ull /* CPU has FPU */
#define MIPS_CPU_32FPR 0x00000040ull /* 32 dbl. prec. FP registers */
#define MIPS_CPU_COUNTER 0x00000080ull /* Cycle count/compare */
#define MIPS_CPU_WATCH 0x00000100ull /* watchpoint registers */
#define MIPS_CPU_DIVEC 0x00000200ull /* dedicated interrupt vector */
#define MIPS_CPU_VCE 0x00000400ull /* virt. coherence conflict possible */
#define MIPS_CPU_CACHE_CDEX_P 0x00000800ull /* Create_Dirty_Exclusive CACHE op */
#define MIPS_CPU_CACHE_CDEX_S 0x00001000ull /* ... same for secondary cache ... */
#define MIPS_CPU_MCHECK 0x00002000ull /* Machine check exception */
#define MIPS_CPU_EJTAG 0x00004000ull /* EJTAG exception */
#define MIPS_CPU_NOFPUEX 0x00008000ull /* no FPU exception */
#define MIPS_CPU_LLSC 0x00010000ull /* CPU has ll/sc instructions */
#define MIPS_CPU_INCLUSIVE_CACHES 0x00020000ull /* P-cache subset enforced */
#define MIPS_CPU_PREFETCH 0x00040000ull /* CPU has usable prefetch */
#define MIPS_CPU_VINT 0x00080000ull /* CPU supports MIPSR2 vectored interrupts */
#define MIPS_CPU_VEIC 0x00100000ull /* CPU supports MIPSR2 external interrupt controller mode */
#define MIPS_CPU_ULRI 0x00200000ull /* CPU has ULRI feature */
#define MIPS_CPU_PCI 0x00400000ull /* CPU has Perf Ctr Int indicator */
#define MIPS_CPU_RIXI 0x00800000ull /* CPU has TLB Read/eXec Inhibit */
#define MIPS_CPU_MICROMIPS 0x01000000ull /* CPU has microMIPS capability */
#define MIPS_CPU_TLBINV 0x02000000ull /* CPU supports TLBINV/F */
#define MIPS_CPU_SEGMENTS 0x04000000ull /* CPU supports Segmentation Control registers */
#define MIPS_CPU_EVA 0x80000000ull /* CPU supports Enhanced Virtual Addressing */
#define MIPS_CPU_HTW 0x100000000ull /* CPU supports Hardware Page Table Walker */
#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
/*
* CPU ASE encodings
*/
#define MIPS_ASE_MIPS16 0x00000001 /* code compression */
#define MIPS_ASE_MDMX 0x00000002 /* MIPS digital media extension */
#define MIPS_ASE_MIPS3D 0x00000004 /* MIPS-3D */
#define MIPS_ASE_SMARTMIPS 0x00000008 /* SmartMIPS */
#define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */
#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
#endif /* _ASM_CPU_H */

View file

@ -0,0 +1,48 @@
/*
* Debug macros for run-time debugging.
* Turned on/off with CONFIG_RUNTIME_DEBUG option.
*
* Copyright (C) 2001 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef _ASM_DEBUG_H
#define _ASM_DEBUG_H
/*
* Run-time macros for catching spurious errors.  Enable CONFIG_RUNTIME_DEBUG
* in the kernel hacking config menu to use them.
*
* Use them as a run-time debugging aid.  NEVER USE THEM AS ERROR HANDLING CODE!!!
*/
#ifdef CONFIG_RUNTIME_DEBUG
#include <linux/kernel.h>
#define db_assert(x) if (!(x)) { \
panic("assertion failed at %s:%d: %s", __FILE__, __LINE__, #x); }
#define db_warn(x) if (!(x)) { \
printk(KERN_WARNING "warning at %s:%d: %s", __FILE__, __LINE__, #x); }
#define db_verify(x, y) db_assert(x y)
#define db_verify_warn(x, y) db_warn(x y)
#define db_run(x) do { x; } while (0)
#else
#define db_assert(x)
#define db_warn(x)
#define db_verify(x, y) x
#define db_verify_warn(x, y) x
#define db_run(x)
#endif
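/*
* Illustrative usage sketch (hypothetical driver code, not part of this
* header); dev, status_read() and dump_state() are made-up names:
*
*	db_assert(dev != NULL);			-- panics when debugging
*	db_verify(status_read(dev), == 0);	-- checked only when debugging
*	db_run(dump_state(dev));		-- compiled out otherwise
*/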
#endif /* _ASM_DEBUG_H */

View file

@ -0,0 +1,55 @@
/*
* include/asm-mips/dec/ecc.h
*
* ECC handling logic definitions common to DECstation/DECsystem
* 5000/200 (KN02), 5000/240 (KN03), 5000/260 (KN05) and
* DECsystem 5900 (KN03), 5900/260 (KN05) systems.
*
* Copyright (C) 2003 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_MIPS_DEC_ECC_H
#define __ASM_MIPS_DEC_ECC_H
/*
* Error Address Register bits.
* The register is r/wc -- any write clears it.
*/
#define KN0X_EAR_VALID (1<<31) /* error data valid, bus IRQ */
#define KN0X_EAR_CPU (1<<30) /* CPU/DMA transaction */
#define KN0X_EAR_WRITE (1<<29) /* write/read transaction */
#define KN0X_EAR_ECCERR (1<<28) /* ECC/timeout or overrun */
#define KN0X_EAR_RES_27 (1<<27) /* unused */
#define KN0X_EAR_ADDRESS (0x7ffffff<<0) /* address involved */
/*
* Error Syndrome Register bits.
* The register is frozen when EAR.VALID is set, otherwise it records bits
* from the last memory read. The register is r/wc -- any write clears it.
*/
#define KN0X_ESR_VLDHI (1<<31) /* error data valid hi word */
#define KN0X_ESR_CHKHI (0x7f<<24) /* check bits read from mem */
#define KN0X_ESR_SNGHI (1<<23) /* single/double bit error */
#define KN0X_ESR_SYNHI (0x7f<<16) /* syndrome from ECC logic */
#define KN0X_ESR_VLDLO (1<<15) /* error data valid lo word */
#define KN0X_ESR_CHKLO (0x7f<<8) /* check bits read from mem */
#define KN0X_ESR_SNGLO (1<<7) /* single/double bit error */
#define KN0X_ESR_SYNLO (0x7f<<0) /* syndrome from ECC logic */
#ifndef __ASSEMBLY__
#include <linux/interrupt.h>
struct pt_regs;
extern void dec_ecc_be_init(void);
extern int dec_ecc_be_handler(struct pt_regs *regs, int is_fixup);
extern irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id);
#endif
#endif /* __ASM_MIPS_DEC_ECC_H */

View file

@ -0,0 +1,126 @@
/*
* Miscellaneous definitions used to initialise the interrupt vector table
* with the machine-specific interrupt routines.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997 by Paul M. Antoine.
* reworked 1998 by Harald Koerfgen.
* Copyright (C) 2001, 2002, 2003 Maciej W. Rozycki
*/
#ifndef __ASM_DEC_INTERRUPTS_H
#define __ASM_DEC_INTERRUPTS_H
#include <irq.h>
#include <asm/mipsregs.h>
/*
* The list of possible system devices which provide an
* interrupt. Not all devices exist on a given system.
*/
#define DEC_IRQ_CASCADE 0 /* cascade from CSR or I/O ASIC */
/* Ordinary interrupts */
#define DEC_IRQ_AB_RECV 1 /* ACCESS.bus receive */
#define DEC_IRQ_AB_XMIT 2 /* ACCESS.bus transmit */
#define DEC_IRQ_DZ11 3 /* DZ11 (DC7085) serial */
#define DEC_IRQ_ASC 4 /* ASC (NCR53C94) SCSI */
#define DEC_IRQ_FLOPPY 5 /* 82077 FDC */
#define DEC_IRQ_FPU 6 /* R3k FPU */
#define DEC_IRQ_HALT 7 /* HALT button or from ACCESS.Bus */
#define DEC_IRQ_ISDN 8 /* Am79C30A ISDN */
#define DEC_IRQ_LANCE 9 /* LANCE (Am7990) Ethernet */
#define DEC_IRQ_BUS 10 /* memory, I/O bus read/write errors */
#define DEC_IRQ_PSU 11 /* power supply unit warning */
#define DEC_IRQ_RTC 12 /* DS1287 RTC */
#define DEC_IRQ_SCC0 13 /* SCC (Z85C30) serial #0 */
#define DEC_IRQ_SCC1 14 /* SCC (Z85C30) serial #1 */
#define DEC_IRQ_SII 15 /* SII (DC7061) SCSI */
#define DEC_IRQ_TC0 16 /* TURBOchannel slot #0 */
#define DEC_IRQ_TC1 17 /* TURBOchannel slot #1 */
#define DEC_IRQ_TC2 18 /* TURBOchannel slot #2 */
#define DEC_IRQ_TIMER 19 /* ARC periodic timer */
#define DEC_IRQ_VIDEO 20 /* framebuffer */
/* I/O ASIC DMA interrupts */
#define DEC_IRQ_ASC_MERR 21 /* ASC memory read error */
#define DEC_IRQ_ASC_ERR 22 /* ASC page overrun */
#define DEC_IRQ_ASC_DMA 23 /* ASC buffer pointer loaded */
#define DEC_IRQ_FLOPPY_ERR 24 /* FDC error */
#define DEC_IRQ_ISDN_ERR 25 /* ISDN memory read/overrun error */
#define DEC_IRQ_ISDN_RXDMA 26 /* ISDN recv buffer pointer loaded */
#define DEC_IRQ_ISDN_TXDMA 27 /* ISDN xmit buffer pointer loaded */
#define DEC_IRQ_LANCE_MERR 28 /* LANCE memory read error */
#define DEC_IRQ_SCC0A_RXERR 29 /* SCC0A (printer) receive overrun */
#define DEC_IRQ_SCC0A_RXDMA 30 /* SCC0A receive half page */
#define DEC_IRQ_SCC0A_TXERR 31 /* SCC0A xmit memory read/overrun */
#define DEC_IRQ_SCC0A_TXDMA 32 /* SCC0A transmit page end */
#define DEC_IRQ_AB_RXERR 33 /* ACCESS.bus receive overrun */
#define DEC_IRQ_AB_RXDMA 34 /* ACCESS.bus receive half page */
#define DEC_IRQ_AB_TXERR 35 /* ACCESS.bus xmit memory read/ovrn */
#define DEC_IRQ_AB_TXDMA 36 /* ACCESS.bus transmit page end */
#define DEC_IRQ_SCC1A_RXERR 37 /* SCC1A (modem) receive overrun */
#define DEC_IRQ_SCC1A_RXDMA 38 /* SCC1A receive half page */
#define DEC_IRQ_SCC1A_TXERR 39 /* SCC1A xmit memory read/overrun */
#define DEC_IRQ_SCC1A_TXDMA 40 /* SCC1A transmit page end */
/* TC5 & TC6 are virtual slots for KN02's onboard devices */
#define DEC_IRQ_TC5 DEC_IRQ_ASC /* virtual PMAZ-AA */
#define DEC_IRQ_TC6 DEC_IRQ_LANCE /* virtual PMAD-AA */
#define DEC_NR_INTS 41
/* Largest of cpu mask_nr tables. */
#define DEC_MAX_CPU_INTS 6
/* Largest of asic mask_nr tables. */
#define DEC_MAX_ASIC_INTS 9
/*
* CPU interrupt bits common to all systems.
*/
#define DEC_CPU_INR_FPU 7 /* R3k FPU */
#define DEC_CPU_INR_SW1 1 /* software #1 */
#define DEC_CPU_INR_SW0 0 /* software #0 */
#define DEC_CPU_IRQ_BASE MIPS_CPU_IRQ_BASE /* first IRQ assigned to CPU */
#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE)
#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP))
#define DEC_CPU_IRQ_ALL (0xff << CAUSEB_IP)
#ifndef __ASSEMBLY__
/*
* Interrupt table structures to hide differences between systems.
*/
typedef union { int i; void *p; } int_ptr;
extern int dec_interrupt[DEC_NR_INTS];
extern int_ptr cpu_mask_nr_tbl[DEC_MAX_CPU_INTS][2];
extern int_ptr asic_mask_nr_tbl[DEC_MAX_ASIC_INTS][2];
extern int cpu_fpu_mask;
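/*
* Illustrative sketch (hypothetical driver code, not part of this header):
* the DEC_IRQ_* numbers above index dec_interrupt[], which holds the Linux
* IRQ assigned on the current system (left negative when the device is
* absent).  lance_interrupt and dev are made-up names:
*
*	if (dec_interrupt[DEC_IRQ_LANCE] >= 0)
*		request_irq(dec_interrupt[DEC_IRQ_LANCE], lance_interrupt,
*			    0, "lance", dev);
*/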
/*
* Common interrupt routine prototypes for all DECStations
*/
extern void kn02_io_int(void);
extern void kn02xa_io_int(void);
extern void kn03_io_int(void);
extern void asic_dma_int(void);
extern void asic_all_int(void);
extern void kn02_all_int(void);
extern void cpu_all_int(void);
extern void dec_intr_unimplemented(void);
extern void asic_intr_unimplemented(void);
#endif /* __ASSEMBLY__ */
#endif

View file

@ -0,0 +1,38 @@
/*
* include/asm-mips/dec/ioasic.h
*
* DEC I/O ASIC access operations.
*
* Copyright (C) 2000, 2002, 2003 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_DEC_IOASIC_H
#define __ASM_DEC_IOASIC_H
#include <linux/spinlock.h>
#include <linux/types.h>
extern spinlock_t ioasic_ssr_lock;
extern volatile u32 *ioasic_base;
static inline void ioasic_write(unsigned int reg, u32 v)
{
ioasic_base[reg / 4] = v;
}
static inline u32 ioasic_read(unsigned int reg)
{
return ioasic_base[reg / 4];
}
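/*
* Illustrative sketch (hypothetical caller, not part of this header): the
* System Support Register is shared between drivers, so read-modify-write
* cycles are done under ioasic_ssr_lock.  IO_REG_SSR and the IO_SSR_* bits
* come from <asm/dec/ioasic_addrs.h>:
*
*	unsigned long flags;
*	u32 ssr;
*
*	spin_lock_irqsave(&ioasic_ssr_lock, flags);
*	ssr = ioasic_read(IO_REG_SSR);
*	ssr |= IO_SSR_SCSI_DMA_EN;
*	ioasic_write(IO_REG_SSR, ssr);
*	spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
*/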
extern void init_ioasic_irqs(int base);
extern int dec_ioasic_clocksource_init(void);
#endif /* __ASM_DEC_IOASIC_H */

View file

@ -0,0 +1,152 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Definitions for the address map in the JUNKIO Asic
*
* Created with Information from:
*
* "DEC 3000 300/400/500/600/700/800/900 AXP Models System Programmer's Manual"
*
* and the Mach Sources
*
* Copyright (C) 199x the Anonymous
* Copyright (C) 2002, 2003 Maciej W. Rozycki
*/
#ifndef __ASM_MIPS_DEC_IOASIC_ADDRS_H
#define __ASM_MIPS_DEC_IOASIC_ADDRS_H
#define IOASIC_SLOT_SIZE 0x00040000
/*
* Address ranges decoded by the I/O ASIC for onboard devices.
*/
#define IOASIC_SYS_ROM (0*IOASIC_SLOT_SIZE) /* system board ROM */
#define IOASIC_IOCTL (1*IOASIC_SLOT_SIZE) /* I/O ASIC */
#define IOASIC_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
#define IOASIC_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
#define IOASIC_SCC0 (4*IOASIC_SLOT_SIZE) /* SCC #0 */
#define IOASIC_VDAC_HI (5*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
#define IOASIC_SCC1 (6*IOASIC_SLOT_SIZE) /* SCC #1 (3min, 3max+) */
#define IOASIC_VDAC_LO (7*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
#define IOASIC_TOY (8*IOASIC_SLOT_SIZE) /* RTC */
#define IOASIC_ISDN (9*IOASIC_SLOT_SIZE) /* ISDN (maxine) */
#define IOASIC_ERRADDR (9*IOASIC_SLOT_SIZE) /* bus error address (3max+) */
#define IOASIC_CHKSYN (10*IOASIC_SLOT_SIZE) /* ECC syndrome (3max+) */
#define IOASIC_ACC_BUS (10*IOASIC_SLOT_SIZE) /* ACCESS.bus (maxine) */
#define IOASIC_MCR (11*IOASIC_SLOT_SIZE) /* memory control (3max+) */
#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
#define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE) /* FDC DMA (maxine) */
#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
#define IOASIC_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
/*
* Offsets for I/O ASIC registers
* (relative to (dec_kn_slot_base + IOASIC_IOCTL)).
*/
/* all systems */
#define IO_REG_SCSI_DMA_P 0x00 /* SCSI DMA Pointer */
#define IO_REG_SCSI_DMA_BP 0x10 /* SCSI DMA Buffer Pointer */
#define IO_REG_LANCE_DMA_P 0x20 /* LANCE DMA Pointer */
#define IO_REG_SCC0A_T_DMA_P 0x30 /* SCC0A Transmit DMA Pointer */
#define IO_REG_SCC0A_R_DMA_P 0x40 /* SCC0A Receive DMA Pointer */
/* except Maxine */
#define IO_REG_SCC1A_T_DMA_P 0x50 /* SCC1A Transmit DMA Pointer */
#define IO_REG_SCC1A_R_DMA_P 0x60 /* SCC1A Receive DMA Pointer */
/* Maxine */
#define IO_REG_AB_T_DMA_P 0x50 /* ACCESS.bus Transmit DMA Pointer */
#define IO_REG_AB_R_DMA_P 0x60 /* ACCESS.bus Receive DMA Pointer */
#define IO_REG_FLOPPY_DMA_P 0x70 /* Floppy DMA Pointer */
#define IO_REG_ISDN_T_DMA_P 0x80 /* ISDN Transmit DMA Pointer */
#define IO_REG_ISDN_T_DMA_BP 0x90 /* ISDN Transmit DMA Buffer Pointer */
#define IO_REG_ISDN_R_DMA_P 0xa0 /* ISDN Receive DMA Pointer */
#define IO_REG_ISDN_R_DMA_BP 0xb0 /* ISDN Receive DMA Buffer Pointer */
/* all systems */
#define IO_REG_DATA_0 0xc0 /* System Data Buffer 0 */
#define IO_REG_DATA_1 0xd0 /* System Data Buffer 1 */
#define IO_REG_DATA_2 0xe0 /* System Data Buffer 2 */
#define IO_REG_DATA_3 0xf0 /* System Data Buffer 3 */
/* all systems */
#define IO_REG_SSR 0x100 /* System Support Register */
#define IO_REG_SIR 0x110 /* System Interrupt Register */
#define IO_REG_SIMR 0x120 /* System Interrupt Mask Reg. */
#define IO_REG_SAR 0x130 /* System Address Register */
/* Maxine */
#define IO_REG_ISDN_T_DATA 0x140 /* ISDN Xmit Data Register */
#define IO_REG_ISDN_R_DATA 0x150 /* ISDN Receive Data Register */
/* all systems */
#define IO_REG_LANCE_SLOT 0x160 /* LANCE I/O Slot Register */
#define IO_REG_SCSI_SLOT 0x170 /* SCSI Slot Register */
#define IO_REG_SCC0A_SLOT 0x180 /* SCC0A DMA Slot Register */
/* except Maxine */
#define IO_REG_SCC1A_SLOT 0x190 /* SCC1A DMA Slot Register */
/* Maxine */
#define IO_REG_AB_SLOT 0x190 /* ACCESS.bus DMA Slot Register */
#define IO_REG_FLOPPY_SLOT 0x1a0 /* Floppy Slot Register */
/* all systems */
#define IO_REG_SCSI_SCR 0x1b0 /* SCSI Partial-Word DMA Control */
#define IO_REG_SCSI_SDR0 0x1c0 /* SCSI DMA Partial Word 0 */
#define IO_REG_SCSI_SDR1 0x1d0 /* SCSI DMA Partial Word 1 */
#define IO_REG_FCTR 0x1e0 /* Free-Running Counter */
#define IO_REG_RES_31 0x1f0 /* unused */
/*
* The upper 16 bits of the System Support Register are a part of the
* I/O ASIC's internal DMA engine and thus are common to all I/O ASIC
* machines. The exception is the Maxine, which makes use of the
* FLOPPY and ISDN bits (otherwise unused) and has a different SCC
* wiring.
*/
/* all systems */
#define IO_SSR_SCC0A_TX_DMA_EN (1<<31) /* SCC0A transmit DMA enable */
#define IO_SSR_SCC0A_RX_DMA_EN (1<<30) /* SCC0A receive DMA enable */
#define IO_SSR_RES_27 (1<<27) /* unused */
#define IO_SSR_RES_26 (1<<26) /* unused */
#define IO_SSR_RES_25 (1<<25) /* unused */
#define IO_SSR_RES_24 (1<<24) /* unused */
#define IO_SSR_RES_23 (1<<23) /* unused */
#define IO_SSR_SCSI_DMA_DIR (1<<18) /* SCSI DMA direction */
#define IO_SSR_SCSI_DMA_EN (1<<17) /* SCSI DMA enable */
#define IO_SSR_LANCE_DMA_EN (1<<16) /* LANCE DMA enable */
/* except Maxine */
#define IO_SSR_SCC1A_TX_DMA_EN (1<<29) /* SCC1A transmit DMA enable */
#define IO_SSR_SCC1A_RX_DMA_EN (1<<28) /* SCC1A receive DMA enable */
#define IO_SSR_RES_22 (1<<22) /* unused */
#define IO_SSR_RES_21 (1<<21) /* unused */
#define IO_SSR_RES_20 (1<<20) /* unused */
#define IO_SSR_RES_19 (1<<19) /* unused */
/* Maxine */
#define IO_SSR_AB_TX_DMA_EN (1<<29) /* ACCESS.bus xmit DMA enable */
#define IO_SSR_AB_RX_DMA_EN (1<<28) /* ACCESS.bus recv DMA enable */
#define IO_SSR_FLOPPY_DMA_DIR (1<<22) /* Floppy DMA direction */
#define IO_SSR_FLOPPY_DMA_EN (1<<21) /* Floppy DMA enable */
#define IO_SSR_ISDN_TX_DMA_EN (1<<20) /* ISDN transmit DMA enable */
#define IO_SSR_ISDN_RX_DMA_EN (1<<19) /* ISDN receive DMA enable */
/*
* The lower 16 bits are system-specific. Bits 15,11:8 are common and
* defined here. The rest is defined in system-specific headers.
*/
#define KN0X_IO_SSR_DIAGDN (1<<15) /* diagnostic jumper */
#define KN0X_IO_SSR_SCC_RST (1<<11) /* ~SCC0,1 (Z85C30) reset */
#define KN0X_IO_SSR_RTC_RST (1<<10) /* ~RTC (DS1287) reset */
#define KN0X_IO_SSR_ASC_RST (1<<9) /* ~ASC (NCR53C94) reset */
#define KN0X_IO_SSR_LANCE_RST (1<<8) /* ~LANCE (Am7990) reset */
#endif /* __ASM_MIPS_DEC_IOASIC_ADDRS_H */

View file

@ -0,0 +1,74 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Definitions for the interrupt related bits in the I/O ASIC
* interrupt status register (and the interrupt mask register, of course)
*
* Created with Information from:
*
* "DEC 3000 300/400/500/600/700/800/900 AXP Models System Programmer's Manual"
*
* and the Mach Sources
*
* Copyright (C) 199x the Anonymous
* Copyright (C) 2002 Maciej W. Rozycki
*/
#ifndef __ASM_DEC_IOASIC_INTS_H
#define __ASM_DEC_IOASIC_INTS_H
/*
* The upper 16 bits are a part of the I/O ASIC's internal DMA engine
* and thus are common to all I/O ASIC machines. The exception is
* the Maxine, which makes use of the FLOPPY and ISDN bits (otherwise
* unused) and has a different SCC wiring.
*/
/* all systems */
#define IO_INR_SCC0A_TXDMA 31 /* SCC0A transmit page end */
#define IO_INR_SCC0A_TXERR 30 /* SCC0A transmit memory read error */
#define IO_INR_SCC0A_RXDMA 29 /* SCC0A receive half page */
#define IO_INR_SCC0A_RXERR 28 /* SCC0A receive overrun */
#define IO_INR_ASC_DMA 19 /* ASC buffer pointer loaded */
#define IO_INR_ASC_ERR 18 /* ASC page overrun */
#define IO_INR_ASC_MERR 17 /* ASC memory read error */
#define IO_INR_LANCE_MERR 16 /* LANCE memory read error */
/* except Maxine */
#define IO_INR_SCC1A_TXDMA 27 /* SCC1A transmit page end */
#define IO_INR_SCC1A_TXERR 26 /* SCC1A transmit memory read error */
#define IO_INR_SCC1A_RXDMA 25 /* SCC1A receive half page */
#define IO_INR_SCC1A_RXERR 24 /* SCC1A receive overrun */
#define IO_INR_RES_23 23 /* unused */
#define IO_INR_RES_22 22 /* unused */
#define IO_INR_RES_21 21 /* unused */
#define IO_INR_RES_20 20 /* unused */
/* Maxine */
#define IO_INR_AB_TXDMA 27 /* ACCESS.bus transmit page end */
#define IO_INR_AB_TXERR 26 /* ACCESS.bus xmit memory read error */
#define IO_INR_AB_RXDMA 25 /* ACCESS.bus receive half page */
#define IO_INR_AB_RXERR 24 /* ACCESS.bus receive overrun */
#define IO_INR_FLOPPY_ERR 23 /* FDC error */
#define IO_INR_ISDN_TXDMA 22 /* ISDN xmit buffer pointer loaded */
#define IO_INR_ISDN_RXDMA 21 /* ISDN recv buffer pointer loaded */
#define IO_INR_ISDN_ERR 20 /* ISDN memory read/overrun error */
#define IO_INR_DMA 16 /* first DMA IRQ */
/*
* The lower 16 bits are system-specific and thus defined in
* system-specific headers.
*/
#define IO_IRQ_BASE 8 /* first IRQ assigned to I/O ASIC */
#define IO_IRQ_LINES 32 /* number of I/O ASIC interrupts */
#define IO_IRQ_NR(n) ((n) + IO_IRQ_BASE)
#define IO_IRQ_MASK(n) (1 << (n))
#define IO_IRQ_ALL 0x0000ffff
#define IO_IRQ_DMA 0xffff0000
#endif /* __ASM_DEC_IOASIC_INTS_H */

View file

@ -0,0 +1,89 @@
/*
* Hardware info about DECstation DS2100/3100 systems (otherwise known as
* pmin/pmax or KN01).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
* are by courtesy of Chris Fraser.
* Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
*/
#ifndef __ASM_MIPS_DEC_KN01_H
#define __ASM_MIPS_DEC_KN01_H
#define KN01_SLOT_BASE 0x10000000
#define KN01_SLOT_SIZE 0x01000000
/*
* Address ranges for devices.
*/
#define KN01_PMASK (0*KN01_SLOT_SIZE) /* color plane mask */
#define KN01_PCC (1*KN01_SLOT_SIZE) /* PCC (DC503) cursor */
#define KN01_VDAC (2*KN01_SLOT_SIZE) /* color map */
#define KN01_RES_3 (3*KN01_SLOT_SIZE) /* unused */
#define KN01_RES_4 (4*KN01_SLOT_SIZE) /* unused */
#define KN01_RES_5 (5*KN01_SLOT_SIZE) /* unused */
#define KN01_RES_6 (6*KN01_SLOT_SIZE) /* unused */
#define KN01_ERRADDR (7*KN01_SLOT_SIZE) /* write error address */
#define KN01_LANCE (8*KN01_SLOT_SIZE) /* LANCE (Am7990) Ethernet */
#define KN01_LANCE_MEM (9*KN01_SLOT_SIZE) /* LANCE buffer memory */
#define KN01_SII (10*KN01_SLOT_SIZE) /* SII (DC7061) SCSI */
#define KN01_SII_MEM (11*KN01_SLOT_SIZE) /* SII buffer memory */
#define KN01_DZ11 (12*KN01_SLOT_SIZE) /* DZ11 (DC7085) serial */
#define KN01_RTC (13*KN01_SLOT_SIZE) /* DS1287 RTC (bytes #0) */
#define KN01_ESAR (13*KN01_SLOT_SIZE) /* MAC address (bytes #1) */
#define KN01_CSR (14*KN01_SLOT_SIZE) /* system ctrl & status reg */
#define KN01_SYS_ROM (15*KN01_SLOT_SIZE) /* system board ROM */
/*
* Frame buffer memory address.
*/
#define KN01_VFB_MEM 0x0fc00000
/*
* CPU interrupt bits.
*/
#define KN01_CPU_INR_BUS 6 /* memory, I/O bus read/write errors */
#define KN01_CPU_INR_VIDEO 6 /* PCC area detect #2 */
#define KN01_CPU_INR_RTC 5 /* DS1287 RTC */
#define KN01_CPU_INR_DZ11 4 /* DZ11 (DC7085) serial */
#define KN01_CPU_INR_LANCE 3 /* LANCE (Am7990) Ethernet */
#define KN01_CPU_INR_SII 2 /* SII (DC7061) SCSI */
/*
* System Control & Status Register bits.
*/
#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack */
#define KN01_CSR_VINT (1<<9) /* PCC area detect #2 status & ack */
#define KN01_CSR_TXDIS (1<<8) /* DZ11 transmit disable */
#define KN01_CSR_VBGTRG (1<<2) /* blue DAC voltage over green (r/o) */
#define KN01_CSR_VRGTRG (1<<1) /* red DAC voltage over green (r/o) */
#define KN01_CSR_VRGTRB (1<<0) /* red DAC voltage over blue (r/o) */
#define KN01_CSR_LEDS (0xff<<0) /* ~diagnostic LEDs (w/o) */
#ifndef __ASSEMBLY__
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>
struct pt_regs;
extern u16 cached_kn01_csr;
extern void dec_kn01_be_init(void);
extern int dec_kn01_be_handler(struct pt_regs *regs, int is_fixup);
extern irqreturn_t dec_kn01_be_interrupt(int irq, void *dev_id);
#endif
#endif /* __ASM_MIPS_DEC_KN01_H */

View file

@ -0,0 +1,91 @@
/*
* Hardware info about DECstation 5000/200 systems (otherwise known as
* 3max or KN02).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
* are by courtesy of Chris Fraser.
* Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
*/
#ifndef __ASM_MIPS_DEC_KN02_H
#define __ASM_MIPS_DEC_KN02_H
#define KN02_SLOT_BASE 0x1fc00000
#define KN02_SLOT_SIZE 0x00080000
/*
* Address ranges decoded by the "system slot" logic for onboard devices.
*/
#define KN02_SYS_ROM (0*KN02_SLOT_SIZE) /* system board ROM */
#define KN02_RES_1 (1*KN02_SLOT_SIZE) /* unused */
#define KN02_CHKSYN (2*KN02_SLOT_SIZE) /* ECC syndrome */
#define KN02_ERRADDR (3*KN02_SLOT_SIZE) /* bus error address */
#define KN02_DZ11 (4*KN02_SLOT_SIZE) /* DZ11 (DC7085) serial */
#define KN02_RTC (5*KN02_SLOT_SIZE) /* DS1287 RTC */
#define KN02_CSR (6*KN02_SLOT_SIZE) /* system ctrl & status reg */
#define KN02_SYS_ROM_7 (7*KN02_SLOT_SIZE) /* system board ROM (alias) */
/*
* System Control & Status Register bits.
*/
#define KN02_CSR_RES_28 (0xf<<28) /* unused */
#define KN02_CSR_PSU (1<<27) /* power supply unit warning */
#define KN02_CSR_NVRAM (1<<26) /* ~NVRAM clear jumper */
#define KN02_CSR_REFEVEN (1<<25) /* mem refresh bank toggle */
#define KN02_CSR_NRMOD (1<<24) /* ~NRMOD manufact. jumper */
#define KN02_CSR_IOINTEN (0xff<<16) /* IRQ mask bits */
#define KN02_CSR_DIAGCHK (1<<15) /* diagn/norml ECC reads */
#define KN02_CSR_DIAGGEN (1<<14) /* diagn/norml ECC writes */
#define KN02_CSR_CORRECT (1<<13) /* ECC correct/check */
#define KN02_CSR_LEDIAG (1<<12) /* ECC diagn. latch strobe */
#define KN02_CSR_TXDIS (1<<11) /* DZ11 transmit disable */
#define KN02_CSR_BNK32M (1<<10) /* 32M/8M stride */
#define KN02_CSR_DIAGDN (1<<9) /* DIAGDN manufact. jumper */
#define KN02_CSR_BAUD38 (1<<8) /* DZ11 38/19kbps ext. rate */
#define KN02_CSR_IOINT (0xff<<0) /* IRQ status bits (r/o) */
#define KN02_CSR_LEDS (0xff<<0) /* ~diagnostic LEDs (w/o) */
/*
* CPU interrupt bits.
*/
#define KN02_CPU_INR_RES_6 6 /* unused */
#define KN02_CPU_INR_BUS 5 /* memory, I/O bus read/write errors */
#define KN02_CPU_INR_RES_4 4 /* unused */
#define KN02_CPU_INR_RTC 3 /* DS1287 RTC */
#define KN02_CPU_INR_CASCADE 2 /* CSR cascade */
/*
* CSR interrupt bits.
*/
#define KN02_CSR_INR_DZ11 7 /* DZ11 (DC7085) serial */
#define KN02_CSR_INR_LANCE 6 /* LANCE (Am7990) Ethernet */
#define KN02_CSR_INR_ASC 5 /* ASC (NCR53C94) SCSI */
#define KN02_CSR_INR_RES_4 4 /* unused */
#define KN02_CSR_INR_RES_3 3 /* unused */
#define KN02_CSR_INR_TC2 2 /* TURBOchannel slot #2 */
#define KN02_CSR_INR_TC1 1 /* TURBOchannel slot #1 */
#define KN02_CSR_INR_TC0 0 /* TURBOchannel slot #0 */
#define KN02_IRQ_BASE 8 /* first IRQ assigned to CSR */
#define KN02_IRQ_LINES 8 /* number of CSR interrupts */
#define KN02_IRQ_NR(n) ((n) + KN02_IRQ_BASE)
#define KN02_IRQ_MASK(n) (1 << (n))
#define KN02_IRQ_ALL 0xff
#ifndef __ASSEMBLY__
#include <linux/types.h>
extern u32 cached_kn02_csr;
extern void init_kn02_irqs(int base);
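/*
* Illustrative sketch (hypothetical code, not part of this header): the low
* byte of the CSR reads back as IRQ status but writes drive the LEDs, so
* interrupt-enable updates go through the software copy above and rewrite
* the whole word.  CKSEG1ADDR() comes from <asm/addrspace.h>:
*
*	volatile u32 *csr =
*		(volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
*
*	cached_kn02_csr |= KN02_IRQ_MASK(KN02_CSR_INR_LANCE) << 16;
*	*csr = cached_kn02_csr;
*/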
#endif
#endif /* __ASM_MIPS_DEC_KN02_H */

View file

@ -0,0 +1,67 @@
/*
* include/asm-mips/dec/kn02ba.h
*
* DECstation 5000/1xx (3min or KN02-BA) definitions.
*
* Copyright (C) 2002, 2003 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_MIPS_DEC_KN02BA_H
#define __ASM_MIPS_DEC_KN02BA_H
#include <asm/dec/kn02xa.h> /* For common definitions. */
/*
* CPU interrupt bits.
*/
#define KN02BA_CPU_INR_HALT 6 /* HALT button */
#define KN02BA_CPU_INR_CASCADE 5 /* I/O ASIC cascade */
#define KN02BA_CPU_INR_TC2 4 /* TURBOchannel slot #2 */
#define KN02BA_CPU_INR_TC1 3 /* TURBOchannel slot #1 */
#define KN02BA_CPU_INR_TC0 2 /* TURBOchannel slot #0 */
/*
* I/O ASIC interrupt bits. Star marks denote non-IRQ status bits.
*/
#define KN02BA_IO_INR_RES_15 15 /* unused */
#define KN02BA_IO_INR_NVRAM 14 /* (*) NVRAM clear jumper */
#define KN02BA_IO_INR_RES_13 13 /* unused */
#define KN02BA_IO_INR_BUS 12 /* memory, I/O bus read/write errors */
#define KN02BA_IO_INR_RES_11 11 /* unused */
#define KN02BA_IO_INR_NRMOD 10 /* (*) NRMOD manufacturing jumper */
#define KN02BA_IO_INR_ASC 9 /* ASC (NCR53C94) SCSI */
#define KN02BA_IO_INR_LANCE 8 /* LANCE (Am7990) Ethernet */
#define KN02BA_IO_INR_SCC1 7 /* SCC (Z85C30) serial #1 */
#define KN02BA_IO_INR_SCC0 6 /* SCC (Z85C30) serial #0 */
#define KN02BA_IO_INR_RTC 5 /* DS1287 RTC */
#define KN02BA_IO_INR_PSU 4 /* power supply unit warning */
#define KN02BA_IO_INR_RES_3 3 /* unused */
#define KN02BA_IO_INR_ASC_DATA 2 /* SCSI data ready (for PIO) */
#define KN02BA_IO_INR_PBNC 1 /* ~HALT button debouncer */
#define KN02BA_IO_INR_PBNO 0 /* HALT button debouncer */
/*
* Memory Error Register bits.
*/
#define KN02BA_MER_RES_27 (1<<27) /* unused */
/*
* Memory Size Register bits.
*/
#define KN02BA_MSR_RES_17 (0x3ff<<17) /* unused */
/*
* I/O ASIC System Support Register bits.
*/
#define KN02BA_IO_SSR_TXDIS1 (1<<14) /* SCC1 transmit disable */
#define KN02BA_IO_SSR_TXDIS0 (1<<13) /* SCC0 transmit disable */
#define KN02BA_IO_SSR_RES_12 (1<<12) /* unused */
#define KN02BA_IO_SSR_LEDS (0xff<<0) /* ~diagnostic LEDs */
#endif /* __ASM_MIPS_DEC_KN02BA_H */

View file

@ -0,0 +1,79 @@
/*
* include/asm-mips/dec/kn02ca.h
*
* Personal DECstation 5000/xx (Maxine or KN02-CA) definitions.
*
* Copyright (C) 2002, 2003 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_MIPS_DEC_KN02CA_H
#define __ASM_MIPS_DEC_KN02CA_H
#include <asm/dec/kn02xa.h> /* For common definitions. */
/*
* CPU interrupt bits.
*/
#define KN02CA_CPU_INR_HALT 6 /* HALT from ACCESS.Bus */
#define KN02CA_CPU_INR_CASCADE 5 /* I/O ASIC cascade */
#define KN02CA_CPU_INR_BUS 4 /* memory, I/O bus read/write errors */
#define KN02CA_CPU_INR_RTC 3 /* DS1287 RTC */
#define KN02CA_CPU_INR_TIMER 2 /* ARC periodic timer */
/*
* I/O ASIC interrupt bits. Star marks denote non-IRQ status bits.
*/
#define KN02CA_IO_INR_FLOPPY 15 /* 82077 FDC */
#define KN02CA_IO_INR_NVRAM 14 /* (*) NVRAM clear jumper */
#define KN02CA_IO_INR_POWERON 13 /* (*) ACCESS.Bus/power-on reset */
#define KN02CA_IO_INR_TC0 12 /* TURBOchannel slot #0 */
#define KN02CA_IO_INR_TIMER 12 /* ARC periodic timer (?) */
#define KN02CA_IO_INR_ISDN 11 /* Am79C30A ISDN */
#define KN02CA_IO_INR_NRMOD 10 /* (*) NRMOD manufacturing jumper */
#define KN02CA_IO_INR_ASC 9 /* ASC (NCR53C94) SCSI */
#define KN02CA_IO_INR_LANCE 8 /* LANCE (Am7990) Ethernet */
#define KN02CA_IO_INR_HDFLOPPY 7 /* (*) HD (1.44MB) floppy status */
#define KN02CA_IO_INR_SCC0 6 /* SCC (Z85C30) serial #0 */
#define KN02CA_IO_INR_TC1 5 /* TURBOchannel slot #1 */
#define KN02CA_IO_INR_XDFLOPPY 4 /* (*) XD (2.88MB) floppy status */
#define KN02CA_IO_INR_VIDEO 3 /* framebuffer */
#define KN02CA_IO_INR_XVIDEO 2 /* ~framebuffer */
#define KN02CA_IO_INR_AB_XMIT 1 /* ACCESS.bus transmit */
#define KN02CA_IO_INR_AB_RECV 0 /* ACCESS.bus receive */
/*
* Memory Error Register bits.
*/
#define KN02CA_MER_INTR (1<<27) /* ARC IRQ status & ack */
/*
* Memory Size Register bits.
*/
#define KN02CA_MSR_INTREN (1<<26) /* ARC periodic IRQ enable */
#define KN02CA_MSR_MS10EN (1<<25) /* 10/1ms IRQ period select */
#define KN02CA_MSR_PFORCE (0xf<<21) /* byte lane error force */
#define KN02CA_MSR_MABEN (1<<20) /* A side VFB address enable */
#define KN02CA_MSR_LASTBANK (0x7<<17) /* onboard RAM bank # */
/*
* I/O ASIC System Support Register bits.
*/
#define KN03CA_IO_SSR_RES_14 (1<<14) /* unused */
#define KN03CA_IO_SSR_RES_13 (1<<13) /* unused */
#define KN03CA_IO_SSR_ISDN_RST (1<<12) /* ~ISDN (Am79C30A) reset */
#define KN03CA_IO_SSR_FLOPPY_RST (1<<7) /* ~FDC (82077) reset */
#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
#define KN03CA_IO_SSR_AB_RST (1<<5) /* ACCESS.bus reset */
#define KN03CA_IO_SSR_RES_4 (1<<4) /* unused */
#define KN03CA_IO_SSR_RES_3	(1<<3)		/* unused */
#define KN03CA_IO_SSR_RES_2 (1<<2) /* unused */
#define KN03CA_IO_SSR_RES_1 (1<<1) /* unused */
#define KN03CA_IO_SSR_LED (1<<0) /* power LED */
#endif /* __ASM_MIPS_DEC_KN02CA_H */

View file

@ -0,0 +1,84 @@
/*
* Hardware info common to DECstation 5000/1xx systems (otherwise
* known as 3min or kn02ba) and Personal DECstations 5000/xx ones
* (otherwise known as maxine or kn02ca).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
* are by courtesy of Chris Fraser.
* Copyright (C) 2000, 2002, 2003, 2005 Maciej W. Rozycki
*
* These are addresses which have to be known early in the boot process.
* For other addresses refer to tc.h, ioasic_addrs.h and friends.
*/
#ifndef __ASM_MIPS_DEC_KN02XA_H
#define __ASM_MIPS_DEC_KN02XA_H
#include <asm/dec/ioasic_addrs.h>
#define KN02XA_SLOT_BASE 0x1c000000
/*
* Memory control ASIC registers.
*/
#define KN02XA_MER 0x0c400000 /* memory error register */
#define KN02XA_MSR 0x0c800000 /* memory size register */
/*
* CPU control ASIC registers.
*/
#define KN02XA_MEM_CONF 0x0e000000 /* write timeout config */
#define KN02XA_EAR 0x0e000004 /* error address register */
#define KN02XA_BOOT0 0x0e000008 /* boot 0 register */
#define KN02XA_MEM_INTR 0x0e00000c /* write err IRQ stat & ack */
/*
* Memory Error Register bits, common definitions.
* The rest is defined in system-specific headers.
*/
#define KN02XA_MER_RES_28 (0xf<<28) /* unused */
#define KN02XA_MER_RES_17 (0x3ff<<17) /* unused */
#define KN02XA_MER_PAGERR (1<<16) /* 2k page boundary error */
#define KN02XA_MER_TRANSERR (1<<15) /* transfer length error */
#define KN02XA_MER_PARDIS (1<<14) /* parity error disable */
#define KN02XA_MER_SIZE (1<<13) /* r/o mirror of MSR_SIZE */
#define KN02XA_MER_RES_12 (1<<12) /* unused */
#define KN02XA_MER_BYTERR (0xf<<8) /* byte lane error bitmask: */
#define KN02XA_MER_BYTERR_3 (0x8<<8) /* byte lane #3 */
#define KN02XA_MER_BYTERR_2 (0x4<<8) /* byte lane #2 */
#define KN02XA_MER_BYTERR_1 (0x2<<8) /* byte lane #1 */
#define KN02XA_MER_BYTERR_0 (0x1<<8) /* byte lane #0 */
#define KN02XA_MER_RES_0 (0xff<<0) /* unused */
/*
* Memory Size Register bits, common definitions.
* The rest is defined in system-specific headers.
*/
#define KN02XA_MSR_RES_27 (0x1f<<27) /* unused */
#define KN02XA_MSR_RES_14 (0x7<<14) /* unused */
#define KN02XA_MSR_SIZE (1<<13) /* 16M/4M stride */
#define KN02XA_MSR_RES_0 (0x1fff<<0) /* unused */
/*
* Error Address Register bits.
*/
#define KN02XA_EAR_RES_29 (0x7<<29) /* unused */
#define KN02XA_EAR_ADDRESS (0x7ffffff<<2) /* address involved */
#define KN02XA_EAR_RES_0 (0x3<<0) /* unused */
#ifndef __ASSEMBLY__
#include <linux/interrupt.h>
struct pt_regs;
extern void dec_kn02xa_be_init(void);
extern int dec_kn02xa_be_handler(struct pt_regs *regs, int is_fixup);
extern irqreturn_t dec_kn02xa_be_interrupt(int irq, void *dev_id);
#endif
#endif /* __ASM_MIPS_DEC_KN02XA_H */

View file

@ -0,0 +1,74 @@
/*
* Hardware info about DECstation 5000/2x0 systems (otherwise known as
* 3max+) and DECsystem 5900 systems (otherwise known as bigmax) which
* differ mechanically but are otherwise identical (both are known as
* KN03).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995,1996 by Paul M. Antoine, some code and definitions
* are by courtesy of Chris Fraser.
* Copyright (C) 2000, 2002, 2003, 2005 Maciej W. Rozycki
*/
#ifndef __ASM_MIPS_DEC_KN03_H
#define __ASM_MIPS_DEC_KN03_H
#include <asm/dec/ecc.h>
#include <asm/dec/ioasic_addrs.h>
#define KN03_SLOT_BASE 0x1f800000
/*
* CPU interrupt bits.
*/
#define KN03_CPU_INR_HALT 6 /* HALT button */
#define KN03_CPU_INR_BUS 5 /* memory, I/O bus read/write errors */
#define KN03_CPU_INR_RES_4 4 /* unused */
#define KN03_CPU_INR_RTC 3 /* DS1287 RTC */
#define KN03_CPU_INR_CASCADE 2 /* I/O ASIC cascade */
/*
* I/O ASIC interrupt bits. Star marks denote non-IRQ status bits.
*/
#define KN03_IO_INR_3MAXP 15 /* (*) 3max+/bigmax ID */
#define KN03_IO_INR_NVRAM 14 /* (*) NVRAM clear jumper */
#define KN03_IO_INR_TC2 13 /* TURBOchannel slot #2 */
#define KN03_IO_INR_TC1 12 /* TURBOchannel slot #1 */
#define KN03_IO_INR_TC0 11 /* TURBOchannel slot #0 */
#define KN03_IO_INR_NRMOD 10 /* (*) NRMOD manufacturing jumper */
#define KN03_IO_INR_ASC 9 /* ASC (NCR53C94) SCSI */
#define KN03_IO_INR_LANCE 8 /* LANCE (Am7990) Ethernet */
#define KN03_IO_INR_SCC1 7 /* SCC (Z85C30) serial #1 */
#define KN03_IO_INR_SCC0 6 /* SCC (Z85C30) serial #0 */
#define KN03_IO_INR_RTC 5 /* DS1287 RTC */
#define KN03_IO_INR_PSU 4 /* power supply unit warning */
#define KN03_IO_INR_RES_3 3 /* unused */
#define KN03_IO_INR_ASC_DATA 2 /* SCSI data ready (for PIO) */
#define KN03_IO_INR_PBNC 1 /* ~HALT button debouncer */
#define KN03_IO_INR_PBNO 0 /* HALT button debouncer */
/*
* Memory Control Register bits.
*/
#define KN03_MCR_RES_16 (0xffff<<16) /* unused */
#define KN03_MCR_DIAGCHK (1<<15) /* diagn/norml ECC reads */
#define KN03_MCR_DIAGGEN (1<<14) /* diagn/norml ECC writes */
#define KN03_MCR_CORRECT (1<<13) /* ECC correct/check */
#define KN03_MCR_RES_11 (0x3<<12) /* unused */
#define KN03_MCR_BNK32M (1<<10) /* 32M/8M stride */
#define KN03_MCR_RES_7 (0x7<<7) /* unused */
#define KN03_MCR_CHECK (0x7f<<0) /* diagnostic check bits */
/*
* I/O ASIC System Support Register bits.
*/
#define KN03_IO_SSR_TXDIS1 (1<<14) /* SCC1 transmit disable */
#define KN03_IO_SSR_TXDIS0 (1<<13) /* SCC0 transmit disable */
#define KN03_IO_SSR_RES_12 (1<<12) /* unused */
#define KN03_IO_SSR_LEDS (0xff<<0) /* ~diagnostic LEDs */
#endif /* __ASM_MIPS_DEC_KN03_H */

View file

@ -0,0 +1,85 @@
/*
* include/asm-mips/dec/kn05.h
*
* DECstation/DECsystem 5000/260 (4max+ or KN05), 5000/150 (4min
* or KN04-BA), Personal DECstation/DECsystem 5000/50 (4maxine or
* KN04-CA) and DECsystem 5900/260 (KN05) R4k CPU card MB ASIC
* definitions.
*
* Copyright (C) 2002, 2003, 2005, 2008 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* WARNING! All this information is pure guesswork based on the
 * ROM.  It is provided here in the hope that it will give someone some
* food for thought. No documentation for the KN05 nor the KN04
* module has been located so far.
*/
#ifndef __ASM_MIPS_DEC_KN05_H
#define __ASM_MIPS_DEC_KN05_H
#include <asm/dec/ioasic_addrs.h>
/*
* The oncard MB (Memory Buffer) ASIC provides an additional address
* decoder. Certain address ranges within the "high" 16 slots are
* passed to the I/O ASIC's decoder like with the KN03 or KN02-BA/CA.
* Others are handled locally. "Low" slots are always passed.
*/
#define KN4K_SLOT_BASE 0x1fc00000
#define KN4K_MB_ROM (0*IOASIC_SLOT_SIZE) /* KN05/KN04 card ROM */
#define KN4K_IOCTL (1*IOASIC_SLOT_SIZE) /* I/O ASIC */
#define KN4K_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
#define KN4K_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
#define KN4K_MB_INT (4*IOASIC_SLOT_SIZE) /* MB interrupt register */
#define KN4K_MB_EA (5*IOASIC_SLOT_SIZE) /* MB error address? */
#define KN4K_MB_EC (6*IOASIC_SLOT_SIZE) /* MB error ??? */
#define KN4K_MB_CSR (7*IOASIC_SLOT_SIZE) /* MB control & status */
#define KN4K_RES_08 (8*IOASIC_SLOT_SIZE) /* unused? */
#define KN4K_RES_09 (9*IOASIC_SLOT_SIZE) /* unused? */
#define KN4K_RES_10 (10*IOASIC_SLOT_SIZE) /* unused? */
#define KN4K_RES_11 (11*IOASIC_SLOT_SIZE) /* unused? */
#define KN4K_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
#define KN4K_RES_13 (13*IOASIC_SLOT_SIZE) /* unused? */
#define KN4K_RES_14 (14*IOASIC_SLOT_SIZE) /* unused? */
#define KN4K_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
/*
* MB ASIC interrupt bits.
*/
#define KN4K_MB_INR_MB 4 /* ??? */
#define KN4K_MB_INR_MT 3 /* memory, I/O bus read/write errors */
#define KN4K_MB_INR_RES_2 2 /* unused */
#define KN4K_MB_INR_RTC 1 /* RTC */
#define KN4K_MB_INR_TC 0 /* I/O ASIC cascade */
/*
* Bits for the MB interrupt register.
* The register appears read-only.
*/
#define KN4K_MB_INT_IRQ (0x1f<<0) /* CPU Int[4:0] status. */
#define KN4K_MB_INT_IRQ_N(n) (1<<(n)) /* Individual status bits. */
/*
* Bits for the MB control & status register.
* Set to 0x00bf8001 for KN05 and to 0x003f8000 for KN04 by the firmware.
*/
#define KN4K_MB_CSR_PF (1<<0) /* PreFetching enable? */
#define KN4K_MB_CSR_F (1<<1) /* ??? */
#define KN4K_MB_CSR_ECC (0xff<<2) /* ??? */
#define KN4K_MB_CSR_OD (1<<10) /* ??? */
#define KN4K_MB_CSR_CP (1<<11) /* ??? */
#define KN4K_MB_CSR_UNC (1<<12) /* ??? */
#define KN4K_MB_CSR_IM (1<<13) /* ??? */
#define KN4K_MB_CSR_NC (1<<14) /* ??? */
#define KN4K_MB_CSR_EE (1<<15) /* (bus) Exception Enable? */
#define KN4K_MB_CSR_MSK (0x1f<<16) /* CPU Int[4:0] mask */
#define KN4K_MB_CSR_MSK_N(n) (1<<((n)+16)) /* Individual mask bits. */
#define KN4K_MB_CSR_FW (1<<21) /* ??? */
#define KN4K_MB_CSR_W (1<<31) /* ??? */
#endif /* __ASM_MIPS_DEC_KN05_H */

View file

@ -0,0 +1,26 @@
/*
* include/asm-mips/dec/kn230.h
*
* DECsystem 5100 (MIPSmate or KN230) definitions.
*
* Copyright (C) 2002, 2003 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_MIPS_DEC_KN230_H
#define __ASM_MIPS_DEC_KN230_H
/*
* CPU interrupt bits.
*/
#define KN230_CPU_INR_HALT 6 /* HALT button */
#define KN230_CPU_INR_BUS 5 /* memory, I/O bus read/write errors */
#define KN230_CPU_INR_RTC 4 /* DS1287 RTC */
#define KN230_CPU_INR_SII 3 /* SII (DC7061) SCSI */
#define KN230_CPU_INR_LANCE 3 /* LANCE (Am7990) Ethernet */
#define KN230_CPU_INR_DZ11 2 /* DZ11 (DC7085) serial */
#endif /* __ASM_MIPS_DEC_KN230_H */

View file

@ -0,0 +1,27 @@
/*
* Various machine type macros
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1998, 2000 Harald Koerfgen
*/
#ifndef __ASM_DEC_MACHTYPE_H
#define __ASM_DEC_MACHTYPE_H
#include <asm/bootinfo.h>
#define TURBOCHANNEL (mips_machtype == MACH_DS5000_200 || \
mips_machtype == MACH_DS5000_1XX || \
mips_machtype == MACH_DS5000_XX || \
mips_machtype == MACH_DS5000_2X0 || \
mips_machtype == MACH_DS5900)
#define IOASIC (mips_machtype == MACH_DS5000_1XX || \
mips_machtype == MACH_DS5000_XX || \
mips_machtype == MACH_DS5000_2X0 || \
mips_machtype == MACH_DS5900)
#endif
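Editor's sketch (not part of the original header): a minimal example of how platform code might branch on these predicates; dec_print_buses() is a hypothetical name, not kernel API.

/*
 * Editor's sketch: using the machine-type predicates above.
 * dec_print_buses() is a hypothetical helper.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/bootinfo.h>
#include <asm/dec/machtype.h>

static void __init dec_print_buses(void)
{
	if (TURBOCHANNEL)		/* any TURBOchannel-equipped model */
		pr_info("TURBOchannel system\n");
	if (IOASIC)			/* subset of models with an I/O ASIC */
		pr_info("I/O ASIC present\n");
}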

View file

@ -0,0 +1,174 @@
/*
* include/asm-mips/dec/prom.h
*
* DECstation PROM interface.
*
* Copyright (C) 2002 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Based on arch/mips/dec/prom/prom.h by the Anonymous.
*/
#ifndef _ASM_DEC_PROM_H
#define _ASM_DEC_PROM_H
#include <linux/types.h>
#include <asm/addrspace.h>
/*
* PMAX/3MAX PROM entry points for DS2100/3100's and DS5000/2xx's.
* Many of these will work for MIPSen as well!
*/
#define VEC_RESET (u64 *)CKSEG1ADDR(0x1fc00000)
/* Prom base address */
#define PMAX_PROM_ENTRY(x) (VEC_RESET + (x)) /* Prom jump table */
#define PMAX_PROM_HALT PMAX_PROM_ENTRY(2) /* valid on MIPSen */
#define PMAX_PROM_AUTOBOOT PMAX_PROM_ENTRY(5) /* valid on MIPSen */
#define PMAX_PROM_OPEN PMAX_PROM_ENTRY(6)
#define PMAX_PROM_READ PMAX_PROM_ENTRY(7)
#define PMAX_PROM_CLOSE PMAX_PROM_ENTRY(10)
#define PMAX_PROM_LSEEK PMAX_PROM_ENTRY(11)
#define PMAX_PROM_GETCHAR PMAX_PROM_ENTRY(12)
#define PMAX_PROM_PUTCHAR PMAX_PROM_ENTRY(13) /* 12 on MIPSen */
#define PMAX_PROM_GETS PMAX_PROM_ENTRY(15)
#define PMAX_PROM_PRINTF PMAX_PROM_ENTRY(17)
#define PMAX_PROM_GETENV PMAX_PROM_ENTRY(33) /* valid on MIPSen */
/*
* Magic number indicating REX PROM available on DECstation. Found in
* register a2 on transfer of control to program from PROM.
*/
#define REX_PROM_MAGIC 0x30464354
#ifdef CONFIG_64BIT
#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
#else /* !CONFIG_64BIT */
#define prom_is_rex(magic) ((magic) == REX_PROM_MAGIC)
#endif /* !CONFIG_64BIT */
/*
* 3MIN/MAXINE PROM entry points for DS5000/1xx's, DS5000/xx's and
* DS5000/2x0.
*/
#define REX_PROM_GETBITMAP 0x84/4 /* get mem bitmap */
#define REX_PROM_GETCHAR 0x24/4 /* getch() */
#define REX_PROM_GETENV 0x64/4 /* get env. variable */
#define REX_PROM_GETSYSID 0x80/4 /* get system id */
#define REX_PROM_GETTCINFO 0xa4/4
#define REX_PROM_PRINTF 0x30/4 /* printf() */
#define REX_PROM_SLOTADDR 0x6c/4 /* slotaddr */
#define REX_PROM_BOOTINIT 0x54/4 /* open() */
#define REX_PROM_BOOTREAD 0x58/4 /* read() */
#define REX_PROM_CLEARCACHE 0x7c/4
/*
* Used by rex_getbitmap().
*/
typedef struct {
int pagesize;
unsigned char bitmap[0];
} memmap;
/*
* Function pointers as read from a PROM's callback vector.
*/
extern int (*__rex_bootinit)(void);
extern int (*__rex_bootread)(void);
extern int (*__rex_getbitmap)(memmap *);
extern unsigned long *(*__rex_slot_address)(int);
extern void *(*__rex_gettcinfo)(void);
extern int (*__rex_getsysid)(void);
extern void (*__rex_clear_cache)(void);
extern int (*__prom_getchar)(void);
extern char *(*__prom_getenv)(char *);
extern int (*__prom_printf)(char *, ...);
extern int (*__pmax_open)(char*, int);
extern int (*__pmax_lseek)(int, long, int);
extern int (*__pmax_read)(int, void *, int);
extern int (*__pmax_close)(int);
#ifdef CONFIG_64BIT
/*
* On MIPS64 we have to call PROM functions via a helper
* dispatcher to accommodate ABI incompatibilities.
*/
#define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *));
int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *));
int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));
unsigned long *__DEC_PROM_O32(_rex_slot_address,
(unsigned long *(*)(int), void *, int));
void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *));
int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *));
void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *));
int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *));
char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *));
int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...));
#define rex_bootinit() _rex_bootinit(__rex_bootinit, NULL)
#define rex_bootread() _rex_bootread(__rex_bootread, NULL)
#define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, NULL, x)
#define rex_slot_address(x) _rex_slot_address(__rex_slot_address, NULL, x)
#define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo, NULL)
#define rex_getsysid() _rex_getsysid(__rex_getsysid, NULL)
#define rex_clear_cache() _rex_clear_cache(__rex_clear_cache, NULL)
#define prom_getchar() _prom_getchar(__prom_getchar, NULL)
#define prom_getenv(x) _prom_getenv(__prom_getenv, NULL, x)
#define prom_printf(x...) _prom_printf(__prom_printf, NULL, x)
#else /* !CONFIG_64BIT */
/*
* On plain MIPS we just call PROM functions directly.
*/
#define rex_bootinit __rex_bootinit
#define rex_bootread __rex_bootread
#define rex_getbitmap __rex_getbitmap
#define rex_slot_address __rex_slot_address
#define rex_gettcinfo __rex_gettcinfo
#define rex_getsysid __rex_getsysid
#define rex_clear_cache __rex_clear_cache
#define prom_getchar __prom_getchar
#define prom_getenv __prom_getenv
#define prom_printf __prom_printf
#define pmax_open __pmax_open
#define pmax_lseek __pmax_lseek
#define pmax_read __pmax_read
#define pmax_close __pmax_close
#endif /* !CONFIG_64BIT */
extern void prom_meminit(u32);
extern void prom_identify_arch(u32);
extern void prom_init_cmdline(s32, s32 *, u32);
extern void register_prom_console(void);
extern void unregister_prom_console(void);
#endif /* _ASM_DEC_PROM_H */
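Editor's sketch (not part of the original header): because of the wrappers above, callers spell PROM calls the same way with or without CONFIG_64BIT; the helper name and the "boot" environment variable below are assumptions.

/*
 * Editor's sketch: the same source works on 32-bit and 64-bit kernels
 * thanks to the prom_*()/rex_*() wrappers.  show_boot_device() is a
 * hypothetical helper.
 */
#include <linux/init.h>
#include <asm/dec/prom.h>

static void __init show_boot_device(void)
{
	char *dev = prom_getenv("boot");	/* PROM environment variable */

	if (dev)
		prom_printf("boot device: %s\n", dev);
}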

View file

@ -0,0 +1,19 @@
/*
* include/asm-mips/dec/system.h
*
* Generic DECstation/DECsystem bits.
*
* Copyright (C) 2005, 2006 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_DEC_SYSTEM_H
#define __ASM_DEC_SYSTEM_H
extern unsigned long dec_kn_slot_base, dec_kn_slot_size;
extern int dec_tc_bus;
#endif /* __ASM_DEC_SYSTEM_H */

View file

@ -0,0 +1,32 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 by Waldorf Electronics
* Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2007 Maciej W. Rozycki
*/
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H
#include <linux/param.h>
extern void __delay(unsigned long loops);
extern void __ndelay(unsigned long ns);
extern void __udelay(unsigned long us);
#define ndelay(ns) __ndelay(ns)
#define udelay(us) __udelay(us)
/* Make sure "usecs *= ..." in udelay does not overflow. */
#if HZ >= 1000
#define MAX_UDELAY_MS 1
#elif HZ <= 200
#define MAX_UDELAY_MS 5
#else
#define MAX_UDELAY_MS (1000 / HZ)
#endif
#endif /* _ASM_DELAY_H */
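Editor's sketch (not part of the original header): MAX_UDELAY_MS caps what a single udelay() call may be asked for, so longer busy-waits are split into 1 ms chunks, roughly the way linux/delay.h builds mdelay(); delay_ms() is a hypothetical name.

/*
 * Editor's sketch: keep each udelay() within MAX_UDELAY_MS by splitting
 * long waits into 1 ms steps.  delay_ms() is a hypothetical helper.
 */
static inline void delay_ms(unsigned int ms)
{
	while (ms--)
		udelay(1000);	/* 1 ms per iteration, always within the cap */
}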

View file

@ -0,0 +1,19 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_MIPS_DEVICE_H
#define _ASM_MIPS_DEVICE_H
struct dma_map_ops;
struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
};
#endif /* _ASM_MIPS_DEVICE_H*/

View file

@ -0,0 +1,68 @@
/*
* Copyright (C) 2000, 2004 Maciej W. Rozycki
* Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H
#include <asm-generic/div64.h>
#if BITS_PER_LONG == 64
#include <linux/types.h>
/*
* No traps on overflows for any of these...
*/
#define __div64_32(n, base) \
({ \
unsigned long __cf, __tmp, __tmp2, __i; \
unsigned long __quot32, __mod32; \
unsigned long __high, __low; \
unsigned long long __n; \
\
__high = *__n >> 32; \
__low = __n; \
__asm__( \
" .set push \n" \
" .set noat \n" \
" .set noreorder \n" \
" move %2, $0 \n" \
" move %3, $0 \n" \
" b 1f \n" \
" li %4, 0x21 \n" \
"0: \n" \
" sll $1, %0, 0x1 \n" \
" srl %3, %0, 0x1f \n" \
" or %0, $1, %5 \n" \
" sll %1, %1, 0x1 \n" \
" sll %2, %2, 0x1 \n" \
"1: \n" \
" bnez %3, 2f \n" \
" sltu %5, %0, %z6 \n" \
" bnez %5, 3f \n" \
"2: \n" \
" addiu %4, %4, -1 \n" \
" subu %0, %0, %z6 \n" \
" addiu %2, %2, 1 \n" \
"3: \n" \
" bnez %4, 0b\n\t" \
" srl %5, %1, 0x1f\n\t" \
" .set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
: "Jr" (base), "0" (__high), "1" (__low)); \
\
(__n) = __quot32; \
__mod32; \
})
#endif /* BITS_PER_LONG == 64 */
#endif /* __ASM_DIV64_H */
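Editor's sketch (not part of the original header): callers never invoke __div64_32() directly; they go through the generic do_div() interface that <asm/div64.h> supports, as in the hypothetical helper below.

/*
 * Editor's sketch: caller-side use of do_div().  ns_to_us() is a
 * hypothetical helper.
 */
#include <linux/types.h>
#include <asm/div64.h>

static inline u64 ns_to_us(u64 ns, u32 *rem_ns)
{
	*rem_ns = do_div(ns, 1000);	/* divides 'ns' in place, returns the remainder */
	return ns;
}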

View file

@ -0,0 +1,24 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
*
*/
#ifndef __ASM_DMA_COHERENCE_H
#define __ASM_DMA_COHERENCE_H
#ifdef CONFIG_DMA_MAYBE_COHERENT
extern int coherentio;
extern int hw_coherentio;
#else
#ifdef CONFIG_DMA_COHERENT
#define coherentio 1
#else
#define coherentio 0
#endif
#define hw_coherentio 0
#endif /* CONFIG_DMA_MAYBE_COHERENT */
#endif

View file

@ -0,0 +1,104 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <asm/scatterlist.h>
#include <asm/dma-coherence.h>
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
#include <dma-coherence.h>
#endif
extern struct dma_map_ops *mips_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
else
return mips_dma_map_ops;
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return 0;
return addr + size <= *dev->dma_mask;
}
static inline void dma_mark_clean(void *addr, size_t size) {}
#include <asm-generic/dma-mapping-common.h>
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
return ops->dma_supported(dev, mask);
}
static inline int dma_mapping_error(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, mask);
return ops->mapping_error(dev, mask);
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if(!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
if (ops->set_dma_mask)
return ops->set_dma_mask(dev, mask);
*dev->dma_mask = mask;
return 0;
}
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
ops->free(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
#endif /* _ASM_DMA_MAPPING_H */
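Editor's sketch (not part of the original header): a typical driver-side coherent allocation through the API above; the function name, device pointer and size are placeholders.

/*
 * Editor's sketch: driver-side coherent allocation.  example_alloc_ring()
 * is a hypothetical helper; pair it with dma_free_coherent() on teardown.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_alloc_ring(struct device *dev, size_t size,
			      void **cpu_addr, dma_addr_t *bus_addr)
{
	*cpu_addr = dma_alloc_coherent(dev, size, bus_addr, GFP_KERNEL);
	if (!*cpu_addr)
		return -ENOMEM;
	return 0;
}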

arch/mips/include/asm/dma.h Normal file
View file

@ -0,0 +1,317 @@
/*
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
*
* NOTE: all this is true *only* for ISA/EISA expansions on Mips boards
* and can only be used for expansion cards. Onboard DMA controllers, such
 * as the R4030 on Jazz boards, behave totally differently!
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/io.h> /* need byte IO */
#include <linux/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
#define dma_outb outb
#endif
#define dma_inb inb
/*
* NOTES about DMA transfers:
*
* controller 1: channels 0-3, byte operations, ports 00-1F
* controller 2: channels 4-7, word operations, ports C0-DF
*
* - ALL registers are 8 bits only, regardless of transfer size
* - channel 4 is not used - cascades 1 into 2.
* - channels 0-3 are byte - addresses/counts are for physical bytes
* - channels 5-7 are word - addresses/counts are for physical words
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
* - transfer count loaded to registers is 1 less than actual count
* - controller 2 offsets are all even (2x offsets for controller 1)
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
* DMA transfers are limited to the lower 16MB of _physical_ memory.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
*
* Address mapping for channels 0-3:
*
* A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* P7 ... P0 A7 ... A0 A7 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
* A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
* | ... | \ \ ... \ \ \ ... \ \
* | ... | \ \ ... \ \ \ ... \ (not used)
* | ... | \ \ ... \ \ \ ... \
* P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
* the hardware level, so odd-byte transfers aren't possible).
*
* Transfer count (_not # bytes_) is limited to 64K, represented as actual
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
* and up to 128K bytes may be transferred on channels 5-7 in one operation.
*
*/
#ifndef CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN
#define MAX_DMA_CHANNELS 8
#endif
/*
* The maximum address in KSEG0 that we can perform a DMA transfer to on this
* platform. This describes only the PC style part of the DMA logic like on
* Deskstations or Acer PICA but not the much more versatile DMA logic used
* for the local devices on Acer PICA or Magnums.
*/
#if defined(CONFIG_SGI_IP22) || defined(CONFIG_SGI_IP28)
/* don't care; ISA bus master won't work, ISA slave DMA supports 32bit addr */
#define MAX_DMA_ADDRESS PAGE_OFFSET
#else
#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
#endif
#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
#ifndef MAX_DMA32_PFN
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA_ADDR_0 0x00 /* DMA address registers */
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC
#define DMA_CNT_0 0x01 /* DMA count registers */
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
#define DMA_PAGE_0 0x87 /* DMA page registers */
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static __inline__ void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
if (dmanr<=3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
static __inline__ void disable_dma(unsigned int dmanr)
{
if (dmanr<=3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
if (dmanr<=3)
dma_outb(0, DMA1_CLEAR_FF_REG);
else
dma_outb(0, DMA2_CLEAR_FF_REG);
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr<=3)
dma_outb(mode | dmanr, DMA1_MODE_REG);
else
dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
switch(dmanr) {
case 0:
dma_outb(pagenr, DMA_PAGE_0);
break;
case 1:
dma_outb(pagenr, DMA_PAGE_1);
break;
case 2:
dma_outb(pagenr, DMA_PAGE_2);
break;
case 3:
dma_outb(pagenr, DMA_PAGE_3);
break;
case 5:
dma_outb(pagenr & 0xfe, DMA_PAGE_5);
break;
case 6:
dma_outb(pagenr & 0xfe, DMA_PAGE_6);
break;
case 7:
dma_outb(pagenr & 0xfe, DMA_PAGE_7);
break;
}
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
if (dmanr <= 3) {
dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
} else {
dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
}
}
/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
count--;
if (dmanr <= 3) {
dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
} else {
dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
}
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
/* using short to get 16-bit wrap around */
unsigned short count;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr<=3)? count : (count<<1);
}
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */
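Editor's sketch (not part of the original header): the canonical sequence for programming an ISA-style DMA channel with the helpers above; the channel, physical address and count are placeholders and the buffer must already satisfy the constraints listed in the NOTES block.

/*
 * Editor's sketch: start a device-to-memory transfer on an ISA-style
 * DMA channel.  example_start_isa_dma() is a hypothetical helper.
 */
static void example_start_isa_dma(unsigned int chan, unsigned int phys_addr,
				  unsigned int count)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, phys_addr);
	set_dma_count(chan, count);		/* bytes; must be even for channels 5-7 */
	enable_dma(chan);

	release_dma_lock(flags);
}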

View file

@ -0,0 +1,27 @@
/*
* DS1287 timer functions.
*
* Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __ASM_DS1287_H
#define __ASM_DS1287_H
extern int ds1287_timer_state(void);
extern void ds1287_set_base_clock(unsigned int clock);
extern int ds1287_clockevent_init(int irq);
#endif

View file

@ -0,0 +1,85 @@
/*
* Copyright (C) 2005 Mips Technologies
* Author: Chris Dearman, chris@mips.com derived from fpu.h
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _ASM_DSP_H
#define _ASM_DSP_H
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/hazards.h>
#include <asm/mipsregs.h>
#define DSP_DEFAULT 0x00000000
#define DSP_MASK 0x3f
#define __enable_dsp_hazard() \
do { \
asm("_ehb"); \
} while (0)
static inline void __init_dsp(void)
{
mthi1(0);
mtlo1(0);
mthi2(0);
mtlo2(0);
mthi3(0);
mtlo3(0);
wrdsp(DSP_DEFAULT, DSP_MASK);
}
static inline void init_dsp(void)
{
if (cpu_has_dsp)
__init_dsp();
}
#define __save_dsp(tsk) \
do { \
tsk->thread.dsp.dspr[0] = mfhi1(); \
tsk->thread.dsp.dspr[1] = mflo1(); \
tsk->thread.dsp.dspr[2] = mfhi2(); \
tsk->thread.dsp.dspr[3] = mflo2(); \
tsk->thread.dsp.dspr[4] = mfhi3(); \
tsk->thread.dsp.dspr[5] = mflo3(); \
tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK); \
} while (0)
#define save_dsp(tsk) \
do { \
if (cpu_has_dsp) \
__save_dsp(tsk); \
} while (0)
#define __restore_dsp(tsk) \
do { \
mthi1(tsk->thread.dsp.dspr[0]); \
mtlo1(tsk->thread.dsp.dspr[1]); \
mthi2(tsk->thread.dsp.dspr[2]); \
mtlo2(tsk->thread.dsp.dspr[3]); \
mthi3(tsk->thread.dsp.dspr[4]); \
mtlo3(tsk->thread.dsp.dspr[5]); \
wrdsp(tsk->thread.dsp.dspcontrol, DSP_MASK); \
} while (0)
#define restore_dsp(tsk) \
do { \
if (cpu_has_dsp) \
__restore_dsp(tsk); \
} while (0)
#define __get_dsp_regs(tsk) \
({ \
if (tsk == current) \
__save_dsp(current); \
\
tsk->thread.dsp.dspr; \
})
#endif /* _ASM_DSP_H */
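Editor's sketch (not part of the original header): save_dsp()/restore_dsp() are meant to bracket a task switch; the function below is a hypothetical stand-in for the real switch_to() path.

/*
 * Editor's sketch: how the DSP context helpers above are used around a
 * task switch.  example_switch_dsp() is hypothetical; the real call
 * sites live in the MIPS switch_to() machinery.
 */
#include <linux/sched.h>
#include <asm/dsp.h>

static inline void example_switch_dsp(struct task_struct *prev,
				      struct task_struct *next)
{
	save_dsp(prev);		/* both are no-ops unless cpu_has_dsp */
	restore_dsp(next);
}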

View file

@ -0,0 +1,34 @@
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
static inline void atomic_scrub(void *va, u32 size)
{
unsigned long *virt_addr = va;
unsigned long temp;
u32 i;
for (i = 0; i < size / sizeof(unsigned long); i++) {
/*
* Very carefully read and write to memory atomically
* so we are interrupt, DMA and SMP safe.
*
* Intel: asm("lock; addl $0, %0"::"m"(*virt_addr));
*/
__asm__ __volatile__ (
" .set mips2 \n"
"1: ll %0, %1 # atomic_scrub \n"
" addu %0, $0 \n"
" sc %0, %1 \n"
" beqz %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*virt_addr)
: "m" (*virt_addr));
virt_addr++;
}
}
#endif
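Editor's sketch (not part of the original header): an EDAC memory-controller driver would typically scrub the mapped page that reported a correctable error; the wrapper below is hypothetical.

/*
 * Editor's sketch: scrub one mapped page with the helper above.
 * example_scrub_page() is a hypothetical wrapper.
 */
#include <asm/page.h>

static inline void example_scrub_page(void *mapped_page)
{
	atomic_scrub(mapped_page, PAGE_SIZE);
}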

arch/mips/include/asm/elf.h Normal file
View file

@ -0,0 +1,393 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Much of this is taken from binutils and GNU libc ...
*/
#ifndef _ASM_ELF_H
#define _ASM_ELF_H
/* ELF header e_flags defines. */
/* MIPS architecture level. */
#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32 R2 code. */
#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64 R2 code. */
/* The ABI of a file. */
#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
#define PT_MIPS_REGINFO 0x70000000
#define PT_MIPS_RTPROC 0x70000001
#define PT_MIPS_OPTIONS 0x70000002
/* Flags in the e_flags field of the header */
#define EF_MIPS_NOREORDER 0x00000001
#define EF_MIPS_PIC 0x00000002
#define EF_MIPS_CPIC 0x00000004
#define EF_MIPS_ABI2 0x00000020
#define EF_MIPS_OPTIONS_FIRST 0x00000080
#define EF_MIPS_32BITMODE 0x00000100
#define EF_MIPS_FP64 0x00000200
#define EF_MIPS_ABI 0x0000f000
#define EF_MIPS_ARCH 0xf0000000
#define DT_MIPS_RLD_VERSION 0x70000001
#define DT_MIPS_TIME_STAMP 0x70000002
#define DT_MIPS_ICHECKSUM 0x70000003
#define DT_MIPS_IVERSION 0x70000004
#define DT_MIPS_FLAGS 0x70000005
#define RHF_NONE 0x00000000
#define RHF_HARDWAY 0x00000001
#define RHF_NOTPOT 0x00000002
#define RHF_SGI_ONLY 0x00000010
#define DT_MIPS_BASE_ADDRESS 0x70000006
#define DT_MIPS_CONFLICT 0x70000008
#define DT_MIPS_LIBLIST 0x70000009
#define DT_MIPS_LOCAL_GOTNO 0x7000000a
#define DT_MIPS_CONFLICTNO 0x7000000b
#define DT_MIPS_LIBLISTNO 0x70000010
#define DT_MIPS_SYMTABNO 0x70000011
#define DT_MIPS_UNREFEXTNO 0x70000012
#define DT_MIPS_GOTSYM 0x70000013
#define DT_MIPS_HIPAGENO 0x70000014
#define DT_MIPS_RLD_MAP 0x70000016
#define R_MIPS_NONE 0
#define R_MIPS_16 1
#define R_MIPS_32 2
#define R_MIPS_REL32 3
#define R_MIPS_26 4
#define R_MIPS_HI16 5
#define R_MIPS_LO16 6
#define R_MIPS_GPREL16 7
#define R_MIPS_LITERAL 8
#define R_MIPS_GOT16 9
#define R_MIPS_PC16 10
#define R_MIPS_CALL16 11
#define R_MIPS_GPREL32 12
/* The remaining relocs are defined on Irix, although they are not
in the MIPS ELF ABI. */
#define R_MIPS_UNUSED1 13
#define R_MIPS_UNUSED2 14
#define R_MIPS_UNUSED3 15
#define R_MIPS_SHIFT5 16
#define R_MIPS_SHIFT6 17
#define R_MIPS_64 18
#define R_MIPS_GOT_DISP 19
#define R_MIPS_GOT_PAGE 20
#define R_MIPS_GOT_OFST 21
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
#define R_MIPS_GOTHI16 22
#define R_MIPS_GOTLO16 23
#define R_MIPS_SUB 24
#define R_MIPS_INSERT_A 25
#define R_MIPS_INSERT_B 26
#define R_MIPS_DELETE 27
#define R_MIPS_HIGHER 28
#define R_MIPS_HIGHEST 29
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
#define R_MIPS_CALLHI16 30
#define R_MIPS_CALLLO16 31
/*
* This range is reserved for vendor specific relocations.
*/
#define R_MIPS_LOVENDOR 100
#define R_MIPS_HIVENDOR 127
#define SHN_MIPS_ACCOMON 0xff00 /* Allocated common symbols */
#define SHN_MIPS_TEXT 0xff01 /* Allocated text symbols. */
#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */
#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */
#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */
#define SHT_MIPS_LIST 0x70000000
#define SHT_MIPS_CONFLICT 0x70000002
#define SHT_MIPS_GPTAB 0x70000003
#define SHT_MIPS_UCODE 0x70000004
#define SHT_MIPS_DEBUG 0x70000005
#define SHT_MIPS_REGINFO 0x70000006
#define SHT_MIPS_PACKAGE 0x70000007
#define SHT_MIPS_PACKSYM 0x70000008
#define SHT_MIPS_RELD 0x70000009
#define SHT_MIPS_IFACE 0x7000000b
#define SHT_MIPS_CONTENT 0x7000000c
#define SHT_MIPS_OPTIONS 0x7000000d
#define SHT_MIPS_SHDR 0x70000010
#define SHT_MIPS_FDESC 0x70000011
#define SHT_MIPS_EXTSYM 0x70000012
#define SHT_MIPS_DENSE 0x70000013
#define SHT_MIPS_PDESC 0x70000014
#define SHT_MIPS_LOCSYM 0x70000015
#define SHT_MIPS_AUXSYM 0x70000016
#define SHT_MIPS_OPTSYM 0x70000017
#define SHT_MIPS_LOCSTR 0x70000018
#define SHT_MIPS_LINE 0x70000019
#define SHT_MIPS_RFDESC 0x7000001a
#define SHT_MIPS_DELTASYM 0x7000001b
#define SHT_MIPS_DELTAINST 0x7000001c
#define SHT_MIPS_DELTACLASS 0x7000001d
#define SHT_MIPS_DWARF 0x7000001e
#define SHT_MIPS_DELTADECL 0x7000001f
#define SHT_MIPS_SYMBOL_LIB 0x70000020
#define SHT_MIPS_EVENTS 0x70000021
#define SHT_MIPS_TRANSLATE 0x70000022
#define SHT_MIPS_PIXIE 0x70000023
#define SHT_MIPS_XLATE 0x70000024
#define SHT_MIPS_XLATE_DEBUG 0x70000025
#define SHT_MIPS_WHIRL 0x70000026
#define SHT_MIPS_EH_REGION 0x70000027
#define SHT_MIPS_XLATE_OLD 0x70000028
#define SHT_MIPS_PDR_EXCEPTION 0x70000029
#define SHF_MIPS_GPREL 0x10000000
#define SHF_MIPS_MERGE 0x20000000
#define SHF_MIPS_ADDR 0x40000000
#define SHF_MIPS_STRING 0x80000000
#define SHF_MIPS_NOSTRIP 0x08000000
#define SHF_MIPS_LOCAL 0x04000000
#define SHF_MIPS_NAMES 0x02000000
#define SHF_MIPS_NODUPES 0x01000000
#ifndef ELF_ARCH
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#ifdef CONFIG_32BIT
/*
* In order to be sure that we don't attempt to execute an O32 binary which
 * requires 64-bit FP (FR=1) on a system which does not support it, we refuse
* to execute any binary which has bits specified by the following macro set
* in its ELF header flags.
*/
#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
# define __MIPS_O32_FP64_MUST_BE_ZERO 0
#else
# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
__res = 0; \
\
__res; \
})
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
__res = 0; \
\
__res; \
})
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#endif /* CONFIG_64BIT */
/*
* These are used to set parameters in the core dumps.
*/
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB
#elif defined(__MIPSEL__)
#define ELF_DATA ELFDATA2LSB
#endif
#define ELF_ARCH EM_MIPS
#endif /* !defined(ELF_ARCH) */
struct mips_abi;
extern struct mips_abi mips_abi;
extern struct mips_abi mips_abi_32;
extern struct mips_abi mips_abi_n32;
#ifdef CONFIG_32BIT
#define SET_PERSONALITY(ex) \
do { \
if ((ex).e_flags & EF_MIPS_FP64) \
clear_thread_flag(TIF_32BIT_FPREGS); \
else \
set_thread_flag(TIF_32BIT_FPREGS); \
\
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
\
current->thread.abi = &mips_abi; \
} while (0)
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
#ifdef CONFIG_MIPS32_N32
#define __SET_PERSONALITY32_N32() \
do { \
set_thread_flag(TIF_32BIT_ADDR); \
current->thread.abi = &mips_abi_n32; \
} while (0)
#else
#define __SET_PERSONALITY32_N32() \
do { } while (0)
#endif
#ifdef CONFIG_MIPS32_O32
#define __SET_PERSONALITY32_O32(ex) \
do { \
set_thread_flag(TIF_32BIT_REGS); \
set_thread_flag(TIF_32BIT_ADDR); \
\
if (!((ex).e_flags & EF_MIPS_FP64)) \
set_thread_flag(TIF_32BIT_FPREGS); \
\
current->thread.abi = &mips_abi_32; \
} while (0)
#else
#define __SET_PERSONALITY32_O32(ex) \
do { } while (0)
#endif
#ifdef CONFIG_MIPS32_COMPAT
#define __SET_PERSONALITY32(ex) \
do { \
if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
((ex).e_flags & EF_MIPS_ABI) == 0) \
__SET_PERSONALITY32_N32(); \
else \
__SET_PERSONALITY32_O32(ex); \
} while (0)
#else
#define __SET_PERSONALITY32(ex) do { } while (0)
#endif
#define SET_PERSONALITY(ex) \
do { \
unsigned int p; \
\
clear_thread_flag(TIF_32BIT_REGS); \
clear_thread_flag(TIF_32BIT_FPREGS); \
clear_thread_flag(TIF_32BIT_ADDR); \
\
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
__SET_PERSONALITY32(ex); \
else \
current->thread.abi = &mips_abi; \
\
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \
set_personality(PER_LINUX); \
} while (0)
#endif /* CONFIG_64BIT */
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM __elf_platform
extern const char *__elf_platform;
/*
* See comments in asm-alpha/elf.h, this is the same thing
* on the MIPS.
*/
#define ELF_PLAT_INIT(_r, load_addr) do { \
_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
_r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
_r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
_r->regs[30] = _r->regs[31] = 0; \
} while (0)
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#endif /* _ASM_ELF_H */
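Editor's sketch (not part of the original header): elf_check_arch() is what the generic ELF loader consults to reject binaries built for the wrong MIPS flavour; a caller-side check reads roughly as below, with a hypothetical wrapper name.

/*
 * Editor's sketch: loader-side use of elf_check_arch().
 * example_check_header() is a hypothetical wrapper.
 */
#include <linux/elf.h>
#include <linux/errno.h>

static inline int example_check_header(struct elfhdr *hdr)
{
	if (!elf_check_arch(hdr))
		return -ENOEXEC;	/* wrong machine, class, ABI or FP flags */
	return 0;
}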

View file

@ -0,0 +1,261 @@
/*
* Copyright (C) NEC Electronics Corporation 2005-2006
*
* This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_EMMA_EMMA2RH_H
#define __ASM_EMMA_EMMA2RH_H
#include <irq.h>
/*
* EMMA2RH registers
*/
#define REGBASE 0x10000000
#define EMMA2RH_BHIF_STRAP_0 (0x000010+REGBASE)
#define EMMA2RH_BHIF_INT_ST_0 (0x000030+REGBASE)
#define EMMA2RH_BHIF_INT_ST_1 (0x000034+REGBASE)
#define EMMA2RH_BHIF_INT_ST_2 (0x000038+REGBASE)
#define EMMA2RH_BHIF_INT_EN_0 (0x000040+REGBASE)
#define EMMA2RH_BHIF_INT_EN_1 (0x000044+REGBASE)
#define EMMA2RH_BHIF_INT_EN_2 (0x000048+REGBASE)
#define EMMA2RH_BHIF_INT1_EN_0 (0x000050+REGBASE)
#define EMMA2RH_BHIF_INT1_EN_1 (0x000054+REGBASE)
#define EMMA2RH_BHIF_INT1_EN_2 (0x000058+REGBASE)
#define EMMA2RH_BHIF_SW_INT (0x000070+REGBASE)
#define EMMA2RH_BHIF_SW_INT_EN (0x000080+REGBASE)
#define EMMA2RH_BHIF_SW_INT_CLR (0x000090+REGBASE)
#define EMMA2RH_BHIF_MAIN_CTRL (0x0000b4+REGBASE)
#define EMMA2RH_BHIF_EXCEPT_VECT_BASE_ADDRESS (0x0000c0+REGBASE)
#define EMMA2RH_GPIO_DIR (0x110d20+REGBASE)
#define EMMA2RH_GPIO_INT_ST (0x110d30+REGBASE)
#define EMMA2RH_GPIO_INT_MASK (0x110d3c+REGBASE)
#define EMMA2RH_GPIO_INT_MODE (0x110d48+REGBASE)
#define EMMA2RH_GPIO_INT_CND_A (0x110d54+REGBASE)
#define EMMA2RH_GPIO_INT_CND_B (0x110d60+REGBASE)
#define EMMA2RH_PBRD_INT_EN (0x100010+REGBASE)
#define EMMA2RH_PBRD_CLKSEL (0x100028+REGBASE)
#define EMMA2RH_PFUR0_BASE (0x101000+REGBASE)
#define EMMA2RH_PFUR1_BASE (0x102000+REGBASE)
#define EMMA2RH_PFUR2_BASE (0x103000+REGBASE)
#define EMMA2RH_PIIC0_BASE (0x107000+REGBASE)
#define EMMA2RH_PIIC1_BASE (0x108000+REGBASE)
#define EMMA2RH_PIIC2_BASE (0x109000+REGBASE)
#define EMMA2RH_PCI_CONTROL (0x200000+REGBASE)
#define EMMA2RH_PCI_ARBIT_CTR (0x200004+REGBASE)
#define EMMA2RH_PCI_IWIN0_CTR (0x200010+REGBASE)
#define EMMA2RH_PCI_IWIN1_CTR (0x200014+REGBASE)
#define EMMA2RH_PCI_INIT_ESWP (0x200018+REGBASE)
#define EMMA2RH_PCI_INT (0x200020+REGBASE)
#define EMMA2RH_PCI_INT_EN (0x200024+REGBASE)
#define EMMA2RH_PCI_TWIN_CTR (0x200030+REGBASE)
#define EMMA2RH_PCI_TWIN_BADR (0x200034+REGBASE)
#define EMMA2RH_PCI_TWIN0_DADR (0x200038+REGBASE)
#define EMMA2RH_PCI_TWIN1_DADR (0x20003c+REGBASE)
/*
* Memory map (physical address)
*
 * Note that most of the following addresses must be properly aligned to the
 * corresponding size. For example, if PCI_IO_SIZE is 16 MB, then
 * PCI_IO_BASE must be aligned on a 16 MB boundary.
*/
/* the actual ram size is detected at run-time */
#define EMMA2RH_RAM_BASE 0x00000000
#define EMMA2RH_RAM_SIZE 0x10000000 /* less than 256MB */
#define EMMA2RH_IO_BASE 0x10000000
#define EMMA2RH_IO_SIZE 0x01000000 /* 16 MB */
#define EMMA2RH_GENERALIO_BASE 0x11000000
#define EMMA2RH_GENERALIO_SIZE 0x01000000 /* 16 MB */
#define EMMA2RH_PCI_IO_BASE 0x12000000
#define EMMA2RH_PCI_IO_SIZE 0x02000000 /* 32 MB */
#define EMMA2RH_PCI_MEM_BASE 0x14000000
#define EMMA2RH_PCI_MEM_SIZE 0x08000000 /* 128 MB */
#define EMMA2RH_ROM_BASE 0x1c000000
#define EMMA2RH_ROM_SIZE 0x04000000 /* 64 MB */
#define EMMA2RH_PCI_CONFIG_BASE EMMA2RH_PCI_IO_BASE
#define EMMA2RH_PCI_CONFIG_SIZE EMMA2RH_PCI_IO_SIZE
#define NUM_EMMA2RH_IRQ 96
#define EMMA2RH_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
/*
* emma2rh irq defs
*/
#define EMMA2RH_IRQ_INT(n) (EMMA2RH_IRQ_BASE + (n))
#define EMMA2RH_IRQ_PFUR0 EMMA2RH_IRQ_INT(49)
#define EMMA2RH_IRQ_PFUR1 EMMA2RH_IRQ_INT(50)
#define EMMA2RH_IRQ_PFUR2 EMMA2RH_IRQ_INT(51)
#define EMMA2RH_IRQ_PIIC0 EMMA2RH_IRQ_INT(56)
#define EMMA2RH_IRQ_PIIC1 EMMA2RH_IRQ_INT(57)
#define EMMA2RH_IRQ_PIIC2 EMMA2RH_IRQ_INT(58)
/*
* EMMA2RH Register Access
*/
#define EMMA2RH_BASE (0xa0000000)
static inline void emma2rh_sync(void)
{
volatile u32 *p = (volatile u32 *)0xbfc00000;
(void)(*p);
}
static inline void emma2rh_out32(u32 offset, u32 val)
{
*(volatile u32 *)(EMMA2RH_BASE | offset) = val;
emma2rh_sync();
}
static inline u32 emma2rh_in32(u32 offset)
{
u32 val = *(volatile u32 *)(EMMA2RH_BASE | offset);
return val;
}
static inline void emma2rh_out16(u32 offset, u16 val)
{
*(volatile u16 *)(EMMA2RH_BASE | offset) = val;
emma2rh_sync();
}
static inline u16 emma2rh_in16(u32 offset)
{
u16 val = *(volatile u16 *)(EMMA2RH_BASE | offset);
return val;
}
static inline void emma2rh_out8(u32 offset, u8 val)
{
*(volatile u8 *)(EMMA2RH_BASE | offset) = val;
emma2rh_sync();
}
static inline u8 emma2rh_in8(u32 offset)
{
u8 val = *(volatile u8 *)(EMMA2RH_BASE | offset);
return val;
}
/**
* IIC registers map
**/
/*---------------------------------------------------------------------------*/
/* CNT - Control register (00H R/W) */
/*---------------------------------------------------------------------------*/
#define SPT 0x00000001
#define STT 0x00000002
#define ACKE 0x00000004
#define WTIM 0x00000008
#define SPIE 0x00000010
#define WREL 0x00000020
#define LREL 0x00000040
#define IICE 0x00000080
#define CNT_RESERVED 0x000000ff /* reserved bit 0 */
#define I2C_EMMA_START (IICE | STT)
#define I2C_EMMA_STOP (IICE | SPT)
#define I2C_EMMA_REPSTART I2C_EMMA_START
/*---------------------------------------------------------------------------*/
/* STA - Status register (10H Read) */
/*---------------------------------------------------------------------------*/
#define MSTS 0x00000080
#define ALD 0x00000040
#define EXC 0x00000020
#define COI 0x00000010
#define TRC 0x00000008
#define ACKD 0x00000004
#define STD 0x00000002
#define SPD 0x00000001
/*---------------------------------------------------------------------------*/
/* CSEL - Clock select register (20H R/W) */
/*---------------------------------------------------------------------------*/
#define FCL 0x00000080
#define ND50 0x00000040
#define CLD 0x00000020
#define DAD 0x00000010
#define SMC 0x00000008
#define DFC 0x00000004
#define CL 0x00000003
#define CSEL_RESERVED 0x000000ff /* reserved bit 0 */
#define FAST397 0x0000008b
#define FAST297 0x0000008a
#define FAST347 0x0000000b
#define FAST260 0x0000000a
#define FAST130 0x00000008
#define STANDARD108 0x00000083
#define STANDARD83 0x00000082
#define STANDARD95 0x00000003
#define STANDARD73 0x00000002
#define STANDARD36 0x00000001
#define STANDARD71 0x00000000
/*---------------------------------------------------------------------------*/
/* SVA - Slave address register (30H R/W) */
/*---------------------------------------------------------------------------*/
#define SVA 0x000000fe
/*---------------------------------------------------------------------------*/
/* SHR - Shift register (40H R/W) */
/*---------------------------------------------------------------------------*/
#define SR 0x000000ff
/*---------------------------------------------------------------------------*/
/* INT - Interrupt register (50H R/W) */
/* INTM - Interrupt mask register (60H R/W) */
/*---------------------------------------------------------------------------*/
#define INTE0 0x00000001
/***********************************************************************
* I2C registers
***********************************************************************
*/
#define I2C_EMMA_CNT 0x00
#define I2C_EMMA_STA 0x10
#define I2C_EMMA_CSEL 0x20
#define I2C_EMMA_SVA 0x30
#define I2C_EMMA_SHR 0x40
#define I2C_EMMA_INT 0x50
#define I2C_EMMA_INTM 0x60
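/*
 * Illustrative sketch only (not part of the original header): the I2C block
 * is driven through the emma2rh_out32()/emma2rh_in32() accessors above.
 * I2C_EMMA_EXAMPLE_BASE is a made-up placeholder offset, not a real EMMA2RH
 * address; real base addresses come from the board support code.
 */
#define I2C_EMMA_EXAMPLE_BASE	0x00300000	/* hypothetical offset */

static inline void i2c_emma_example_start(void)
{
	/* Enable the controller and generate a START condition. */
	emma2rh_out32(I2C_EMMA_EXAMPLE_BASE + I2C_EMMA_CNT, I2C_EMMA_START);

	/* Busy-wait until the controller reports bus-master state. */
	while (!(emma2rh_in32(I2C_EMMA_EXAMPLE_BASE + I2C_EMMA_STA) & MSTS))
		;
}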
/*
* include the board dependent part
*/
#ifdef CONFIG_NEC_MARKEINS
#include <asm/emma/markeins.h>
#else
#error "Unknown EMMA2RH board!"
#endif
#endif /* __ASM_EMMA_EMMA2RH_H */

View file

@ -0,0 +1,41 @@
/*
* Copyright (C) NEC Electronics Corporation 2005-2006
*
* This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef MARKEINS_H
#define MARKEINS_H
#define NUM_EMMA2RH_IRQ_SW 32
#define NUM_EMMA2RH_IRQ_GPIO 32
#define EMMA2RH_SW_CASCADE (EMMA2RH_IRQ_INT(7) - EMMA2RH_IRQ_INT(0))
#define EMMA2RH_GPIO_CASCADE (EMMA2RH_IRQ_INT(46) - EMMA2RH_IRQ_INT(0))
#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ)
#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW)
#define EMMA2RH_SW_IRQ_INT(n) (EMMA2RH_SW_IRQ_BASE + (n))
#define MARKEINS_PCI_IRQ_INTA EMMA2RH_GPIO_IRQ_BASE+15
#define MARKEINS_PCI_IRQ_INTB EMMA2RH_GPIO_IRQ_BASE+16
#define MARKEINS_PCI_IRQ_INTC EMMA2RH_GPIO_IRQ_BASE+17
#define MARKEINS_PCI_IRQ_INTD EMMA2RH_GPIO_IRQ_BASE+18
#endif /* MARKEINS_H */

View file

@ -0,0 +1,17 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
*/
#ifndef _ASM_ERRNO_H
#define _ASM_ERRNO_H
#include <uapi/asm/errno.h>
/* The biggest error number defined here or in <linux/errno.h>. */
#define EMAXERRNO 1133
#endif /* _ASM_ERRNO_H */

View file

@ -0,0 +1,43 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014, Imagination Technologies Ltd.
*
* EVA functions for generic code
*/
#ifndef _ASM_EVA_H
#define _ASM_EVA_H
#include <kernel-entry-init.h>
#ifdef __ASSEMBLY__
#ifdef CONFIG_EVA
/*
* EVA early init code
*
* Platforms must define their own 'platform_eva_init' macro in
* their kernel-entry-init.h header. This macro usually does the
* platform specific configuration of the segmentation registers,
* and it is normally called from assembly code.
*
*/
.macro eva_init
platform_eva_init
.endm
#else
.macro eva_init
.endm
#endif /* CONFIG_EVA */
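/*
 * Illustrative sketch only (not part of the original header): the platform's
 * kernel-entry-init.h is expected to supply platform_eva_init.  A
 * hypothetical implementation programming the CP0 segmentation control
 * registers might look roughly like:
 *
 *	.macro	platform_eva_init
 *	.set	push
 *	.set	reorder
 *	li	t0, 0x00200010		# example SegCtl0 value (made up)
 *	mtc0	t0, $5, 2		# CP0 SegCtl0 is register 5, select 2
 *	li	t0, 0x00030002		# example SegCtl1 value (made up)
 *	mtc0	t0, $5, 3		# CP0 SegCtl1 is register 5, select 3
 *	.set	pop
 *	.endm
 */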
#endif /* __ASSEMBLY__ */
#endif

View file

@ -0,0 +1,17 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#ifndef _ASM_EXEC_H
#define _ASM_EXEC_H
extern unsigned long arch_align_stack(unsigned long sp);
#endif /* _ASM_EXEC_H */

View file

@ -0,0 +1,19 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */

View file

@ -0,0 +1,82 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <asm/page.h>
#include <spaces.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use fixmap_set(idx, phys) to associate
 * physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
 * On UP we currently have no trace of the fixmap mechanism,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
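/*
 * Illustrative note (not part of the original header): with the generic
 * helpers pulled in from <asm-generic/fixmap.h> below, an index from
 * enum fixed_addresses translates to a virtual address roughly as
 *
 *	__fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT)
 *
 * so higher indices sit at lower addresses, growing down from FIXADDR_TOP.
 */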
enum fixed_addresses {
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
__end_of_fixed_addresses
};
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#include <asm-generic/fixmap.h>
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
/*
* Called from pgtable_init()
*/
extern void fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base);
#endif

View file

@ -0,0 +1,56 @@
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000 Ralf Baechle
*/
#ifndef _ASM_FLOPPY_H
#define _ASM_FLOPPY_H
#include <linux/dma-mapping.h>
static inline void fd_cacheflush(char * addr, long size)
{
dma_cache_sync(NULL, addr, size, DMA_BIDIRECTIONAL);
}
#define MAX_BUFFER_SECTORS 24
/*
 * And on MIPS the CMOS info fails as well ...
*
* FIXME: This information should come from the ARC configuration tree
* or wherever a particular machine has stored this ...
*/
#define FLOPPY0_TYPE fd_drive_type(0)
#define FLOPPY1_TYPE fd_drive_type(1)
#define FDC1 fd_getfdaddr1()
#define N_FDC 1 /* do you *really* want a second controller? */
#define N_DRIVE 8
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*
 * On MIPSes using vdma, this actually means that *all* transfers go through
 * the track buffer since 0x1000000 is always smaller than KSEG0/1.
 * Actually this needs to be a bit more sophisticated, since so much
 * different hardware is available with MIPS CPUs ...
*/
#define CROSS_64KB(a, s) ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)
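/*
 * Worked example (added for illustration): with K_64 == 0x10000 (64 KiB),
 * a 0x20 byte transfer starting at 0xffff0 gives 0xffff0 / K_64 == 0 but
 * (0xffff0 + 0x20 - 1) / K_64 == 1, so CROSS_64KB() evaluates true -- the
 * buffer straddles a 64 KiB boundary and cannot be done in one ISA DMA run.
 */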
#define EXTRA_FLOPPY_PARAMS
#include <floppy.h>
#endif /* _ASM_FLOPPY_H */

View file

@ -0,0 +1,113 @@
/*
* Definitions for the FPU register names
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1999 Ralf Baechle
* Copyright (C) 1985 MIPS Computer Systems, Inc.
* Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_FPREGDEF_H
#define _ASM_FPREGDEF_H
#include <asm/sgidefs.h>
/*
* starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing
* hardfloat and softfloat object files. The kernel build uses soft-float by
* default, so we also need to pass -msoft-float along to GAS if it supports it.
* But this in turn causes assembler errors in files which access hardfloat
* registers. We detect if GAS supports "-msoft-float" in the Makefile and
* explicitly put ".set hardfloat" where floating point registers are touched.
*/
#ifdef GAS_HAS_SET_HARDFLOAT
#define SET_HARDFLOAT .set hardfloat
#else
#define SET_HARDFLOAT
#endif
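/*
 * Illustrative sketch (not part of the original header): assembly code that
 * touches FP registers is expected to wrap the access like
 *
 *	.set	push
 *	SET_HARDFLOAT
 *	cfc1	t0, fcr31		# read the FPU control/status register
 *	.set	pop
 *
 * so the sequence still assembles when the build passes -msoft-float to GAS.
 */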
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
* These definitions only cover the R3000-ish 16/32 register model.
* But we're trying to be R3000 friendly anyway ...
*/
#define fv0 $f0 /* return value */
#define fv0f $f1
#define fv1 $f2
#define fv1f $f3
#define fa0 $f12 /* argument registers */
#define fa0f $f13
#define fa1 $f14
#define fa1f $f15
#define ft0 $f4 /* caller saved */
#define ft0f $f5
#define ft1 $f6
#define ft1f $f7
#define ft2 $f8
#define ft2f $f9
#define ft3 $f10
#define ft3f $f11
#define ft4 $f16
#define ft4f $f17
#define ft5 $f18
#define ft5f $f19
#define fs0 $f20 /* callee saved */
#define fs0f $f21
#define fs1 $f22
#define fs1f $f23
#define fs2 $f24
#define fs2f $f25
#define fs3 $f26
#define fs3f $f27
#define fs4 $f28
#define fs4f $f29
#define fs5 $f30
#define fs5f $f31
#define fcr31 $31 /* FPU status register */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define fv0 $f0 /* return value */
#define fv1 $f2
#define fa0 $f12 /* argument registers */
#define fa1 $f13
#define fa2 $f14
#define fa3 $f15
#define fa4 $f16
#define fa5 $f17
#define fa6 $f18
#define fa7 $f19
#define ft0 $f4 /* caller saved */
#define ft1 $f5
#define ft2 $f6
#define ft3 $f7
#define ft4 $f8
#define ft5 $f9
#define ft6 $f10
#define ft7 $f11
#define ft8 $f20
#define ft9 $f21
#define ft10 $f22
#define ft11 $f23
#define ft12 $f1
#define ft13 $f3
#define fs0 $f24 /* callee saved */
#define fs1 $f25
#define fs2 $f26
#define fs3 $f27
#define fs4 $f28
#define fs5 $f29
#define fs6 $f30
#define fs7 $f31
#define fcr31 $31
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#endif /* _ASM_FPREGDEF_H */

arch/mips/include/asm/fpu.h Normal file (202 lines)
View file

@ -0,0 +1,202 @@
/*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _ASM_FPU_H
#define _ASM_FPU_H
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>
#include <asm/mipsregs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/msa.h>
#ifdef CONFIG_MIPS_MT_FPAFF
#include <asm/mips_mt.h>
#endif
struct sigcontext;
struct sigcontext32;
extern void _init_fpu(void);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);
/*
* This enum specifies a mode in which we want the FPU to operate, for cores
* which implement the Status.FR bit. Note that FPU_32BIT & FPU_64BIT
* purposefully have the values 0 & 1 respectively, so that an integer value
 * of Status.FR can be trivially cast to the corresponding enum fpu_mode.
*/
enum fpu_mode {
FPU_32BIT = 0, /* FR = 0 */
FPU_64BIT, /* FR = 1 */
FPU_AS_IS,
};
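/*
 * Illustrative helper added for this write-up (not part of the original
 * header): because of the 0/1 encoding noted above, the current FR mode can
 * be read straight out of Status.FR.
 */
static inline enum fpu_mode __example_current_fpu_mode(void)
{
	return (enum fpu_mode)!!(read_c0_status() & ST0_FR);
}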
static inline int __enable_fpu(enum fpu_mode mode)
{
int fr;
switch (mode) {
case FPU_AS_IS:
/* just enable the FPU in its current mode */
set_c0_status(ST0_CU1);
enable_fpu_hazard();
return 0;
case FPU_64BIT:
#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
/* fall through */
case FPU_32BIT:
/* set CU1 & change FR appropriately */
fr = (int)mode;
change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
enable_fpu_hazard();
/* check FR has the desired value */
return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
default:
BUG();
}
return SIGFPE;
}
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
disable_fpu_hazard(); \
} while (0)
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int __is_fpu_owner(void)
{
return test_thread_flag(TIF_USEDFPU);
}
static inline int is_fpu_owner(void)
{
return cpu_has_fpu && __is_fpu_owner();
}
static inline int __own_fpu(void)
{
enum fpu_mode mode;
int ret;
mode = !test_thread_flag(TIF_32BIT_FPREGS);
ret = __enable_fpu(mode);
if (ret)
return ret;
KSTK_STATUS(current) |= ST0_CU1;
if (mode == FPU_64BIT)
KSTK_STATUS(current) |= ST0_FR;
else /* mode == FPU_32BIT */
KSTK_STATUS(current) &= ~ST0_FR;
set_thread_flag(TIF_USEDFPU);
return 0;
}
static inline int own_fpu_inatomic(int restore)
{
int ret = 0;
if (cpu_has_fpu && !__is_fpu_owner()) {
ret = __own_fpu();
if (restore && !ret)
_restore_fp(current);
}
return ret;
}
static inline int own_fpu(int restore)
{
int ret;
preempt_disable();
ret = own_fpu_inatomic(restore);
preempt_enable();
return ret;
}
static inline void lose_fpu(int save)
{
preempt_disable();
if (is_msa_enabled()) {
if (save) {
save_msa(current);
current->thread.fpu.fcr31 =
read_32bit_cp1_register(CP1_STATUS);
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
__disable_fpu();
} else if (is_fpu_owner()) {
if (save)
_save_fp(current);
__disable_fpu();
}
KSTK_STATUS(current) &= ~ST0_CU1;
clear_thread_flag(TIF_USEDFPU);
preempt_enable();
}
static inline int init_fpu(void)
{
int ret = 0;
if (cpu_has_fpu) {
ret = __own_fpu();
if (!ret)
_init_fpu();
} else
fpu_emulator_init_fpu();
return ret;
}
static inline void save_fp(struct task_struct *tsk)
{
if (cpu_has_fpu)
_save_fp(tsk);
}
static inline void restore_fp(struct task_struct *tsk)
{
if (cpu_has_fpu)
_restore_fp(tsk);
}
static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
if (tsk == current) {
preempt_disable();
if (is_fpu_owner())
_save_fp(current);
preempt_enable();
}
return tsk->thread.fpu.fpr;
}
#endif /* _ASM_FPU_H */

View file

@ -0,0 +1,95 @@
/*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Further private data for which no space exists in mips_fpu_struct.
* This should be subsumed into the mips_fpu_struct structure as
* defined in processor.h as soon as the absurd wired absolute assembler
* offsets become dynamic at compile time.
*
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#ifndef _ASM_FPU_EMULATOR_H
#define _ASM_FPU_EMULATOR_H
#include <linux/sched.h>
#include <asm/break.h>
#include <asm/thread_info.h>
#include <asm/inst.h>
#include <asm/local.h>
#include <asm/processor.h>
#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
unsigned long emulated;
unsigned long loads;
unsigned long stores;
unsigned long cp1ops;
unsigned long cp1xops;
unsigned long errors;
unsigned long ieee754_inexact;
unsigned long ieee754_underflow;
unsigned long ieee754_overflow;
unsigned long ieee754_zerodiv;
unsigned long ieee754_invalidop;
};
DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#define MIPS_FPU_EMU_INC_STATS(M) \
do { \
preempt_disable(); \
__this_cpu_inc(fpuemustats.M); \
preempt_enable(); \
} while (0)
#else
#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
#endif /* CONFIG_DEBUG_FS */
extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
unsigned long cpc);
extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
int process_fpemu_return(int sig, void __user *fault_addr);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
/*
* Instruction inserted following the badinst to further tag the sequence
*/
#define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */
/*
* Break instruction with special math emu break code set
*/
#define BREAK_MATH (0x0000000d | (BRK_MEMU << 16))
#define SIGNALLING_NAN 0x7ff800007ff80000LL
static inline void fpu_emulator_init_fpu(void)
{
struct task_struct *t = current;
int i;
t->thread.fpu.fcr31 = 0;
for (i = 0; i < 32; i++)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
#endif /* _ASM_FPU_EMULATOR_H */

View file

@ -0,0 +1,90 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive for
* more details.
*
* Copyright (C) 2009 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
#ifndef _ASM_MIPS_FTRACE_H
#define _ASM_MIPS_FTRACE_H
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
extern void _mcount(void);
#define mcount _mcount
#define safe_load(load, src, dst, error) \
do { \
asm volatile ( \
"1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
" li %[tmp_err], 0\n" \
"2: .insn\n" \
\
".section .fixup, \"ax\"\n" \
"3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
".section\t__ex_table,\"a\"\n\t" \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
: [tmp_src] "r" (src) \
: "memory" \
); \
} while (0)
#define safe_store(store, src, dst, error) \
do { \
asm volatile ( \
"1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
" li %[tmp_err], 0\n" \
"2: .insn\n" \
\
".section .fixup, \"ax\"\n" \
"3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
".section\t__ex_table,\"a\"\n\t"\
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
: [tmp_err] "=r" (error) \
: [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
: "memory" \
); \
} while (0)
#define safe_load_code(dst, src, error) \
safe_load(STR(lw), src, dst, error)
#define safe_store_code(src, dst, error) \
safe_store(STR(sw), src, dst, error)
#define safe_load_stack(dst, src, error) \
safe_load(STR(PTR_L), src, dst, error)
#define safe_store_stack(src, dst, error) \
safe_store(STR(PTR_S), src, dst, error)
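/*
 * Illustrative sketch (not part of the original header): a fault-tolerant
 * read of a kernel text word, roughly as the dynamic ftrace code uses it.
 * Returns 0 on success, 1 if the access faulted and the fixup ran.
 */
static inline int ftrace_example_read_insn(unsigned long ip, unsigned int *insn)
{
	int fault;

	safe_load_code(*insn, ip, fault);	/* lw via the __ex_table path */

	return fault;
}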
#ifdef CONFIG_DYNAMIC_FTRACE
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _ASM_MIPS_FTRACE_H */

View file

@ -0,0 +1,207 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/asm-eva.h>
#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/war.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
if (cpu_has_llsc && R10000_LLSC_WAR) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set arch=r4000 \n" \
"1: ll %1, %4 # __futex_atomic_op \n" \
" .set mips0 \n" \
" " insn " \n" \
" .set arch=r4000 \n" \
"2: sc $1, %2 \n" \
" beqzl $1, 1b \n" \
__WEAK_LLSC_MB \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %6 \n" \
" j 3b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
: "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set arch=r4000 \n" \
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
" .set mips0 \n" \
" " insn " \n" \
" .set arch=r4000 \n" \
"2: "user_sc("$1", "%2")" \n" \
" beqz $1, 1b \n" \
__WEAK_LLSC_MB \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %6 \n" \
" j 3b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
: "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
: "memory"); \
} else \
ret = -ENOSYS; \
}
static inline int
futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("addu $1, %1, %z5",
ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("or $1, %1, %z5",
ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and $1, %1, %z5",
ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("xor $1, %1, %z5",
ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
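/*
 * Worked example (added for illustration): userspace encodes
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) as
 * (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_EQ << 24) | (1 << 12).  The shifts
 * above then recover op == FUTEX_OP_ADD, cmp == FUTEX_OP_CMP_EQ, oparg == 1
 * and cmparg == 0: atomically add 1 to *uaddr and report whether the old
 * value was equal to 0.
 */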
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 val;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
" .set arch=r4000 \n"
"1: ll %1, %3 \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
" .set arch=r4000 \n"
"2: sc $1, %2 \n"
" beqzl $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
"4: li %0, %6 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
: "+r" (ret), "=&r" (val), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
" .set arch=r4000 \n"
"1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
" .set arch=r4000 \n"
"2: "user_sc("$1", "%2")" \n"
" beqz $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
"4: li %0, %6 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
: "+r" (ret), "=&r" (val), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory");
} else
return -ENOSYS;
*uval = val;
return ret;
}
#endif
#endif /* _ASM_FUTEX_H */

View file

@ -0,0 +1,175 @@
/*
* ARCS hardware/memory inventory/configuration and system ID definitions.
*/
#ifndef _ASM_ARC_HINV_H
#define _ASM_ARC_HINV_H
#include <asm/sgidefs.h>
#include <asm/fw/arc/types.h>
/* configuration query defines */
typedef enum configclass {
SystemClass,
ProcessorClass,
CacheClass,
#ifndef _NT_PROM
MemoryClass,
AdapterClass,
ControllerClass,
PeripheralClass
#else /* _NT_PROM */
AdapterClass,
ControllerClass,
PeripheralClass,
MemoryClass
#endif /* _NT_PROM */
} CONFIGCLASS;
typedef enum configtype {
ARC,
CPU,
FPU,
PrimaryICache,
PrimaryDCache,
SecondaryICache,
SecondaryDCache,
SecondaryCache,
#ifndef _NT_PROM
Memory,
#endif
EISAAdapter,
TCAdapter,
SCSIAdapter,
DTIAdapter,
MultiFunctionAdapter,
DiskController,
TapeController,
CDROMController,
WORMController,
SerialController,
NetworkController,
DisplayController,
ParallelController,
PointerController,
KeyboardController,
AudioController,
OtherController,
DiskPeripheral,
FloppyDiskPeripheral,
TapePeripheral,
ModemPeripheral,
MonitorPeripheral,
PrinterPeripheral,
PointerPeripheral,
KeyboardPeripheral,
TerminalPeripheral,
LinePeripheral,
NetworkPeripheral,
#ifdef _NT_PROM
Memory,
#endif
OtherPeripheral,
/* new stuff for IP30 */
/* added without moving anything */
/* except ANONYMOUS. */
XTalkAdapter,
PCIAdapter,
GIOAdapter,
TPUAdapter,
Anonymous
} CONFIGTYPE;
typedef enum {
Failed = 1,
ReadOnly = 2,
Removable = 4,
ConsoleIn = 8,
ConsoleOut = 16,
Input = 32,
Output = 64
} IDENTIFIERFLAG;
#ifndef NULL /* for GetChild(NULL); */
#define NULL 0
#endif
union key_u {
struct {
#ifdef _MIPSEB
unsigned char c_bsize; /* block size in lines */
unsigned char c_lsize; /* line size in bytes/tag */
unsigned short c_size; /* cache size in 4K pages */
#else /* _MIPSEL */
unsigned short c_size; /* cache size in 4K pages */
unsigned char c_lsize; /* line size in bytes/tag */
unsigned char c_bsize; /* block size in lines */
#endif /* _MIPSEL */
} cache;
ULONG FullKey;
};
#if _MIPS_SIM == _MIPS_SIM_ABI64
#define SGI_ARCS_VERS 64 /* sgi 64-bit version */
#define SGI_ARCS_REV 0 /* rev .00 */
#else
#define SGI_ARCS_VERS 1 /* first version */
#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */
#endif
typedef struct component {
CONFIGCLASS Class;
CONFIGTYPE Type;
IDENTIFIERFLAG Flags;
USHORT Version;
USHORT Revision;
ULONG Key;
ULONG AffinityMask;
ULONG ConfigurationDataSize;
ULONG IdentifierLength;
char *Identifier;
} COMPONENT;
/* internal structure that holds pathname parsing data */
struct cfgdata {
char *name; /* full name */
int minlen; /* minimum length to match */
CONFIGTYPE type; /* type of token */
};
/* System ID */
typedef struct systemid {
CHAR VendorId[8];
CHAR ProductId[8];
} SYSTEMID;
/* memory query functions */
typedef enum memorytype {
ExceptionBlock,
SPBPage, /* ARCS == SystemParameterBlock */
#ifndef _NT_PROM
FreeContiguous,
FreeMemory,
BadMemory,
LoadedProgram,
FirmwareTemporary,
FirmwarePermanent
#else /* _NT_PROM */
FreeMemory,
BadMemory,
LoadedProgram,
FirmwareTemporary,
FirmwarePermanent,
FreeContiguous
#endif /* _NT_PROM */
} MEMORYTYPE;
typedef struct memorydescriptor {
MEMORYTYPE Type;
LONG BasePage;
LONG PageCount;
} MEMORYDESCRIPTOR;
#endif /* _ASM_ARC_HINV_H */

View file

@ -0,0 +1,86 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright 1999 Ralf Baechle (ralf@gnu.org)
* Copyright 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_ARC_TYPES_H
#define _ASM_ARC_TYPES_H
#ifdef CONFIG_FW_ARC32
typedef char CHAR;
typedef short SHORT;
typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
typedef long LONG __attribute__ ((__mode__ (__SI__)));
typedef unsigned char UCHAR;
typedef unsigned short USHORT;
typedef unsigned long ULONG __attribute__ ((__mode__ (__SI__)));
typedef void VOID;
/* The pointer types. Note that we're using a 64-bit compiler but all
   pointers in the ARC structures are only 32-bit, so we need some disgusting
workarounds. Keep your vomit bag handy. */
typedef LONG _PCHAR;
typedef LONG _PSHORT;
typedef LONG _PLARGE_INTEGER;
typedef LONG _PLONG;
typedef LONG _PUCHAR;
typedef LONG _PUSHORT;
typedef LONG _PULONG;
typedef LONG _PVOID;
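/*
 * Example (added for illustration): a 32-bit firmware pointer stored in one
 * of these fields is widened back into a usable kernel pointer by sign
 * extension, e.g. (void *)(long)(_PVOID value), which lands 32-bit KSEG0/1
 * addresses in the matching 64-bit compatibility segments.
 */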
#endif /* CONFIG_FW_ARC32 */
#ifdef CONFIG_FW_ARC64
typedef char CHAR;
typedef short SHORT;
typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
typedef long LONG __attribute__ ((__mode__ (__DI__)));
typedef unsigned char UCHAR;
typedef unsigned short USHORT;
typedef unsigned long ULONG __attribute__ ((__mode__ (__DI__)));
typedef void VOID;
/* The pointer types. We're 64-bit and the firmware is also 64-bit, so
   life is sane ... */
typedef CHAR *_PCHAR;
typedef SHORT *_PSHORT;
typedef LARGE_INTEGER *_PLARGE_INTEGER;
typedef LONG *_PLONG;
typedef UCHAR *_PUCHAR;
typedef USHORT *_PUSHORT;
typedef ULONG *_PULONG;
typedef VOID *_PVOID;
#endif /* CONFIG_FW_ARC64 */
typedef CHAR *PCHAR;
typedef SHORT *PSHORT;
typedef LARGE_INTEGER *PLARGE_INTEGER;
typedef LONG *PLONG;
typedef UCHAR *PUCHAR;
typedef USHORT *PUSHORT;
typedef ULONG *PULONG;
typedef VOID *PVOID;
/*
* Return type of ArcGetDisplayStatus()
*/
typedef struct {
USHORT CursorXPosition;
USHORT CursorYPosition;
USHORT CursorMaxXPosition;
USHORT CursorMaxYPosition;
USHORT ForegroundColor;
USHORT BackgroundColor;
UCHAR HighIntensity;
UCHAR Underscored;
UCHAR ReverseVideo;
} DISPLAY_STATUS;
#endif /* _ASM_ARC_TYPES_H */

View file

@ -0,0 +1,122 @@
/*
* Copyright (C) 2000, 2001, 2002 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Broadcom Common Firmware Environment (CFE)
*
* This file contains declarations for doing callbacks to
* cfe from an application. It should be the only header
* needed by the application to use this library
*
* Authors: Mitch Lichtenberg, Chris Demetriou
*/
#ifndef CFE_API_H
#define CFE_API_H
#include <linux/types.h>
#include <linux/string.h>
typedef long intptr_t;
/*
* Constants
*/
/* Seal indicating CFE's presence, passed to user program. */
#define CFE_EPTSEAL 0x43464531
#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */
#define CFE_MI_AVAILABLE 1 /* memory is available */
#define CFE_FLG_WARMSTART 0x00000001
#define CFE_FLG_FULL_ARENA 0x00000001
#define CFE_FLG_ENV_PERMANENT 0x00000001
#define CFE_CPU_CMD_START 1
#define CFE_CPU_CMD_STOP 0
#define CFE_STDHANDLE_CONSOLE 0
#define CFE_DEV_NETWORK 1
#define CFE_DEV_DISK 2
#define CFE_DEV_FLASH 3
#define CFE_DEV_SERIAL 4
#define CFE_DEV_CPU 5
#define CFE_DEV_NVRAM 6
#define CFE_DEV_CLOCK 7
#define CFE_DEV_OTHER 8
#define CFE_DEV_MASK 0x0F
#define CFE_CACHE_FLUSH_D 1
#define CFE_CACHE_INVAL_I 2
#define CFE_CACHE_INVAL_D 4
#define CFE_CACHE_INVAL_L2 8
#define CFE_FWI_64BIT 0x00000001
#define CFE_FWI_32BIT 0x00000002
#define CFE_FWI_RELOC 0x00000004
#define CFE_FWI_UNCACHED 0x00000008
#define CFE_FWI_MULTICPU 0x00000010
#define CFE_FWI_FUNCSIM 0x00000020
#define CFE_FWI_RTLSIM 0x00000040
typedef struct {
int64_t fwi_version; /* major, minor, eco version */
int64_t fwi_totalmem; /* total installed mem */
int64_t fwi_flags; /* various flags */
int64_t fwi_boardid; /* board ID */
int64_t fwi_bootarea_va; /* VA of boot area */
int64_t fwi_bootarea_pa; /* PA of boot area */
int64_t fwi_bootarea_size; /* size of boot area */
} cfe_fwinfo_t;
/*
* Defines and prototypes for functions which take no arguments.
*/
int64_t cfe_getticks(void);
/*
* Defines and prototypes for the rest of the functions.
*/
int cfe_close(int handle);
int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1);
int cfe_cpu_stop(int cpu);
int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen);
int cfe_enummem(int idx, int flags, uint64_t * start, uint64_t * length,
uint64_t * type);
int cfe_exit(int warm, int status);
int cfe_flushcache(int flg);
int cfe_getdevinfo(char *name);
int cfe_getenv(char *name, char *dest, int destlen);
int cfe_getfwinfo(cfe_fwinfo_t * info);
int cfe_getstdhandle(int flg);
int cfe_init(uint64_t handle, uint64_t ept);
int cfe_inpstat(int handle);
int cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer,
int length, int *retlen, uint64_t offset);
int cfe_open(char *name);
int cfe_read(int handle, unsigned char *buffer, int length);
int cfe_readblk(int handle, int64_t offset, unsigned char *buffer,
int length);
int cfe_setenv(char *name, char *val);
int cfe_write(int handle, const char *buffer, int length);
int cfe_writeblk(int handle, int64_t offset, const char *buffer,
int length);
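/*
 * Illustrative sketch only (not part of the original header): early boot
 * code first binds the API to the handle/entry point handed over by CFE and
 * can then query the environment.  The variable name used below is an
 * assumption of this example.
 */
static inline void cfe_example_setup(uint64_t fw_handle, uint64_t fw_ept)
{
	char name[] = "BOOT_CONSOLE";
	char console[32];

	cfe_init(fw_handle, fw_ept);		/* bind the API to firmware */

	if (cfe_getenv(name, console, sizeof(console)) >= 0) {
		/* console[] now holds the firmware's console device name */
	}
}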
#endif /* CFE_API_H */

View file

@ -0,0 +1,80 @@
/*
* Copyright (C) 2000, 2001, 2002 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Broadcom Common Firmware Environment (CFE)
*
* CFE's global error code list is here.
*
* Author: Mitch Lichtenberg
*/
#define CFE_OK 0
#define CFE_ERR -1 /* generic error */
#define CFE_ERR_INV_COMMAND -2
#define CFE_ERR_EOF -3
#define CFE_ERR_IOERR -4
#define CFE_ERR_NOMEM -5
#define CFE_ERR_DEVNOTFOUND -6
#define CFE_ERR_DEVOPEN -7
#define CFE_ERR_INV_PARAM -8
#define CFE_ERR_ENVNOTFOUND -9
#define CFE_ERR_ENVREADONLY -10
#define CFE_ERR_NOTELF -11
#define CFE_ERR_NOT32BIT -12
#define CFE_ERR_WRONGENDIAN -13
#define CFE_ERR_BADELFVERS -14
#define CFE_ERR_NOTMIPS -15
#define CFE_ERR_BADELFFMT -16
#define CFE_ERR_BADADDR -17
#define CFE_ERR_FILENOTFOUND -18
#define CFE_ERR_UNSUPPORTED -19
#define CFE_ERR_HOSTUNKNOWN -20
#define CFE_ERR_TIMEOUT -21
#define CFE_ERR_PROTOCOLERR -22
#define CFE_ERR_NETDOWN -23
#define CFE_ERR_NONAMESERVER -24
#define CFE_ERR_NOHANDLES -25
#define CFE_ERR_ALREADYBOUND -26
#define CFE_ERR_CANNOTSET -27
#define CFE_ERR_NOMORE -28
#define CFE_ERR_BADFILESYS -29
#define CFE_ERR_FSNOTAVAIL -30
#define CFE_ERR_INVBOOTBLOCK -31
#define CFE_ERR_WRONGDEVTYPE -32
#define CFE_ERR_BBCHECKSUM -33
#define CFE_ERR_BOOTPROGCHKSUM -34
#define CFE_ERR_LDRNOTAVAIL -35
#define CFE_ERR_NOTREADY -36
#define CFE_ERR_GETMEM -37
#define CFE_ERR_SETMEM -38
#define CFE_ERR_NOTCONN -39
#define CFE_ERR_ADDRINUSE -40

View file

@ -0,0 +1,47 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 MIPS Technologies, Inc.
*/
#ifndef __ASM_FW_H_
#define __ASM_FW_H_
#include <asm/bootinfo.h> /* For cleaner code... */
enum fw_memtypes {
fw_dontuse,
fw_code,
fw_free,
};
typedef struct {
unsigned long base; /* Within KSEG0 */
unsigned int size; /* bytes */
enum fw_memtypes type; /* fw_memtypes */
} fw_memblock_t;
/* Maximum number of memory block descriptors. */
#define FW_MAX_MEMBLOCKS 32
extern int fw_argc;
extern int *_fw_argv;
extern int *_fw_envp;
/*
 * Most firmware, such as YAMON and PMON, passes arguments and environment
 * variables as 32-bit pointers. These macros take care of sign extension.
*/
#define fw_argv(index) ((char *)(long)_fw_argv[(index)])
#define fw_envp(index) ((char *)(long)_fw_envp[(index)])
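/*
 * Illustrative note (not part of the original header): because _fw_argv
 * holds 32-bit values, fw_argv(i) sign-extends each entry into a proper
 * kernel pointer, so something like
 *
 *	strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
 *
 * (roughly what fw_init_cmdline() does) works on 32-bit and 64-bit kernels.
 */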
extern void fw_init_cmdline(void);
extern char *fw_getcmdline(void);
extern fw_memblock_t *fw_getmdesc(int);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
extern void fw_init_early_console(char port);
#endif /* __ASM_FW_H_ */

arch/mips/include/asm/gic.h Normal file (384 lines)
View file

@ -0,0 +1,384 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000, 07 MIPS Technologies, Inc.
*
* GIC Register Definitions
*
*/
#ifndef _ASM_GICREGS_H
#define _ASM_GICREGS_H
#include <linux/bitmap.h>
#include <linux/threads.h>
#include <irq.h>
#undef GICISBYTELITTLEENDIAN
/* Constants */
#define GIC_POL_POS 1
#define GIC_POL_NEG 0
#define GIC_TRIG_EDGE 1
#define GIC_TRIG_LEVEL 0
#define MSK(n) ((1 << (n)) - 1)
#define REG32(addr) (*(volatile unsigned int *) (addr))
#define REG(base, offs) REG32((unsigned long)(base) + offs##_##OFS)
#define REGP(base, phys) REG32((unsigned long)(base) + (phys))
/* Accessors */
#define GIC_REG(segment, offset) \
REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
#define GIC_REG_ADDR(segment, offset) \
REG32(_gic_base + segment##_##SECTION_OFS + offset)
#define GIC_ABS_REG(segment, offset) \
(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
#define GIC_REG_ABS_ADDR(segment, offset) \
(_gic_base + segment##_##SECTION_OFS + offset)
#ifdef GICISBYTELITTLEENDIAN
#define GICREAD(reg, data) ((data) = (reg), (data) = le32_to_cpu(data))
#define GICWRITE(reg, data) ((reg) = cpu_to_le32(data))
#else
#define GICREAD(reg, data) ((data) = (reg))
#define GICWRITE(reg, data) ((reg) = (data))
#endif
#define GICBIS(reg, mask, bits) \
do { u32 data; \
GICREAD(reg, data); \
data &= ~(mask); \
data |= ((bits) & (mask)); \
GICWRITE((reg), data); \
} while (0)
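/*
 * Illustrative sketch (not part of the original header): with _gic_base
 * mapped, shared-section registers are reached through the accessors above
 * together with the offsets defined below, e.g.:
 *
 *	unsigned int config, numintrs;
 *
 *	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
 *	numintrs = (config & GIC_SH_CONFIG_NUMINTRS_MSK) >>
 *			GIC_SH_CONFIG_NUMINTRS_SHF;
 *	GIC_CLR_INTR_MASK(intr);
 *
 * where GIC_CLR_INTR_MASK() masks a single shared interrupt source.
 */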
/* GIC Address Space */
#define SHARED_SECTION_OFS 0x0000
#define SHARED_SECTION_SIZE 0x8000
#define VPE_LOCAL_SECTION_OFS 0x8000
#define VPE_LOCAL_SECTION_SIZE 0x4000
#define VPE_OTHER_SECTION_OFS 0xc000
#define VPE_OTHER_SECTION_SIZE 0x4000
#define USM_VISIBLE_SECTION_OFS 0x10000
#define USM_VISIBLE_SECTION_SIZE 0x10000
/* Register Map for Shared Section */
#define GIC_SH_CONFIG_OFS 0x0000
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
#define GIC_SH_COUNTER_63_32_OFS 0x0014
#define GIC_SH_REVISIONID_OFS 0x0020
/* Interrupt Polarity */
#define GIC_SH_POL_31_0_OFS 0x0100
#define GIC_SH_POL_63_32_OFS 0x0104
#define GIC_SH_POL_95_64_OFS 0x0108
#define GIC_SH_POL_127_96_OFS 0x010c
#define GIC_SH_POL_159_128_OFS 0x0110
#define GIC_SH_POL_191_160_OFS 0x0114
#define GIC_SH_POL_223_192_OFS 0x0118
#define GIC_SH_POL_255_224_OFS 0x011c
/* Edge/Level Triggering */
#define GIC_SH_TRIG_31_0_OFS 0x0180
#define GIC_SH_TRIG_63_32_OFS 0x0184
#define GIC_SH_TRIG_95_64_OFS 0x0188
#define GIC_SH_TRIG_127_96_OFS 0x018c
#define GIC_SH_TRIG_159_128_OFS 0x0190
#define GIC_SH_TRIG_191_160_OFS 0x0194
#define GIC_SH_TRIG_223_192_OFS 0x0198
#define GIC_SH_TRIG_255_224_OFS 0x019c
/* Dual Edge Triggering */
#define GIC_SH_DUAL_31_0_OFS 0x0200
#define GIC_SH_DUAL_63_32_OFS 0x0204
#define GIC_SH_DUAL_95_64_OFS 0x0208
#define GIC_SH_DUAL_127_96_OFS 0x020c
#define GIC_SH_DUAL_159_128_OFS 0x0210
#define GIC_SH_DUAL_191_160_OFS 0x0214
#define GIC_SH_DUAL_223_192_OFS 0x0218
#define GIC_SH_DUAL_255_224_OFS 0x021c
/* Set/Clear corresponding bit in Edge Detect Register */
#define GIC_SH_WEDGE_OFS 0x0280
/* Reset Mask - Disables Interrupt */
#define GIC_SH_RMASK_31_0_OFS 0x0300
#define GIC_SH_RMASK_63_32_OFS 0x0304
#define GIC_SH_RMASK_95_64_OFS 0x0308
#define GIC_SH_RMASK_127_96_OFS 0x030c
#define GIC_SH_RMASK_159_128_OFS 0x0310
#define GIC_SH_RMASK_191_160_OFS 0x0314
#define GIC_SH_RMASK_223_192_OFS 0x0318
#define GIC_SH_RMASK_255_224_OFS 0x031c
/* Set Mask (WO) - Enables Interrupt */
#define GIC_SH_SMASK_31_0_OFS 0x0380
#define GIC_SH_SMASK_63_32_OFS 0x0384
#define GIC_SH_SMASK_95_64_OFS 0x0388
#define GIC_SH_SMASK_127_96_OFS 0x038c
#define GIC_SH_SMASK_159_128_OFS 0x0390
#define GIC_SH_SMASK_191_160_OFS 0x0394
#define GIC_SH_SMASK_223_192_OFS 0x0398
#define GIC_SH_SMASK_255_224_OFS 0x039c
/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
#define GIC_SH_MASK_31_0_OFS 0x0400
#define GIC_SH_MASK_63_32_OFS 0x0404
#define GIC_SH_MASK_95_64_OFS 0x0408
#define GIC_SH_MASK_127_96_OFS 0x040c
#define GIC_SH_MASK_159_128_OFS 0x0410
#define GIC_SH_MASK_191_160_OFS 0x0414
#define GIC_SH_MASK_223_192_OFS 0x0418
#define GIC_SH_MASK_255_224_OFS 0x041c
/* Pending Global Interrupts (RO) */
#define GIC_SH_PEND_31_0_OFS 0x0480
#define GIC_SH_PEND_63_32_OFS 0x0484
#define GIC_SH_PEND_95_64_OFS 0x0488
#define GIC_SH_PEND_127_96_OFS 0x048c
#define GIC_SH_PEND_159_128_OFS 0x0490
#define GIC_SH_PEND_191_160_OFS 0x0494
#define GIC_SH_PEND_223_192_OFS 0x0498
#define GIC_SH_PEND_255_224_OFS 0x049c
#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
/* Maps Interrupt X to a Pin */
#define GIC_SH_MAP_TO_PIN(intr) \
(GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
/* Maps Interrupt X to a VPE */
#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
(GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4))
#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
/* Convert an interrupt number to a byte offset/bit for multi-word registers */
#define GIC_INTR_OFS(intr) (((intr) / 32)*4)
#define GIC_INTR_BIT(intr) ((intr) % 32)
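/*
 * Worked example (added for illustration): shared interrupt 35 lives in the
 * second 32-bit register of each bank, bit 3, since GIC_INTR_OFS(35) == 4
 * and GIC_INTR_BIT(35) == 3.  GIC_SH_MAP_TO_VPE_REG_OFF() above follows the
 * same idea with a 32-byte stride per interrupt.
 */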
/* Polarity : Reset Value is always 0 */
#define GIC_SH_SET_POLARITY_OFS 0x0100
#define GIC_SET_POLARITY(intr, pol) \
GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + \
GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \
(pol) << GIC_INTR_BIT(intr))
/* Triggering : Reset Value is always 0 */
#define GIC_SH_SET_TRIGGER_OFS 0x0180
#define GIC_SET_TRIGGER(intr, trig) \
GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + \
GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \
(trig) << GIC_INTR_BIT(intr))
/* Mask manipulation */
#define GIC_SH_SMASK_OFS 0x0380
#define GIC_SET_INTR_MASK(intr) \
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + \
GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr))
#define GIC_SH_RMASK_OFS 0x0300
#define GIC_CLR_INTR_MASK(intr) \
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + \
GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr))
/* Register Map for Local Section */
#define GIC_VPE_CTL_OFS 0x0000
#define GIC_VPE_PEND_OFS 0x0004
#define GIC_VPE_MASK_OFS 0x0008
#define GIC_VPE_RMASK_OFS 0x000c
#define GIC_VPE_SMASK_OFS 0x0010
#define GIC_VPE_WD_MAP_OFS 0x0040
#define GIC_VPE_COMPARE_MAP_OFS 0x0044
#define GIC_VPE_TIMER_MAP_OFS 0x0048
#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
#define GIC_VPE_SWINT0_MAP_OFS 0x0054
#define GIC_VPE_SWINT1_MAP_OFS 0x0058
#define GIC_VPE_OTHER_ADDR_OFS 0x0080
#define GIC_VPE_WD_CONFIG0_OFS 0x0090
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
#define GIC_VPE_COMPARE_HI_OFS 0x00a4
#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
#define GIC_VPE_EIC_SS(intr) \
(GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
#define GIC_VPE_EIC_VEC_BASE 0x0800
#define GIC_VPE_EIC_VEC(intr) \
(GIC_VPE_EIC_VEC_BASE + (4 * intr))
#define GIC_VPE_TENABLE_NMI_OFS 0x1000
#define GIC_VPE_TENABLE_YQ_OFS 0x1004
#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
/* User Mode Visible Section Register Map */
#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
/* Masks */
#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
#define GIC_SH_CONFIG_COUNTBITS_SHF 24
#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
#define GIC_SH_CONFIG_NUMINTRS_SHF 16
#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
#define GIC_SH_CONFIG_NUMVPES_SHF 0
#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
#define GIC_SH_WEDGE_SET(intr)		((intr) | (0x1 << 31))
#define GIC_SH_WEDGE_CLR(intr)		((intr) & ~(0x1 << 31))
#define GIC_MAP_TO_PIN_SHF 31
#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
#define GIC_MAP_TO_NMI_SHF 30
#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
#define GIC_MAP_TO_YQ_SHF 29
#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
#define GIC_MAP_SHF 0
#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
/* GIC_VPE_CTL Masks */
#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
#define GIC_VPE_CTL_EIC_MODE_SHF 0
#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
/* GIC_VPE_PEND Masks */
#define GIC_VPE_PEND_WD_SHF 0
#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
#define GIC_VPE_PEND_CMP_SHF 1
#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
#define GIC_VPE_PEND_TIMER_SHF 2
#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
#define GIC_VPE_PEND_PERFCOUNT_SHF 3
#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
#define GIC_VPE_PEND_SWINT0_SHF 4
#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
#define GIC_VPE_PEND_SWINT1_SHF 5
#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
/* GIC_VPE_RMASK Masks */
#define GIC_VPE_RMASK_WD_SHF 0
#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
#define GIC_VPE_RMASK_CMP_SHF 1
#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
#define GIC_VPE_RMASK_TIMER_SHF 2
#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
#define GIC_VPE_RMASK_PERFCNT_SHF 3
#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
#define GIC_VPE_RMASK_SWINT0_SHF 4
#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
#define GIC_VPE_RMASK_SWINT1_SHF 5
#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
/* GIC_VPE_SMASK Masks */
#define GIC_VPE_SMASK_WD_SHF 0
#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
#define GIC_VPE_SMASK_CMP_SHF 1
#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
#define GIC_VPE_SMASK_TIMER_SHF 2
#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
#define GIC_VPE_SMASK_PERFCNT_SHF 3
#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
#define GIC_VPE_SMASK_SWINT0_SHF 4
#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
#define GIC_VPE_SMASK_SWINT1_SHF 5
#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
/*
* Set the Mapping of Interrupt X to a VPE.
*/
#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \
GIC_SH_MAP_TO_VPE_REG_BIT(vpe))
/*
* Interrupt Meta-data specification. The ipiflag helps
* in building ipi_map.
*/
struct gic_intr_map {
unsigned int cpunum; /* Directed to this CPU */
#define GIC_UNUSED 0xdead /* Dummy data */
unsigned int pin; /* Directed to this Pin */
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
unsigned int flags; /* Misc flags */
#define GIC_FLAG_TRANSPARENT 0x01
};
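/*
 * Illustrative sketch (not part of the original header): platform code
 * typically hands gic_init() a static routing table such as
 *
 *	static struct gic_intr_map board_gic_intr_map[] = {
 *		{ 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, 0 },
 *		{ 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, 0 },
 *		{ GIC_UNUSED },
 *	};
 *
 * where the table name and the exact routing are assumptions of this example.
 */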
/*
* This is only used in EIC mode. This helps to figure out which
* shared interrupts we need to process when we get a vector interrupt.
*/
#define GIC_MAX_SHARED_INTR 0x5
struct gic_shared_intr_map {
unsigned int num_shared_intr;
unsigned int intr_list[GIC_MAX_SHARED_INTR];
unsigned int local_intr_mask;
};
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
#define GIC_CPU_INT1 1 /* . */
#define GIC_CPU_INT2 2 /* . */
#define GIC_CPU_INT3 3 /* . */
#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
/* Local GIC interrupts. */
#define GIC_INT_TMR (GIC_CPU_INT5)
#define GIC_INT_PERFCTR (GIC_CPU_INT5)
/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
#define GIC_CPU_TO_VEC_OFFSET (2)
/* An interrupt mapped to pin X makes the GIC generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET (1)
#include <linux/clocksource.h>
#include <linux/irq.h>
extern unsigned int gic_present;
extern unsigned int gic_frequency;
extern unsigned long _gic_base;
extern unsigned int gic_irq_base;
extern unsigned int gic_irq_flags[];
extern struct gic_shared_intr_map gic_shared_intr_map[];
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
unsigned int intrmap_size, unsigned int irqbase);
extern void gic_clocksource_init(unsigned int);
extern unsigned int gic_compare_int (void);
extern cycle_t gic_read_count(void);
extern cycle_t gic_read_compare(void);
extern void gic_write_compare(cycle_t cnt);
extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern void gic_bind_eic_interrupt(int irq, int set);
extern unsigned int gic_get_timer_pending(void);
extern void gic_get_int_mask(unsigned long *dst, const unsigned long *src);
extern unsigned int gic_get_int(void);
extern void gic_enable_interrupt(int irq_vec);
extern void gic_disable_interrupt(int irq_vec);
extern void gic_irq_ack(struct irq_data *d);
extern void gic_finish_irq(struct irq_data *d);
extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
#endif /* _ASM_GICREGS_H */

View file

@ -0,0 +1,56 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
struct gio_device_id {
__u8 id;
};
struct gio_device {
struct device dev;
struct resource resource;
unsigned int irq;
unsigned int slotno;
const char *name;
struct gio_device_id id;
unsigned id32:1;
unsigned gio64:1;
};
#define to_gio_device(d) container_of(d, struct gio_device, dev)
struct gio_driver {
const char *name;
struct module *owner;
const struct gio_device_id *id_table;
int (*probe)(struct gio_device *, const struct gio_device_id *);
void (*remove)(struct gio_device *);
int (*suspend)(struct gio_device *, pm_message_t);
int (*resume)(struct gio_device *);
void (*shutdown)(struct gio_device *);
struct device_driver driver;
};
#define to_gio_driver(drv) container_of(drv, struct gio_driver, driver)
extern const struct gio_device_id *gio_match_device(const struct gio_device_id *,
const struct gio_device *);
extern struct gio_device *gio_dev_get(struct gio_device *);
extern void gio_dev_put(struct gio_device *);
extern int gio_device_register(struct gio_device *);
extern void gio_device_unregister(struct gio_device *);
extern void gio_release_dev(struct device *);
static inline void gio_device_free(struct gio_device *dev)
{
gio_release_dev(&dev->dev);
}
extern int gio_register_driver(struct gio_driver *);
extern void gio_unregister_driver(struct gio_driver *);
#define gio_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
#define gio_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data))
extern void gio_set_master(struct gio_device *);
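/*
 * Illustrative sketch only (not part of the original header): a minimal GIO
 * driver binds through an id_table and gio_register_driver().  All names
 * below are made up for the example.
 *
 *	static const struct gio_device_id example_gio_ids[] = {
 *		{ .id = 0x0b },
 *		{ },
 *	};
 *
 *	static int example_probe(struct gio_device *gdev,
 *				 const struct gio_device_id *id)
 *	{
 *		gio_set_master(gdev);
 *		gio_set_drvdata(gdev, NULL);
 *		return 0;
 *	}
 *
 *	static struct gio_driver example_gio_driver = {
 *		.name	  = "example-gio",
 *		.owner	  = THIS_MODULE,
 *		.id_table = example_gio_ids,
 *		.probe	  = example_probe,
 *	};
 *
 * registered from the driver's init code with
 * gio_register_driver(&example_gio_driver).
 */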

View file

@ -0,0 +1,6 @@
#ifndef __ASM_MIPS_GPIO_H
#define __ASM_MIPS_GPIO_H
#include <gpio.h>
#endif /* __ASM_MIPS_GPIO_H */

View file

@ -0,0 +1,578 @@
/*
* Copyright (C) 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#ifndef _ASM_GT64120_H
#define _ASM_GT64120_H
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#define MSK(n) ((1 << (n)) - 1)
/*
* Register offset addresses
*/
/* CPU Configuration. */
#define GT_CPU_OFS 0x000
#define GT_MULTI_OFS 0x120
/* CPU Address Decode. */
#define GT_SCS10LD_OFS 0x008
#define GT_SCS10HD_OFS 0x010
#define GT_SCS32LD_OFS 0x018
#define GT_SCS32HD_OFS 0x020
#define GT_CS20LD_OFS 0x028
#define GT_CS20HD_OFS 0x030
#define GT_CS3BOOTLD_OFS 0x038
#define GT_CS3BOOTHD_OFS 0x040
#define GT_PCI0IOLD_OFS 0x048
#define GT_PCI0IOHD_OFS 0x050
#define GT_PCI0M0LD_OFS 0x058
#define GT_PCI0M0HD_OFS 0x060
#define GT_ISD_OFS 0x068
#define GT_PCI0M1LD_OFS 0x080
#define GT_PCI0M1HD_OFS 0x088
#define GT_PCI1IOLD_OFS 0x090
#define GT_PCI1IOHD_OFS 0x098
#define GT_PCI1M0LD_OFS 0x0a0
#define GT_PCI1M0HD_OFS 0x0a8
#define GT_PCI1M1LD_OFS 0x0b0
#define GT_PCI1M1HD_OFS 0x0b8
#define GT_PCI1M1LD_OFS 0x0b0
#define GT_PCI1M1HD_OFS 0x0b8
#define GT_SCS10AR_OFS 0x0d0
#define GT_SCS32AR_OFS 0x0d8
#define GT_CS20R_OFS 0x0e0
#define GT_CS3BOOTR_OFS 0x0e8
#define GT_PCI0IOREMAP_OFS 0x0f0
#define GT_PCI0M0REMAP_OFS 0x0f8
#define GT_PCI0M1REMAP_OFS 0x100
#define GT_PCI1IOREMAP_OFS 0x108
#define GT_PCI1M0REMAP_OFS 0x110
#define GT_PCI1M1REMAP_OFS 0x118
/* CPU Error Report. */
#define GT_CPUERR_ADDRLO_OFS 0x070
#define GT_CPUERR_ADDRHI_OFS 0x078
#define GT_CPUERR_DATALO_OFS 0x128 /* GT-64120A only */
#define GT_CPUERR_DATAHI_OFS 0x130 /* GT-64120A only */
#define GT_CPUERR_PARITY_OFS 0x138 /* GT-64120A only */
/* CPU Sync Barrier. */
#define GT_PCI0SYNC_OFS 0x0c0
#define GT_PCI1SYNC_OFS 0x0c8
/* SDRAM and Device Address Decode. */
#define GT_SCS0LD_OFS 0x400
#define GT_SCS0HD_OFS 0x404
#define GT_SCS1LD_OFS 0x408
#define GT_SCS1HD_OFS 0x40c
#define GT_SCS2LD_OFS 0x410
#define GT_SCS2HD_OFS 0x414
#define GT_SCS3LD_OFS 0x418
#define GT_SCS3HD_OFS 0x41c
#define GT_CS0LD_OFS 0x420
#define GT_CS0HD_OFS 0x424
#define GT_CS1LD_OFS 0x428
#define GT_CS1HD_OFS 0x42c
#define GT_CS2LD_OFS 0x430
#define GT_CS2HD_OFS 0x434
#define GT_CS3LD_OFS 0x438
#define GT_CS3HD_OFS 0x43c
#define GT_BOOTLD_OFS 0x440
#define GT_BOOTHD_OFS 0x444
#define GT_ADERR_OFS 0x470
/* SDRAM Configuration. */
#define GT_SDRAM_CFG_OFS 0x448
#define GT_SDRAM_OPMODE_OFS 0x474
#define GT_SDRAM_BM_OFS 0x478
#define GT_SDRAM_ADDRDECODE_OFS 0x47c
/* SDRAM Parameters. */
#define GT_SDRAM_B0_OFS 0x44c
#define GT_SDRAM_B1_OFS 0x450
#define GT_SDRAM_B2_OFS 0x454
#define GT_SDRAM_B3_OFS 0x458
/* Device Parameters. */
#define GT_DEV_B0_OFS 0x45c
#define GT_DEV_B1_OFS 0x460
#define GT_DEV_B2_OFS 0x464
#define GT_DEV_B3_OFS 0x468
#define GT_DEV_BOOT_OFS 0x46c
/* ECC. */
#define GT_ECC_ERRDATALO 0x480 /* GT-64120A only */
#define GT_ECC_ERRDATAHI 0x484 /* GT-64120A only */
#define GT_ECC_MEM 0x488 /* GT-64120A only */
#define GT_ECC_CALC 0x48c /* GT-64120A only */
#define GT_ECC_ERRADDR 0x490 /* GT-64120A only */
/* DMA Record. */
#define GT_DMA0_CNT_OFS 0x800
#define GT_DMA1_CNT_OFS 0x804
#define GT_DMA2_CNT_OFS 0x808
#define GT_DMA3_CNT_OFS 0x80c
#define GT_DMA0_SA_OFS 0x810
#define GT_DMA1_SA_OFS 0x814
#define GT_DMA2_SA_OFS 0x818
#define GT_DMA3_SA_OFS 0x81c
#define GT_DMA0_DA_OFS 0x820
#define GT_DMA1_DA_OFS 0x824
#define GT_DMA2_DA_OFS 0x828
#define GT_DMA3_DA_OFS 0x82c
#define GT_DMA0_NEXT_OFS 0x830
#define GT_DMA1_NEXT_OFS 0x834
#define GT_DMA2_NEXT_OFS 0x838
#define GT_DMA3_NEXT_OFS 0x83c
#define GT_DMA0_CUR_OFS 0x870
#define GT_DMA1_CUR_OFS 0x874
#define GT_DMA2_CUR_OFS 0x878
#define GT_DMA3_CUR_OFS 0x87c
/* DMA Channel Control. */
#define GT_DMA0_CTRL_OFS 0x840
#define GT_DMA1_CTRL_OFS 0x844
#define GT_DMA2_CTRL_OFS 0x848
#define GT_DMA3_CTRL_OFS 0x84c
/* DMA Arbiter. */
#define GT_DMA_ARB_OFS 0x860
/* Timer/Counter. */
#define GT_TC0_OFS 0x850
#define GT_TC1_OFS 0x854
#define GT_TC2_OFS 0x858
#define GT_TC3_OFS 0x85c
#define GT_TC_CONTROL_OFS 0x864
/* PCI Internal. */
#define GT_PCI0_CMD_OFS 0xc00
#define GT_PCI0_TOR_OFS 0xc04
#define GT_PCI0_BS_SCS10_OFS 0xc08
#define GT_PCI0_BS_SCS32_OFS 0xc0c
#define GT_PCI0_BS_CS20_OFS 0xc10
#define GT_PCI0_BS_CS3BT_OFS 0xc14
#define GT_PCI1_IACK_OFS 0xc30
#define GT_PCI0_IACK_OFS 0xc34
#define GT_PCI0_BARE_OFS 0xc3c
#define GT_PCI0_PREFMBR_OFS 0xc40
#define GT_PCI0_SCS10_BAR_OFS 0xc48
#define GT_PCI0_SCS32_BAR_OFS 0xc4c
#define GT_PCI0_CS20_BAR_OFS 0xc50
#define GT_PCI0_CS3BT_BAR_OFS 0xc54
#define GT_PCI0_SSCS10_BAR_OFS 0xc58
#define GT_PCI0_SSCS32_BAR_OFS 0xc5c
#define GT_PCI0_SCS3BT_BAR_OFS 0xc64
#define GT_PCI1_CMD_OFS 0xc80
#define GT_PCI1_TOR_OFS 0xc84
#define GT_PCI1_BS_SCS10_OFS 0xc88
#define GT_PCI1_BS_SCS32_OFS 0xc8c
#define GT_PCI1_BS_CS20_OFS 0xc90
#define GT_PCI1_BS_CS3BT_OFS 0xc94
#define GT_PCI1_BARE_OFS 0xcbc
#define GT_PCI1_PREFMBR_OFS 0xcc0
#define GT_PCI1_SCS10_BAR_OFS 0xcc8
#define GT_PCI1_SCS32_BAR_OFS 0xccc
#define GT_PCI1_CS20_BAR_OFS 0xcd0
#define GT_PCI1_CS3BT_BAR_OFS 0xcd4
#define GT_PCI1_SSCS10_BAR_OFS 0xcd8
#define GT_PCI1_SSCS32_BAR_OFS 0xcdc
#define GT_PCI1_SCS3BT_BAR_OFS 0xce4
#define GT_PCI1_CFGADDR_OFS 0xcf0
#define GT_PCI1_CFGDATA_OFS 0xcf4
#define GT_PCI0_CFGADDR_OFS 0xcf8
#define GT_PCI0_CFGDATA_OFS 0xcfc
/* Interrupts. */
#define GT_INTRCAUSE_OFS 0xc18
#define GT_INTRMASK_OFS 0xc1c
#define GT_PCI0_ICMASK_OFS 0xc24
#define GT_PCI0_SERR0MASK_OFS 0xc28
#define GT_CPU_INTSEL_OFS 0xc70
#define GT_PCI0_INTSEL_OFS 0xc74
#define GT_HINTRCAUSE_OFS 0xc98
#define GT_HINTRMASK_OFS 0xc9c
#define GT_PCI0_HICMASK_OFS 0xca4
#define GT_PCI1_SERR1MASK_OFS 0xca8
/*
* I2O Support Registers
*/
#define INBOUND_MESSAGE_REGISTER0_PCI_SIDE 0x010
#define INBOUND_MESSAGE_REGISTER1_PCI_SIDE 0x014
#define OUTBOUND_MESSAGE_REGISTER0_PCI_SIDE 0x018
#define OUTBOUND_MESSAGE_REGISTER1_PCI_SIDE 0x01c
#define INBOUND_DOORBELL_REGISTER_PCI_SIDE 0x020
#define INBOUND_INTERRUPT_CAUSE_REGISTER_PCI_SIDE 0x024
#define INBOUND_INTERRUPT_MASK_REGISTER_PCI_SIDE 0x028
#define OUTBOUND_DOORBELL_REGISTER_PCI_SIDE 0x02c
#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_PCI_SIDE 0x030
#define OUTBOUND_INTERRUPT_MASK_REGISTER_PCI_SIDE 0x034
#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI_SIDE 0x040
#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI_SIDE 0x044
#define QUEUE_CONTROL_REGISTER_PCI_SIDE 0x050
#define QUEUE_BASE_ADDRESS_REGISTER_PCI_SIDE 0x054
#define INBOUND_FREE_HEAD_POINTER_REGISTER_PCI_SIDE 0x060
#define INBOUND_FREE_TAIL_POINTER_REGISTER_PCI_SIDE 0x064
#define INBOUND_POST_HEAD_POINTER_REGISTER_PCI_SIDE 0x068
#define INBOUND_POST_TAIL_POINTER_REGISTER_PCI_SIDE 0x06c
#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_PCI_SIDE 0x070
#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_PCI_SIDE 0x074
#define OUTBOUND_POST_HEAD_POINTER_REGISTER_PCI_SIDE 0x078
#define OUTBOUND_POST_TAIL_POINTER_REGISTER_PCI_SIDE 0x07c
#define INBOUND_MESSAGE_REGISTER0_CPU_SIDE 0x1c10
#define INBOUND_MESSAGE_REGISTER1_CPU_SIDE 0x1c14
#define OUTBOUND_MESSAGE_REGISTER0_CPU_SIDE 0x1c18
#define OUTBOUND_MESSAGE_REGISTER1_CPU_SIDE 0x1c1c
#define INBOUND_DOORBELL_REGISTER_CPU_SIDE 0x1c20
#define INBOUND_INTERRUPT_CAUSE_REGISTER_CPU_SIDE 0x1c24
#define INBOUND_INTERRUPT_MASK_REGISTER_CPU_SIDE 0x1c28
#define OUTBOUND_DOORBELL_REGISTER_CPU_SIDE 0x1c2c
#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_CPU_SIDE 0x1c30
#define OUTBOUND_INTERRUPT_MASK_REGISTER_CPU_SIDE 0x1c34
#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU_SIDE 0x1c40
#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU_SIDE 0x1c44
#define QUEUE_CONTROL_REGISTER_CPU_SIDE 0x1c50
#define QUEUE_BASE_ADDRESS_REGISTER_CPU_SIDE 0x1c54
#define INBOUND_FREE_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c60
#define INBOUND_FREE_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c64
#define INBOUND_POST_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c68
#define INBOUND_POST_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c6c
#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c70
#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c74
#define OUTBOUND_POST_HEAD_POINTER_REGISTER_CPU_SIDE 0x1c78
#define OUTBOUND_POST_TAIL_POINTER_REGISTER_CPU_SIDE 0x1c7c
/*
* Register encodings
*/
#define GT_CPU_ENDIAN_SHF 12
#define GT_CPU_ENDIAN_MSK (MSK(1) << GT_CPU_ENDIAN_SHF)
#define GT_CPU_ENDIAN_BIT GT_CPU_ENDIAN_MSK
#define GT_CPU_WR_SHF 16
#define GT_CPU_WR_MSK (MSK(1) << GT_CPU_WR_SHF)
#define GT_CPU_WR_BIT GT_CPU_WR_MSK
#define GT_CPU_WR_DXDXDXDX 0
#define GT_CPU_WR_DDDD 1
#define GT_PCI_DCRM_SHF 21
#define GT_PCI_LD_SHF 0
#define GT_PCI_LD_MSK (MSK(15) << GT_PCI_LD_SHF)
#define GT_PCI_HD_SHF 0
#define GT_PCI_HD_MSK (MSK(7) << GT_PCI_HD_SHF)
#define GT_PCI_REMAP_SHF 0
#define GT_PCI_REMAP_MSK (MSK(11) << GT_PCI_REMAP_SHF)
#define GT_CFGADDR_CFGEN_SHF 31
#define GT_CFGADDR_CFGEN_MSK (MSK(1) << GT_CFGADDR_CFGEN_SHF)
#define GT_CFGADDR_CFGEN_BIT GT_CFGADDR_CFGEN_MSK
#define GT_CFGADDR_BUSNUM_SHF 16
#define GT_CFGADDR_BUSNUM_MSK (MSK(8) << GT_CFGADDR_BUSNUM_SHF)
#define GT_CFGADDR_DEVNUM_SHF 11
#define GT_CFGADDR_DEVNUM_MSK (MSK(5) << GT_CFGADDR_DEVNUM_SHF)
#define GT_CFGADDR_FUNCNUM_SHF 8
#define GT_CFGADDR_FUNCNUM_MSK (MSK(3) << GT_CFGADDR_FUNCNUM_SHF)
#define GT_CFGADDR_REGNUM_SHF 2
#define GT_CFGADDR_REGNUM_MSK (MSK(6) << GT_CFGADDR_REGNUM_SHF)
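/*
 * Illustrative sketch (not part of the original header): a PCI
 * configuration cycle address could be composed from the field macros
 * above before being written to the CFGADDR register.  The helper name
 * and its parameters are hypothetical; "where" is the byte offset into
 * configuration space.
 *
 *	static inline u32 gt_mk_cfgaddr(u32 bus, u32 dev, u32 func, u32 where)
 *	{
 *		return GT_CFGADDR_CFGEN_BIT |
 *		       ((bus << GT_CFGADDR_BUSNUM_SHF) & GT_CFGADDR_BUSNUM_MSK) |
 *		       ((dev << GT_CFGADDR_DEVNUM_SHF) & GT_CFGADDR_DEVNUM_MSK) |
 *		       ((func << GT_CFGADDR_FUNCNUM_SHF) & GT_CFGADDR_FUNCNUM_MSK) |
 *		       (((where / 4) << GT_CFGADDR_REGNUM_SHF) & GT_CFGADDR_REGNUM_MSK);
 *	}
 */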
#define GT_SDRAM_BM_ORDER_SHF 2
#define GT_SDRAM_BM_ORDER_MSK (MSK(1) << GT_SDRAM_BM_ORDER_SHF)
#define GT_SDRAM_BM_ORDER_BIT GT_SDRAM_BM_ORDER_MSK
#define GT_SDRAM_BM_ORDER_SUB 1
#define GT_SDRAM_BM_ORDER_LIN 0
#define GT_SDRAM_BM_RSVD_ALL1 0xffb
#define GT_SDRAM_ADDRDECODE_ADDR_SHF 0
#define GT_SDRAM_ADDRDECODE_ADDR_MSK (MSK(3) << GT_SDRAM_ADDRDECODE_ADDR_SHF)
#define GT_SDRAM_ADDRDECODE_ADDR_0 0
#define GT_SDRAM_ADDRDECODE_ADDR_1 1
#define GT_SDRAM_ADDRDECODE_ADDR_2 2
#define GT_SDRAM_ADDRDECODE_ADDR_3 3
#define GT_SDRAM_ADDRDECODE_ADDR_4 4
#define GT_SDRAM_ADDRDECODE_ADDR_5 5
#define GT_SDRAM_ADDRDECODE_ADDR_6 6
#define GT_SDRAM_ADDRDECODE_ADDR_7 7
#define GT_SDRAM_B0_CASLAT_SHF 0
#define GT_SDRAM_B0_CASLAT_MSK (MSK(2) << GT_SDRAM_B0_CASLAT_SHF)
#define GT_SDRAM_B0_CASLAT_2 1
#define GT_SDRAM_B0_CASLAT_3 2
#define GT_SDRAM_B0_FTDIS_SHF 2
#define GT_SDRAM_B0_FTDIS_MSK (MSK(1) << GT_SDRAM_B0_FTDIS_SHF)
#define GT_SDRAM_B0_FTDIS_BIT GT_SDRAM_B0_FTDIS_MSK
#define GT_SDRAM_B0_SRASPRCHG_SHF 3
#define GT_SDRAM_B0_SRASPRCHG_MSK (MSK(1) << GT_SDRAM_B0_SRASPRCHG_SHF)
#define GT_SDRAM_B0_SRASPRCHG_BIT GT_SDRAM_B0_SRASPRCHG_MSK
#define GT_SDRAM_B0_SRASPRCHG_2 0
#define GT_SDRAM_B0_SRASPRCHG_3 1
#define GT_SDRAM_B0_B0COMPAB_SHF 4
#define GT_SDRAM_B0_B0COMPAB_MSK (MSK(1) << GT_SDRAM_B0_B0COMPAB_SHF)
#define GT_SDRAM_B0_B0COMPAB_BIT GT_SDRAM_B0_B0COMPAB_MSK
#define GT_SDRAM_B0_64BITINT_SHF 5
#define GT_SDRAM_B0_64BITINT_MSK (MSK(1) << GT_SDRAM_B0_64BITINT_SHF)
#define GT_SDRAM_B0_64BITINT_BIT GT_SDRAM_B0_64BITINT_MSK
#define GT_SDRAM_B0_64BITINT_2 0
#define GT_SDRAM_B0_64BITINT_4 1
#define GT_SDRAM_B0_BW_SHF 6
#define GT_SDRAM_B0_BW_MSK (MSK(1) << GT_SDRAM_B0_BW_SHF)
#define GT_SDRAM_B0_BW_BIT GT_SDRAM_B0_BW_MSK
#define GT_SDRAM_B0_BW_32 0
#define GT_SDRAM_B0_BW_64 1
#define GT_SDRAM_B0_BLODD_SHF 7
#define GT_SDRAM_B0_BLODD_MSK (MSK(1) << GT_SDRAM_B0_BLODD_SHF)
#define GT_SDRAM_B0_BLODD_BIT GT_SDRAM_B0_BLODD_MSK
#define GT_SDRAM_B0_PAR_SHF 8
#define GT_SDRAM_B0_PAR_MSK (MSK(1) << GT_SDRAM_B0_PAR_SHF)
#define GT_SDRAM_B0_PAR_BIT GT_SDRAM_B0_PAR_MSK
#define GT_SDRAM_B0_BYPASS_SHF 9
#define GT_SDRAM_B0_BYPASS_MSK (MSK(1) << GT_SDRAM_B0_BYPASS_SHF)
#define GT_SDRAM_B0_BYPASS_BIT GT_SDRAM_B0_BYPASS_MSK
#define GT_SDRAM_B0_SRAS2SCAS_SHF 10
#define GT_SDRAM_B0_SRAS2SCAS_MSK (MSK(1) << GT_SDRAM_B0_SRAS2SCAS_SHF)
#define GT_SDRAM_B0_SRAS2SCAS_BIT GT_SDRAM_B0_SRAS2SCAS_MSK
#define GT_SDRAM_B0_SRAS2SCAS_2 0
#define GT_SDRAM_B0_SRAS2SCAS_3 1
#define GT_SDRAM_B0_SIZE_SHF 11
#define GT_SDRAM_B0_SIZE_MSK (MSK(1) << GT_SDRAM_B0_SIZE_SHF)
#define GT_SDRAM_B0_SIZE_BIT GT_SDRAM_B0_SIZE_MSK
#define GT_SDRAM_B0_SIZE_16M 0
#define GT_SDRAM_B0_SIZE_64M 1
#define GT_SDRAM_B0_EXTPAR_SHF 12
#define GT_SDRAM_B0_EXTPAR_MSK (MSK(1) << GT_SDRAM_B0_EXTPAR_SHF)
#define GT_SDRAM_B0_EXTPAR_BIT GT_SDRAM_B0_EXTPAR_MSK
#define GT_SDRAM_B0_BLEN_SHF 13
#define GT_SDRAM_B0_BLEN_MSK (MSK(1) << GT_SDRAM_B0_BLEN_SHF)
#define GT_SDRAM_B0_BLEN_BIT GT_SDRAM_B0_BLEN_MSK
#define GT_SDRAM_B0_BLEN_8 0
#define GT_SDRAM_B0_BLEN_4 1
#define GT_SDRAM_CFG_REFINT_SHF 0
#define GT_SDRAM_CFG_REFINT_MSK (MSK(14) << GT_SDRAM_CFG_REFINT_SHF)
#define GT_SDRAM_CFG_NINTERLEAVE_SHF 14
#define GT_SDRAM_CFG_NINTERLEAVE_MSK (MSK(1) << GT_SDRAM_CFG_NINTERLEAVE_SHF)
#define GT_SDRAM_CFG_NINTERLEAVE_BIT GT_SDRAM_CFG_NINTERLEAVE_MSK
#define GT_SDRAM_CFG_RMW_SHF 15
#define GT_SDRAM_CFG_RMW_MSK (MSK(1) << GT_SDRAM_CFG_RMW_SHF)
#define GT_SDRAM_CFG_RMW_BIT GT_SDRAM_CFG_RMW_MSK
#define GT_SDRAM_CFG_NONSTAGREF_SHF 16
#define GT_SDRAM_CFG_NONSTAGREF_MSK (MSK(1) << GT_SDRAM_CFG_NONSTAGREF_SHF)
#define GT_SDRAM_CFG_NONSTAGREF_BIT GT_SDRAM_CFG_NONSTAGREF_MSK
#define GT_SDRAM_CFG_DUPCNTL_SHF 19
#define GT_SDRAM_CFG_DUPCNTL_MSK (MSK(1) << GT_SDRAM_CFG_DUPCNTL_SHF)
#define GT_SDRAM_CFG_DUPCNTL_BIT GT_SDRAM_CFG_DUPCNTL_MSK
#define GT_SDRAM_CFG_DUPBA_SHF 20
#define GT_SDRAM_CFG_DUPBA_MSK (MSK(1) << GT_SDRAM_CFG_DUPBA_SHF)
#define GT_SDRAM_CFG_DUPBA_BIT GT_SDRAM_CFG_DUPBA_MSK
#define GT_SDRAM_CFG_DUPEOT0_SHF 21
#define GT_SDRAM_CFG_DUPEOT0_MSK (MSK(1) << GT_SDRAM_CFG_DUPEOT0_SHF)
#define GT_SDRAM_CFG_DUPEOT0_BIT GT_SDRAM_CFG_DUPEOT0_MSK
#define GT_SDRAM_CFG_DUPEOT1_SHF 22
#define GT_SDRAM_CFG_DUPEOT1_MSK (MSK(1) << GT_SDRAM_CFG_DUPEOT1_SHF)
#define GT_SDRAM_CFG_DUPEOT1_BIT GT_SDRAM_CFG_DUPEOT1_MSK
#define GT_SDRAM_OPMODE_OP_SHF 0
#define GT_SDRAM_OPMODE_OP_MSK (MSK(3) << GT_SDRAM_OPMODE_OP_SHF)
#define GT_SDRAM_OPMODE_OP_NORMAL 0
#define GT_SDRAM_OPMODE_OP_NOP 1
#define GT_SDRAM_OPMODE_OP_PRCHG 2
#define GT_SDRAM_OPMODE_OP_MODE 3
#define GT_SDRAM_OPMODE_OP_CBR 4
#define GT_TC_CONTROL_ENTC0_SHF 0
#define GT_TC_CONTROL_ENTC0_MSK (MSK(1) << GT_TC_CONTROL_ENTC0_SHF)
#define GT_TC_CONTROL_ENTC0_BIT GT_TC_CONTROL_ENTC0_MSK
#define GT_TC_CONTROL_SELTC0_SHF 1
#define GT_TC_CONTROL_SELTC0_MSK (MSK(1) << GT_TC_CONTROL_SELTC0_SHF)
#define GT_TC_CONTROL_SELTC0_BIT GT_TC_CONTROL_SELTC0_MSK
#define GT_PCI0_BARE_SWSCS3BOOTDIS_SHF 0
#define GT_PCI0_BARE_SWSCS3BOOTDIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS3BOOTDIS_SHF)
#define GT_PCI0_BARE_SWSCS3BOOTDIS_BIT GT_PCI0_BARE_SWSCS3BOOTDIS_MSK
#define GT_PCI0_BARE_SWSCS32DIS_SHF 1
#define GT_PCI0_BARE_SWSCS32DIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS32DIS_SHF)
#define GT_PCI0_BARE_SWSCS32DIS_BIT GT_PCI0_BARE_SWSCS32DIS_MSK
#define GT_PCI0_BARE_SWSCS10DIS_SHF 2
#define GT_PCI0_BARE_SWSCS10DIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS10DIS_SHF)
#define GT_PCI0_BARE_SWSCS10DIS_BIT GT_PCI0_BARE_SWSCS10DIS_MSK
#define GT_PCI0_BARE_INTIODIS_SHF 3
#define GT_PCI0_BARE_INTIODIS_MSK (MSK(1) << GT_PCI0_BARE_INTIODIS_SHF)
#define GT_PCI0_BARE_INTIODIS_BIT GT_PCI0_BARE_INTIODIS_MSK
#define GT_PCI0_BARE_INTMEMDIS_SHF 4
#define GT_PCI0_BARE_INTMEMDIS_MSK (MSK(1) << GT_PCI0_BARE_INTMEMDIS_SHF)
#define GT_PCI0_BARE_INTMEMDIS_BIT GT_PCI0_BARE_INTMEMDIS_MSK
#define GT_PCI0_BARE_CS3BOOTDIS_SHF 5
#define GT_PCI0_BARE_CS3BOOTDIS_MSK (MSK(1) << GT_PCI0_BARE_CS3BOOTDIS_SHF)
#define GT_PCI0_BARE_CS3BOOTDIS_BIT GT_PCI0_BARE_CS3BOOTDIS_MSK
#define GT_PCI0_BARE_CS20DIS_SHF 6
#define GT_PCI0_BARE_CS20DIS_MSK (MSK(1) << GT_PCI0_BARE_CS20DIS_SHF)
#define GT_PCI0_BARE_CS20DIS_BIT GT_PCI0_BARE_CS20DIS_MSK
#define GT_PCI0_BARE_SCS32DIS_SHF 7
#define GT_PCI0_BARE_SCS32DIS_MSK (MSK(1) << GT_PCI0_BARE_SCS32DIS_SHF)
#define GT_PCI0_BARE_SCS32DIS_BIT GT_PCI0_BARE_SCS32DIS_MSK
#define GT_PCI0_BARE_SCS10DIS_SHF 8
#define GT_PCI0_BARE_SCS10DIS_MSK (MSK(1) << GT_PCI0_BARE_SCS10DIS_SHF)
#define GT_PCI0_BARE_SCS10DIS_BIT GT_PCI0_BARE_SCS10DIS_MSK
#define GT_INTRCAUSE_MASABORT0_SHF 18
#define GT_INTRCAUSE_MASABORT0_MSK (MSK(1) << GT_INTRCAUSE_MASABORT0_SHF)
#define GT_INTRCAUSE_MASABORT0_BIT GT_INTRCAUSE_MASABORT0_MSK
#define GT_INTRCAUSE_TARABORT0_SHF 19
#define GT_INTRCAUSE_TARABORT0_MSK (MSK(1) << GT_INTRCAUSE_TARABORT0_SHF)
#define GT_INTRCAUSE_TARABORT0_BIT GT_INTRCAUSE_TARABORT0_MSK
#define GT_PCI0_CFGADDR_REGNUM_SHF 2
#define GT_PCI0_CFGADDR_REGNUM_MSK (MSK(6) << GT_PCI0_CFGADDR_REGNUM_SHF)
#define GT_PCI0_CFGADDR_FUNCTNUM_SHF 8
#define GT_PCI0_CFGADDR_FUNCTNUM_MSK (MSK(3) << GT_PCI0_CFGADDR_FUNCTNUM_SHF)
#define GT_PCI0_CFGADDR_DEVNUM_SHF 11
#define GT_PCI0_CFGADDR_DEVNUM_MSK (MSK(5) << GT_PCI0_CFGADDR_DEVNUM_SHF)
#define GT_PCI0_CFGADDR_BUSNUM_SHF 16
#define GT_PCI0_CFGADDR_BUSNUM_MSK (MSK(8) << GT_PCI0_CFGADDR_BUSNUM_SHF)
#define GT_PCI0_CFGADDR_CONFIGEN_SHF 31
#define GT_PCI0_CFGADDR_CONFIGEN_MSK (MSK(1) << GT_PCI0_CFGADDR_CONFIGEN_SHF)
#define GT_PCI0_CFGADDR_CONFIGEN_BIT GT_PCI0_CFGADDR_CONFIGEN_MSK
#define GT_PCI0_CMD_MBYTESWAP_SHF 0
#define GT_PCI0_CMD_MBYTESWAP_MSK (MSK(1) << GT_PCI0_CMD_MBYTESWAP_SHF)
#define GT_PCI0_CMD_MBYTESWAP_BIT GT_PCI0_CMD_MBYTESWAP_MSK
#define GT_PCI0_CMD_MWORDSWAP_SHF 10
#define GT_PCI0_CMD_MWORDSWAP_MSK (MSK(1) << GT_PCI0_CMD_MWORDSWAP_SHF)
#define GT_PCI0_CMD_MWORDSWAP_BIT GT_PCI0_CMD_MWORDSWAP_MSK
#define GT_PCI0_CMD_SBYTESWAP_SHF 16
#define GT_PCI0_CMD_SBYTESWAP_MSK (MSK(1) << GT_PCI0_CMD_SBYTESWAP_SHF)
#define GT_PCI0_CMD_SBYTESWAP_BIT GT_PCI0_CMD_SBYTESWAP_MSK
#define GT_PCI0_CMD_SWORDSWAP_SHF 11
#define GT_PCI0_CMD_SWORDSWAP_MSK (MSK(1) << GT_PCI0_CMD_SWORDSWAP_SHF)
#define GT_PCI0_CMD_SWORDSWAP_BIT GT_PCI0_CMD_SWORDSWAP_MSK
#define GT_INTR_T0EXP_SHF 8
#define GT_INTR_T0EXP_MSK (MSK(1) << GT_INTR_T0EXP_SHF)
#define GT_INTR_T0EXP_BIT GT_INTR_T0EXP_MSK
#define GT_INTR_RETRYCTR0_SHF 20
#define GT_INTR_RETRYCTR0_MSK (MSK(1) << GT_INTR_RETRYCTR0_SHF)
#define GT_INTR_RETRYCTR0_BIT GT_INTR_RETRYCTR0_MSK
/*
* Misc
*/
#define GT_DEF_PCI0_IO_BASE 0x10000000UL
#define GT_DEF_PCI0_IO_SIZE 0x02000000UL
#define GT_DEF_PCI0_MEM0_BASE 0x12000000UL
#define GT_DEF_PCI0_MEM0_SIZE 0x02000000UL
#define GT_DEF_BASE 0x14000000UL
#define GT_MAX_BANKSIZE (256 * 1024 * 1024) /* Max 256MB bank */
#define GT_LATTIM_MIN 6 /* Minimum lat */
/*
* The gt64120_dep.h file must define the following macros
*
* GT_READ(ofs, data_pointer)
* GT_WRITE(ofs, data) - read/write GT64120 registers in 32bit
*
* TIMER - gt64120 timer irq, temporary solution until
* full gt64120 cascade interrupt support is in place
*/
#include <mach-gt64120.h>
/*
* Because of an error/peculiarity in the Galileo chip, we need to swap the
 * bytes when running big endian.  We also provide non-swapping versions.
*/
#define __GT_READ(ofs) \
(*(volatile u32 *)(GT64120_BASE+(ofs)))
#define __GT_WRITE(ofs, data) \
do { *(volatile u32 *)(GT64120_BASE+(ofs)) = (data); } while (0)
#define GT_READ(ofs) le32_to_cpu(__GT_READ(ofs))
#define GT_WRITE(ofs, data) __GT_WRITE(ofs, cpu_to_le32(data))
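/*
 * Illustrative sketch (not part of the original header): typical use of
 * the accessors above, assuming GT64120_BASE comes from <mach-gt64120.h>.
 * Acknowledging an expired timer 0 interrupt might look like:
 *
 *	if (GT_READ(GT_INTRCAUSE_OFS) & GT_INTR_T0EXP_BIT)
 *		GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_T0EXP_MSK);
 */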
extern void gt641xx_set_base_clock(unsigned int clock);
extern int gt641xx_timer0_state(void);
#endif /* _ASM_GT64120_H */

View file

@@ -0,0 +1,18 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997, 98, 99, 2000, 01, 05 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#ifndef _ASM_HARDIRQ_H
#define _ASM_HARDIRQ_H
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
#include <asm-generic/hardirq.h>
#endif /* _ASM_HARDIRQ_H */

View file

@@ -0,0 +1,364 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) MIPS Technologies, Inc.
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H
#include <linux/stringify.h>
#define ___ssnop \
sll $0, $0, 1
#define ___ehb \
sll $0, $0, 3
/*
* TLB hazards
*/
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
/*
* MIPSR2 defines ehb for hazard avoidance
*/
#define __mtc0_tlbw_hazard \
___ehb
#define __tlbw_use_hazard \
___ehb
#define __tlb_probe_hazard \
___ehb
#define __irq_enable_hazard \
___ehb
#define __irq_disable_hazard \
___ehb
#define __back_to_back_c0_hazard \
___ehb
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit and 64-bit code respectively, so they can't be used
 * without conditional compilation.  The alternative is switching the
 * assembler to 64-bit code, which happens to work right even for 32-bit
 * code ...
*/
#define instruction_hazard() \
do { \
unsigned long tmp; \
\
__asm__ __volatile__( \
" .set mips64r2 \n" \
" dla %0, 1f \n" \
" jr.hb %0 \n" \
" .set mips0 \n" \
"1: \n" \
: "=r" (tmp)); \
} while (0)
#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
defined(CONFIG_CPU_BMIPS)
/*
* These are slightly complicated by the fact that we guarantee R1 kernels to
* run fine on R2 processors.
*/
#define __mtc0_tlbw_hazard \
___ssnop; \
___ssnop; \
___ehb
#define __tlbw_use_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
#define __tlb_probe_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
#define __irq_enable_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
#define __irq_disable_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
#define __back_to_back_c0_hazard \
___ssnop; \
___ssnop; \
___ssnop; \
___ehb
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit and 64-bit code respectively, so they can't be used
 * without conditional compilation.  The alternative is switching the
 * assembler to 64-bit code, which happens to work right even for 32-bit
 * code ...
*/
#define __instruction_hazard() \
do { \
unsigned long tmp; \
\
__asm__ __volatile__( \
" .set mips64r2 \n" \
" dla %0, 1f \n" \
" jr.hb %0 \n" \
" .set mips0 \n" \
"1: \n" \
: "=r" (tmp)); \
} while (0)
#define instruction_hazard() \
do { \
if (cpu_has_mips_r2) \
__instruction_hazard(); \
} while (0)
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
*/
#define __mtc0_tlbw_hazard
#define __tlbw_use_hazard
#define __tlb_probe_hazard
#define __irq_enable_hazard
#define __irq_disable_hazard
#define __back_to_back_c0_hazard
#define instruction_hazard() do { } while (0)
#elif defined(CONFIG_CPU_SB1)
/*
* Mostly like R4000 for historic reasons
*/
#define __mtc0_tlbw_hazard
#define __tlbw_use_hazard
#define __tlb_probe_hazard
#define __irq_enable_hazard
#define __irq_disable_hazard \
___ssnop; \
___ssnop; \
___ssnop
#define __back_to_back_c0_hazard
#define instruction_hazard() do { } while (0)
#else
/*
 * Finally, the catch-all case for all other processors, including R4000,
 * R4400, R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single
 * cycle hazard, so this is a nice trick to have optimal code for a range
 * of processors.
*/
#define __mtc0_tlbw_hazard \
nop; \
nop
#define __tlbw_use_hazard \
nop; \
nop; \
nop
#define __tlb_probe_hazard \
nop; \
nop; \
nop
#define __irq_enable_hazard \
___ssnop; \
___ssnop; \
___ssnop
#define __irq_disable_hazard \
nop; \
nop; \
nop
#define __back_to_back_c0_hazard \
___ssnop; \
___ssnop; \
___ssnop
#define instruction_hazard() do { } while (0)
#endif
/* FPU hazards */
#if defined(CONFIG_CPU_SB1)
#define __enable_fpu_hazard \
.set push; \
.set mips64; \
.set noreorder; \
___ssnop; \
bnezl $0, .+4; \
___ssnop; \
.set pop
#define __disable_fpu_hazard
#elif defined(CONFIG_CPU_MIPSR2)
#define __enable_fpu_hazard \
___ehb
#define __disable_fpu_hazard \
___ehb
#else
#define __enable_fpu_hazard \
nop; \
nop; \
nop; \
nop
#define __disable_fpu_hazard \
___ehb
#endif
#ifdef __ASSEMBLY__
#define _ssnop ___ssnop
#define _ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard
#else
#define _ssnop() \
do { \
__asm__ __volatile__( \
__stringify(___ssnop) \
); \
} while (0)
#define _ehb() \
do { \
__asm__ __volatile__( \
__stringify(___ehb) \
); \
} while (0)
#define mtc0_tlbw_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__mtc0_tlbw_hazard) \
); \
} while (0)
#define tlbw_use_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__tlbw_use_hazard) \
); \
} while (0)
#define tlb_probe_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__tlb_probe_hazard) \
); \
} while (0)
#define irq_enable_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__irq_enable_hazard) \
); \
} while (0)
#define irq_disable_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__irq_disable_hazard) \
); \
} while (0)
#define back_to_back_c0_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__back_to_back_c0_hazard) \
); \
} while (0)
#define enable_fpu_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__enable_fpu_hazard) \
); \
} while (0)
#define disable_fpu_hazard() \
do { \
__asm__ __volatile__( \
__stringify(__disable_fpu_hazard) \
); \
} while (0)
/*
* MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
*/
extern void mips_ihb(void);
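/*
 * Illustrative sketch (not part of the original header): the barriers
 * above bracket coprocessor 0 updates, for example a TLB write (the
 * register accessors and tlb_write_indexed() are assumed to come from
 * <asm/mipsregs.h>):
 *
 *	write_c0_entryhi(entryhi);
 *	write_c0_entrylo0(entrylo0);
 *	write_c0_entrylo1(entrylo1);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */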
#endif /* __ASSEMBLY__ */
#endif /* _ASM_HAZARDS_H */

View file

@@ -0,0 +1,62 @@
/*
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
#ifdef __KERNEL__
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/kmap_types.h>
/* undef for production */
#define HIGHMEM_DEBUG 1
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *pkmap_page_table;
/*
 * Right now we initialize only a single pte table.  It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
*/
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
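/*
 * Illustrative sketch (not part of the original header): a short-lived
 * highmem mapping used to clear a page.  kunmap_atomic() is the generic
 * wrapper from <linux/highmem.h> built on __kunmap_atomic() above.
 *
 *	void *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 */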
#define flush_cache_kmaps() flush_cache_all()
extern void kmap_init(void);
#define kmap_prot PAGE_KERNEL
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */

View file

@@ -0,0 +1,130 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
*/
#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len)
{
return 0;
}
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr,
unsigned long len)
{
unsigned long task_size = STACK_TOP;
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~huge_page_mask(h))
return -EINVAL;
if (len > task_size)
return -ENOMEM;
if (task_size - len < addr)
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr,
unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t clear;
pte_t pte = *ptep;
pte_val(clear) = (unsigned long)invalid_pte_table;
set_pte_at(mm, addr, ptep, clear);
return pte;
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
}
static inline int huge_pte_none(pte_t pte)
{
unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
return !val || (val == (unsigned long)invalid_pte_table);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t pte,
int dirty)
{
int changed = !pte_same(*ptep, pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
/*
* There could be some standard sized pages in there,
* get them all.
*/
flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
}
return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* __ASM_HUGETLB_H */

View file

@@ -0,0 +1,20 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000, 2001, 2002 by Ralf Baechle
*/
#ifndef __ASM_HW_IRQ_H
#define __ASM_HW_IRQ_H
#include <linux/atomic.h>
extern atomic_t irq_err_count;
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
* machines, we'll see ...
*/
#endif /* __ASM_HW_IRQ_H */

View file

@@ -0,0 +1,86 @@
/*
* include/asm-mips/i8259.h
*
* i8259A interrupt definitions.
*
* Copyright (C) 2003 Maciej W. Rozycki
* Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_I8259_H
#define _ASM_I8259_H
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <irq.h>
/* i8259A PIC registers */
#define PIC_MASTER_CMD 0x20
#define PIC_MASTER_IMR 0x21
#define PIC_MASTER_ISR PIC_MASTER_CMD
#define PIC_MASTER_POLL PIC_MASTER_ISR
#define PIC_MASTER_OCW3 PIC_MASTER_ISR
#define PIC_SLAVE_CMD 0xa0
#define PIC_SLAVE_IMR 0xa1
/* i8259A PIC related value */
#define PIC_CASCADE_IR 2
#define MASTER_ICW4_DEFAULT 0x01
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
extern raw_spinlock_t i8259A_lock;
extern int i8259A_irq_pending(unsigned int irq);
extern void make_8259A_irq(unsigned int irq);
extern void init_i8259_irqs(void);
/*
* Do the traditional i8259 interrupt polling thing. This is for the few
* cases where no better interrupt acknowledge method is available and we
* absolutely must touch the i8259.
*/
static inline int i8259_irq(void)
{
int irq;
raw_spin_lock(&i8259A_lock);
/* Perform an interrupt acknowledge cycle on controller 1. */
outb(0x0C, PIC_MASTER_CMD); /* prepare for poll */
irq = inb(PIC_MASTER_CMD) & 7;
if (irq == PIC_CASCADE_IR) {
/*
* Interrupt is cascaded so perform interrupt
* acknowledge on controller 2.
*/
outb(0x0C, PIC_SLAVE_CMD); /* prepare for poll */
irq = (inb(PIC_SLAVE_CMD) & 7) + 8;
}
if (unlikely(irq == 7)) {
/*
* This may be a spurious interrupt.
*
* Read the interrupt status register (ISR). If the most
* significant bit is not set then there is no valid
* interrupt.
*/
outb(0x0B, PIC_MASTER_ISR); /* ISR register */
if (~inb(PIC_MASTER_ISR) & 0x80)
irq = -1;
}
raw_spin_unlock(&i8259A_lock);
return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
}
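/*
 * Illustrative sketch (not part of the original header): a platform
 * interrupt dispatcher could poll the PIC and hand the result to the
 * generic IRQ layer; do_IRQ() and spurious_interrupt() are assumed to
 * come from <asm/irq.h>.
 *
 *	int irq = i8259_irq();
 *
 *	if (irq >= 0)
 *		do_IRQ(irq);
 *	else
 *		spurious_interrupt();
 */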
#endif /* _ASM_I8259_H */

View file

@@ -0,0 +1,13 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* This file contains the MIPS architecture specific IDE code.
*/
#ifndef __ASM_IDE_H
#define __ASM_IDE_H
#include <ide.h>
#endif /* __ASM_IDE_H */

View file

@@ -0,0 +1,30 @@
#ifndef __ASM_IDLE_H
#define __ASM_IDLE_H
#include <linux/cpuidle.h>
#include <linux/linkage.h>
extern void (*cpu_wait)(void);
extern void r4k_wait(void);
extern asmlinkage void __r4k_wait(void);
extern void r4k_wait_irqoff(void);
static inline int using_rollback_handler(void)
{
return cpu_wait == r4k_wait;
}
extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
#define MIPS_CPUIDLE_WAIT_STATE {\
.enter = mips_cpuidle_wait_enter,\
.exit_latency = 1,\
.target_residency = 1,\
.power_usage = UINT_MAX,\
.flags = CPUIDLE_FLAG_TIME_VALID,\
.name = "wait",\
.desc = "MIPS wait",\
}
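/*
 * Illustrative sketch (not part of the original header): a platform
 * cpuidle driver could use the initializer above for its first state.
 * The driver name is made up.
 *
 *	static struct cpuidle_driver example_cpuidle_driver = {
 *		.name		= "example_idle",
 *		.owner		= THIS_MODULE,
 *		.states		= {
 *			[0] = MIPS_CPUIDLE_WAIT_STATE,
 *		},
 *		.state_count	= 1,
 *	};
 */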
#endif /* __ASM_IDLE_H */

View file

@@ -0,0 +1,88 @@
/*
* Format of an instruction in memory.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 2000 by Ralf Baechle
* Copyright (C) 2006 by Thiemo Seufer
*/
#ifndef _ASM_INST_H
#define _ASM_INST_H
#include <uapi/asm/inst.h>
/* HACHACHAHCAHC ... */
/* In case some other massaging is needed, keep MIPSInst as wrapper */
#define MIPSInst(x) x
#define I_OPCODE_SFT 26
#define MIPSInst_OPCODE(x) (MIPSInst(x) >> I_OPCODE_SFT)
#define I_JTARGET_SFT 0
#define MIPSInst_JTARGET(x) (MIPSInst(x) & 0x03ffffff)
#define I_RS_SFT 21
#define MIPSInst_RS(x) ((MIPSInst(x) & 0x03e00000) >> I_RS_SFT)
#define I_RT_SFT 16
#define MIPSInst_RT(x) ((MIPSInst(x) & 0x001f0000) >> I_RT_SFT)
#define I_IMM_SFT 0
#define MIPSInst_SIMM(x) ((int)((short)(MIPSInst(x) & 0xffff)))
#define MIPSInst_UIMM(x) (MIPSInst(x) & 0xffff)
#define I_CACHEOP_SFT 18
#define MIPSInst_CACHEOP(x) ((MIPSInst(x) & 0x001c0000) >> I_CACHEOP_SFT)
#define I_CACHESEL_SFT 16
#define MIPSInst_CACHESEL(x) ((MIPSInst(x) & 0x00030000) >> I_CACHESEL_SFT)
#define I_RD_SFT 11
#define MIPSInst_RD(x) ((MIPSInst(x) & 0x0000f800) >> I_RD_SFT)
#define I_RE_SFT 6
#define MIPSInst_RE(x) ((MIPSInst(x) & 0x000007c0) >> I_RE_SFT)
#define I_FUNC_SFT 0
#define MIPSInst_FUNC(x) (MIPSInst(x) & 0x0000003f)
#define I_FFMT_SFT 21
#define MIPSInst_FFMT(x) ((MIPSInst(x) & 0x01e00000) >> I_FFMT_SFT)
#define I_FT_SFT 16
#define MIPSInst_FT(x) ((MIPSInst(x) & 0x001f0000) >> I_FT_SFT)
#define I_FS_SFT 11
#define MIPSInst_FS(x) ((MIPSInst(x) & 0x0000f800) >> I_FS_SFT)
#define I_FD_SFT 6
#define MIPSInst_FD(x) ((MIPSInst(x) & 0x000007c0) >> I_FD_SFT)
#define I_FR_SFT 21
#define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT)
#define I_FMA_FUNC_SFT 2
#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT)
#define I_FMA_FFMT_SFT 0
#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003)
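/*
 * Illustrative sketch (not part of the original header): pulling the
 * fields out of an I-type instruction word, here addiu $3, $2, -4
 * (0x2443fffc):
 *
 *	MIPSInst_OPCODE(0x2443fffc)	is 0x09 (addiu)
 *	MIPSInst_RS(0x2443fffc)		is 2
 *	MIPSInst_RT(0x2443fffc)		is 3
 *	MIPSInst_SIMM(0x2443fffc)	is -4
 */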
typedef unsigned int mips_instruction;
/* microMIPS instruction decode structure. Do NOT export!!! */
struct mm_decoded_insn {
mips_instruction insn;
mips_instruction next_insn;
int pc_inc;
int next_pc_inc;
int micro_mips_mode;
};
/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
extern const int reg16to32[];
#endif /* _ASM_INST_H */

arch/mips/include/asm/io.h Normal file
View file

@@ -0,0 +1,633 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf GmbH
* Copyright (C) 1994 - 2000, 06 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>
#include <ioremap.h>
#include <mangle-port.h>
/*
 * Slow down I/O port space accesses for antique hardware.
*/
#undef CONF_SLOWDOWN_IO
/*
* Raw operations are never swapped in software. OTOH values that raw
* operations are working on may or may not have been swapped by the bus
* hardware. An example use would be for flash memory that's used for
* execute in place.
*/
# define __raw_ioswabb(a, x) (x)
# define __raw_ioswabw(a, x) (x)
# define __raw_ioswabl(a, x) (x)
# define __raw_ioswabq(a, x) (x)
# define ____raw_ioswabq(a, x) (x)
/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
#define IO_SPACE_LIMIT 0xffff
/*
* On MIPS I/O ports are memory mapped, so we access them using normal
* load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  This should be true on
 * any sane architecture; generic code does not use this assumption.
*/
extern const unsigned long mips_io_port_base;
/*
* Gcc will generate code to load the value of mips_io_port_base after each
* function call which may be fairly wasteful in some cases. So we don't
* play quite by the book. We tell gcc mips_io_port_base is a long variable
* which solves the code generation issue. Now we need to violate the
* aliasing rules a little to make initialization possible and finally we
* will need the barrier() to fight side effects of the aliasing chat.
* This trickery will eventually collapse under gcc's optimizer. Oh well.
*/
static inline void set_io_port_base(unsigned long base)
{
* (unsigned long *) &mips_io_port_base = base;
barrier();
}
/*
* Thanks to James van Artsdalen for a better timing-fix than
* the two short jumps: using outb's to a nonexistent port seems
* to guarantee better timings even on fast machines.
*
* On the other hand, I'd like to be sure of a non-existent port:
* I feel a bit unsafe about using 0x80 (should be safe, though)
*
* Linus
*
*/
#define __SLOW_DOWN_IO \
__asm__ __volatile__( \
"sb\t$0,0x80(%0)" \
: : "r" (mips_io_port_base));
#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif
/*
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline unsigned long virt_to_phys(volatile const void *address)
{
return __pa(address);
}
/*
* phys_to_virt - map physical address to virtual
* @address: address to remap
*
* The returned virtual address is a current CPU mapping for
* the memory address given. It is only valid to use this function on
* addresses that have a kernel mapping
*
* This function does not handle bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline void * phys_to_virt(unsigned long address)
{
return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
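/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above are only meaningful for directly mapped kernel memory, e.g. a
 * kmalloc()ed buffer:
 *
 *	void *buf = kmalloc(size, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(pa) != buf);
 */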
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
return (unsigned long)address - PAGE_OFFSET;
}
static inline void * isa_bus_to_virt(unsigned long address)
{
return (void *)(address + PAGE_OFFSET);
}
#define isa_page_to_bus page_to_phys
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them for x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
#ifndef CONFIG_PCI
struct pci_dev;
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
#endif
static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
unsigned long flags)
{
void __iomem *addr = plat_ioremap(offset, size, flags);
if (addr)
return addr;
#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
if (cpu_has_64bit_addresses) {
u64 base = UNCAC_BASE;
/*
* R10000 supports a 2 bit uncached attribute therefore
* UNCAC_BASE may not equal IO_BASE.
*/
if (flags == _CACHE_UNCACHED)
base = (u64) IO_BASE;
return (void __iomem *) (unsigned long) (base + offset);
} else if (__builtin_constant_p(offset) &&
__builtin_constant_p(size) && __builtin_constant_p(flags)) {
phys_t phys_addr, last_addr;
phys_addr = fixup_bigphys_addr(offset, size);
/* Don't allow wraparound or zero size. */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
* Map uncached objects in the low 512MB of address
* space using KSEG1.
*/
if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
flags == _CACHE_UNCACHED)
return (void __iomem *)
(unsigned long)CKSEG1ADDR(phys_addr);
}
return __ioremap(offset, size, flags);
#undef __IS_LOW512
}
/*
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*/
#define ioremap(offset, size) \
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
/*
* ioremap_nocache - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* This version of ioremap ensures that the memory is marked uncachable
* on the CPU as well as honouring existing caching rules from things like
* the PCI bus. Note that there are other caches and buffers on many
 * busses.  In particular, driver authors should read up on PCI writes.
*
* It's useful if some control registers are in such an area and
* write combining or read caching is not desirable:
*/
#define ioremap_nocache(offset, size) \
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
/*
* ioremap_cachable - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
 * ioremap_cachable performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* This version of ioremap ensures that the memory is marked cachable by
* the CPU. Also enables full write-combining. Useful for some
* memory-like regions on I/O busses.
*/
#define ioremap_cachable(offset, size) \
__ioremap_mode((offset), (size), _page_cachable_default)
/*
 * These two are MIPS-specific ioremap variants.	ioremap_cacheable_cow
* requests a cachable mapping, ioremap_uncached_accelerated requests a
* mapping using the uncached accelerated mode which isn't supported on
* all processors.
*/
#define ioremap_cacheable_cow(offset, size) \
__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size) \
__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
static inline void iounmap(const volatile void __iomem *addr)
{
if (plat_iounmap(addr))
return;
#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
if (cpu_has_64bit_addresses ||
(__builtin_constant_p(addr) && __IS_KSEG1(addr)))
return;
__iounmap(addr);
#undef __IS_KSEG1
}
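/*
 * Illustrative sketch (not part of the original header): mapping a device
 * register block and accessing it with the mmio helpers built further
 * down in this file.  The base address, size and register offset are
 * made up.
 *
 *	void __iomem *regs = ioremap(0x1f001000, 0x100);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x08);
 *		writel(status | 0x1, regs + 0x08);
 *		iounmap(regs);
 *	}
 */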
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb() wmb()
#else
#define war_octeon_io_reorder_wmb() do { } while (0)
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
\
static inline void pfx##write##bwlq(type val, \
volatile void __iomem *mem) \
{ \
volatile type *__mem; \
type __val; \
\
war_octeon_io_reorder_wmb(); \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
__val = pfx##ioswab##bwlq(__mem, val); \
\
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
*__mem = __val; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
type __tmp; \
\
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set arch=r4000" "\t\t# __writeq""\n\t" \
"dsll32 %L0, %L0, 0" "\n\t" \
"dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \
"sd %L0, %2" "\n\t" \
".set mips0" "\n" \
: "=r" (__tmp) \
: "0" (__val), "m" (*__mem)); \
if (irq) \
local_irq_restore(__flags); \
} else \
BUG(); \
} \
\
static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
{ \
volatile type *__mem; \
type __val; \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
__val = *__mem; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
\
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set arch=r4000" "\t\t# __readq" "\n\t" \
"ld %L0, %1" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
".set mips0" "\n" \
: "=r" (__val) \
: "m" (*__mem)); \
if (irq) \
local_irq_restore(__flags); \
} else { \
__val = 0; \
BUG(); \
} \
\
return pfx##ioswab##bwlq(__mem, __val); \
}
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
\
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
war_octeon_io_reorder_wmb(); \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
__val = pfx##ioswab##bwlq(__addr, val); \
\
/* Really, we want this to be atomic */ \
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
*__addr = __val; \
slow; \
} \
\
static inline type pfx##in##bwlq##p(unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
__val = *__addr; \
slow; \
\
return pfx##ioswab##bwlq(__addr, __val); \
}
#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)
#define BUILDIO_MEM(bwlq, type) \
\
__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
__BUILD_MEMORY_PFX(, bwlq, type) \
__BUILD_MEMORY_PFX(__mem_, bwlq, type) \
BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
#define BUILDIO_IOPORT(bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type) \
__BUILD_IOPORT_PFX(__mem_, bwlq, type)
BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
#define __BUILDIO(bwlq, type) \
\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)
__BUILDIO(q, u64)
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq
#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq
#define readb_be(addr) \
__raw_readb((__force unsigned *)(addr))
#define readw_be(addr) \
be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr) \
be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr) \
be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
#define writeb_be(val, addr) \
__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr) \
__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr) \
__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr) \
__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
/*
* Some code tests for these symbols
*/
#define readq readq
#define writeq writeq
#define __BUILD_MEMORY_STRING(bwlq, type) \
\
static inline void writes##bwlq(volatile void __iomem *mem, \
const void *addr, unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
__mem_write##bwlq(*__addr, mem); \
__addr++; \
} \
} \
\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = __mem_read##bwlq(mem); \
__addr++; \
} \
}
#define __BUILD_IOPORT_STRING(bwlq, type) \
\
static inline void outs##bwlq(unsigned long port, const void *addr, \
unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
__mem_out##bwlq(*__addr, port); \
__addr++; \
} \
} \
\
static inline void ins##bwlq(unsigned long port, void *addr, \
unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = __mem_in##bwlq(port); \
__addr++; \
} \
}
#define BUILDSTRING(bwlq, type) \
\
__BUILD_MEMORY_STRING(bwlq, type) \
__BUILD_IOPORT_STRING(bwlq, type)
BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
memcpy((void __force *) dst, src, count);
}
/*
 * The caches on some architectures aren't dma-coherent and this has to be
 * handled in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
* - dma_cache_inv(start, size) invalidates the affected parts of the
* caches. Dirty lines of the caches may be written back or simply
* be discarded. This operation is necessary before dma operations
* to the memory.
*
* This API used to be exported; it now is for arch code internal use only.
*/
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
#else /* Sane hardware */
#define dma_cache_wback_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
/*
* Read a 32-bit register that requires a 64-bit read cycle on the bus.
* Avoid interrupt mucking, just adjust the address for 4-byte access.
* Assume the addresses are 8-byte aligned.
*/
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif
#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* _ASM_IO_H */

View file

@@ -0,0 +1,158 @@
/*
* Definitions for the SGI CRIME (CPU, Rendering, Interconnect and Memory
* Engine)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Harald Koerfgen
*/
#ifndef __ASM_CRIME_H__
#define __ASM_CRIME_H__
/*
* Address map
*/
#define CRIME_BASE 0x14000000 /* physical */
struct sgi_crime {
volatile unsigned long id;
#define CRIME_ID_MASK 0xff
#define CRIME_ID_IDBITS 0xf0
#define CRIME_ID_IDVALUE 0xa0
#define CRIME_ID_REV 0x0f
#define CRIME_REV_PETTY 0x00
#define CRIME_REV_11 0x11
#define CRIME_REV_13 0x13
#define CRIME_REV_14 0x14
volatile unsigned long control;
#define CRIME_CONTROL_MASK 0x3fff
#define CRIME_CONTROL_TRITON_SYSADC 0x2000
#define CRIME_CONTROL_CRIME_SYSADC 0x1000
#define CRIME_CONTROL_HARD_RESET 0x0800
#define CRIME_CONTROL_SOFT_RESET 0x0400
#define CRIME_CONTROL_DOG_ENA 0x0200
#define CRIME_CONTROL_ENDIANESS 0x0100
#define CRIME_CONTROL_ENDIAN_BIG 0x0100
#define CRIME_CONTROL_ENDIAN_LITTLE 0x0000
#define CRIME_CONTROL_CQUEUE_HWM 0x000f
#define CRIME_CONTROL_CQUEUE_SHFT 0
#define CRIME_CONTROL_WBUF_HWM 0x00f0
#define CRIME_CONTROL_WBUF_SHFT 8
volatile unsigned long istat;
volatile unsigned long imask;
volatile unsigned long soft_int;
volatile unsigned long hard_int;
#define MACE_VID_IN1_INT BIT(0)
#define MACE_VID_IN2_INT BIT(1)
#define MACE_VID_OUT_INT BIT(2)
#define MACE_ETHERNET_INT BIT(3)
#define MACE_SUPERIO_INT BIT(4)
#define MACE_MISC_INT BIT(5)
#define MACE_AUDIO_INT BIT(6)
#define MACE_PCI_BRIDGE_INT BIT(7)
#define MACEPCI_SCSI0_INT BIT(8)
#define MACEPCI_SCSI1_INT BIT(9)
#define MACEPCI_SLOT0_INT BIT(10)
#define MACEPCI_SLOT1_INT BIT(11)
#define MACEPCI_SLOT2_INT BIT(12)
#define MACEPCI_SHARED0_INT BIT(13)
#define MACEPCI_SHARED1_INT BIT(14)
#define MACEPCI_SHARED2_INT BIT(15)
#define CRIME_GBE0_INT BIT(16)
#define CRIME_GBE1_INT BIT(17)
#define CRIME_GBE2_INT BIT(18)
#define CRIME_GBE3_INT BIT(19)
#define CRIME_CPUERR_INT BIT(20)
#define CRIME_MEMERR_INT BIT(21)
#define CRIME_RE_EMPTY_E_INT BIT(22)
#define CRIME_RE_FULL_E_INT BIT(23)
#define CRIME_RE_IDLE_E_INT BIT(24)
#define CRIME_RE_EMPTY_L_INT BIT(25)
#define CRIME_RE_FULL_L_INT BIT(26)
#define CRIME_RE_IDLE_L_INT BIT(27)
#define CRIME_SOFT0_INT BIT(28)
#define CRIME_SOFT1_INT BIT(29)
#define CRIME_SOFT2_INT BIT(30)
#define CRIME_SYSCORERR_INT CRIME_SOFT2_INT
#define CRIME_VICE_INT BIT(31)
/* Masks for deciding who handles the interrupt */
#define CRIME_MACE_INT_MASK 0x8f
#define CRIME_MACEISA_INT_MASK 0x70
#define CRIME_MACEPCI_INT_MASK 0xff00
#define CRIME_CRIME_INT_MASK 0xffff0000
volatile unsigned long watchdog;
#define CRIME_DOG_POWER_ON_RESET 0x00010000
#define CRIME_DOG_WARM_RESET 0x00080000
#define CRIME_DOG_TIMEOUT (CRIME_DOG_POWER_ON_RESET|CRIME_DOG_WARM_RESET)
#define CRIME_DOG_VALUE 0x00007fff
volatile unsigned long timer;
#define CRIME_MASTER_FREQ 66666500 /* Crime upcounter frequency */
#define CRIME_NS_PER_TICK 15 /* for delay_calibrate */
volatile unsigned long cpu_error_addr;
#define CRIME_CPU_ERROR_ADDR_MASK 0x3ffffffff
volatile unsigned long cpu_error_stat;
#define CRIME_CPU_ERROR_MASK 0x7 /* cpu error stat is 3 bits */
#define CRIME_CPU_ERROR_CPU_ILL_ADDR 0x4
#define CRIME_CPU_ERROR_VICE_WRT_PRTY 0x2
#define CRIME_CPU_ERROR_CPU_WRT_PRTY 0x1
unsigned long _pad0[54];
volatile unsigned long mc_ctrl;
volatile unsigned long bank_ctrl[8];
#define CRIME_MEM_BANK_CONTROL_MASK 0x11f /* 9 bits 7:5 reserved */
#define CRIME_MEM_BANK_CONTROL_ADDR 0x01f
#define CRIME_MEM_BANK_CONTROL_SDRAM_SIZE 0x100
#define CRIME_MAXBANKS 8
volatile unsigned long mem_ref_counter;
#define CRIME_MEM_REF_COUNTER_MASK 0x3ff /* 10bit */
volatile unsigned long mem_error_stat;
#define CRIME_MEM_ERROR_STAT_MASK 0x0ff7ffff /* 28-bit register */
#define CRIME_MEM_ERROR_MACE_ID 0x0000007f
#define CRIME_MEM_ERROR_MACE_ACCESS 0x00000080
#define CRIME_MEM_ERROR_RE_ID 0x00007f00
#define CRIME_MEM_ERROR_RE_ACCESS 0x00008000
#define CRIME_MEM_ERROR_GBE_ACCESS 0x00010000
#define CRIME_MEM_ERROR_VICE_ACCESS 0x00020000
#define CRIME_MEM_ERROR_CPU_ACCESS 0x00040000
#define CRIME_MEM_ERROR_RESERVED 0x00080000
#define CRIME_MEM_ERROR_SOFT_ERR 0x00100000
#define CRIME_MEM_ERROR_HARD_ERR 0x00200000
#define CRIME_MEM_ERROR_MULTIPLE 0x00400000
#define CRIME_MEM_ERROR_ECC 0x01800000
#define CRIME_MEM_ERROR_MEM_ECC_RD 0x00800000
#define CRIME_MEM_ERROR_MEM_ECC_RMW 0x01000000
#define CRIME_MEM_ERROR_INV 0x0e000000
#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
#define CRIME_MEM_ERROR_INV_MEM_ADDR_RMW 0x08000000
volatile unsigned long mem_error_addr;
#define CRIME_MEM_ERROR_ADDR_MASK 0x3fffffff
volatile unsigned long mem_ecc_syn;
#define CRIME_MEM_ERROR_ECC_SYN_MASK 0xffffffff
volatile unsigned long mem_ecc_chk;
#define CRIME_MEM_ERROR_ECC_CHK_MASK 0xffffffff
volatile unsigned long mem_ecc_repl;
#define CRIME_MEM_ERROR_ECC_REPL_MASK 0xffffffff
};
extern struct sgi_crime __iomem *crime;
#define CRIME_HI_MEM_BASE 0x40000000 /* this is where whole 1G of RAM is mapped */
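/*
 * Illustrative sketch (not part of the original header): pending CRIME
 * interrupts are the status bits that are also enabled in the mask
 * register, and the masks above decide which block handles them.  The
 * dispatch function name below is made up.
 *
 *	unsigned long pending = crime->istat & crime->imask;
 *
 *	if (pending & CRIME_MACE_INT_MASK)
 *		dispatch_mace_irq(pending);
 */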
#endif /* __ASM_CRIME_H__ */

View file

@@ -0,0 +1,114 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Harald Koerfgen
*/
#ifndef __ASM_IP32_INTS_H
#define __ASM_IP32_INTS_H
#include <asm/irq.h>
/*
* This list reflects the assignment of interrupt numbers to
* interrupting events. Order is fairly irrelevant to handling
* priority. This differs from irix.
*/
enum ip32_irq_no {
/*
* CPU interrupts are 0 ... 7
*/
CRIME_IRQ_BASE = MIPS_CPU_IRQ_BASE + 8,
/*
* MACE
*/
MACE_VID_IN1_IRQ = CRIME_IRQ_BASE,
MACE_VID_IN2_IRQ,
MACE_VID_OUT_IRQ,
MACE_ETHERNET_IRQ,
/* SUPERIO, MISC, and AUDIO are MACEISA */
__MACE_SUPERIO,
__MACE_MISC,
__MACE_AUDIO,
MACE_PCI_BRIDGE_IRQ,
/*
* MACEPCI
*/
MACEPCI_SCSI0_IRQ,
MACEPCI_SCSI1_IRQ,
MACEPCI_SLOT0_IRQ,
MACEPCI_SLOT1_IRQ,
MACEPCI_SLOT2_IRQ,
MACEPCI_SHARED0_IRQ,
MACEPCI_SHARED1_IRQ,
MACEPCI_SHARED2_IRQ,
/*
* CRIME
*/
CRIME_GBE0_IRQ,
CRIME_GBE1_IRQ,
CRIME_GBE2_IRQ,
CRIME_GBE3_IRQ,
CRIME_CPUERR_IRQ,
CRIME_MEMERR_IRQ,
CRIME_RE_EMPTY_E_IRQ,
CRIME_RE_FULL_E_IRQ,
CRIME_RE_IDLE_E_IRQ,
CRIME_RE_EMPTY_L_IRQ,
CRIME_RE_FULL_L_IRQ,
CRIME_RE_IDLE_L_IRQ,
CRIME_SOFT0_IRQ,
CRIME_SOFT1_IRQ,
CRIME_SOFT2_IRQ,
CRIME_SYSCORERR_IRQ = CRIME_SOFT2_IRQ,
CRIME_VICE_IRQ,
/*
* MACEISA
*/
MACEISA_AUDIO_SW_IRQ,
MACEISA_AUDIO_SC_IRQ,
MACEISA_AUDIO1_DMAT_IRQ,
MACEISA_AUDIO1_OF_IRQ,
MACEISA_AUDIO2_DMAT_IRQ,
MACEISA_AUDIO2_MERR_IRQ,
MACEISA_AUDIO3_DMAT_IRQ,
MACEISA_AUDIO3_MERR_IRQ,
MACEISA_RTC_IRQ,
MACEISA_KEYB_IRQ,
/* MACEISA_KEYB_POLL is not an IRQ */
__MACEISA_KEYB_POLL,
MACEISA_MOUSE_IRQ,
/* MACEISA_MOUSE_POLL is not an IRQ */
__MACEISA_MOUSE_POLL,
MACEISA_TIMER0_IRQ,
MACEISA_TIMER1_IRQ,
MACEISA_TIMER2_IRQ,
MACEISA_PARALLEL_IRQ,
MACEISA_PAR_CTXA_IRQ,
MACEISA_PAR_CTXB_IRQ,
MACEISA_PAR_MERR_IRQ,
MACEISA_SERIAL1_IRQ,
MACEISA_SERIAL1_TDMAT_IRQ,
MACEISA_SERIAL1_TDMAPR_IRQ,
MACEISA_SERIAL1_TDMAME_IRQ,
MACEISA_SERIAL1_RDMAT_IRQ,
MACEISA_SERIAL1_RDMAOR_IRQ,
MACEISA_SERIAL2_IRQ,
MACEISA_SERIAL2_TDMAT_IRQ,
MACEISA_SERIAL2_TDMAPR_IRQ,
MACEISA_SERIAL2_TDMAME_IRQ,
MACEISA_SERIAL2_RDMAT_IRQ,
MACEISA_SERIAL2_RDMAOR_IRQ,
IP32_IRQ_MAX = MACEISA_SERIAL2_RDMAOR_IRQ
};
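
/*
 * Illustrative sketch, not part of the original header: the MACEISA entries
 * above are laid out so that an interrupt's offset from MACEISA_AUDIO_SW_IRQ
 * matches its bit position in the MACE ISA interrupt status/mask registers
 * (the poll placeholders keep the numbering aligned).  maceisa_irq_to_bit()
 * is a hypothetical helper name assuming that layout.
 */
static inline unsigned int maceisa_irq_to_bit(unsigned int irq)
{
	return irq - MACEISA_AUDIO_SW_IRQ;
}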
#endif /* __ASM_IP32_INTS_H */

View file

@@ -0,0 +1,365 @@
/*
* Definitions for the SGI MACE (Multimedia, Audio and Communications Engine)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Harald Koerfgen
* Copyright (C) 2004 Ladislav Michl
*/
#ifndef __ASM_MACE_H__
#define __ASM_MACE_H__
/*
* Address map
*/
#define MACE_BASE 0x1f000000 /* physical */
/*
* PCI interface
*/
struct mace_pci {
volatile unsigned int error_addr;
volatile unsigned int error;
#define MACEPCI_ERROR_MASTER_ABORT BIT(31)
#define MACEPCI_ERROR_TARGET_ABORT BIT(30)
#define MACEPCI_ERROR_DATA_PARITY_ERR BIT(29)
#define MACEPCI_ERROR_RETRY_ERR BIT(28)
#define MACEPCI_ERROR_ILLEGAL_CMD BIT(27)
#define MACEPCI_ERROR_SYSTEM_ERR BIT(26)
#define MACEPCI_ERROR_INTERRUPT_TEST BIT(25)
#define MACEPCI_ERROR_PARITY_ERR BIT(24)
#define MACEPCI_ERROR_OVERRUN BIT(23)
#define MACEPCI_ERROR_RSVD BIT(22)
#define MACEPCI_ERROR_MEMORY_ADDR BIT(21)
#define MACEPCI_ERROR_CONFIG_ADDR BIT(20)
#define MACEPCI_ERROR_MASTER_ABORT_ADDR_VALID BIT(19)
#define MACEPCI_ERROR_TARGET_ABORT_ADDR_VALID BIT(18)
#define MACEPCI_ERROR_DATA_PARITY_ADDR_VALID BIT(17)
#define MACEPCI_ERROR_RETRY_ADDR_VALID BIT(16)
#define MACEPCI_ERROR_SIG_TABORT BIT(4)
#define MACEPCI_ERROR_DEVSEL_MASK 0xc0
#define MACEPCI_ERROR_DEVSEL_FAST 0
#define MACEPCI_ERROR_DEVSEL_MED 0x40
#define MACEPCI_ERROR_DEVSEL_SLOW 0x80
#define MACEPCI_ERROR_FBB BIT(1)
#define MACEPCI_ERROR_66MHZ BIT(0)
volatile unsigned int control;
#define MACEPCI_CONTROL_INT(x) BIT(x)
#define MACEPCI_CONTROL_INT_MASK 0xff
#define MACEPCI_CONTROL_SERR_ENA BIT(8)
#define MACEPCI_CONTROL_ARB_N6 BIT(9)
#define MACEPCI_CONTROL_PARITY_ERR BIT(10)
#define MACEPCI_CONTROL_MRMRA_ENA BIT(11)
#define MACEPCI_CONTROL_ARB_N3 BIT(12)
#define MACEPCI_CONTROL_ARB_N4 BIT(13)
#define MACEPCI_CONTROL_ARB_N5 BIT(14)
#define MACEPCI_CONTROL_PARK_LIU BIT(15)
#define MACEPCI_CONTROL_INV_INT(x) BIT(16+x)
#define MACEPCI_CONTROL_INV_INT_MASK 0x00ff0000
#define MACEPCI_CONTROL_OVERRUN_INT BIT(24)
#define MACEPCI_CONTROL_PARITY_INT BIT(25)
#define MACEPCI_CONTROL_SERR_INT BIT(26)
#define MACEPCI_CONTROL_IT_INT BIT(27)
#define MACEPCI_CONTROL_RE_INT BIT(28)
#define MACEPCI_CONTROL_DPED_INT BIT(29)
#define MACEPCI_CONTROL_TAR_INT BIT(30)
#define MACEPCI_CONTROL_MAR_INT BIT(31)
volatile unsigned int rev;
unsigned int _pad[0xcf8/4 - 4];
volatile unsigned int config_addr;
union {
volatile unsigned char b[4];
volatile unsigned short w[2];
volatile unsigned int l;
} config_data;
};
#define MACEPCI_LOW_MEMORY 0x1a000000
#define MACEPCI_LOW_IO 0x18000000
#define MACEPCI_SWAPPED_VIEW 0
#define MACEPCI_NATIVE_VIEW 0x40000000
#define MACEPCI_IO 0x80000000
#define MACEPCI_HI_MEMORY 0x280000000
#define MACEPCI_HI_IO 0x100000000
/*
* Video interface
*/
struct mace_video {
unsigned long xxx; /* later... */
};
/*
* Ethernet interface
*/
struct mace_ethernet {
volatile u64 mac_ctrl;
volatile unsigned long int_stat;
volatile unsigned long dma_ctrl;
volatile unsigned long timer;
volatile unsigned long tx_int_al;
volatile unsigned long rx_int_al;
volatile unsigned long tx_info;
volatile unsigned long tx_info_al;
volatile unsigned long rx_buff;
volatile unsigned long rx_buff_al1;
volatile unsigned long rx_buff_al2;
volatile unsigned long diag;
volatile unsigned long phy_data;
volatile unsigned long phy_regs;
volatile unsigned long phy_trans_go;
volatile unsigned long backoff_seed;
/*===================================*/
volatile unsigned long imq_reserved[4];
volatile unsigned long mac_addr;
volatile unsigned long mac_addr2;
volatile unsigned long mcast_filter;
volatile unsigned long tx_ring_base;
/* Following are read-only registers for debugging */
volatile unsigned long tx_pkt1_hdr;
volatile unsigned long tx_pkt1_ptr[3];
volatile unsigned long tx_pkt2_hdr;
volatile unsigned long tx_pkt2_ptr[3];
/*===================================*/
volatile unsigned long rx_fifo;
};
/*
* Peripherals
*/
/* Audio registers */
struct mace_audio {
volatile unsigned long control;
volatile unsigned long codec_control; /* codec status control */
volatile unsigned long codec_mask; /* codec status input mask */
volatile unsigned long codec_read; /* codec status read data */
struct {
volatile unsigned long control; /* channel control */
volatile unsigned long read_ptr; /* channel read pointer */
volatile unsigned long write_ptr; /* channel write pointer */
volatile unsigned long depth; /* channel depth */
} chan[3];
};
/* register definitions for parallel port DMA */
struct mace_parport {
/* 0 - do nothing,
 * 1 - pulse terminal count to the device after the buffer is drained */
#define MACEPAR_CONTEXT_LASTFLAG BIT(63)
/* Should not cross a 4K page boundary */
#define MACEPAR_CONTEXT_DATA_BOUND 0x0000000000001000UL
#define MACEPAR_CONTEXT_DATALEN_MASK 0x00000fff00000000UL
#define MACEPAR_CONTEXT_DATALEN_SHIFT 32
/* Can be aligned on any byte boundary for output,
 * but must be 64-byte aligned for input */
#define MACEPAR_CONTEXT_BASEADDR_MASK 0x00000000ffffffffUL
volatile u64 context_a;
volatile u64 context_b;
/* 0 - mem->device, 1 - device->mem */
#define MACEPAR_CTLSTAT_DIRECTION BIT(0)
/* 0 - channel frozen, 1 - channel enabled */
#define MACEPAR_CTLSTAT_ENABLE BIT(1)
/* 0 - channel active, 1 - complete channel reset */
#define MACEPAR_CTLSTAT_RESET BIT(2)
#define MACEPAR_CTLSTAT_CTXB_VALID BIT(3)
#define MACEPAR_CTLSTAT_CTXA_VALID BIT(4)
volatile u64 cntlstat; /* Control/Status register */
#define MACEPAR_DIAG_CTXINUSE BIT(0)
/* 1 - DMA engine is enabled and processing something */
#define MACEPAR_DIAG_DMACTIVE BIT(1)
/* Counter of bytes left */
#define MACEPAR_DIAG_CTRMASK 0x0000000000003ffcUL
#define MACEPAR_DIAG_CTRSHIFT 2
volatile u64 diagnostic; /* RO: diagnostic register */
};
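
/*
 * Illustrative sketch, not part of the original header: composing a value
 * for context_a/context_b from a 32-bit bus address, a data-length field
 * value and the last-buffer flag, per the field layout above.
 * mace_par_mkcontext() is a hypothetical helper name; how the hardware
 * interprets the length field is not specified here.
 */
static inline u64 mace_par_mkcontext(u32 baseaddr, u32 datalen, int last)
{
	u64 ctx = (u64)baseaddr & MACEPAR_CONTEXT_BASEADDR_MASK;

	ctx |= ((u64)datalen << MACEPAR_CONTEXT_DATALEN_SHIFT) &
	       MACEPAR_CONTEXT_DATALEN_MASK;
	if (last)
		ctx |= MACEPAR_CONTEXT_LASTFLAG;
	return ctx;
}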
/* ISA Control and DMA registers */
struct mace_isactrl {
volatile unsigned long ringbase;
#define MACEISA_RINGBUFFERS_SIZE (8 * 4096)
volatile unsigned long misc;
#define MACEISA_FLASH_WE BIT(0) /* 1=> Enable FLASH writes */
#define MACEISA_PWD_CLEAR BIT(1) /* 1=> PWD CLEAR jumper detected */
#define MACEISA_NIC_DEASSERT BIT(2)
#define MACEISA_NIC_DATA BIT(3)
#define MACEISA_LED_RED BIT(4) /* 0=> Illuminate red LED */
#define MACEISA_LED_GREEN BIT(5) /* 0=> Illuminate green LED */
#define MACEISA_DP_RAM_ENABLE BIT(6)
volatile unsigned long istat;
volatile unsigned long imask;
#define MACEISA_AUDIO_SW_INT BIT(0)
#define MACEISA_AUDIO_SC_INT BIT(1)
#define MACEISA_AUDIO1_DMAT_INT BIT(2)
#define MACEISA_AUDIO1_OF_INT BIT(3)
#define MACEISA_AUDIO2_DMAT_INT BIT(4)
#define MACEISA_AUDIO2_MERR_INT BIT(5)
#define MACEISA_AUDIO3_DMAT_INT BIT(6)
#define MACEISA_AUDIO3_MERR_INT BIT(7)
#define MACEISA_RTC_INT BIT(8)
#define MACEISA_KEYB_INT BIT(9)
#define MACEISA_KEYB_POLL_INT BIT(10)
#define MACEISA_MOUSE_INT BIT(11)
#define MACEISA_MOUSE_POLL_INT BIT(12)
#define MACEISA_TIMER0_INT BIT(13)
#define MACEISA_TIMER1_INT BIT(14)
#define MACEISA_TIMER2_INT BIT(15)
#define MACEISA_PARALLEL_INT BIT(16)
#define MACEISA_PAR_CTXA_INT BIT(17)
#define MACEISA_PAR_CTXB_INT BIT(18)
#define MACEISA_PAR_MERR_INT BIT(19)
#define MACEISA_SERIAL1_INT BIT(20)
#define MACEISA_SERIAL1_TDMAT_INT BIT(21)
#define MACEISA_SERIAL1_TDMAPR_INT BIT(22)
#define MACEISA_SERIAL1_TDMAME_INT BIT(23)
#define MACEISA_SERIAL1_RDMAT_INT BIT(24)
#define MACEISA_SERIAL1_RDMAOR_INT BIT(25)
#define MACEISA_SERIAL2_INT BIT(26)
#define MACEISA_SERIAL2_TDMAT_INT BIT(27)
#define MACEISA_SERIAL2_TDMAPR_INT BIT(28)
#define MACEISA_SERIAL2_TDMAME_INT BIT(29)
#define MACEISA_SERIAL2_RDMAT_INT BIT(30)
#define MACEISA_SERIAL2_RDMAOR_INT BIT(31)
volatile unsigned long _pad[0x2000/8 - 4];
volatile unsigned long dp_ram[0x400];
struct mace_parport parport;
};
/* Keyboard & Mouse registers
* -> drivers/input/serio/maceps2.c */
struct mace_ps2port {
volatile unsigned long tx;
volatile unsigned long rx;
volatile unsigned long control;
volatile unsigned long status;
};
struct mace_ps2 {
struct mace_ps2port keyb;
struct mace_ps2port mouse;
};
/* I2C registers
* -> drivers/i2c/algos/i2c-algo-sgi.c */
struct mace_i2c {
volatile unsigned long config;
#define MACEI2C_RESET BIT(0)
#define MACEI2C_FAST BIT(1)
#define MACEI2C_DATA_OVERRIDE BIT(2)
#define MACEI2C_CLOCK_OVERRIDE BIT(3)
#define MACEI2C_DATA_STATUS BIT(4)
#define MACEI2C_CLOCK_STATUS BIT(5)
volatile unsigned long control;
volatile unsigned long data;
};
/* Timer registers */
typedef union {
volatile unsigned long ust_msc;
struct reg {
volatile unsigned int ust;
volatile unsigned int msc;
} reg;
} timer_reg;
struct mace_timers {
volatile unsigned long ust;
#define MACE_UST_PERIOD_NS 960
volatile unsigned long compare1;
volatile unsigned long compare2;
volatile unsigned long compare3;
timer_reg audio_in;
timer_reg audio_out1;
timer_reg audio_out2;
timer_reg video_in1;
timer_reg video_in2;
timer_reg video_out;
};
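
/*
 * Illustrative sketch, not part of the original header: converting a number
 * of UST counter ticks to nanoseconds using the period above.
 * mace_ust_to_ns() is a hypothetical helper name.
 */
static inline u64 mace_ust_to_ns(u64 ust_ticks)
{
	return ust_ticks * MACE_UST_PERIOD_NS;
}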
struct mace_perif {
struct mace_audio audio;
char _pad0[0x10000 - sizeof(struct mace_audio)];
struct mace_isactrl ctrl;
char _pad1[0x10000 - sizeof(struct mace_isactrl)];
struct mace_ps2 ps2;
char _pad2[0x10000 - sizeof(struct mace_ps2)];
struct mace_i2c i2c;
char _pad3[0x10000 - sizeof(struct mace_i2c)];
struct mace_timers timers;
char _pad4[0x10000 - sizeof(struct mace_timers)];
};
/*
* ISA peripherals
*/
/* Parallel port */
struct mace_parallel {
};
struct mace_ecp1284 { /* later... */
};
/* Serial port */
struct mace_serial {
volatile unsigned long xxx; /* later... */
};
struct mace_isa {
struct mace_parallel parallel;
char _pad1[0x8000 - sizeof(struct mace_parallel)];
struct mace_ecp1284 ecp1284;
char _pad2[0x8000 - sizeof(struct mace_ecp1284)];
struct mace_serial serial1;
char _pad3[0x8000 - sizeof(struct mace_serial)];
struct mace_serial serial2;
char _pad4[0x8000 - sizeof(struct mace_serial)];
volatile unsigned char rtc[0x10000];
};
struct sgi_mace {
char _reserved[0x80000];
struct mace_pci pci;
char _pad0[0x80000 - sizeof(struct mace_pci)];
struct mace_video video_in1;
char _pad1[0x80000 - sizeof(struct mace_video)];
struct mace_video video_in2;
char _pad2[0x80000 - sizeof(struct mace_video)];
struct mace_video video_out;
char _pad3[0x80000 - sizeof(struct mace_video)];
struct mace_ethernet eth;
char _pad4[0x80000 - sizeof(struct mace_ethernet)];
struct mace_perif perif;
char _pad5[0x80000 - sizeof(struct mace_perif)];
struct mace_isa isa;
char _pad6[0x80000 - sizeof(struct mace_isa)];
};
extern struct sgi_mace __iomem *mace;
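
/*
 * Illustrative sketch, not part of the original header: a 32-bit PCI
 * configuration-space read through the config_addr/config_data window of
 * the PCI block above.  mace_pci_cfg_read32() and the address encoding
 * (bus << 16 | devfn << 8 | register) are assumptions for illustration,
 * not a description of the platform's PCI ops.
 */
static inline u32 mace_pci_cfg_read32(unsigned int bus, unsigned int devfn,
				      unsigned int reg)
{
	mace->pci.config_addr = (bus << 16) | (devfn << 8) | (reg & ~3U);
	return mace->pci.config_data.l;
}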
#endif /* __ASM_MACE_H__ */

Some files were not shown because too many files have changed in this diff.