Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,40 @@
generic-y += auxvec.h
generic-y += bitsperlong.h
generic-y += cputime.h
generic-y += current.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
generic-y += termbits.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h


@@ -0,0 +1,113 @@
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_arch_init(void);
/*
* These register accessors are marked inline so the compiler can
* nicely work out which register we want, and chuck away the rest of
* the code. At least it does so with a recent GCC (4.6.3).
*/
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
break;
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
break;
}
}
isb();
}
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val = 0;
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
break;
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
break;
case ARCH_TIMER_REG_TVAL:
asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
break;
}
}
return val;
}
static inline u32 arch_timer_get_cntfrq(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
return val;
}
static inline u64 arch_counter_get_cntpct(void)
{
u64 cval;
isb();
asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
return cval;
}
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
isb();
asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
return cval;
}
static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
return cntkctl;
}
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
#endif
#endif
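/*
 * Usage sketch, not part of the original header: convert a delta of the
 * virtual counter (arch_counter_get_cntvct() above) into nanoseconds.
 * The helper name is hypothetical; in-kernel code would use do_div()
 * for the 64-bit division rather than the plain '/' shown here.
 */
static inline u64 example_ticks_to_ns(u64 start, u64 end)
{
	u64 freq = arch_timer_get_cntfrq();	/* counter frequency in Hz */

	return (end - start) * 1000000000ULL / freq;
}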


@@ -0,0 +1 @@
#include <generated/asm-offsets.h>


@@ -0,0 +1,451 @@
/*
* arch/arm/include/asm/assembler.h
*
* Copyright (C) 1996-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains arm architecture specific defines
* for the different processors.
*
* Do not include any C declarations in this file - it is included by
* assembler source.
*/
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#define IOMEM(x) (x)
/*
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
#define lspull lsr
#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
#define get_byte_3 lsr #24
#define put_byte_0 lsl #0
#define put_byte_1 lsl #8
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
#define lspull lsl
#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
#define get_byte_3 lsl #0
#define put_byte_0 lsl #24
#define put_byte_1 lsl #16
#define put_byte_2 lsl #8
#define put_byte_3 lsl #0
#endif
/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
/*
* Data preload for architectures that support it
*/
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...) code
#else
#define PLD(code...)
#endif
/*
* This can be used to enable code to cacheline align the destination
* pointer when bulk writing to memory. Experiments on StrongARM and
* XScale didn't show this a worthwhile thing to do when the cache is not
* set to write-allocate (this would need further testing on XScale when WA
* is used).
*
* On Feroceon there is much to gain however, regardless of cache mode.
*/
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
/*
* Enable and disable interrupts
*/
#if __LINUX_ARM_ARCH__ >= 6
.macro disable_irq_notrace
cpsid i
.endm
.macro enable_irq_notrace
cpsie i
.endm
#else
.macro disable_irq_notrace
msr cpsr_c, #PSR_I_BIT | SVC_MODE
.endm
.macro enable_irq_notrace
msr cpsr_c, #SVC_MODE
.endm
#endif
.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
stmdb sp!, {r0-r3, ip, lr}
bl trace_hardirqs_off
ldmia sp!, {r0-r3, ip, lr}
#endif
.endm
.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
/*
* actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered
*/
stmdb sp!, {r0-r3, ip, lr}
bl\cond trace_hardirqs_on
ldmia sp!, {r0-r3, ip, lr}
#endif
.endm
.macro asm_trace_hardirqs_on
asm_trace_hardirqs_on_cond al
.endm
.macro disable_irq
disable_irq_notrace
asm_trace_hardirqs_off
.endm
.macro enable_irq
asm_trace_hardirqs_on
enable_irq_notrace
.endm
/*
* Save the current IRQ state and disable IRQs. Note that this macro
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
.macro save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
mrs \oldcpsr, primask
#else
mrs \oldcpsr, cpsr
#endif
disable_irq
.endm
.macro save_and_disable_irqs_notrace, oldcpsr
mrs \oldcpsr, cpsr
disable_irq_notrace
.endm
/*
* Restore interrupt state previously stored in a register. We don't
* guarantee that this will preserve the flags.
*/
.macro restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
msr primask, \oldcpsr
#else
msr cpsr_c, \oldcpsr
#endif
.endm
.macro restore_irqs, oldcpsr
tst \oldcpsr, #PSR_I_BIT
asm_trace_hardirqs_on_cond eq
restore_irqs_notrace \oldcpsr
.endm
/*
* Get current thread_info.
*/
.macro get_thread_info, rd
ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
THUMB( mov \rd, sp )
THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
.endm
/*
* Increment/decrement the preempt count.
*/
#ifdef CONFIG_PREEMPT_COUNT
.macro inc_preempt_count, ti, tmp
ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
add \tmp, \tmp, #1 @ increment it
str \tmp, [\ti, #TI_PREEMPT]
.endm
.macro dec_preempt_count, ti, tmp
ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
sub \tmp, \tmp, #1 @ decrement it
str \tmp, [\ti, #TI_PREEMPT]
.endm
.macro dec_preempt_count_ti, ti, tmp
get_thread_info \ti
dec_preempt_count \ti, \tmp
.endm
#else
.macro inc_preempt_count, ti, tmp
.endm
.macro dec_preempt_count, ti, tmp
.endm
.macro dec_preempt_count_ti, ti, tmp
.endm
#endif
#define USER(x...) \
9999: x; \
.pushsection __ex_table,"a"; \
.align 3; \
.long 9999b,9001f; \
.popsection
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998: instr
/*
* Note: if you get assembler errors from ALT_UP() when building with
* CONFIG_THUMB2_KERNEL, you almost certainly need to use
* ALT_SMP( W(instr) ... )
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
9997: instr ;\
.if . - 9997b != 4 ;\
.error "ALT_UP() content must assemble to exactly 4 bytes";\
.endif ;\
.popsection
#define ALT_UP_B(label) \
.equ up_b_offset, label - 9998b ;\
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
W(b) . + up_b_offset ;\
.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
/*
* Instruction barrier
*/
.macro instr_sync
#if __LINUX_ARM_ARCH__ >= 7
isb
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c5, 4
#endif
.endm
/*
* SMP data memory barrier
*/
.macro smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
ALT_SMP(dmb ish)
.else
ALT_SMP(W(dmb) ish)
.endif
#elif __LINUX_ARM_ARCH__ == 6
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
.ifeqs "\mode","arm"
ALT_UP(nop)
.else
ALT_UP(W(nop))
.endif
#endif
.endm
#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert that the CPU is in SVC mode during boot. For v7-M
* this is done in __v7m_setup, so setmode can be empty here.
*/
.macro setmode, mode, reg
.endm
#elif defined(CONFIG_THUMB2_KERNEL)
.macro setmode, mode, reg
mov \reg, #\mode
msr cpsr_c, \reg
.endm
#else
.macro setmode, mode, reg
msr cpsr_c, #\mode
.endm
#endif
/*
* Helper macro to enter SVC mode cleanly and mask interrupts. reg is
* a scratch register for the macro to overwrite.
*
* This macro is intended for forcing the CPU into SVC mode at boot time.
* You cannot return to the original mode.
*/
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
mrs \reg , cpsr
eor \reg, \reg, #HYP_MODE
tst \reg, #MODE_MASK
bic \reg , \reg , #MODE_MASK
orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB( orr \reg , \reg , #PSR_T_BIT )
bne 1f
orr \reg, \reg, #PSR_A_BIT
adr lr, BSYM(2f)
msr spsr_cxsf, \reg
__MSR_ELR_HYP(14)
__ERET
1: msr cpsr_c, \reg
2:
#else
/*
* workaround for possibly broken pre-v6 hardware
* (akita, Sharp Zaurus C-1000, PXA270-based)
*/
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
/*
* STRT/LDRT access macros with ARM and Thumb-2 variants
*/
#ifdef CONFIG_THUMB2_KERNEL
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
.if \inc == 1
\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
.elseif \inc == 4
\instr\cond\()\t\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
.pushsection __ex_table,"a"
.align 3
.long 9999b, \abort
.popsection
.endm
.macro usracc, instr, reg, ptr, inc, cond, rept, abort
@ explicit IT instruction needed because of the label
@ introduced by the USER macro
.ifnc \cond,al
.if \rept == 1
itt \cond
.elseif \rept == 2
ittt \cond
.else
.error "Unsupported rept macro argument"
.endif
.endif
@ Slightly optimised to avoid incrementing the pointer twice
usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
.if \rept == 2
usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
.endif
add\cond \ptr, #\rept * \inc
.endm
#else /* !CONFIG_THUMB2_KERNEL */
.macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
.rept \rept
9999:
.if \inc == 1
\instr\cond\()b\()\t \reg, [\ptr], #\inc
.elseif \inc == 4
\instr\cond\()\t \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
.pushsection __ex_table,"a"
.align 3
.long 9999b, \abort
.popsection
.endr
.endm
#endif /* CONFIG_THUMB2_KERNEL */
.macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
.endm
.macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
.endm
/* Utility macro for declaring string literals */
.macro string name:req, string
.type \name , #object
\name:
.asciz "\string"
.size \name , . - \name
.endm
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
#endif
.endm
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
mov\c pc, \reg
#else
.ifeqs "\reg", "lr"
bx\c \reg
.else
mov\c pc, \reg
.endif
#endif
.endm
.endr
.macro ret.w, reg
ret \reg
#ifdef CONFIG_THUMB2_KERNEL
nop
#endif
.endm
#endif /* __ASM_ASSEMBLER_H__ */


@@ -0,0 +1,446 @@
/*
* arch/arm/include/asm/atomic.h
*
* Copyright (C) 1996 Russell King.
* Copyright (C) 2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
/*
* On ARM, ordinary assignment (str instruction) doesn't clear the local
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#if __LINUX_ARM_ARCH__ >= 6
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
*/
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
prefetchw(&v->counter); \
__asm__ __volatile__("@ atomic_" #op "\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic_" #op "_return\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
smp_mb();
prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
"strexeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
: "r" (&ptr->counter), "Ir" (old), "r" (new)
: "cc");
} while (res);
smp_mb();
return oldval;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
if (oldval != u)
smp_mb();
return oldval;
}
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
raw_local_irq_save(flags); \
v->counter c_op i; \
raw_local_irq_restore(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
\
raw_local_irq_save(flags); \
v->counter c_op i; \
val = v->counter; \
raw_local_irq_restore(flags); \
\
return val; \
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
raw_local_irq_save(flags);
ret = v->counter;
if (likely(ret == old))
v->counter = new;
raw_local_irq_restore(flags);
return ret;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
c = old;
return c;
}
#endif /* __LINUX_ARM_ARCH__ */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
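/*
 * Illustrative only: an "increment unless already zero" helper built on
 * __atomic_add_unless() above; the name is hypothetical. Returns non-zero
 * if the count was raised, zero if it had already reached zero.
 */
static inline int example_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0) != 0;
}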
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
: "=&r" (result)
: "r" (&v->counter), "Qo" (v->counter)
);
return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
: "=Qo" (v->counter)
: "r" (&v->counter), "r" (i)
);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
long long result;
__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
: "=&r" (result)
: "r" (&v->counter), "Qo" (v->counter)
);
return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp), "=Qo" (v->counter)
: "r" (&v->counter), "r" (i)
: "cc");
}
#endif
#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
long long result; \
unsigned long tmp; \
\
prefetchw(&v->counter); \
__asm__ __volatile__("@ atomic64_" #op "\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
}
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
{ \
long long result; \
unsigned long tmp; \
\
smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
}
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_OP_RETURN(op, op1, op2)
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
long long new)
{
long long oldval;
unsigned long res;
smp_mb();
prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
"ldrexd %1, %H1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
"teqeq %H1, %H4\n"
"strexdeq %0, %5, %H5, [%3]"
: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
: "r" (&ptr->counter), "r" (old), "r" (new)
: "cc");
} while (res);
smp_mb();
return oldval;
}
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
long long result;
unsigned long tmp;
smp_mb();
prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
: "r" (&ptr->counter), "r" (new)
: "cc");
smp_mb();
return result;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long result;
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
smp_mb();
return result;
}
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
long long val;
unsigned long tmp;
int ret = 1;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
" adds %Q0, %Q0, %Q6\n"
" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
if (ret)
smp_mb();
return ret;
}
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
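/*
 * Sketch, not from the original file: a 64-bit event counter on 32-bit ARM
 * built from the atomic64 helpers above. Reads and updates are never torn,
 * with or without LPAE, thanks to ldrd/strd or ldrexd/strexd.
 */
static atomic64_t example_events = ATOMIC64_INIT(0);

static inline void example_record_event(void)
{
	atomic64_inc(&example_events);
}

static inline long long example_events_seen(void)
{
	return atomic64_read(&example_events);
}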


@@ -0,0 +1,77 @@
/*
* arch/arm/include/asm/bL_switcher.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_BL_SWITCHER_H
#define ASM_BL_SWITCHER_H
#include <linux/compiler.h>
#include <linux/types.h>
typedef void (*bL_switch_completion_handler)(void *cookie);
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
bL_switch_completion_handler completer,
void *completer_cookie);
static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
}
/*
* Register here to be notified about runtime enabling/disabling of
* the switcher.
*
* The notifier chain is called with the switcher activation lock held:
* the switcher will not be enabled or disabled during callbacks.
* Callbacks must not call bL_switcher_{get,put}_enabled().
*/
#define BL_NOTIFY_PRE_ENABLE 0
#define BL_NOTIFY_POST_ENABLE 1
#define BL_NOTIFY_PRE_DISABLE 2
#define BL_NOTIFY_POST_DISABLE 3
#ifdef CONFIG_BL_SWITCHER
int bL_switcher_register_notifier(struct notifier_block *nb);
int bL_switcher_unregister_notifier(struct notifier_block *nb);
/*
* Use these functions to temporarily prevent enabling/disabling of
* the switcher.
* bL_switcher_get_enabled() returns true if the switcher is currently
* enabled. Each call to bL_switcher_get_enabled() must be followed
* by a call to bL_switcher_put_enabled(). These functions are not
* recursive.
*/
bool bL_switcher_get_enabled(void);
void bL_switcher_put_enabled(void);
int bL_switcher_trace_trigger(void);
int bL_switcher_get_logical_index(u32 mpidr);
#else
static inline int bL_switcher_register_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
return 0;
}
static inline bool bL_switcher_get_enabled(void) { return false; }
static inline void bL_switcher_put_enabled(void) { }
static inline int bL_switcher_trace_trigger(void) { return 0; }
static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
#endif /* CONFIG_BL_SWITCHER */
#endif
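/*
 * Usage sketch for the pairing rule documented above: every call to
 * bL_switcher_get_enabled() must be matched by bL_switcher_put_enabled(),
 * whatever state was observed. The function name is hypothetical.
 */
static void example_switcher_aware_work(void)
{
	if (bL_switcher_get_enabled()) {
		/* the switcher is enabled and cannot be disabled here */
	}
	bL_switcher_put_enabled();
}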


@@ -0,0 +1,86 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
#include <asm/outercache.h>
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
#if __LINUX_ARM_ARCH__ >= 7 || \
(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
: : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#else
#define isb(x) __asm__ __volatile__ ("" : : : "memory")
#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dsb()
#define wmb() do { dsb(st); outer_sync(); } while (0)
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#endif
#ifndef CONFIG_SMP
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
#define smp_mb() dmb(ish)
#define smp_rmb() smp_mb()
#define smp_wmb() dmb(ishst)
#endif
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
})
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
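/*
 * Message-passing sketch for the acquire/release helpers above; ex_data
 * and ex_flag are hypothetical shared variables, not kernel symbols.
 */
static int ex_data;
static int ex_flag;

static void example_producer(void)
{
	ex_data = 42;
	smp_store_release(&ex_flag, 1);	/* write of ex_data ordered first */
}

static int example_consumer(void)
{
	if (smp_load_acquire(&ex_flag))	/* read of ex_flag ordered first */
		return ex_data;		/* guaranteed to observe 42 */
	return -1;
}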


@@ -0,0 +1,349 @@
/*
* Copyright 1995, Russell King.
* Various bits and pieces copyrights include:
* Linus Torvalds (test_bit).
* Big endian support: Copyright 2001, Nicolas Pitre
* reworked by rmk.
*
* bit 0 is the LSB of an "unsigned long" quantity.
*
* Please note that the code in this file should never be included
* from user space. Many of these are not implemented in assembler
* since they would be too costly. Also, they require privileged
* instructions (which are not available from user mode) to ensure
* that they are atomic.
*/
#ifndef __ASM_ARM_BITOPS_H
#define __ASM_ARM_BITOPS_H
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
/*
* These functions are the basis of our bit ops.
*
* First, the atomic bitops. These use native endian.
*/
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
raw_local_irq_save(flags);
*p |= mask;
raw_local_irq_restore(flags);
}
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
raw_local_irq_save(flags);
*p &= ~mask;
raw_local_irq_restore(flags);
}
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
raw_local_irq_save(flags);
*p ^= mask;
raw_local_irq_restore(flags);
}
static inline int
____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
raw_local_irq_save(flags);
res = *p;
*p = res | mask;
raw_local_irq_restore(flags);
return (res & mask) != 0;
}
static inline int
____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
raw_local_irq_save(flags);
res = *p;
*p = res & ~mask;
raw_local_irq_restore(flags);
return (res & mask) != 0;
}
static inline int
____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
raw_local_irq_save(flags);
res = *p;
*p = res ^ mask;
raw_local_irq_restore(flags);
return (res & mask) != 0;
}
#include <asm-generic/bitops/non-atomic.h>
/*
* A note about Endian-ness.
* -------------------------
*
* When the ARM is put into big endian mode via CR15, the processor
* merely swaps the order of bytes within words, thus:
*
* ------------ physical data bus bits -----------
* D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0
* little byte 3 byte 2 byte 1 byte 0
* big byte 0 byte 1 byte 2 byte 3
*
* This means that reading a 32-bit word at address 0 returns the same
* value irrespective of the endian mode bit.
*
* Peripheral devices should be connected with the data bus reversed in
* "Big Endian" mode. ARM Application Note 61 is applicable, and is
* available from http://www.arm.com/.
*
* The following assumes that the data bus connectivity for big endian
* mode has been followed.
*
* Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
*/
/*
* Native endian assembly bitops. nr = 0 -> word 0 bit 0.
*/
extern void _set_bit(int nr, volatile unsigned long * p);
extern void _clear_bit(int nr, volatile unsigned long * p);
extern void _change_bit(int nr, volatile unsigned long * p);
extern int _test_and_set_bit(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
extern int _find_first_zero_bit_be(const void * p, unsigned size);
extern int _find_next_zero_bit_be(const void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#ifndef CONFIG_SMP
/*
* The __* form of bitops are non-atomic and may be reordered.
*/
#define ATOMIC_BITOP(name,nr,p) \
(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
#else
#define ATOMIC_BITOP(name,nr,p) _##name(nr,p)
#endif
/*
* Native endian atomic definitions.
*/
#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)
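/*
 * Sketch: claiming a flag bit atomically with the definitions above.
 * example_flags and the choice of bit 0 are hypothetical.
 */
static unsigned long example_flags;

static inline int example_try_claim(void)
{
	/* non-zero iff this caller flipped the bit from 0 to 1 */
	return !test_and_set_bit(0, &example_flags);
}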
#ifndef __ARMEB__
/*
* These are the little endian, atomic definitions.
*/
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off)
#else
/*
* These are the big endian, atomic definitions.
*/
#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off)
#endif
#if __LINUX_ARM_ARCH__ < 5
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffs.h>
#else
static inline int constant_fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
/*
* On ARMv5 and above those functions can be implemented around the
* clz instruction for much better code efficiency. __clz returns
* the number of leading zeros, zero input will return 32, and
* 0x80000000 will return 0.
*/
static inline unsigned int __clz(unsigned int x)
{
unsigned int ret;
asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
return ret;
}
/*
* fls() returns zero if the input is zero, otherwise returns the bit
* position of the last set bit, where the LSB is 1 and MSB is 32.
*/
static inline int fls(int x)
{
if (__builtin_constant_p(x))
return constant_fls(x);
return 32 - __clz(x);
}
/*
* __fls() returns the bit position of the last bit set, where the
* LSB is 0 and MSB is 31. Zero input is undefined.
*/
static inline unsigned long __fls(unsigned long x)
{
return fls(x) - 1;
}
/*
* ffs() returns zero if the input was zero, otherwise returns the bit
* position of the first set bit, where the LSB is 1 and MSB is 32.
*/
static inline int ffs(int x)
{
return fls(x & -x);
}
/*
* __ffs() returns the bit position of the first bit set, where the
* LSB is 0 and MSB is 31. Zero input is undefined.
*/
static inline unsigned long __ffs(unsigned long x)
{
return ffs(x) - 1;
}
#define ffz(x) __ffs( ~(x) )
#endif
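/*
 * Worked values for the helpers above (illustrative):
 *   fls(0)          == 0      ffs(0)     == 0
 *   fls(0x80000000) == 32     ffs(0x8)   == 4
 *   __fls(0x8)      == 3      __ffs(0x8) == 3
 *   ffz(0xff)       == 8      (first zero bit above the low eight ones)
 */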
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#ifdef __ARMEB__
static inline int find_first_zero_bit_le(const void *p, unsigned size)
{
return _find_first_zero_bit_le(p, size);
}
#define find_first_zero_bit_le find_first_zero_bit_le
static inline int find_next_zero_bit_le(const void *p, int size, int offset)
{
return _find_next_zero_bit_le(p, size, offset);
}
#define find_next_zero_bit_le find_next_zero_bit_le
static inline int find_next_bit_le(const void *p, int size, int offset)
{
return _find_next_bit_le(p, size, offset);
}
#define find_next_bit_le find_next_bit_le
#endif
#include <asm-generic/bitops/le.h>
/*
* Ext2 is defined to use little-endian byte ordering.
*/
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#endif /* _ARM_BITOPS_H */


@@ -0,0 +1,92 @@
#ifndef _ASMARM_BUG_H
#define _ASMARM_BUG_H
#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/opcodes.h>
#ifdef CONFIG_BUG
/*
* Use a suitable undefined instruction for ARM/Thumb2 bug handling.
* We need to be careful not to conflict with those used by other modules and
* the register_undef_hook() system.
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
#define BUG_INSTR(__value) __inst_arm(__value)
#endif
#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE)
#define _BUG(file, line, value) __BUG(file, line, value)
#ifdef CONFIG_DEBUG_BUGVERBOSE
/*
* The extra indirection is to ensure that the __FILE__ string comes through
* OK. Many versions of gcc do not support the asm %c parameter, which would be
* preferable to this unpleasantness. We use mergeable string sections to
* avoid multiple copies of the string appearing in the kernel image.
*/
#define __BUG(__file, __line, __value) \
do { \
asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
".pushsection __bug_table,\"a\"\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
".popsection"); \
unreachable(); \
} while (0)
#else /* not CONFIG_DEBUG_BUGVERBOSE */
#define __BUG(__file, __line, __value) \
do { \
asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define HAVE_ARCH_BUG
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err);
struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
unsigned long err, unsigned long trap);
#ifdef CONFIG_ARM_LPAE
#define FAULT_CODE_ALIGNMENT 33
#define FAULT_CODE_DEBUG 34
#else
#define FAULT_CODE_ALIGNMENT 1
#define FAULT_CODE_DEBUG 2
#endif
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
#endif


@@ -0,0 +1,21 @@
/*
* arch/arm/include/asm/bugs.h
*
* Copyright (C) 1995-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
#define check_bugs() check_writebuffer_bugs()
#else
#define check_bugs() do { } while (0)
#endif
#endif


@@ -0,0 +1,28 @@
/*
* arch/arm/include/asm/cache.h
*/
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H
#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
* sure that all such allocations are cache aligned. Otherwise,
* unrelated code may cause parts of the buffer to be read into the
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
* With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
*/
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif


@@ -0,0 +1,492 @@
/*
* arch/arm/include/asm/cacheflush.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
#include <linux/mm.h>
#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* This flag is used to indicate that the page pointed to by a pte is clean
* and does not require cleaning before returning it to the user.
*/
#define PG_dcache_clean PG_arch_1
/*
* MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
* Currently only needed for cache-v6.S and cache-v7.S, see
* __flush_icache_all for the generic implementation.
*
* flush_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_kern_louis()
*
* Flush data cache levels up to the level of unification
* inner shareable and invalidate the I-cache.
* Only needed from v7 onwards, falls back to flush_cache_all()
* for all other processor versions.
*
* flush_user_all()
*
* Clean and invalidate all user space cache entries
* before a change of page tables.
*
* flush_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* flush_kern_dcache_area(kaddr, size)
*
* Ensure that the data held in page is written back.
* - kaddr - page address
* - size - region size
*
* DMA Cache Coherency
* ===================
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
void (*flush_kern_louis)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
int (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
};
/*
* Select the calling method
*/
#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
*/
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
} while (0)
/*
* Convert calls to our calling convention.
*/
/* Invalidate I-cache */
#define __flush_icache_all_generic() \
asm("mcr p15, 0, %0, c7, c5, 0" \
: : "r" (0));
/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp() \
asm("mcr p15, 0, %0, c7, c1, 0" \
: : "r" (0));
/*
* Optimized __flush_icache_all for the common cases. Note that UP ARMv7
* will fall through to use __flush_icache_all_generic.
*/
#if (defined(CONFIG_CPU_V7) && \
(defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred __flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred __cpuc_flush_icache_all
#else
#define __flush_icache_preferred __flush_icache_all_generic
#endif
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
dsb(ishst);
}
/*
* Flush caches up to Level of Unification Inner Shareable
*/
#define flush_cache_louis() __cpuc_flush_kern_louis()
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
* flush_cache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
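/*
 * Sketch: publishing freshly written instructions (e.g. from a
 * hypothetical code patcher) before jumping to them.
 */
static inline void example_publish_code(void *buf, unsigned long len)
{
	flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
}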
/*
* Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area.
*/
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/*
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
* If this page isn't mapped (ie, page_mapping == NULL), or it might
* have userspace mappings, then we _must_ always clean + invalidate
* the dcache entries associated with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
static inline void flush_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
__cpuc_flush_dcache_area(addr, (size_t)size);
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vmaddr)
{
extern void __flush_anon_page(struct vm_area_struct *vma,
struct page *, unsigned long);
if (PageAnon(page))
__flush_anon_page(vma, page, vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
spin_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
* caches, since the direct-mappings of these pages may contain cached
* data, we need to do a full cache flush to ensure that writebacks
* don't corrupt data placed into these pages via the new mappings.
*/
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
flush_cache_all();
else
/*
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
dsb(ishst);
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
flush_cache_all();
}
/*
* Memory synchronization helpers for mixed cached vs non cached accesses.
*
* Some synchronization algorithms have to set states in memory with the
* cache enabled or disabled depending on the code path. It is crucial
* to always ensure proper cache maintenance to update main memory right
* away in that case.
*
* Any cached write must be followed by a cache clean operation.
* Any cached read must be preceded by a cache invalidate operation.
* Yet, in the read case, a cache flush i.e. atomic clean+invalidate
* operation is needed to avoid discarding possible concurrent writes to the
* accessed memory.
*
* Also, in order to prevent a cached writer from interfering with an
* adjacent non-cached writer, each state variable must be located in
* a separate cache line.
*/
/*
* This needs to be >= the max cache writeback size of all
* supported platforms included in the current kernel configuration.
* This is used to align state variables to their own cache lines.
*/
#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
/*
* There is no __cpuc_clean_dcache_area but we use it anyway for
* code intent clarity, and alias it to __cpuc_flush_dcache_area.
*/
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
/*
* Ensure preceding writes to *p by this CPU are visible to
* subsequent reads by other CPUs:
*/
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
char *_p = (char *)p;
__cpuc_clean_dcache_area(_p, size);
outer_clean_range(__pa(_p), __pa(_p + size));
}
/*
* Ensure preceding writes to *p by other CPUs are visible to
* subsequent reads by this CPU. We must be careful not to
* discard data simultaneously written by another CPU, hence the
* usage of flush rather than invalidate operations.
*/
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
char *_p = (char *)p;
#ifdef CONFIG_OUTER_CACHE
if (outer_cache.flush_range) {
/*
* Ensure dirty data migrated from other CPUs into our cache
* are cleaned out safely before the outer cache is cleaned:
*/
__cpuc_clean_dcache_area(_p, size);
/* Clean and invalidate stale data for *p from outer ... */
outer_flush_range(__pa(_p), __pa(_p + size));
}
#endif
/* ... and inner cache: */
__cpuc_flush_dcache_area(_p, size);
}
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
/*
* Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
* To do so we must:
*
* - Clear the SCTLR.C bit to prevent further cache allocations
* - Flush the desired level of cache
* - Clear the ACTLR "SMP" bit to disable local coherency
*
* ... and so without any intervening memory access in between those steps,
* not even to the stack.
*
* WARNING -- After this has been called:
*
* - No ldrex/strex (and similar) instructions must be used.
* - The CPU is obviously no longer coherent with the other CPUs.
* - This is unlikely to work as expected if Linux is running non-secure.
*
* Note:
*
* - This is known to apply to several ARMv7 processor implementations,
* however some exceptions may exist. Caveat emptor.
*
* - The clobber list is dictated by the call to v7_flush_dcache_*.
* fp is preserved to the stack explicitly prior to disabling the cache
* since adding it to the clobber list is incompatible with having
* CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
* trampolines are inserted by the linker and to keep sp 64-bit aligned.
*/
#define v7_exit_coherency_flush(level) \
asm volatile( \
".arch armv7-a \n\t" \
"stmfd sp!, {fp, ip} \n\t" \
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
"bic r0, r0, #"__stringify(CR_C)" \n\t" \
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
"isb \n\t" \
"bl v7_flush_dcache_"__stringify(level)" \n\t" \
"mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
"isb \n\t" \
"dsb \n\t" \
"ldmfd sp!, {fp, ip}" \
: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
"r9","r10","lr","memory" )
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
#endif


@@ -0,0 +1,59 @@
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H
#define CACHEID_VIVT (1 << 0)
#define CACHEID_VIPT_NONALIASING (1 << 1)
#define CACHEID_VIPT_ALIASING (1 << 2)
#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED (1 << 3)
#define CACHEID_VIPT_I_ALIASING (1 << 4)
#define CACHEID_PIPT (1 << 5)
extern unsigned int cacheid;
#define cache_is_vivt() cacheid_is(CACHEID_VIVT)
#define cache_is_vipt() cacheid_is(CACHEID_VIPT)
#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
* Mask out support which will never be present on newer CPUs.
* - v6+ is never VIVT
* - v7+ VIPT never aliases on D-side
*/
#if __LINUX_ARM_ARCH__ >= 7
#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
CACHEID_ASID_TAGGED |\
CACHEID_VIPT_I_ALIASING |\
CACHEID_PIPT)
#elif __LINUX_ARM_ARCH__ >= 6
#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
#else
#define __CACHEID_ARCH_MIN (~0)
#endif
/*
* Mask out support which isn't configured
*/
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS (CACHEID_VIVT)
#define __CACHEID_NEVER (~CACHEID_VIVT)
#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS (0)
#define __CACHEID_NEVER (CACHEID_VIVT)
#else
#define __CACHEID_ALWAYS (0)
#define __CACHEID_NEVER (0)
#endif
static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
{
return (__CACHEID_ALWAYS & mask) |
(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}
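/*
 * Usage sketch (illustrative only): these predicates let generic code
 * branch on the detected cache model at run time; the two helpers
 * called below are hypothetical.
 *
 *	if (cache_is_vipt_aliasing())
 *		flush_user_alias(page);
 *	else if (cache_is_vivt())
 *		flush_whole_cache();
 */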
#endif

View file

@ -0,0 +1,153 @@
/*
* arch/arm/include/asm/checksum.h
*
* IP checksum routines
*
* Copyright (C) Original authors of ../asm-i386/checksum.h
* Copyright (C) 1996-1999 Russell King
*/
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H
#include <linux/in6.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here it is even more important to align src and dst on a 32-bit (or,
* even better, a 64-bit) boundary
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
/*
* Fold a partial checksum without adding pseudo headers
*/
static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
"add %0, %1, %1, ror #16 @ csum_fold"
: "=r" (sum)
: "r" (sum)
: "cc");
return (__force __sum16)(~(__force u32)sum >> 16);
}
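/*
 * Worked example (illustrative): for sum = 0x12345678 the rotate-and-add
 * computes 0x12345678 + 0x56781234 = 0x68AC68AC, whose upper halfword
 * 0x68AC is the folded 16-bit sum (0x1234 + 0x5678); the shifted
 * complement then makes csum_fold() return 0x9753.
 */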
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always consist of a whole number of 4-octet words.
*/
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int tmp1;
__wsum sum;
__asm__ __volatile__(
"ldr %0, [%1], #4 @ ip_fast_csum \n\
ldr %3, [%1], #4 \n\
sub %2, %2, #5 \n\
adds %0, %0, %3 \n\
ldr %3, [%1], #4 \n\
adcs %0, %0, %3 \n\
ldr %3, [%1], #4 \n\
1: adcs %0, %0, %3 \n\
ldr %3, [%1], #4 \n\
tst %2, #15 @ do this carefully \n\
subne %2, %2, #1 @ without destroying \n\
bne 1b @ the carry flag \n\
adcs %0, %0, %3 \n\
adc %0, %0, #0"
: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
: "1" (iph), "2" (ihl)
: "cc", "memory");
return csum_fold(sum);
}
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
u32 lenprot = len | proto << 16;
if (__builtin_constant_p(sum) && sum == 0) {
__asm__(
"adds %0, %1, %2 @ csum_tcpudp_nofold0 \n\t"
#ifdef __ARMEB__
"adcs %0, %0, %3 \n\t"
#else
"adcs %0, %0, %3, ror #8 \n\t"
#endif
"adc %0, %0, #0"
: "=&r" (sum)
: "r" (daddr), "r" (saddr), "r" (lenprot)
: "cc");
} else {
__asm__(
"adds %0, %1, %2 @ csum_tcpudp_nofold \n\t"
"adcs %0, %0, %3 \n\t"
#ifdef __ARMEB__
"adcs %0, %0, %4 \n\t"
#else
"adcs %0, %0, %4, ror #8 \n\t"
#endif
"adc %0, %0, #0"
: "=&r"(sum)
: "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
: "cc");
}
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
__be32 proto, __wsum sum);
static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
unsigned short proto, __wsum sum)
{
return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
htonl(proto), sum));
}
#endif

View file

@ -0,0 +1,31 @@
/*
* arch/arm/include/asm/clkdev.h
*
* Copyright (C) 2008 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __ASM_CLKDEV_H
#define __ASM_CLKDEV_H
#include <linux/slab.h>
#ifndef CONFIG_COMMON_CLK
#ifdef CONFIG_HAVE_MACH_CLKDEV
#include <mach/clkdev.h>
#else
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
#endif
#endif
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
return kzalloc(size, GFP_KERNEL);
}
#endif

View file

@ -0,0 +1,288 @@
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
* cache totally. This means that the cache becomes inconsistent, and,
* since we use normal loads/stores as well, this is really bad.
* Typically, this causes oopsen in filp_close, but could have other,
* more disastrous effects. There are two work-arounds:
* 1. Disable interrupts and emulate the atomic swap
* 2. Clean the cache, perform atomic swap, flush the cache
*
* We choose (1) since it's the "easiest" to achieve here and is not
* dependent on the processor type.
*
* NOTE that this solution won't work on an SMP system, so explicitly
* forbid it here.
*/
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
#ifdef swp_is_buggy
unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
unsigned int tmp;
#endif
smp_mb();
prefetchw((const void *)ptr);
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
asm volatile("@ __xchg1\n"
"1: ldrexb %0, [%3]\n"
" strexb %1, %2, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (ret), "=&r" (tmp)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
case 4:
asm volatile("@ __xchg4\n"
"1: ldrex %0, [%3]\n"
" strex %1, %2, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (ret), "=&r" (tmp)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
case 1:
raw_local_irq_save(flags);
ret = *(volatile unsigned char *)ptr;
*(volatile unsigned char *)ptr = x;
raw_local_irq_restore(flags);
break;
case 4:
raw_local_irq_save(flags);
ret = *(volatile unsigned long *)ptr;
*(volatile unsigned long *)ptr = x;
raw_local_irq_restore(flags);
break;
#else
case 1:
asm volatile("@ __xchg1\n"
" swpb %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
case 4:
asm volatile("@ __xchg4\n"
" swp %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
#endif
default:
__bad_xchg(ptr, size), ret = 0;
break;
}
smp_mb();
return ret;
}
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
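/*
 * Usage sketch (not from the original header): atomically swap in a new
 * value and observe the previous one, e.g. for a crude hand-rolled lock
 * that spins until it observed 0; "lock_word" is a made-up variable.
 *
 *	static unsigned long lock_word;
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		cpu_relax();
 */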
#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */
#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#else /* min ARCH >= ARMv6 */
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
* cmpxchg only supports 32-bit operands on ARMv6.
*/
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long oldval, res;
prefetchw((const void *)ptr);
switch (size) {
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
" ldrexb %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexbeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
case 2:
do {
asm volatile("@ __cmpxchg1\n"
" ldrexh %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexheq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
#endif
case 4:
do {
asm volatile("@ __cmpxchg4\n"
" ldrex %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
default:
__bad_cmpxchg(ptr, size);
oldval = 0;
}
return oldval;
}
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long ret;
smp_mb();
ret = __cmpxchg(ptr, old, new, size);
smp_mb();
return ret;
}
#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
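/*
 * Usage sketch (illustrative): the classic compare-and-swap retry loop,
 * here incrementing a shared counter locklessly; "counter" is made up.
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */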
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long ret;
switch (size) {
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
#endif
default:
ret = __cmpxchg(ptr, old, new, size);
}
return ret;
}
static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long oldval;
unsigned long res;
prefetchw(ptr);
__asm__ __volatile__(
"1: ldrexd %1, %H1, [%3]\n"
" teq %1, %4\n"
" teqeq %H1, %H4\n"
" bne 2f\n"
" strexd %0, %5, %H5, [%3]\n"
" teq %0, #0\n"
" bne 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
: "r" (ptr), "r" (old), "r" (new)
: "cc");
return oldval;
}
static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long ret;
smp_mb();
ret = __cmpxchg64(ptr, old, new);
smp_mb();
return ret;
}
#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_relaxed(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
#endif /* __ASM_ARM_CMPXCHG_H */

View file

@ -0,0 +1,15 @@
#ifndef __ASM_ARM_COMPILER_H
#define __ASM_ARM_COMPILER_H
/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for some inline assembly sequences. Apparently we can't trust
* the compiler from one version to another so a bit of paranoia won't hurt.
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
* (for details, see gcc PR 15089)
*/
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
#endif /* __ASM_ARM_COMPILER_H */

119
arch/arm/include/asm/cp15.h Normal file
View file

@ -0,0 +1,119 @@
#ifndef __ASM_ARM_CP15_H
#define __ASM_ARM_CP15_H
#include <asm/barrier.h>
/*
* CR1 bits (CP#15 CR1)
*/
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#ifdef CONFIG_MMU
#define CR_HA (1 << 17) /* Hardware management of Access Flag */
#else
#define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */
#endif
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
#define CR_EE (1 << 25) /* Exception (Big) Endian */
#define CR_TRE (1 << 28) /* TEX remap enable */
#define CR_AFE (1 << 29) /* Access flag enable */
#define CR_TE (1 << 30) /* Thumb exception enable */
#ifndef __ASSEMBLY__
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high() (get_cr() & CR_V)
#else
#define vectors_high() (0)
#endif
#ifdef CONFIG_CPU_CP15
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
{
unsigned long val;
asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
return val;
}
static inline void set_cr(unsigned long val)
{
asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
: : "r" (val) : "cc");
isb();
}
static inline unsigned int get_auxcr(void)
{
unsigned int val;
asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val));
return val;
}
static inline void set_auxcr(unsigned int val)
{
asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
: : "r" (val));
isb();
}
#define CPACC_FULL(n) (3 << (n * 2))
#define CPACC_SVC(n) (1 << (n * 2))
#define CPACC_DISABLE(n) (0 << (n * 2))
static inline unsigned int get_copro_access(void)
{
unsigned int val;
asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
: "=r" (val) : : "cc");
return val;
}
static inline void set_copro_access(unsigned int val)
{
asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
: : "r" (val) : "cc");
isb();
}
#else /* ifdef CONFIG_CPU_CP15 */
/*
* cr_alignment is tightly coupled to cp15 (at least in the minds of the
* developers). Yielding 0 for machines without a cp15 (and making it
* read-only) is fine for most cases and saves quite some #ifdeffery.
*/
#define cr_alignment UL(0)
static inline unsigned long get_cr(void)
{
return 0;
}
#endif /* ifdef CONFIG_CPU_CP15 / else */
#endif /* ifndef __ASSEMBLY__ */
#endif

View file

@ -0,0 +1,26 @@
/*
* arch/arm/include/asm/cpu.h
*
* Copyright (C) 2004-2005 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_CPU_H
#define __ASM_ARM_CPU_H
#include <linux/percpu.h>
#include <linux/cpu.h>
struct cpuinfo_arm {
struct cpu cpu;
u32 cpuid;
#ifdef CONFIG_SMP
unsigned int loops_per_jiffy;
#endif
};
DECLARE_PER_CPU(struct cpuinfo_arm, cpu_data);
#endif

View file

@ -0,0 +1,29 @@
#ifndef __ASM_ARM_CPUIDLE_H
#define __ASM_ARM_CPUIDLE_H
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
#endif
/* Common ARM WFI state */
#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
.enter = arm_cpuidle_simple_enter,\
.exit_latency = 1,\
.target_residency = 1,\
.power_usage = p,\
.flags = CPUIDLE_FLAG_TIME_VALID,\
.name = "WFI",\
.desc = "ARM WFI",\
}
/*
* in case power_specified == 1, give a default WFI power value needed
* by some governors
*/
#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
#endif

View file

@ -0,0 +1,256 @@
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H
#include <linux/stringify.h>
#include <linux/kernel.h>
#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
#define CPUID_REVIDR 6
#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
#define CPUID_EXT_PFR1 0x44
#define CPUID_EXT_DFR0 0x48
#define CPUID_EXT_AFR0 0x4c
#define CPUID_EXT_MMFR0 0x50
#define CPUID_EXT_MMFR1 0x54
#define CPUID_EXT_MMFR2 0x58
#define CPUID_EXT_MMFR3 0x5c
#define CPUID_EXT_ISAR0 0x60
#define CPUID_EXT_ISAR1 0x64
#define CPUID_EXT_ISAR2 0x68
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
#define CPUID_EXT_DFR0 "c1, 2"
#define CPUID_EXT_AFR0 "c1, 3"
#define CPUID_EXT_MMFR0 "c1, 4"
#define CPUID_EXT_MMFR1 "c1, 5"
#define CPUID_EXT_MMFR2 "c1, 6"
#define CPUID_EXT_MMFR3 "c1, 7"
#define CPUID_EXT_ISAR0 "c2, 0"
#define CPUID_EXT_ISAR1 "c2, 1"
#define CPUID_EXT_ISAR2 "c2, 2"
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)
#define MPIDR_MT_BITMASK (0x1 << 24)
#define MPIDR_HWID_BITMASK 0xFFFFFF
#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
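/*
 * Worked example (illustrative): for mpidr == 0x80000102,
 * MPIDR_AFFINITY_LEVEL(mpidr, 0) == 0x02 (CPU within the cluster) and
 * MPIDR_AFFINITY_LEVEL(mpidr, 1) == 0x01 (cluster number).
 */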
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_INTEL 0x69
/* ARM implemented processors */
#define ARM_CPU_PART_ARM1136 0x4100b360
#define ARM_CPU_PART_ARM1156 0x4100b560
#define ARM_CPU_PART_ARM1176 0x4100b760
#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
#define ARM_CPU_PART_CORTEX_A8 0x4100c080
#define ARM_CPU_PART_CORTEX_A9 0x4100c090
#define ARM_CPU_PART_CORTEX_A5 0x4100c050
#define ARM_CPU_PART_CORTEX_A7 0x4100c070
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
#define ARM_CPU_PART_MASK 0xff00fff0
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
: "=r" (__val) \
: \
: "cc"); \
__val; \
})
/*
* The memory clobber prevents gcc 4.5 from reordering the mrc before
* any is_smp() tests, which can cause undefined instruction aborts on
* ARM1136 r0 due to the missing extended CP15 registers.
*/
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, " ext_reg \
: "=r" (__val) \
: \
: "memory"); \
__val; \
})
#elif defined(CONFIG_CPU_V7M)
#include <asm/io.h>
#include <asm/v7m.h>
#define read_cpuid(reg) \
({ \
WARN_ON_ONCE(1); \
0; \
})
static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
{
return readl(BASEADDR_V7M_SCB + offset);
}
#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */
/*
* read_cpuid and read_cpuid_ext should only ever be called on machines that
* have cp15 so warn on other usages.
*/
#define read_cpuid(reg) \
({ \
WARN_ON_ONCE(1); \
0; \
})
#define read_cpuid_ext(reg) read_cpuid(reg)
#endif /* ifdef CONFIG_CPU_CP15 / else */
#ifdef CONFIG_CPU_CP15
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
* rather than reading processor_id or calling read_cpuid() directly.
*/
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
return read_cpuid(CPUID_ID);
}
#elif defined(CONFIG_CPU_V7M)
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}
#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
return processor_id;
}
#endif /* ifdef CONFIG_CPU_CP15 / else */
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
return (read_cpuid_id() & 0xFF000000) >> 24;
}
/*
* The CPU part number is meaningless without referring to the CPU
* implementer: implementers are free to define their own part numbers
* which are permitted to clash with other implementer part numbers.
*/
static inline unsigned int __attribute_const__ read_cpuid_part(void)
{
return read_cpuid_id() & ARM_CPU_PART_MASK;
}
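/*
 * Usage sketch (illustrative): errata and feature code would typically
 * key off the combined implementer/part value; the workaround helper
 * below is hypothetical.
 *
 *	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 *		apply_a9_workaround();
 */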
static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
{
return read_cpuid_id() & 0xFFF0;
}
static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
{
return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CPUID_CACHETYPE);
}
static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
{
return read_cpuid(CPUID_TCM);
}
static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
return read_cpuid(CPUID_MPIDR);
}
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
* this reason, we need a way to explicitly test for this type of CPU.
*/
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3() 0
#else
static inline int cpu_is_xsc3(void)
{
unsigned int id;
id = read_cpuid_id() & 0xffffe000;
/* It covers both Intel ID and Marvell ID */
if ((id == 0x69056000) || (id == 0x56056000))
return 1;
return 0;
}
#endif
#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale() 0
#else
#define cpu_is_xscale() 1
#endif
/*
* Marvell's PJ4 and PJ4B cores are based on the ARMv7 architecture,
* but require a special sequence for enabling coprocessors.
* For this reason, we need a way to distinguish them.
*/
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
static inline int cpu_is_pj4(void)
{
unsigned int id;
id = read_cpuid_id();
if ((id & 0xff0fff00) == 0x560f5800)
return 1;
return 0;
}
#else
#define cpu_is_pj4() 0
#endif
#endif

View file

@ -0,0 +1,10 @@
#ifndef ASM_ARM_CRYPTO_SHA1_H
#define ASM_ARM_CRYPTO_SHA1_H
#include <linux/crypto.h>
#include <crypto/sha.h>
extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len);
#endif

159
arch/arm/include/asm/cti.h Normal file
View file

@ -0,0 +1,159 @@
#ifndef __ASMARM_CTI_H
#define __ASMARM_CTI_H
#include <asm/io.h>
#include <asm/hardware/coresight.h>
/* The register definitions are taken from section 3.2 of
* Embedded Cross Trigger Revision: r0p0
*/
#define CTICONTROL 0x000
#define CTISTATUS 0x004
#define CTILOCK 0x008
#define CTIPROTECTION 0x00C
#define CTIINTACK 0x010
#define CTIAPPSET 0x014
#define CTIAPPCLEAR 0x018
#define CTIAPPPULSE 0x01c
#define CTIINEN 0x020
#define CTIOUTEN 0x0A0
#define CTITRIGINSTATUS 0x130
#define CTITRIGOUTSTATUS 0x134
#define CTICHINSTATUS 0x138
#define CTICHOUTSTATUS 0x13c
#define CTIPERIPHID0 0xFE0
#define CTIPERIPHID1 0xFE4
#define CTIPERIPHID2 0xFE8
#define CTIPERIPHID3 0xFEC
#define CTIPCELLID0 0xFF0
#define CTIPCELLID1 0xFF4
#define CTIPCELLID2 0xFF8
#define CTIPCELLID3 0xFFC
/* The below are from section 3.6.4 of
* CoreSight v1.0 Architecture Specification
*/
#define LOCKACCESS 0xFB0
#define LOCKSTATUS 0xFB4
/**
* struct cti - cross trigger interface struct
* @base: mapped virtual address for the cti base
* @irq: irq number for the cti
* @trig_out_for_irq: trigger out number which will cause
* the @irq to fire
*
* cti struct used to operate cti registers.
*/
struct cti {
void __iomem *base;
int irq;
int trig_out_for_irq;
};
/**
* cti_init - initialize the cti instance
* @cti: cti instance
* @base: mapped virtual address for the cti base
* @irq: irq number for the cti
* @trig_out: trigger out number which will cause
* the @irq to fire
*
* called by machine code to pass the board-dependent
* @base, @irq and @trig_out to the cti.
*/
static inline void cti_init(struct cti *cti,
void __iomem *base, int irq, int trig_out)
{
cti->base = base;
cti->irq = irq;
cti->trig_out_for_irq = trig_out;
}
/**
* cti_map_trigger - use channel @chan to map @trig_in to @trig_out
* @cti: cti instance
* @trig_in: trigger in number
* @trig_out: trigger out number
* @chan: channel number
*
* This function maps trigger in @trig_in to trigger out @trig_out
* via channel @chan.
*/
static inline void cti_map_trigger(struct cti *cti,
int trig_in, int trig_out, int chan)
{
void __iomem *base = cti->base;
unsigned long val;
val = __raw_readl(base + CTIINEN + trig_in * 4);
val |= BIT(chan);
__raw_writel(val, base + CTIINEN + trig_in * 4);
val = __raw_readl(base + CTIOUTEN + trig_out * 4);
val |= BIT(chan);
__raw_writel(val, base + CTIOUTEN + trig_out * 4);
}
/**
* cti_enable - enable the cti module
* @cti: cti instance
*
* enable the cti module
*/
static inline void cti_enable(struct cti *cti)
{
__raw_writel(0x1, cti->base + CTICONTROL);
}
/**
* cti_disable - disable the cti module
* @cti: cti instance
*
* disable the cti module
*/
static inline void cti_disable(struct cti *cti)
{
__raw_writel(0, cti->base + CTICONTROL);
}
/**
* cti_irq_ack - clear the cti irq
* @cti: cti instance
*
* clear the cti irq
*/
static inline void cti_irq_ack(struct cti *cti)
{
void __iomem *base = cti->base;
unsigned long val;
val = __raw_readl(base + CTIINTACK);
val |= BIT(cti->trig_out_for_irq);
__raw_writel(val, base + CTIINTACK);
}
/**
* cti_unlock - unlock cti module
* @cti: cti instance
*
* unlock the cti module; otherwise writes to the cti
* module are not allowed.
*/
static inline void cti_unlock(struct cti *cti)
{
__raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
}
/**
* cti_lock - lock cti module
* @cti: cti instance
*
* lock the cti module, so that writes to the cti
* module are no longer allowed.
*/
static inline void cti_lock(struct cti *cti)
{
__raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
}
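/*
 * Usage sketch (illustrative, not from the original header): board code
 * would typically wire up a CTI along these lines; the base address,
 * irq and trigger/channel numbers below are made up.
 *
 *	static struct cti example_cti;
 *
 *	cti_init(&example_cti, ioremap(0x10020000, SZ_4K), 34, 2);
 *	cti_unlock(&example_cti);
 *	cti_map_trigger(&example_cti, 1, 2, 0);
 *	cti_enable(&example_cti);
 */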
#endif

View file

@ -0,0 +1,41 @@
/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/barrier.h>
static inline u32 __dcc_getstatus(void)
{
u32 __ret;
asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
: "=r" (__ret) : : "cc");
return __ret;
}
static inline char __dcc_getchar(void)
{
char __c;
asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
: "=r" (__c));
isb();
return __c;
}
static inline void __dcc_putchar(char c)
{
asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
: /* no output register */
: "r" (c));
isb();
}
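/*
 * Usage sketch (illustrative): a busy-wait transmit over the debug comms
 * channel, assuming the ARMv6/v7 convention that bit 29 of the status
 * word means "TX full" (cf. drivers/tty/hvc/hvc_dcc.c):
 *
 *	while (__dcc_getstatus() & (1 << 29))
 *		cpu_relax();
 *	__dcc_putchar('A');
 */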

View file

@ -0,0 +1,72 @@
/*
* Copyright (C) 1995-2004 Russell King
*
* Delay routines, using a pre-computed "loops_per_second" value.
*/
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
#include <asm/memory.h>
#include <asm/param.h> /* HZ */
#define MAX_UDELAY_MS 2
#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
#define UDELAY_SHIFT 30
#ifndef __ASSEMBLY__
struct delay_timer {
unsigned long (*read_current_timer)(void);
unsigned long freq;
};
extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
unsigned long ticks_per_jiffy;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
/*
* This function intentionally does not exist; if you see references to
* it, it means that you're calling udelay() with an out-of-range value.
*
* With currently imposed limits, this means that we support a max delay
* of 2000us. Further limits: HZ<=1000 and bogomips<=3355
*/
extern void __bad_udelay(void);
/*
* division by multiplication: you don't have to worry about
* loss of precision.
*
* Use only for very small delays ( < 2 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplication gets optimized away if the delay is
* a constant)
*/
#define __udelay(n) arm_delay_ops.udelay(n)
#define __const_udelay(n) arm_delay_ops.const_udelay(n)
#define udelay(n) \
(__builtin_constant_p(n) ? \
((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \
__const_udelay((n) * UDELAY_MULT)) : \
__udelay(n))
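/*
 * Usage sketch (illustrative): constant arguments are range-checked at
 * build time and take the __const_udelay() path, while variables go
 * through __udelay(); "pause_us" is a made-up variable.
 *
 *	udelay(10);
 *	udelay(pause_us);
 */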
/* Loop-based definitions for assembly code. */
extern void __loop_delay(unsigned long loops);
extern void __loop_udelay(unsigned long usecs);
extern void __loop_const_udelay(unsigned long);
/* Delay-loop timer registration. */
#define ARCH_HAS_READ_CURRENT_TIMER
extern void register_current_timer_delay(const struct delay_timer *timer);
#endif /* __ASSEMBLY__ */
#endif /* defined(_ARM_DELAY_H) */

View file

@ -0,0 +1,36 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef ASMARM_DEVICE_H
#define ASMARM_DEVICE_H
struct dev_archdata {
struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
};
struct omap_device;
struct pdev_archdata {
#ifdef CONFIG_ARCH_OMAP
struct omap_device *od;
#endif
};
#ifdef CONFIG_ARM_DMA_USE_IOMMU
#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
#else
#define to_dma_iommu_mapping(dev) NULL
#endif
#endif

View file

@ -0,0 +1,227 @@
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64
#include <linux/types.h>
#include <asm/compiler.h>
/*
* The semantics of do_div() are:
*
* uint32_t do_div(uint64_t *n, uint32_t base)
* {
* uint32_t remainder = *n % base;
* *n = *n / base;
* return remainder;
* }
*
* In other words, a 64-bit dividend with a 32-bit divisor producing
* a 64-bit result and a 32-bit remainder. To accomplish this optimally
* we call a special __do_div64 helper with a completely non-standard
* calling convention for arguments and results (beware).
*/
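/*
 * Usage sketch (illustrative): do_div() modifies its first argument in
 * place and returns the remainder, e.g. splitting a nanosecond count:
 *
 *	u64 ns = 3000000123ULL;
 *	u32 rem = do_div(ns, 1000000000);	(now ns == 3, rem == 123)
 */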
#ifdef __ARMEB__
#define __xh "r0"
#define __xl "r1"
#else
#define __xl "r0"
#define __xh "r1"
#endif
#define __do_div_asm(n, base) \
({ \
register unsigned int __base asm("r4") = base; \
register unsigned long long __n asm("r0") = n; \
register unsigned long long __res asm("r2"); \
register unsigned int __rem asm(__xh); \
asm( __asmeq("%0", __xh) \
__asmeq("%1", "r2") \
__asmeq("%2", "r0") \
__asmeq("%3", "r4") \
"bl __do_div64" \
: "=r" (__rem), "=r" (__res) \
: "r" (__n), "r" (__base) \
: "ip", "lr", "cc"); \
n = __res; \
__rem; \
})
#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
/*
* gcc versions earlier than 4.0 are simply too problematic for the
* optimized implementation below. First there is gcc PR 15089, which
* tends to trigger on more complex constructs: spurious .global __udivsi3
* directives are inserted even if none of those symbols is referenced in
* the generated code. Moreover, those gcc versions are not able to do
* constant propagation on long long values anyway.
*/
#define do_div(n, base) __do_div_asm(n, base)
#elif __GNUC__ >= 4
#include <asm/bug.h>
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
* multiplications instead which is much faster. And yet only if compiling
* for ARMv4 or higher (we need umull/umlal) and if the gcc version is
* sufficiently recent to perform proper long long constant propagation.
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
#define do_div(n, base) \
({ \
unsigned int __r, __b = (base); \
if (!__builtin_constant_p(__b) || __b == 0 || \
(__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \
/* non-constant divisor (or zero): slow path */ \
__r = __do_div_asm(n, __b); \
} else if ((__b & (__b - 1)) == 0) { \
/* Trivial: __b is constant and a power of 2 */ \
/* gcc does the right thing with this code. */ \
__r = n; \
__r &= (__b - 1); \
n /= __b; \
} else { \
/* Multiply by inverse of __b: n/b = n*(p/b)/p */ \
/* We rely on the fact that most of this code gets */ \
/* optimized away at compile time due to constant */ \
/* propagation and only a couple inline assembly */ \
/* instructions should remain. Better avoid any */ \
/* code construct that might prevent that. */ \
unsigned long long __res, __x, __t, __m, __n = n; \
unsigned int __c, __p, __z = 0; \
/* preserve low part of n for remainder computation */ \
__r = __n; \
/* determine number of bits to represent __b */ \
__p = 1 << __div64_fls(__b); \
/* compute __m = ((__p << 64) + __b - 1) / __b */ \
__m = (~0ULL / __b) * __p; \
__m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \
/* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \
__x = ~0ULL / __b * __b - 1; \
__res = (__m & 0xffffffff) * (__x & 0xffffffff); \
__res >>= 32; \
__res += (__m & 0xffffffff) * (__x >> 32); \
__t = __res; \
__res += (__x & 0xffffffff) * (__m >> 32); \
__t = (__res < __t) ? (1ULL << 32) : 0; \
__res = (__res >> 32) + __t; \
__res += (__m >> 32) * (__x >> 32); \
__res /= __p; \
/* Now sanitize and optimize what we've got. */ \
if (~0ULL % (__b / (__b & -__b)) == 0) { \
/* those cases can be simplified with: */ \
__n /= (__b & -__b); \
__m = ~0ULL / (__b / (__b & -__b)); \
__p = 1; \
__c = 1; \
} else if (__res != __x / __b) { \
/* We can't get away without a correction */ \
/* to compensate for bit truncation errors. */ \
/* To avoid it we'd need an additional bit */ \
/* to represent __m which would overflow it. */ \
/* Instead we do m=p/b and n/b=(n*m+m)/p. */ \
__c = 1; \
/* Compute __m = (__p << 64) / __b */ \
__m = (~0ULL / __b) * __p; \
__m += ((~0ULL % __b + 1) * __p) / __b; \
} else { \
/* Reduce __m/__p, and try to clear bit 31 */ \
/* of __m when possible otherwise that'll */ \
/* need extra overflow handling later. */ \
unsigned int __bits = -(__m & -__m); \
__bits |= __m >> 32; \
__bits = (~__bits) << 1; \
/* If __bits == 0 then setting bit 31 is */ \
/* unavoidable. Simply apply the maximum */ \
/* possible reduction in that case. */ \
/* Otherwise the MSB of __bits indicates the */ \
/* best reduction we should apply. */ \
if (!__bits) { \
__p /= (__m & -__m); \
__m /= (__m & -__m); \
} else { \
__p >>= __div64_fls(__bits); \
__m >>= __div64_fls(__bits); \
} \
/* No correction needed. */ \
__c = 0; \
} \
/* Now we have a combination of 2 conditions: */ \
/* 1) whether or not we need a correction (__c), and */ \
/* 2) whether or not there might be an overflow in */ \
/* the cross product (__m & ((1<<63) | (1<<31))) */ \
/* Select the best insn combination to perform the */ \
/* actual __m * __n / (__p << 64) operation. */ \
if (!__c) { \
asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
__res = __m; \
asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "+&r" (__res) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} else { \
asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"cmn %Q0, %Q1\n\t" \
"adcs %R0, %R0, %R1\n\t" \
"adc %Q0, %3, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n), "r" (__z) \
: "cc" ); \
} \
if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \
"umlal %R0, %Q0, %Q1, %R2\n\t" \
"mov %R0, #0\n\t" \
"umlal %Q0, %R0, %R1, %R2" \
: "+&r" (__res) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} else { \
asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \
"umlal %R0, %1, %Q2, %R3\n\t" \
"mov %R0, #0\n\t" \
"adds %Q0, %1, %Q0\n\t" \
"adc %R0, %R0, #0\n\t" \
"umlal %Q0, %R0, %R2, %R3" \
: "+&r" (__res), "+&r" (__z) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} \
__res /= __p; \
/* The remainder can be computed with 32-bit regs */ \
/* only, and gcc is good at that. */ \
{ \
unsigned int __res0 = __res; \
unsigned int __b0 = __b; \
__r -= __res0 * __b0; \
} \
/* BUG_ON(__r >= __b || __res * __b + __r != n); */ \
n = __res; \
} \
__r; \
})
/* our own fls implementation to make sure constant propagation is fine */
#define __div64_fls(bits) \
({ \
unsigned int __left = (bits), __nr = 0; \
if (__left & 0xffff0000) __nr += 16, __left >>= 16; \
if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \
if (__left & 0x000000f0) __nr += 4, __left >>= 4; \
if (__left & 0x0000000c) __nr += 2, __left >>= 2; \
if (__left & 0x00000002) __nr += 1; \
__nr; \
})
#endif
#endif

View file

@ -0,0 +1,14 @@
#ifndef ASMARM_DMA_CONTIGUOUS_H
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
#ifdef CONFIG_DMA_CMA
#include <linux/types.h>
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#endif
#endif
#endif

View file

@ -0,0 +1,37 @@
#ifndef ASMARM_DMA_IOMMU_H
#define ASMARM_DMA_IOMMU_H
#ifdef __KERNEL__
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
#include <linux/kref.h>
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
unsigned long **bitmaps; /* array of bitmaps */
unsigned int nr_bitmaps; /* nr of elements in array */
unsigned int extensions;
size_t bitmap_size; /* size of a single bitmap */
size_t bits; /* per bitmap */
dma_addr_t base;
spinlock_t lock;
struct kref kref;
};
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
#endif /* __KERNEL__ */
#endif

View file

@ -0,0 +1,332 @@
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H
#ifdef __KERNEL__
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
else
return __generic_dma_ops(dev);
}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
dev->archdata.dma_ops = ops;
}
#include <asm-generic/dma-mapping-common.h>
static inline int dma_set_mask(struct device *dev, u64 mask)
{
return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
/*
* dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
* functions used internally by the DMA-mapping API to provide DMA
* addresses. They must not be used by drivers.
*/
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
if (dev)
pfn -= dev->dma_pfn_offset;
return (dma_addr_t)__pfn_to_bus(pfn);
}
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
unsigned long pfn = __bus_to_pfn(addr);
if (dev)
pfn += dev->dma_pfn_offset;
return pfn;
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
if (dev) {
unsigned long pfn = dma_to_pfn(dev, addr);
return phys_to_virt(__pfn_to_phys(pfn));
}
return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
if (dev)
return pfn_to_dma(dev, virt_to_pfn(addr));
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
return __arch_pfn_to_dma(dev, pfn);
}
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_pfn(dev, addr);
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_virt(dev, addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
return __arch_virt_to_dma(dev, addr);
}
#endif
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
set_dma_ops(dev, &arm_coherent_dma_ops);
return 0;
}
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
unsigned int offset = paddr & ~PAGE_MASK;
return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
unsigned int offset = dev_addr & ~PAGE_MASK;
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
u64 limit, mask;
if (!dev->dma_mask)
return 0;
mask = *dev->dma_mask;
limit = (mask + 1) & ~mask;
if (limit && size > limit)
return 0;
if ((addr | (addr + size - 1)) & ~mask)
return 0;
return 1;
}
static inline void dma_mark_clean(void *addr, size_t size) { }
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);
return dma_addr == DMA_ERROR_CODE;
}
/*
* Dummy noncoherent implementation. We don't provide a dma_cache_sync
* function so drivers using this API are highlighted with build warnings.
*/
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp)
{
return NULL;
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle)
{
}
extern int dma_supported(struct device *dev, u64 mask);
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
* @attrs: optional attributes that specify mapping properties
*
* Allocate some memory for a device for performing DMA. This function
* allocates pages, and will return the CPU-viewed address, and sets @handle
* to be the device-viewed address.
*/
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs);
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
/**
* arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @attrs: optional attributes that specify mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
* arm_dma_alloc().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after execution of this call are illegal.
*/
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs);
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
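/*
 * Usage sketch (illustrative, hypothetical driver context): the usual
 * coherent-buffer lifetime looks like this, with "dev" pointing at the
 * claiming device:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	(program the device with "bus", access the buffer through "cpu")
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 */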
/**
* arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
* @attrs: optional attributes that specify mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
/*
* This can be called during early boot to increase the size of the atomic
* coherent DMA pool above the default value of 256KiB. It must be called
* before postcore_initcall.
*/
extern void __init init_dma_coherent_pool_size(unsigned long size);
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
*
* On the SA-1111, a bug limits DMA to only certain regions of RAM.
* On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
* On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
*
* The following are helper functions used by the dmabounce subsystem
*
*/
/**
* dmabounce_register_dev
*
* @dev: valid struct device pointer
* @small_buf_size: size of buffers to use with small buffer pool
* @large_buf_size: size of buffers to use with large buffer pool (can be 0)
* @needs_bounce_fn: called to determine whether buffer needs bouncing
*
* This function should be called by low-level platform code to register
* a device as requiring DMA buffer bouncing. The function will allocate
* appropriate DMA pools for the device.
*/
extern int dmabounce_register_dev(struct device *, unsigned long,
unsigned long, int (*)(struct device *, dma_addr_t, size_t));
/**
* dmabounce_unregister_dev
*
* @dev: valid struct device pointer
*
* This function should be called by low-level platform code when device
* that was previously registered with dmabounce_register_dev is removed
* from the system.
*
*/
extern void dmabounce_unregister_dev(struct device *);
/*
* The scatter list versions of the above methods.
*/
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
#endif /* __KERNEL__ */
#endif

151
arch/arm/include/asm/dma.h Normal file
View file

@ -0,0 +1,151 @@
#ifndef __ASM_ARM_DMA_H
#define __ASM_ARM_DMA_H
/*
* This is the maximum virtual address which can be DMA'd from.
*/
#ifndef CONFIG_ZONE_DMA
#define MAX_DMA_ADDRESS 0xffffffffUL
#else
#define MAX_DMA_ADDRESS ({ \
extern phys_addr_t arm_dma_zone_size; \
arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
#endif
#ifdef CONFIG_ISA_DMA_API
/*
* This is used to support drivers written for the x86 ISA DMA API.
* It should not be re-used except for that purpose.
*/
#include <linux/spinlock.h>
#include <asm/scatterlist.h>
#include <mach/isa-dma.h>
/*
* The DMA modes reflect the settings for the ISA DMA controller
*/
#define DMA_MODE_MASK 0xcc
#define DMA_MODE_READ 0x44
#define DMA_MODE_WRITE 0x48
#define DMA_MODE_CASCADE 0xc0
#define DMA_AUTOINIT 0x10
extern raw_spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
*/
#define clear_dma_ff(chan)
/* Set only the page register bits of the transfer address.
*
* NOTE: This is an architecture specific function, and should
* be hidden from the drivers
*/
extern void set_dma_page(unsigned int chan, char pagenr);
/* Request a DMA channel
*
* Some architectures may need to allocate an interrupt
*/
extern int request_dma(unsigned int chan, const char * device_id);
/* Free a DMA channel
*
* Some architectures may need to free an interrupt
*/
extern void free_dma(unsigned int chan);
/* Enable DMA for this channel
*
* On some architectures, this may have other side effects like
* enabling an interrupt and setting the DMA registers.
*/
extern void enable_dma(unsigned int chan);
/* Disable DMA for this channel
*
* On some architectures, this may have other side effects like
* disabling an interrupt or whatever.
*/
extern void disable_dma(unsigned int chan);
/* Test whether the specified channel has an active DMA transfer
*/
extern int dma_channel_active(unsigned int chan);
/* Set the DMA scatter gather list for this channel
*
* This should not be called if a DMA channel is enabled,
* especially since some DMA architectures don't update the
* DMA address immediately, but defer it until enable_dma() is called.
*/
extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
/* Set the DMA address for this channel
*
* This should not be called if a DMA channel is enabled,
* especially since some DMA architectures don't update the
* DMA address immediately, but defer it until enable_dma() is called.
*/
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
__set_dma_addr(chan, (void *)__bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
* This should not be called if a DMA channel is enabled,
* especially since some DMA architectures don't update the
* DMA count immediately, but defer it until enable_dma() is called.
*/
extern void set_dma_count(unsigned int chan, unsigned long count);
/* Set the transfer direction for this channel
*
* This should not be called if a DMA channel is enabled,
* especially since some DMA architectures don't update the
* DMA transfer direction immediately, but defer it until
* enable_dma() is called.
*/
extern void set_dma_mode(unsigned int chan, unsigned int mode);
/* Set the transfer speed for this channel
*/
extern void set_dma_speed(unsigned int chan, int cycle_ns);
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*/
extern int get_dma_residue(unsigned int chan);
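/*
 * Usage sketch (illustrative): a typical ISA-style transfer setup with
 * the flip-flop lock held, assuming the channel was obtained via
 * request_dma(); "chan", "buf_bus_addr" and "len" are made up.
 *
 *	unsigned long flags = claim_dma_lock();
 *
 *	disable_dma(chan);
 *	clear_dma_ff(chan);
 *	set_dma_mode(chan, DMA_MODE_READ);
 *	set_dma_addr(chan, buf_bus_addr);
 *	set_dma_count(chan, len);
 *	enable_dma(chan);
 *	release_dma_lock(flags);
 */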
#ifndef NO_DMA
#define NO_DMA 255
#endif
#endif /* CONFIG_ISA_DMA_API */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __ASM_ARM_DMA_H */

View file

@ -0,0 +1,109 @@
/*
* arch/arm/include/asm/domain.h
*
* Copyright (C) 1999 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_DOMAIN_H
#define __ASM_PROC_DOMAIN_H
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#endif
/*
* Domain numbers
*
* DOMAIN_IO - domain 2 includes all IO only
* DOMAIN_USER - domain 1 includes all user memory only
* DOMAIN_KERNEL - domain 0 includes all kernel memory only
*
* The domain numbering depends on whether we support 36-bit physical
* addresses for I/O or not. Addresses above the 32-bit boundary can
* only be mapped using supersections and supersections can only
* be set for domain 0. We could just default to DOMAIN_IO as zero,
* but there may be systems with supersection support and no 36-bit
* addressing. In such cases, we want to map system memory with
* supersections to reduce TLB misses and footprint.
*
* 36-bit addressing and supersections are only available on
* CPUs based on ARMv6+ or the Intel XSC3 core.
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
/*
* Domain types
*/
#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT 1
#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER 3
#else
#define DOMAIN_MANAGER 1
#endif
#define domain_val(dom,type) ((type) << (2*(dom)))
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_USE_DOMAINS
static inline void set_domain(unsigned val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
: : "r" (val));
isb();
}
#define modify_domain(dom,type) \
do { \
struct thread_info *thread = current_thread_info(); \
unsigned int domain = thread->cpu_domain; \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \
thread->cpu_domain = domain | domain_val(dom, type); \
set_domain(thread->cpu_domain); \
} while (0)
#else
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
/*
* Generate the T (user) versions of the LDR/STR and related
* instructions (inline assembly)
*/
#ifdef CONFIG_CPU_USE_DOMAINS
#define TUSER(instr) #instr "t"
#else
#define TUSER(instr) #instr
#endif
#else /* __ASSEMBLY__ */
/*
* Generate the T (user) versions of the LDR/STR and related
* instructions
*/
#ifdef CONFIG_CPU_USE_DOMAINS
#define TUSER(instr) instr ## t
#else
#define TUSER(instr) instr
#endif
#endif /* __ASSEMBLY__ */
#endif /* !__ASM_PROC_DOMAIN_H */

View file

@ -0,0 +1,218 @@
/*
* arch/arm/include/asm/ecard.h
*
* definitions for expansion cards
*
* This is a new system as from Linux 1.2.3
*
* Changelog:
* 11-12-1996 RMK Further minor improvements
* 12-09-1997 RMK Added interrupt enable/disable for card level
*
* Reference: Acorn's RISC OS 3 Programmer's Reference Manuals.
*/
#ifndef __ASM_ECARD_H
#define __ASM_ECARD_H
/*
* Currently understood cards (but not necessarily
* supported):
* Manufacturer Product ID
*/
#define MANU_ACORN 0x0000
#define PROD_ACORN_SCSI 0x0002
#define PROD_ACORN_ETHER1 0x0003
#define PROD_ACORN_MFM 0x000b
#define MANU_ANT2 0x0011
#define PROD_ANT_ETHER3 0x00a4
#define MANU_ATOMWIDE 0x0017
#define PROD_ATOMWIDE_3PSERIAL 0x0090
#define MANU_IRLAM_INSTRUMENTS 0x001f
#define MANU_IRLAM_INSTRUMENTS_ETHERN 0x5678
#define MANU_OAK 0x0021
#define PROD_OAK_SCSI 0x0058
#define MANU_MORLEY 0x002b
#define PROD_MORLEY_SCSI_UNCACHED 0x0067
#define MANU_CUMANA 0x003a
#define PROD_CUMANA_SCSI_2 0x003a
#define PROD_CUMANA_SCSI_1 0x00a0
#define MANU_ICS 0x003c
#define PROD_ICS_IDE 0x00ae
#define MANU_ICS2 0x003d
#define PROD_ICS2_IDE 0x00ae
#define MANU_SERPORT 0x003f
#define PROD_SERPORT_DSPORT 0x00b9
#define MANU_ARXE 0x0041
#define PROD_ARXE_SCSI 0x00be
#define MANU_I3 0x0046
#define PROD_I3_ETHERLAN500 0x00d4
#define PROD_I3_ETHERLAN600 0x00ec
#define PROD_I3_ETHERLAN600A 0x011e
#define MANU_ANT 0x0053
#define PROD_ANT_ETHERM 0x00d8
#define PROD_ANT_ETHERB 0x00e4
#define MANU_ALSYSTEMS 0x005b
#define PROD_ALSYS_SCSIATAPI 0x0107
#define MANU_MCS 0x0063
#define PROD_MCS_CONNECT32 0x0125
#define MANU_EESOX 0x0064
#define PROD_EESOX_SCSI2 0x008c
#define MANU_YELLOWSTONE 0x0096
#define PROD_YELLOWSTONE_RAPIDE32 0x0120
#ifdef ECARD_C
#define CONST
#else
#define CONST const
#endif
#define MAX_ECARDS 9
struct ecard_id { /* Card ID structure */
unsigned short manufacturer;
unsigned short product;
void *data;
};
struct in_ecid { /* Packed card ID information */
unsigned short product; /* Product code */
unsigned short manufacturer; /* Manufacturer code */
unsigned char id:4; /* Simple ID */
unsigned char cd:1; /* Chunk dir present */
unsigned char is:1; /* Interrupt status pointers */
unsigned char w:2; /* Width */
unsigned char country; /* Country */
unsigned char irqmask; /* IRQ mask */
unsigned char fiqmask; /* FIQ mask */
unsigned long irqoff; /* IRQ offset */
unsigned long fiqoff; /* FIQ offset */
};
typedef struct expansion_card ecard_t;
typedef unsigned long *loader_t;
typedef struct expansion_card_ops { /* Card handler routines */
void (*irqenable)(ecard_t *ec, int irqnr);
void (*irqdisable)(ecard_t *ec, int irqnr);
int (*irqpending)(ecard_t *ec);
void (*fiqenable)(ecard_t *ec, int fiqnr);
void (*fiqdisable)(ecard_t *ec, int fiqnr);
int (*fiqpending)(ecard_t *ec);
} expansioncard_ops_t;
#define ECARD_NUM_RESOURCES (6)
#define ECARD_RES_IOCSLOW (0)
#define ECARD_RES_IOCMEDIUM (1)
#define ECARD_RES_IOCFAST (2)
#define ECARD_RES_IOCSYNC (3)
#define ECARD_RES_MEMC (4)
#define ECARD_RES_EASI (5)
#define ecard_resource_start(ec,nr) ((ec)->resource[nr].start)
#define ecard_resource_end(ec,nr) ((ec)->resource[nr].end)
#define ecard_resource_len(ec,nr) ((ec)->resource[nr].end - \
(ec)->resource[nr].start + 1)
#define ecard_resource_flags(ec,nr) ((ec)->resource[nr].flags)
/*
* This contains all the info needed on an expansion card
*/
struct expansion_card {
struct expansion_card *next;
struct device dev;
struct resource resource[ECARD_NUM_RESOURCES];
/* Public data */
void __iomem *irqaddr; /* address of IRQ register */
void __iomem *fiqaddr; /* address of FIQ register */
unsigned char irqmask; /* IRQ mask */
unsigned char fiqmask; /* FIQ mask */
unsigned char claimed; /* Card claimed? */
unsigned char easi; /* EASI card */
void *irq_data; /* Data for use for IRQ by card */
void *fiq_data; /* Data for use for FIQ by card */
const expansioncard_ops_t *ops; /* Enable/Disable Ops for card */
CONST unsigned int slot_no; /* Slot number */
CONST unsigned int dma; /* DMA number (for request_dma) */
CONST unsigned int irq; /* IRQ number (for request_irq) */
CONST unsigned int fiq; /* FIQ number (for request_irq) */
CONST struct in_ecid cid; /* Card Identification */
/* Private internal data */
const char *card_desc; /* Card description */
CONST loader_t loader; /* loader program */
u64 dma_mask;
};
void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data);
struct in_chunk_dir {
unsigned int start_offset;
union {
unsigned char string[256];
unsigned char data[1];
} d;
};
/*
* Read a chunk from an expansion card
* cd : where to put read data
* ec : expansion card info struct
* id : id number to find
* num: skip this many matches, i.e. find the (num+1)'th matching id.
*/
extern int ecard_readchunk (struct in_chunk_dir *cd, struct expansion_card *ec, int id, int num);
/*
* Request and release ecard resources
*/
extern int ecard_request_resources(struct expansion_card *ec);
extern void ecard_release_resources(struct expansion_card *ec);
void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
unsigned long offset, unsigned long maxsize);
#define ecardm_iounmap(__ec, __addr) devm_iounmap(&(__ec)->dev, __addr)
extern struct bus_type ecard_bus_type;
#define ECARD_DEV(_d) container_of((_d), struct expansion_card, dev)
struct ecard_driver {
int (*probe)(struct expansion_card *, const struct ecard_id *id);
void (*remove)(struct expansion_card *);
void (*shutdown)(struct expansion_card *);
const struct ecard_id *id_table;
unsigned int id;
struct device_driver drv;
};
#define ECARD_DRV(_d) container_of((_d), struct ecard_driver, drv)
#define ecard_set_drvdata(ec,data) dev_set_drvdata(&(ec)->dev, (data))
#define ecard_get_drvdata(ec) dev_get_drvdata(&(ec)->dev)
int ecard_register_driver(struct ecard_driver *);
void ecard_remove_driver(struct ecard_driver *);
#endif
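
To show how the pieces above fit together, here is a minimal driver sketch against this interface; the driver name, ID pair, and probe body are hypothetical, and the { 0xffff, 0xffff } terminator is an assumption modelled on existing ecard drivers.

/* Hypothetical driver skeleton; names, IDs and the probe body are
 * illustrative only. */
static const struct ecard_id example_ids[] = {
	{ MANU_ACORN, PROD_ACORN_SCSI },
	{ 0xffff, 0xffff }		/* assumed sentinel terminator */
};

static int example_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	int ret = ecard_request_resources(ec);

	if (ret)
		return ret;
	ecard_set_drvdata(ec, NULL);	/* per-card state would go here */
	return 0;
}

static void example_remove(struct expansion_card *ec)
{
	ecard_release_resources(ec);
}

static struct ecard_driver example_driver = {
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
	.drv		= { .name = "example" },
};

/* registered from module init: ecard_register_driver(&example_driver); */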

View file

@ -0,0 +1,48 @@
/*
* Copyright 2011 Calxeda, Inc.
* Based on PPC version Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ASM_EDAC_H
#define ASM_EDAC_H
/*
* ECC atomic, DMA, SMP and interrupt safe scrub function.
* Implements the per-arch atomic_scrub() that EDAC uses for software
* ECC scrubbing. It reads memory and then writes back the original
* value, allowing the hardware to detect and correct memory errors.
*/
static inline void atomic_scrub(void *va, u32 size)
{
#if __LINUX_ARM_ARCH__ >= 6
unsigned int *virt_addr = va;
unsigned int temp, temp2;
unsigned int i;
for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
/* Very carefully read and write to memory atomically
* so we are interrupt, DMA and SMP safe.
*/
__asm__ __volatile__("\n"
"1: ldrex %0, [%2]\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b\n"
: "=&r"(temp), "=&r"(temp2)
: "r"(virt_addr)
: "cc");
}
#endif
}
#endif
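
As a usage sketch (the caller and buffer are hypothetical; atomic_scrub() itself is as defined above), an EDAC driver would pass a kernel-virtual address and a byte count:

/* Illustrative caller; 'mapped_page' must be a kernel-virtual mapping. */
static void example_scrub_page(void *mapped_page)
{
	atomic_scrub(mapped_page, PAGE_SIZE);	/* size is in bytes */
}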

arch/arm/include/asm/elf.h Normal file
View file

@ -0,0 +1,138 @@
#ifndef __ASMARM_ELF_H
#define __ASMARM_ELF_H
#include <asm/hwcap.h>
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
struct task_struct;
typedef unsigned long elf_greg_t;
typedef unsigned long elf_freg_t[3];
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
#define EF_ARM_EABI_VER2 0x02000000
#define EF_ARM_EABI_VER3 0x03000000
#define EF_ARM_EABI_VER4 0x04000000
#define EF_ARM_EABI_VER5 0x05000000
#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
#define EF_ARM_PIC 0x00000020 /* ABI 0 */
#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
#define EF_ARM_HASENTRY 0x00000002 /* All */
#define EF_ARM_RELEXEC 0x00000001 /* All */
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_TARGET1 38
#define R_ARM_V4BX 40
#define R_ARM_PREL31 42
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
#define R_ARM_THM_MOVW_ABS_NC 47
#define R_ARM_THM_MOVT_ABS 48
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#ifdef __ARMEB__
#define ELF_DATA ELFDATA2MSB
#else
#define ELF_DATA ELFDATA2LSB
#endif
#define ELF_ARCH EM_ARM
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*
* For now we just provide a fairly general string that describes the
* processor family. This could be made more specific later if someone
* implemented optimisations that require it. 26-bit CPUs give you
* "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't
* supported). 32-bit CPUs give you "v3[lb]" for anything based on an
* ARM6 or ARM7 core and "armv4[lb]" for anything based on a StrongARM-1
* core.
*/
#define ELF_PLATFORM_SIZE 8
#define ELF_PLATFORM (elf_platform)
extern char elf_platform[];
struct elf32_hdr;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch
#define vmcore_elf64_check_arch(x) (0)
extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
struct task_struct;
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_MMU
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
int arch_setup_additional_pages(struct linux_binprm *, int);
#endif
#endif

View file

@ -0,0 +1,39 @@
#include <asm/assembler.h>
/*
* Interrupt handling. Preserves r7, r8, r9
*/
.macro arch_irq_handler_default
get_irqnr_preamble r6, lr
1: get_irqnr_and_base r0, r2, r6, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrne lr, BSYM(1b)
bne asm_do_IRQ
#ifdef CONFIG_SMP
/*
* XXX
*
* this macro assumes that irqstat (r2) and base (r6) are
* preserved from get_irqnr_and_base above
*/
ALT_SMP(test_for_ipi r0, r2, r6, lr)
ALT_UP_B(9997f)
movne r1, sp
adrne lr, BSYM(1b)
bne do_IPI
#endif
9997:
.endm
.macro arch_irq_handler, symbol_name
.align 5
.global \symbol_name
\symbol_name:
mov r8, lr
arch_irq_handler_default
ret r8
.endm

View file

@ -0,0 +1,19 @@
/*
* Annotations for marking C functions as exception handlers.
*
* These should only be used for C functions that are called from the low
* level exception entry code and not any intervening C code.
*/
#ifndef __ASM_ARM_EXCEPTION_H
#define __ASM_ARM_EXCEPTION_H
#include <linux/ftrace.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#endif
#endif /* __ASM_ARM_EXCEPTION_H */

arch/arm/include/asm/fb.h Normal file
View file

@ -0,0 +1,19 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */

View file

@ -0,0 +1,56 @@
/*
* arch/arm/include/asm/fiq.h
*
* Support for FIQ on ARM architectures.
* Written by Philip Blundell <philb@gnu.org>, 1998
* Re-written by Russell King
*
* NOTE: The FIQ mode registers are not magically preserved across
* suspend/resume.
*
* Drivers which require these registers to be preserved across power
* management operations must implement appropriate suspend/resume handlers to
* save and restore them.
*/
#ifndef __ASM_FIQ_H
#define __ASM_FIQ_H
#include <asm/ptrace.h>
struct fiq_handler {
struct fiq_handler *next;
/* Name
*/
const char *name;
/* Called to ask driver to relinquish/
* reacquire FIQ
* return zero to accept, or -<errno>
*/
int (*fiq_op)(void *, int relinquish);
/* data for the relinquish/reacquire functions
*/
void *dev_id;
};
extern int claim_fiq(struct fiq_handler *f);
extern void release_fiq(struct fiq_handler *f);
extern void set_fiq_handler(void *start, unsigned int length);
extern void enable_fiq(int fiq);
extern void disable_fiq(int fiq);
/* helpers defined in fiqasm.S: */
extern void __set_fiq_regs(unsigned long const *regs);
extern void __get_fiq_regs(unsigned long *regs);
static inline void set_fiq_regs(struct pt_regs const *regs)
{
__set_fiq_regs(&regs->ARM_r8);
}
static inline void get_fiq_regs(struct pt_regs *regs)
{
__get_fiq_regs(&regs->ARM_r8);
}
#endif
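
A minimal claim-and-install sequence, assuming a hypothetical driver whose self-contained handler code sits between the example_fiq_start/example_fiq_end symbols, might look like this:

/* Hypothetical driver code; symbol names are placeholders. */
extern unsigned char example_fiq_start, example_fiq_end;

static int example_fiq_op(void *dev_id, int relinquish)
{
	return relinquish ? -EBUSY : 0;	/* refuse to give the FIQ up */
}

static struct fiq_handler example_fh = {
	.name	= "example",
	.fiq_op	= example_fiq_op,
};

static int example_install_fiq(int fiq_irq)
{
	struct pt_regs regs = { };
	int ret;

	ret = claim_fiq(&example_fh);
	if (ret)
		return ret;

	set_fiq_handler(&example_fiq_start,
			&example_fiq_end - &example_fiq_start);
	regs.ARM_r8 = 0;		/* preload the banked FIQ registers */
	set_fiq_regs(&regs);
	enable_fiq(fiq_irq);
	return 0;
}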

View file

@ -0,0 +1,33 @@
/*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ASM_FIQ_GLUE_H
#define __ASM_FIQ_GLUE_H
struct fiq_glue_handler {
void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
void (*resume)(struct fiq_glue_handler *h);
};
typedef void (*fiq_return_handler_t)(void);
int fiq_glue_register_handler(struct fiq_glue_handler *handler);
int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
#ifdef CONFIG_FIQ_GLUE
void fiq_glue_resume(void);
#else
static inline void fiq_glue_resume(void) {}
#endif
#endif
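
For illustration, a client would embed a fiq_glue_handler and register it once; the handler body here is a placeholder:

/* Illustrative fiq_glue client. */
static void example_glue_fiq(struct fiq_glue_handler *h, void *regs,
			     void *svc_sp)
{
	/* runs in FIQ mode: must not block or take locks */
}

static struct fiq_glue_handler example_glue = {
	.fiq = example_glue_fiq,
};

static int __init example_glue_init(void)
{
	return fiq_glue_register_handler(&example_glue);
}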

View file

@ -0,0 +1,70 @@
/*
* Copyright (C) 2012 Samsung Electronics.
* Kyungmin Park <kyungmin.park@samsung.com>
* Tomasz Figa <t.figa@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FIRMWARE_H
#define __ASM_ARM_FIRMWARE_H
#include <linux/bug.h>
/*
* struct firmware_ops
*
* A structure to specify available firmware operations.
*
* A filled up structure can be registered with register_firmware_ops().
*/
struct firmware_ops {
/*
* Inform the firmware we intend to enter CPU idle mode
*/
int (*prepare_idle)(void);
/*
* Enters CPU idle mode
*/
int (*do_idle)(void);
/*
* Sets boot address of specified physical CPU
*/
int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
/*
* Boots specified physical CPU
*/
int (*cpu_boot)(int cpu);
/*
* Initializes L2 cache
*/
int (*l2x0_init)(void);
};
/* Global pointer for current firmware_ops structure, can't be NULL. */
extern const struct firmware_ops *firmware_ops;
/*
* call_firmware_op(op, ...)
*
* Checks if firmware operation is present and calls it,
* otherwise returns -ENOSYS
*/
#define call_firmware_op(op, ...) \
((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS))
/*
* register_firmware_ops(ops)
*
* A function to register platform firmware_ops struct.
*/
static inline void register_firmware_ops(const struct firmware_ops *ops)
{
BUG_ON(!ops);
firmware_ops = ops;
}
#endif
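
A sketch of how a platform might wire this up; the secure-monitor details behind example_set_cpu_boot_addr are assumptions, only the registration and call pattern follow from the header:

/* Hypothetical platform code. */
static int example_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
{
	/* would trap into secure firmware here */
	return 0;
}

static const struct firmware_ops example_firmware_ops = {
	.set_cpu_boot_addr = example_set_cpu_boot_addr,
};

static void __init example_firmware_init(void)
{
	register_firmware_ops(&example_firmware_ops);
}

/* Later, generic code can attempt the call and fall back on -ENOSYS:
 *
 *	if (call_firmware_op(set_cpu_boot_addr, cpu, boot_addr) == -ENOSYS)
 *		... use the legacy boot path ...
 */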

View file

@ -0,0 +1,28 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#define FIXADDR_START 0xffc00000UL
#define FIXADDR_TOP 0xffe00000UL
#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
static inline unsigned long fix_to_virt(const unsigned int idx)
{
if (idx >= FIX_KMAP_NR_PTES)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned int virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif
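
To make the arithmetic concrete (assuming PAGE_SHIFT == 12): index 2 maps to 0xffc00000 + (2 << 12), and the reverse conversion recovers the index:

/* Worked example of the conversions above. */
static void example_fixmap_math(void)
{
	unsigned long va = fix_to_virt(2);	/* 0xffc00000 + 0x2000 == 0xffc02000 */
	unsigned int idx = virt_to_fix(va);	/* (va - FIXADDR_START) >> 12 == 2 */

	(void)va;
	(void)idx;
}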

View file

@ -0,0 +1,16 @@
/*
* arch/arm/include/asm/flat.h -- uClinux flat-format executables
*/
#ifndef __ARM_FLAT_H__
#define __ARM_FLAT_H__
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
#endif /* __ARM_FLAT_H__ */

View file

@ -0,0 +1,148 @@
/*
* arch/arm/include/asm/floppy.h
*
* Copyright (C) 1996-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Note that we don't touch FLOPPY_DMA nor FLOPPY_IRQ here
*/
#ifndef __ASM_ARM_FLOPPY_H
#define __ASM_ARM_FLOPPY_H
#if 0
#include <mach/floppy.h>
#endif
#define fd_outb(val,port) \
do { \
if ((port) == FD_DOR) \
fd_setdor((val)); \
else \
outb((val),(port)); \
} while(0)
#define fd_inb(port) inb((port))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
static inline int fd_dma_setup(void *data, unsigned int length,
unsigned int mode, unsigned long addr)
{
set_dma_mode(DMA_FLOPPY, mode);
__set_dma_addr(DMA_FLOPPY, data);
set_dma_count(DMA_FLOPPY, length);
virtual_dma_port = addr;
enable_dma(DMA_FLOPPY);
return 0;
}
#define fd_dma_setup fd_dma_setup
#define fd_request_dma() request_dma(DMA_FLOPPY,"floppy")
#define fd_free_dma() free_dma(DMA_FLOPPY)
#define fd_disable_dma() disable_dma(DMA_FLOPPY)
/* need to clean up dma.h */
#define DMA_FLOPPYDISK DMA_FLOPPY
/* Floppy_selects is the list of DOR's to select drive fd
*
* On initialisation, the floppy list is scanned, and the drives allocated
* in the order that they are found. This is done by seeking the drive
* to a non-zero track, and then restoring it to track 0. If an error occurs,
* then there is no floppy drive present. [to be put back in again]
*/
static unsigned char floppy_selects[2][4] =
{
{ 0x10, 0x21, 0x23, 0x33 },
{ 0x10, 0x21, 0x23, 0x33 }
};
#define fd_setdor(dor) \
do { \
int new_dor = (dor); \
if (new_dor & 0xf0) \
new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3]; \
else \
new_dor &= 0x0c; \
outb(new_dor, FD_DOR); \
} while (0)
/*
* Someday, we'll automatically detect which drives are present...
*/
static inline void fd_scandrives (void)
{
#if 0
int floppy, drive_count;
fd_disable_irq();
raw_cmd = &default_raw_cmd;
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK;
raw_cmd->track = 0;
raw_cmd->rate = ?;
drive_count = 0;
for (floppy = 0; floppy < 4; floppy ++) {
current_drive = drive_count;
/*
* Turn on floppy motor
*/
if (start_motor(redo_fd_request))
continue;
/*
* Set up FDC
*/
fdc_specify();
/*
* Tell FDC to recalibrate
*/
output_byte(FD_RECALIBRATE);
LAST_OUT(UNIT(floppy));
/* wait for command to complete */
if (!successful) {
int i;
for (i = drive_count; i < 3; i--)
floppy_selects[fdc][i] = floppy_selects[fdc][i + 1];
floppy_selects[fdc][3] = 0;
floppy -= 1;
} else
drive_count++;
}
#else
floppy_selects[0][0] = 0x10;
floppy_selects[0][1] = 0x21;
floppy_selects[0][2] = 0x23;
floppy_selects[0][3] = 0x33;
#endif
}
#define FDC1 (0x3f0)
#define FLOPPY0_TYPE 4
#define FLOPPY1_TYPE 4
#define N_FDC 1
#define N_DRIVE 4
#define CROSS_64KB(a,s) (0)
/*
* This allows people to reverse the order of
* fd0 and fd1, in case their hardware is
* strangely connected (as some RiscPCs
* and A5000s seem to be).
*/
static void driveswap(int *ints, int dummy, int dummy2)
{
floppy_selects[0][0] ^= floppy_selects[0][1];
floppy_selects[0][1] ^= floppy_selects[0][0];
floppy_selects[0][0] ^= floppy_selects[0][1];
}
#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
#endif

View file

@ -0,0 +1,94 @@
/*
* arch/arm/include/asm/fncpy.h - helper macros for function body copying
*
* Copyright (C) 2011 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* These macros are intended for use when there is a need to copy a low-level
* function body into special memory.
*
* For example, when reconfiguring the SDRAM controller, the code doing the
* reconfiguration may need to run from SRAM.
*
* NOTE: the copied function body must be entirely self-contained and
* position-independent in order for this to work properly.
*
* NOTE: in order for embedded literals and data to get referenced correctly,
* the alignment of functions must be preserved when copying. To ensure this,
* the source and destination addresses for fncpy() must be aligned to a
* multiple of 8 bytes: you will get a BUG() if this condition is not met.
* You will typically need a ".align 3" directive in the assembler where the
* function to be copied is defined, and ensure that your allocator for the
* destination buffer returns 8-byte-aligned pointers.
*
* Typical usage example:
*
* extern int f(args);
* extern uint32_t size_of_f;
* int (*copied_f)(args);
* void *sram_buffer;
*
* copied_f = fncpy(sram_buffer, &f, size_of_f);
*
* ... later, call the function: ...
*
* copied_f(args);
*
* The size of the function to be copied can't be determined from C:
* this must be determined by other means, such as adding assembler directives
* in the file where f is defined.
*/
#ifndef __ASM_FNCPY_H
#define __ASM_FNCPY_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
/*
* Minimum alignment requirement for the source and destination addresses
* for function copying.
*/
#define FNCPY_ALIGN 8
#define fncpy(dest_buf, funcp, size) ({ \
uintptr_t __funcp_address; \
typeof(funcp) __result; \
\
asm("" : "=r" (__funcp_address) : "0" (funcp)); \
\
/* \
* Ensure alignment of source and destination addresses, \
* disregarding the function's Thumb bit: \
*/ \
BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
\
memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
flush_icache_range((unsigned long)(dest_buf), \
(unsigned long)(dest_buf) + (size)); \
\
asm("" : "=r" (__result) \
: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
\
__result; \
})
#endif /* !__ASM_FNCPY_H */

View file

@ -0,0 +1,93 @@
/*
* arch/arm/include/asm/fpstate.h
*
* Copyright (C) 1995 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FPSTATE_H
#define __ASM_ARM_FPSTATE_H
#ifndef __ASSEMBLY__
/*
* VFP storage area has:
* - FPEXC, FPSCR, FPINST and FPINST2.
* - 16 or 32 double precision data registers
* - an implementation-dependent word of state for FLDMX/FSTMX (pre-ARMv6)
*
* FPEXC will always be non-zero once the VFP has been used in this process.
*/
struct vfp_hard_struct {
#ifdef CONFIG_VFPv3
__u64 fpregs[32];
#else
__u64 fpregs[16];
#endif
#if __LINUX_ARM_ARCH__ < 6
__u32 fpmx_state;
#endif
__u32 fpexc;
__u32 fpscr;
/*
* VFP implementation specific state
*/
__u32 fpinst;
__u32 fpinst2;
#ifdef CONFIG_SMP
__u32 cpu;
#endif
};
union vfp_state {
struct vfp_hard_struct hard;
};
extern void vfp_flush_thread(union vfp_state *);
extern void vfp_release_thread(union vfp_state *);
#define FP_HARD_SIZE 35
struct fp_hard_struct {
unsigned int save[FP_HARD_SIZE]; /* as yet undefined */
};
#define FP_SOFT_SIZE 35
struct fp_soft_struct {
unsigned int save[FP_SOFT_SIZE]; /* undefined information */
};
#define IWMMXT_SIZE 0x98
struct iwmmxt_struct {
unsigned int save[IWMMXT_SIZE / sizeof(unsigned int)];
};
union fp_state {
struct fp_hard_struct hard;
struct fp_soft_struct soft;
#ifdef CONFIG_IWMMXT
struct iwmmxt_struct iwmmxt;
#endif
};
#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
struct crunch_state {
unsigned int mvdx[16][2];
unsigned int mvax[4][3];
unsigned int dspsc[2];
};
#define CRUNCH_SIZE sizeof(struct crunch_state)
#endif
#endif

View file

@ -0,0 +1,59 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
extern void mcount(void);
extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
#ifdef CONFIG_OLD_MCOUNT
bool old_mcount;
#endif
};
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/* With Thumb-2, the recorded addresses have the lsb set */
return addr & ~1;
}
extern void ftrace_caller_old(void);
extern void ftrace_call_old(void);
#endif
#endif
#endif
#ifndef __ASSEMBLY__
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
* return_address uses walk_stackframe to do its work. If both
* CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y walk_stackframe uses unwind
* information. For this to work in the function tracer many functions would
* have to be marked with __notrace. So for now just depend on
* !CONFIG_ARM_UNWIND.
*/
void *return_address(unsigned int);
#else
static inline void *return_address(unsigned int level)
{
return NULL;
}
#endif
#define ftrace_return_address(n) return_address(n)
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */

View file

@ -0,0 +1,166 @@
#ifndef _ASM_ARM_FUTEX_H
#define _ASM_ARM_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
#define __futex_atomic_ex_table(err_reg) \
"3:\n" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4f, 2b, 4f\n" \
" .popsection\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, " err_reg "\n" \
" b 3b\n" \
" .popsection"
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
prefetchw(uaddr); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
"2: strex %2, %0, [%3]\n" \
" teq %2, #0\n" \
" bne 1b\n" \
" mov %0, #0\n" \
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret;
u32 val;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
" ite eq @ explicit IT needed for the 2b label\n"
"2: strexeq %0, %3, [%4]\n"
" movne %0, #0\n"
" teq %0, #0\n"
" bne 1b\n"
__futex_atomic_ex_table("%5")
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
smp_mb();
*uval = val;
return ret;
}
#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
#include <linux/preempt.h>
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
"2: " TUSER(str) " %0, [%3]\n" \
" mov %0, #0\n" \
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 val;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
" it eq @ explicit IT needed for the 2b label\n"
"2: " TUSER(streq) " %3, [%4]\n"
__futex_atomic_ex_table("%5")
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
*uval = val;
return ret;
}
#endif /* !SMP */
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable(); /* implies preempt_disable() */
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable(); /* subsumes preempt_enable() */
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
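
The decode at the top of futex_atomic_op_inuser() implies the packing below; this encoder is a sketch for illustration (the generic FUTEX_OP() macro in linux/futex.h plays this role in practice):

/* Sketch of the field layout implied by the decode above:
 * op:4 (bits 31-28) | cmp:4 (27-24) | oparg:12 (23-12) | cmparg:12 (11-0). */
static inline int example_encode_op(int op, int cmp, int oparg, int cmparg)
{
	return (op << 28) | (cmp << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}

/* example_encode_op(FUTEX_OP_ADD, FUTEX_OP_CMP_GT, 1, 0) round-trips:
 * futex_atomic_op_inuser() recovers op=FUTEX_OP_ADD, oparg=1,
 * cmp=FUTEX_OP_CMP_GT, cmparg=0. Note the left-then-right shifts in
 * the decode sign-extend the two 12-bit argument fields. */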

View file

@ -0,0 +1,166 @@
/*
* arch/arm/include/asm/glue-cache.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_CACHE_H
#define ASM_GLUE_CACHE_H
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v6
# endif
#endif
#if defined(CONFIG_CPU_V7)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v7
# endif
#endif
#if defined(CONFIG_CPU_V7M)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE nop
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
#ifndef __ASSEMBLER__
static inline void nop_flush_icache_all(void) { }
static inline void nop_flush_kern_cache_all(void) { }
static inline void nop_flush_kern_cache_louis(void) { }
static inline void nop_flush_user_cache_all(void) { }
static inline void nop_flush_user_cache_range(unsigned long a,
unsigned long b, unsigned int c) { }
static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
static inline int nop_coherent_user_range(unsigned long a,
unsigned long b) { return 0; }
static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
static inline void nop_dma_flush_range(const void *a, const void *b) { }
static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#endif
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
#endif

View file

@ -0,0 +1,102 @@
/*
* arch/arm/include/asm/glue-df.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_DF_H
#define ASM_GLUE_DF_H
#include <asm/glue.h>
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5t_early - ARMv5 with Thumb early abort handler
* v5tj_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_NOMMU
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER nommu_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
#endif

View file

@ -0,0 +1,57 @@
/*
* arch/arm/include/asm/glue-pf.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PF_H
#define ASM_GLUE_PF_H
#include <asm/glue.h>
/*
* Prefetch Abort Model
* ====================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif

View file

@ -0,0 +1,264 @@
/*
* arch/arm/include/asm/glue-proc.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PROC_H
#define ASM_GLUE_PROC_H
#include <asm/glue.h>
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7M
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7m
# endif
#endif
#ifdef CONFIG_CPU_PJ4B
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_pj4b
# endif
#endif
#ifdef CONFIG_CPU_V7
/*
* Cortex-A9 needs a different suspend/resume function, so we need
* multiple CPU support for ARMv7 anyway.
*/
# undef MULTI_CPU
# define MULTI_CPU
#endif
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
#define cpu_reset __glue(CPU_NAME,_reset)
#define cpu_do_idle __glue(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
#define cpu_do_resume __glue(CPU_NAME,_do_resume)
#endif
#endif

View file

@ -0,0 +1,25 @@
/*
* arch/arm/include/asm/glue.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file provides the glue to stick the processor-specific bits
* into the kernel in an efficient manner. The idea is to use branches
* when we're only targeting one class of TLB, or indirect calls
* when we're targeting multiple classes of TLBs.
*/
#ifdef __KERNEL__
#ifdef __STDC__
#define ____glue(name,fn) name##fn
#else
#define ____glue(name,fn) name/**/fn
#endif
#define __glue(name,fn) ____glue(name,fn)
#endif
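
To see why the extra ____glue() level matters, trace an expansion:

/*
 * Worked expansion: with
 *
 *	#define CPU_NAME cpu_v6
 *
 * the use __glue(CPU_NAME, _proc_init) first expands its arguments
 * (CPU_NAME -> cpu_v6) and only then pastes them via ____glue(),
 * yielding the symbol cpu_v6_proc_init. Pasting directly with ##
 * would glue the unexpanded token instead, producing the bogus
 * symbol CPU_NAME_proc_init.
 */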

View file

@ -0,0 +1,25 @@
#ifndef _ARCH_ARM_GPIO_H
#define _ARCH_ARM_GPIO_H
#if CONFIG_ARCH_NR_GPIO > 0
#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
#endif
/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
#include <asm-generic/gpio.h>
/* The trivial gpiolib dispatchers */
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
#define gpio_cansleep __gpio_cansleep
/*
* Provide a default gpio_to_irq() which should satisfy every case.
* However, some platforms want to do this differently, so allow them
* to override it.
*/
#ifndef gpio_to_irq
#define gpio_to_irq __gpio_to_irq
#endif
#endif /* _ARCH_ARM_GPIO_H */
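
As a consumer-side sketch (the GPIO number and label are made up; the calls are the standard gpiolib ones the dispatchers above resolve to):

/* Illustrative consumer; GPIO 42 is a placeholder. */
static int example_read_button(void)
{
	int ret = gpio_request(42, "button");

	if (ret)
		return ret;
	gpio_direction_input(42);
	ret = gpio_get_value(42);	/* dispatches to __gpio_get_value() */
	gpio_free(42);
	return ret;
}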

View file

@ -0,0 +1,32 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/irq.h>
#define NR_IPI 9
typedef struct {
unsigned int __softirq_pending;
#ifdef CONFIG_SMP
unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#else
#define smp_irq_stat_cpu(cpu) 0
#endif
#define arch_irq_stat_cpu smp_irq_stat_cpu
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
#endif /* __ASM_HARDIRQ_H */

View file

@ -0,0 +1,35 @@
#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
#define __ASM_ARM_HARDWARE_ARM_TIMER_H
/*
* ARM timer implementation, found in Integrator, Versatile and Realview
* platforms. Not all platforms support all registers and bits in these
* registers, so we mark them with A for Integrator AP, C for Integrator
* CP, V for Versatile and R for Realview.
*
* Integrator AP has 16-bit timers; Integrator CP, Versatile and Realview
* can select 16-bit or 32-bit operation via a bit in the control register.
*
* Every SP804 contains two identical timers.
*/
#define TIMER_1_BASE 0x00
#define TIMER_2_BASE 0x20
#define TIMER_LOAD 0x00 /* ACVR rw */
#define TIMER_VALUE 0x04 /* ACVR ro */
#define TIMER_CTRL 0x08 /* ACVR rw */
#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */
#define TIMER_CTRL_32BIT (1 << 1) /* CVR */
#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */
#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */
#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */
#define TIMER_CTRL_IE (1 << 5) /* VR */
#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */
#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */
#define TIMER_INTCLR 0x0c /* ACVR wo */
#define TIMER_RIS 0x10 /* CVR ro */
#define TIMER_MIS 0x14 /* CVR ro */
#define TIMER_BGLOAD 0x18 /* CVR rw */
#endif
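
A sketch of programming one of the two timers as a 32-bit periodic source; the ioremap'd base and the reload value are platform assumptions:

/* Start timer 1 as a 32-bit periodic tick. 'base' is an ioremap'd
 * SP804 and 'reload' a platform-chosen count. */
static void example_sp804_start(void __iomem *base, u32 reload)
{
	void __iomem *t = base + TIMER_1_BASE;

	writel(0, t + TIMER_CTRL);		/* stop while reprogramming */
	writel(1, t + TIMER_INTCLR);		/* clear any stale interrupt */
	writel(reload, t + TIMER_LOAD);
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_IE | TIMER_CTRL_PERIODIC |
	       TIMER_CTRL_ENABLE, t + TIMER_CTRL);
}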

View file

@ -0,0 +1,13 @@
/*
* arch/arm/include/asm/hardware/cache-feroceon-l2.h
*
* Copyright (C) 2008 Marvell Semiconductor
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
extern void __init feroceon_l2_init(int l2_wt_override);
extern int __init feroceon_of_init(void);

View file

@ -0,0 +1,181 @@
/*
* arch/arm/include/asm/hardware/cache-l2x0.h
*
* Copyright (C) 2007 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_ARM_HARDWARE_L2X0_H
#define __ASM_ARM_HARDWARE_L2X0_H
#include <linux/errno.h>
#define L2X0_CACHE_ID 0x000
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
#define L310_TAG_LATENCY_CTRL 0x108
#define L310_DATA_LATENCY_CTRL 0x10C
#define L2X0_EVENT_CNT_CTRL 0x200
#define L2X0_EVENT_CNT1_CFG 0x204
#define L2X0_EVENT_CNT0_CFG 0x208
#define L2X0_EVENT_CNT1_VAL 0x20C
#define L2X0_EVENT_CNT0_VAL 0x210
#define L2X0_INTR_MASK 0x214
#define L2X0_MASKED_INTR_STAT 0x218
#define L2X0_RAW_INTR_STAT 0x21C
#define L2X0_INTR_CLEAR 0x220
#define L2X0_CACHE_SYNC 0x730
#define L2X0_DUMMY_REG 0x740
#define L2X0_INV_LINE_PA 0x770
#define L2X0_INV_WAY 0x77C
#define L2X0_CLEAN_LINE_PA 0x7B0
#define L2X0_CLEAN_LINE_IDX 0x7B8
#define L2X0_CLEAN_WAY 0x7BC
#define L2X0_CLEAN_INV_LINE_PA 0x7F0
#define L2X0_CLEAN_INV_LINE_IDX 0x7F8
#define L2X0_CLEAN_INV_WAY 0x7FC
/*
* The lockdown registers repeat 8 times for L310; the L210 has only one
* D and one I lockdown register at 0x0900 and 0x0904.
*/
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
#define L310_ADDR_FILTER_START 0xC00
#define L310_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
#define L310_PREFETCH_CTRL 0xF60
#define L310_POWER_CTRL 0xF80
#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
#define L310_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L220 (2 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f
#define L210_CACHE_ID_RTL_R0P2_02 0x00
#define L210_CACHE_ID_RTL_R0P1 0x01
#define L210_CACHE_ID_RTL_R0P2_01 0x02
#define L210_CACHE_ID_RTL_R0P3 0x03
#define L210_CACHE_ID_RTL_R0P4 0x0b
#define L210_CACHE_ID_RTL_R0P5 0x0f
#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
#define L310_CACHE_ID_RTL_R0P0 0x00
#define L310_CACHE_ID_RTL_R1P0 0x02
#define L310_CACHE_ID_RTL_R2P0 0x04
#define L310_CACHE_ID_RTL_R3P0 0x05
#define L310_CACHE_ID_RTL_R3P1 0x06
#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
#define L310_CACHE_ID_RTL_R3P2 0x08
#define L310_CACHE_ID_RTL_R3P3 0x09
/* L2C auxiliary control register - bits common to L2C-210/220/310 */
#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
/* L2C-210/220 common bits */
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
/* L2C-210 specific bits */
#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
/* L2C-220 specific bits */
#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
#define L220_AUX_CTRL_FWA_SHIFT 23
#define L220_AUX_CTRL_FWA_MASK (3 << 23)
#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
/* L2C-310 specific bits */
#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
#define L310_ADDR_FILTER_EN 1
#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
#define L2X0_CTRL_EN 1
#define L2X0_WAY_SIZE_SHIFT 3
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
extern int l2x0_of_init(u32 aux_val, u32 aux_mask);
#else
static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
{
return -ENODEV;
}
#endif
struct l2x0_regs {
unsigned long phy_base;
unsigned long aux_ctrl;
/*
* Whether the following registers need to be saved/restored
* depends on platform
*/
unsigned long tag_latency;
unsigned long data_latency;
unsigned long filter_start;
unsigned long filter_end;
unsigned long prefetch_ctrl;
unsigned long pwr_ctrl;
unsigned long ctrl;
unsigned long aux2_ctrl;
};
extern struct l2x0_regs l2x0_saved_regs;
#endif /* __ASSEMBLY__ */
#endif
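
A bring-up sketch from platform init code; the mapped base and the particular aux bits chosen are illustrative assumptions:

/* Hypothetical platform bring-up. */
static void __init example_l2_init(void __iomem *l2_base)
{
	u32 aux_val  = L310_AUX_CTRL_DATA_PREFETCH |
		       L310_AUX_CTRL_INSTR_PREFETCH;	/* bits to OR in */
	u32 aux_mask = ~0U;	/* keep everything the boot loader set */

	l2x0_init(l2_base, aux_val, aux_mask);
	/* With device tree, l2x0_of_init(aux_val, aux_mask) plays the
	 * same role and takes the base from the L2C node instead. */
}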

View file

@ -0,0 +1,14 @@
/*
* arch/arm/include/asm/hardware/cache-tauros2.h
*
* Copyright (C) 2008 Marvell Semiconductor
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#define CACHE_TAUROS2_PREFETCH_ON (1 << 0)
#define CACHE_TAUROS2_LINEFILL_BURST8 (1 << 1)
extern void __init tauros2_init(unsigned int features);

View file

@ -0,0 +1,185 @@
/*
* linux/arch/arm/include/asm/hardware/coresight.h
*
* CoreSight components' registers
*
* Copyright (C) 2009 Nokia Corporation.
* Alexander Shishkin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_HARDWARE_CORESIGHT_H
#define __ASM_HARDWARE_CORESIGHT_H
#define TRACER_ACCESSED_BIT 0
#define TRACER_RUNNING_BIT 1
#define TRACER_CYCLE_ACC_BIT 2
#define TRACER_TRACE_DATA_BIT 3
#define TRACER_TIMESTAMP_BIT 4
#define TRACER_BRANCHOUTPUT_BIT 5
#define TRACER_RETURN_STACK_BIT 6
#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT)
#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
#define TRACER_TRACE_DATA BIT(TRACER_TRACE_DATA_BIT)
#define TRACER_TIMESTAMP BIT(TRACER_TIMESTAMP_BIT)
#define TRACER_BRANCHOUTPUT BIT(TRACER_BRANCHOUTPUT_BIT)
#define TRACER_RETURN_STACK BIT(TRACER_RETURN_STACK_BIT)
#define TRACER_TIMEOUT 10000
#define etm_writel(t, id, v, x) \
(writel_relaxed((v), (t)->etm_regs[(id)] + (x)))
#define etm_readl(t, id, x) (readl_relaxed((t)->etm_regs[(id)] + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
#define CSMR_LOCKSTATUS 0xfb4
#define CSMR_AUTHSTATUS 0xfb8
#define CSMR_DEVID 0xfc8
#define CSMR_DEVTYPE 0xfcc
/* CoreSight Component Registers */
#define CSCR_CLASS 0xff4
#define CS_LAR_KEY 0xc5acce55
/* ETM control register, "ETM Architecture", 3.3.1 */
#define ETMR_CTRL 0
#define ETMCTRL_POWERDOWN 1
#define ETMCTRL_PROGRAM (1 << 10)
#define ETMCTRL_PORTSEL (1 << 11)
#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
#define ETMCTRL_PORTMASK1 (7 << 4)
#define ETMCTRL_PORTMASK2 (1 << 21)
#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
#define ETMCTRL_DO_CPRT (1 << 1)
#define ETMCTRL_DATAMASK (3 << 2)
#define ETMCTRL_DATA_DO_DATA (1 << 2)
#define ETMCTRL_DATA_DO_ADDR (1 << 3)
#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
#define ETMCTRL_BRANCH_OUTPUT (1 << 8)
#define ETMCTRL_CYCLEACCURATE (1 << 12)
#define ETMCTRL_TIMESTAMP_EN (1 << 28)
#define ETMCTRL_RETURN_STACK_EN (1 << 29)
/* ETM configuration code register */
#define ETMR_CONFCODE (0x04)
#define ETMCCR_ETMIDR_PRESENT BIT(31)
/* ETM trace start/stop resource control register */
#define ETMR_TRACESSCTRL (0x18)
/* ETM trigger event register */
#define ETMR_TRIGEVT (0x08)
/* address access type register bits, "ETM architecture",
* table 3-27 */
/* - access type */
#define ETMAAT_IFETCH 0
#define ETMAAT_IEXEC 1
#define ETMAAT_IEXECPASS 2
#define ETMAAT_IEXECFAIL 3
#define ETMAAT_DLOADSTORE 4
#define ETMAAT_DLOAD 5
#define ETMAAT_DSTORE 6
/* - comparison access size */
#define ETMAAT_JAVA (0 << 3)
#define ETMAAT_THUMB (1 << 3)
#define ETMAAT_ARM (3 << 3)
/* - data value comparison control */
#define ETMAAT_NOVALCMP (0 << 5)
#define ETMAAT_VALMATCH (1 << 5)
#define ETMAAT_VALNOMATCH (3 << 5)
/* - exact match */
#define ETMAAT_EXACTMATCH (1 << 7)
/* - context id comparator control */
#define ETMAAT_IGNCONTEXTID (0 << 8)
#define ETMAAT_VALUE1 (1 << 8)
#define ETMAAT_VALUE2 (2 << 8)
#define ETMAAT_VALUE3 (3 << 8)
/* - security level control */
#define ETMAAT_IGNSECURITY (0 << 10)
#define ETMAAT_NSONLY (1 << 10)
#define ETMAAT_SONLY (2 << 10)
#define ETMR_COMP_VAL(x) (0x40 + (x) * 4)
#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4)
/* ETM status register, "ETM Architecture", 3.3.2 */
#define ETMR_STATUS (0x10)
#define ETMST_OVERFLOW BIT(0)
#define ETMST_PROGBIT BIT(1)
#define ETMST_STARTSTOP BIT(2)
#define ETMST_TRIGGER BIT(3)
#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
#define ETMR_TRACEENCTRL2 0x1c
#define ETMR_TRACEENCTRL 0x24
#define ETMTE_INCLEXCL BIT(24)
#define ETMR_TRACEENEVT 0x20
#define ETMR_VIEWDATAEVT 0x30
#define ETMR_VIEWDATACTRL1 0x34
#define ETMR_VIEWDATACTRL2 0x38
#define ETMR_VIEWDATACTRL3 0x3c
#define ETMVDC3_EXCLONLY BIT(16)
#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT)
#define ETMR_ID 0x1e4
#define ETMIDR_VERSION(x) (((x) >> 4) & 0xff)
#define ETMIDR_VERSION_3_1 0x21
#define ETMIDR_VERSION_PFT_1_0 0x30
#define ETMR_CCE 0x1e8
#define ETMCCER_RETURN_STACK_IMPLEMENTED BIT(23)
#define ETMCCER_TIMESTAMPING_IMPLEMENTED BIT(22)
#define ETMR_TRACEIDR 0x200
/* ETM management registers, "ETM Architecture", 3.5.24 */
#define ETMMR_OSLAR 0x300
#define ETMMR_OSLSR 0x304
#define ETMMR_OSSRR 0x308
#define ETMMR_PDSR 0x314
/* ETB registers, "CoreSight Components TRM", 9.3 */
#define ETBR_DEPTH 0x04
#define ETBR_STATUS 0x0c
#define ETBR_READMEM 0x10
#define ETBR_READADDR 0x14
#define ETBR_WRITEADDR 0x18
#define ETBR_TRIGGERCOUNT 0x1c
#define ETBR_CTRL 0x20
#define ETBR_FORMATTERCTRL 0x304
#define ETBFF_ENFTC 1
#define ETBFF_ENFCONT BIT(1)
#define ETBFF_FONFLIN BIT(4)
#define ETBFF_MANUAL_FLUSH BIT(6)
#define ETBFF_TRIGIN BIT(8)
#define ETBFF_TRIGEVT BIT(9)
#define ETBFF_TRIGFL BIT(10)
#define ETBFF_STOPFL BIT(12)
#define etb_writel(t, v, x) \
(writel_relaxed((v), (t)->etb_regs + (x)))
#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
#define etm_lock(t, id) \
do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t, id) \
do { etm_writel((t), (id), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#endif /* __ASM_HARDWARE_CORESIGHT_H */

View file

@ -0,0 +1,147 @@
/*
* arch/arm/include/asm/hardware/dec21285.h
*
* Copyright (C) 1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* DC21285 registers
*/
#define DC21285_PCI_IACK 0x79000000
#define DC21285_ARMCSR_BASE 0x42000000
#define DC21285_PCI_TYPE_0_CONFIG 0x7b000000
#define DC21285_PCI_TYPE_1_CONFIG 0x7a000000
#define DC21285_OUTBOUND_WRITE_FLUSH 0x78000000
#define DC21285_FLASH 0x41000000
#define DC21285_PCI_IO 0x7c000000
#define DC21285_PCI_MEM 0x80000000
#ifndef __ASSEMBLY__
#include <mach/hardware.h>
#define DC21285_IO(x) ((volatile unsigned long *)(ARMCSR_BASE+(x)))
#else
#define DC21285_IO(x) (x)
#endif
#define CSR_PCICMD DC21285_IO(0x0004)
#define CSR_CLASSREV DC21285_IO(0x0008)
#define CSR_PCICACHELINESIZE DC21285_IO(0x000c)
#define CSR_PCICSRBASE DC21285_IO(0x0010)
#define CSR_PCICSRIOBASE DC21285_IO(0x0014)
#define CSR_PCISDRAMBASE DC21285_IO(0x0018)
#define CSR_PCIROMBASE DC21285_IO(0x0030)
#define CSR_MBOX0 DC21285_IO(0x0050)
#define CSR_MBOX1 DC21285_IO(0x0054)
#define CSR_MBOX2 DC21285_IO(0x0058)
#define CSR_MBOX3 DC21285_IO(0x005c)
#define CSR_DOORBELL DC21285_IO(0x0060)
#define CSR_DOORBELL_SETUP DC21285_IO(0x0064)
#define CSR_ROMWRITEREG DC21285_IO(0x0068)
#define CSR_CSRBASEMASK DC21285_IO(0x00f8)
#define CSR_CSRBASEOFFSET DC21285_IO(0x00fc)
#define CSR_SDRAMBASEMASK DC21285_IO(0x0100)
#define CSR_SDRAMBASEOFFSET DC21285_IO(0x0104)
#define CSR_ROMBASEMASK DC21285_IO(0x0108)
#define CSR_SDRAMTIMING DC21285_IO(0x010c)
#define CSR_SDRAMADDRSIZE0 DC21285_IO(0x0110)
#define CSR_SDRAMADDRSIZE1 DC21285_IO(0x0114)
#define CSR_SDRAMADDRSIZE2 DC21285_IO(0x0118)
#define CSR_SDRAMADDRSIZE3 DC21285_IO(0x011c)
#define CSR_I2O_INFREEHEAD DC21285_IO(0x0120)
#define CSR_I2O_INPOSTTAIL DC21285_IO(0x0124)
#define CSR_I2O_OUTPOSTHEAD DC21285_IO(0x0128)
#define CSR_I2O_OUTFREETAIL DC21285_IO(0x012c)
#define CSR_I2O_INFREECOUNT DC21285_IO(0x0130)
#define CSR_I2O_OUTPOSTCOUNT DC21285_IO(0x0134)
#define CSR_I2O_INPOSTCOUNT DC21285_IO(0x0138)
#define CSR_SA110_CNTL DC21285_IO(0x013c)
#define SA110_CNTL_INITCMPLETE (1 << 0)
#define SA110_CNTL_ASSERTSERR (1 << 1)
#define SA110_CNTL_RXSERR (1 << 3)
#define SA110_CNTL_SA110DRAMPARITY (1 << 4)
#define SA110_CNTL_PCISDRAMPARITY (1 << 5)
#define SA110_CNTL_DMASDRAMPARITY (1 << 6)
#define SA110_CNTL_DISCARDTIMER (1 << 8)
#define SA110_CNTL_PCINRESET (1 << 9)
#define SA110_CNTL_I2O_256 (0 << 10)
#define SA110_CNTL_I2O_512 (1 << 10)
#define SA110_CNTL_I2O_1024 (2 << 10)
#define SA110_CNTL_I2O_2048 (3 << 10)
#define SA110_CNTL_I2O_4096 (4 << 10)
#define SA110_CNTL_I2O_8192 (5 << 10)
#define SA110_CNTL_I2O_16384 (6 << 10)
#define SA110_CNTL_I2O_32768 (7 << 10)
#define SA110_CNTL_WATCHDOG (1 << 13)
#define SA110_CNTL_ROMWIDTH_UNDEF (0 << 14)
#define SA110_CNTL_ROMWIDTH_16 (1 << 14)
#define SA110_CNTL_ROMWIDTH_32 (2 << 14)
#define SA110_CNTL_ROMWIDTH_8 (3 << 14)
#define SA110_CNTL_ROMACCESSTIME(x) ((x)<<16)
#define SA110_CNTL_ROMBURSTTIME(x) ((x)<<20)
#define SA110_CNTL_ROMTRISTATETIME(x) ((x)<<24)
#define SA110_CNTL_XCSDIR(x) ((x)<<28)
#define SA110_CNTL_PCICFN (1 << 31)
/*
* footbridge_cfn_mode() is used when we want
* to check whether we are the central function
*/
#define __footbridge_cfn_mode() (*CSR_SA110_CNTL & SA110_CNTL_PCICFN)
#if defined(CONFIG_FOOTBRIDGE_HOST) && defined(CONFIG_FOOTBRIDGE_ADDIN)
#define footbridge_cfn_mode() __footbridge_cfn_mode()
#elif defined(CONFIG_FOOTBRIDGE_HOST)
#define footbridge_cfn_mode() (1)
#else
#define footbridge_cfn_mode() (0)
#endif
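/*
 * Hypothetical usage sketch (assumes <linux/errno.h> for ENODEV): a
 * probe helper that bails out on an add-in card. With only one
 * FOOTBRIDGE_* option configured, footbridge_cfn_mode() folds to a
 * constant and the dead branch disappears; with both configured it
 * reads CSR_SA110_CNTL at run time.
 */
static inline int example_require_central_function(void)
{
	if (footbridge_cfn_mode())
		return 0;	/* central function: we may set up PCI */
	return -ENODEV;		/* add-in mode: the host owns the bus */
}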
#define CSR_PCIADDR_EXTN DC21285_IO(0x0140)
#define CSR_PREFETCHMEMRANGE DC21285_IO(0x0144)
#define CSR_XBUS_CYCLE DC21285_IO(0x0148)
#define CSR_XBUS_IOSTROBE DC21285_IO(0x014c)
#define CSR_DOORBELL_PCI DC21285_IO(0x0150)
#define CSR_DOORBELL_SA110 DC21285_IO(0x0154)
#define CSR_UARTDR DC21285_IO(0x0160)
#define CSR_RXSTAT DC21285_IO(0x0164)
#define CSR_H_UBRLCR DC21285_IO(0x0168)
#define CSR_M_UBRLCR DC21285_IO(0x016c)
#define CSR_L_UBRLCR DC21285_IO(0x0170)
#define CSR_UARTCON DC21285_IO(0x0174)
#define CSR_UARTFLG DC21285_IO(0x0178)
#define CSR_IRQ_STATUS DC21285_IO(0x0180)
#define CSR_IRQ_RAWSTATUS DC21285_IO(0x0184)
#define CSR_IRQ_ENABLE DC21285_IO(0x0188)
#define CSR_IRQ_DISABLE DC21285_IO(0x018c)
#define CSR_IRQ_SOFT DC21285_IO(0x0190)
#define CSR_FIQ_STATUS DC21285_IO(0x0280)
#define CSR_FIQ_RAWSTATUS DC21285_IO(0x0284)
#define CSR_FIQ_ENABLE DC21285_IO(0x0288)
#define CSR_FIQ_DISABLE DC21285_IO(0x028c)
#define CSR_FIQ_SOFT DC21285_IO(0x0290)
#define CSR_TIMER1_LOAD DC21285_IO(0x0300)
#define CSR_TIMER1_VALUE DC21285_IO(0x0304)
#define CSR_TIMER1_CNTL DC21285_IO(0x0308)
#define CSR_TIMER1_CLR DC21285_IO(0x030c)
#define CSR_TIMER2_LOAD DC21285_IO(0x0320)
#define CSR_TIMER2_VALUE DC21285_IO(0x0324)
#define CSR_TIMER2_CNTL DC21285_IO(0x0328)
#define CSR_TIMER2_CLR DC21285_IO(0x032c)
#define CSR_TIMER3_LOAD DC21285_IO(0x0340)
#define CSR_TIMER3_VALUE DC21285_IO(0x0344)
#define CSR_TIMER3_CNTL DC21285_IO(0x0348)
#define CSR_TIMER3_CLR DC21285_IO(0x034c)
#define CSR_TIMER4_LOAD DC21285_IO(0x0360)
#define CSR_TIMER4_VALUE DC21285_IO(0x0364)
#define CSR_TIMER4_CNTL DC21285_IO(0x0368)
#define CSR_TIMER4_CLR DC21285_IO(0x036c)
#define TIMER_CNTL_ENABLE (1 << 7)
#define TIMER_CNTL_AUTORELOAD (1 << 6)
#define TIMER_CNTL_DIV1 (0)
#define TIMER_CNTL_DIV16 (1 << 2)
#define TIMER_CNTL_DIV256 (2 << 2)
#define TIMER_CNTL_CNTEXT (3 << 2)
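/*
 * Minimal sketch, not part of the original header: starting TIMER1 as
 * a periodic tick from the divide-by-16 clock, using the CSR_TIMER1_*
 * and TIMER_CNTL_* definitions above. `interval` is the reload value
 * in (fclk/16) ticks, an assumption of this example.
 */
static inline void example_start_timer1(unsigned long interval)
{
	*CSR_TIMER1_CLR  = 0;			/* clear any pending event */
	*CSR_TIMER1_LOAD = interval;		/* value reloaded on expiry */
	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE |
			   TIMER_CNTL_AUTORELOAD |
			   TIMER_CNTL_DIV16;
}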

View file

@@ -0,0 +1,131 @@
/*
* arch/arm/include/asm/hardware/entry-macro-iomd.S
*
* Low-level IRQ helper macros for IOC/IOMD based platforms
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/* IOC / IOMD based hardware */
#include <asm/hardware/iomd.h>
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
ldr \tmp, =irq_prio_h
teq \irqstat, #0
#ifdef IOMD_BASE
ldreqb \irqstat, [\base, #IOMD_DMAREQ] @ get dma
addeq \tmp, \tmp, #256 @ irq_prio_h table size
teqeq \irqstat, #0
bne 2406f
#endif
ldreqb \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
addeq \tmp, \tmp, #256 @ irq_prio_d table size
teqeq \irqstat, #0
#ifdef IOMD_IRQREQC
ldreqb \irqstat, [\base, #IOMD_IRQREQC]
addeq \tmp, \tmp, #256 @ irq_prio_l table size
teqeq \irqstat, #0
#endif
#ifdef IOMD_IRQREQD
ldreqb \irqstat, [\base, #IOMD_IRQREQD]
addeq \tmp, \tmp, #256 @ irq_prio_lc table size
teqeq \irqstat, #0
#endif
2406: ldrneb \irqnr, [\tmp, \irqstat] @ get IRQ number
.endm
/*
* Interrupt table (incorporates priority). Please note that we
* rely on the order of these tables (see above code).
*/
.align 5
irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
#ifdef IOMD_BASE
irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
#endif
irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
#ifdef IOMD_IRQREQC
irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
.byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
.byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
#endif
#ifdef IOMD_IRQREQD
irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
.byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
.byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
#endif

View file

@@ -0,0 +1,59 @@
/*
* arch/arm/include/asm/hardware/icst.h
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Support functions for calculating clocks/divisors for the ICST
* clock generators. See http://www.idt.com/ for more information
* on these devices.
*/
#ifndef ASMARM_HARDWARE_ICST_H
#define ASMARM_HARDWARE_ICST_H
struct icst_params {
unsigned long ref;
unsigned long vco_max; /* inclusive */
unsigned long vco_min; /* exclusive */
unsigned short vd_min; /* inclusive */
unsigned short vd_max; /* inclusive */
unsigned char rd_min; /* inclusive */
unsigned char rd_max; /* inclusive */
const unsigned char *s2div; /* chip specific s2div array */
const unsigned char *idx2s; /* chip specific idx2s array */
};
struct icst_vco {
unsigned short v;
unsigned char r;
unsigned char s;
};
unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
/*
* ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V).
* This frequency is pre-output divider.
*/
#define ICST307_VCO_MIN 6000000
#define ICST307_VCO_MAX 200000000
extern const unsigned char icst307_s2div[];
extern const unsigned char icst307_idx2s[];
/*
* ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V).
* This frequency is pre-output divider.
*/
#define ICST525_VCO_MIN 10000000
#define ICST525_VCO_MAX_3V 200000000
#define ICST525_VCO_MAX_5V 320000000
extern const unsigned char icst525_s2div[];
extern const unsigned char icst525_idx2s[];
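/*
 * Hypothetical usage sketch: describing an ICST307 fed from a 24MHz
 * reference and rounding a requested rate to what the synthesizer can
 * produce. The VD/RD limits below are made-up board constraints, not
 * canonical values; only the VCO limits and tables come from above.
 */
static const struct icst_params example_icst307_params = {
	.ref		= 24000000,
	.vco_max	= ICST307_VCO_MAX,
	.vco_min	= ICST307_VCO_MIN,
	.vd_min		= 12,		/* hypothetical board limit */
	.vd_max		= 519,		/* hypothetical board limit */
	.rd_min		= 2,		/* hypothetical board limit */
	.rd_max		= 127,		/* hypothetical board limit */
	.s2div		= icst307_s2div,
	.idx2s		= icst307_idx2s,
};

static inline unsigned long example_icst307_round(unsigned long rate)
{
	struct icst_vco vco = icst_hz_to_vco(&example_icst307_params, rate);

	return icst_hz(&example_icst307_params, vco); /* achievable rate */
}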
#endif

View file

@@ -0,0 +1,72 @@
/*
* arch/arm/include/asm/hardware/ioc.h
*
* Copyright (C) Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
 * Use these macros to read/write the IOC. All they do is perform the actual
 * read/write.
*/
#ifndef __ASMARM_HARDWARE_IOC_H
#define __ASMARM_HARDWARE_IOC_H
#ifndef __ASSEMBLY__
/*
* We use __raw_base variants here so that we give the compiler the
* chance to keep IOC_BASE in a register.
*/
#define ioc_readb(off) __raw_readb(IOC_BASE + (off))
#define ioc_writeb(val,off) __raw_writeb(val, IOC_BASE + (off))
#endif
#define IOC_CONTROL (0x00)
#define IOC_KARTTX (0x04)
#define IOC_KARTRX (0x04)
#define IOC_IRQSTATA (0x10)
#define IOC_IRQREQA (0x14)
#define IOC_IRQCLRA (0x14)
#define IOC_IRQMASKA (0x18)
#define IOC_IRQSTATB (0x20)
#define IOC_IRQREQB (0x24)
#define IOC_IRQMASKB (0x28)
#define IOC_FIQSTAT (0x30)
#define IOC_FIQREQ (0x34)
#define IOC_FIQMASK (0x38)
#define IOC_T0CNTL (0x40)
#define IOC_T0LTCHL (0x40)
#define IOC_T0CNTH (0x44)
#define IOC_T0LTCHH (0x44)
#define IOC_T0GO (0x48)
#define IOC_T0LATCH (0x4c)
#define IOC_T1CNTL (0x50)
#define IOC_T1LTCHL (0x50)
#define IOC_T1CNTH (0x54)
#define IOC_T1LTCHH (0x54)
#define IOC_T1GO (0x58)
#define IOC_T1LATCH (0x5c)
#define IOC_T2CNTL (0x60)
#define IOC_T2LTCHL (0x60)
#define IOC_T2CNTH (0x64)
#define IOC_T2LTCHH (0x64)
#define IOC_T2GO (0x68)
#define IOC_T2LATCH (0x6c)
#define IOC_T3CNTL (0x70)
#define IOC_T3LTCHL (0x70)
#define IOC_T3CNTH (0x74)
#define IOC_T3LTCHH (0x74)
#define IOC_T3GO (0x78)
#define IOC_T3LATCH (0x7c)
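/*
 * Illustrative sketch, not part of the original header: latching and
 * reading the 16-bit count of IOC timer 0, in the style of the RiscPC
 * timekeeping code. Assumes IOC_BASE is already mapped.
 */
static inline unsigned int example_ioc_read_timer0(void)
{
	unsigned int low, high;

	ioc_writeb(0, IOC_T0LATCH);	/* any write latches the count */
	low  = ioc_readb(IOC_T0CNTL);	/* latched count, low byte */
	high = ioc_readb(IOC_T0CNTH);	/* latched count, high byte */
	return (high << 8) | low;
}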
#endif

View file

@@ -0,0 +1,185 @@
/*
* arch/arm/include/asm/hardware/iomd.h
*
* Copyright (C) 1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
 * This file contains information about the IOMD ASIC used in the
* Acorn RiscPC and subsequently integrated into the CLPS7500 chips.
*/
#ifndef __ASMARM_HARDWARE_IOMD_H
#define __ASMARM_HARDWARE_IOMD_H
#ifndef __ASSEMBLY__
/*
* We use __raw_base variants here so that we give the compiler the
* chance to keep IOC_BASE in a register.
*/
#define iomd_readb(off) __raw_readb(IOMD_BASE + (off))
#define iomd_readl(off) __raw_readl(IOMD_BASE + (off))
#define iomd_writeb(val,off) __raw_writeb(val, IOMD_BASE + (off))
#define iomd_writel(val,off) __raw_writel(val, IOMD_BASE + (off))
#endif
#define IOMD_CONTROL (0x000)
#define IOMD_KARTTX (0x004)
#define IOMD_KARTRX (0x004)
#define IOMD_KCTRL (0x008)
#define IOMD_IRQSTATA (0x010)
#define IOMD_IRQREQA (0x014)
#define IOMD_IRQCLRA (0x014)
#define IOMD_IRQMASKA (0x018)
#define IOMD_IRQSTATB (0x020)
#define IOMD_IRQREQB (0x024)
#define IOMD_IRQMASKB (0x028)
#define IOMD_FIQSTAT (0x030)
#define IOMD_FIQREQ (0x034)
#define IOMD_FIQMASK (0x038)
#define IOMD_T0CNTL (0x040)
#define IOMD_T0LTCHL (0x040)
#define IOMD_T0CNTH (0x044)
#define IOMD_T0LTCHH (0x044)
#define IOMD_T0GO (0x048)
#define IOMD_T0LATCH (0x04c)
#define IOMD_T1CNTL (0x050)
#define IOMD_T1LTCHL (0x050)
#define IOMD_T1CNTH (0x054)
#define IOMD_T1LTCHH (0x054)
#define IOMD_T1GO (0x058)
#define IOMD_T1LATCH (0x05c)
#define IOMD_ROMCR0 (0x080)
#define IOMD_ROMCR1 (0x084)
#ifdef CONFIG_ARCH_RPC
#define IOMD_DRAMCR (0x088)
#endif
#define IOMD_REFCR (0x08C)
#define IOMD_FSIZE (0x090)
#define IOMD_ID0 (0x094)
#define IOMD_ID1 (0x098)
#define IOMD_VERSION (0x09C)
#ifdef CONFIG_ARCH_RPC
#define IOMD_MOUSEX (0x0A0)
#define IOMD_MOUSEY (0x0A4)
#endif
#ifdef CONFIG_ARCH_RPC
#define IOMD_DMATCR (0x0C0)
#endif
#define IOMD_IOTCR (0x0C4)
#define IOMD_ECTCR (0x0C8)
#ifdef CONFIG_ARCH_RPC
#define IOMD_DMAEXT (0x0CC)
#endif
#ifdef CONFIG_ARCH_RPC
#define DMA_EXT_IO0 1
#define DMA_EXT_IO1 2
#define DMA_EXT_IO2 4
#define DMA_EXT_IO3 8
#define IOMD_IO0CURA (0x100)
#define IOMD_IO0ENDA (0x104)
#define IOMD_IO0CURB (0x108)
#define IOMD_IO0ENDB (0x10C)
#define IOMD_IO0CR (0x110)
#define IOMD_IO0ST (0x114)
#define IOMD_IO1CURA (0x120)
#define IOMD_IO1ENDA (0x124)
#define IOMD_IO1CURB (0x128)
#define IOMD_IO1ENDB (0x12C)
#define IOMD_IO1CR (0x130)
#define IOMD_IO1ST (0x134)
#define IOMD_IO2CURA (0x140)
#define IOMD_IO2ENDA (0x144)
#define IOMD_IO2CURB (0x148)
#define IOMD_IO2ENDB (0x14C)
#define IOMD_IO2CR (0x150)
#define IOMD_IO2ST (0x154)
#define IOMD_IO3CURA (0x160)
#define IOMD_IO3ENDA (0x164)
#define IOMD_IO3CURB (0x168)
#define IOMD_IO3ENDB (0x16C)
#define IOMD_IO3CR (0x170)
#define IOMD_IO3ST (0x174)
#endif
#define IOMD_SD0CURA (0x180)
#define IOMD_SD0ENDA (0x184)
#define IOMD_SD0CURB (0x188)
#define IOMD_SD0ENDB (0x18C)
#define IOMD_SD0CR (0x190)
#define IOMD_SD0ST (0x194)
#ifdef CONFIG_ARCH_RPC
#define IOMD_SD1CURA (0x1A0)
#define IOMD_SD1ENDA (0x1A4)
#define IOMD_SD1CURB (0x1A8)
#define IOMD_SD1ENDB (0x1AC)
#define IOMD_SD1CR (0x1B0)
#define IOMD_SD1ST (0x1B4)
#endif
#define IOMD_CURSCUR (0x1C0)
#define IOMD_CURSINIT (0x1C4)
#define IOMD_VIDCUR (0x1D0)
#define IOMD_VIDEND (0x1D4)
#define IOMD_VIDSTART (0x1D8)
#define IOMD_VIDINIT (0x1DC)
#define IOMD_VIDCR (0x1E0)
#define IOMD_DMASTAT (0x1F0)
#define IOMD_DMAREQ (0x1F4)
#define IOMD_DMAMASK (0x1F8)
#define DMA_END_S (1 << 31)
#define DMA_END_L (1 << 30)
#define DMA_CR_C 0x80
#define DMA_CR_D 0x40
#define DMA_CR_E 0x20
#define DMA_ST_OFL 4
#define DMA_ST_INT 2
#define DMA_ST_AB 1
/*
* DMA (MEMC) compatibility
*/
#define HALF_SAM vram_half_sam
#define VDMA_ALIGNMENT (HALF_SAM * 2)
#define VDMA_XFERSIZE (HALF_SAM)
#define VDMA_INIT IOMD_VIDINIT
#define VDMA_START IOMD_VIDSTART
#define VDMA_END IOMD_VIDEND
#ifndef __ASSEMBLY__
extern unsigned int vram_half_sam;
#define video_set_dma(start,end,offset) \
do { \
outl (SCREEN_START + start, VDMA_START); \
outl (SCREEN_START + end - VDMA_XFERSIZE, VDMA_END); \
if (offset >= end - VDMA_XFERSIZE) \
offset |= 0x40000000; \
outl (SCREEN_START + offset, VDMA_INIT); \
} while (0)
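/*
 * Illustrative sketch (not from the original header): masking or
 * unmasking one line in IOMD's IRQ A group with the iomd_readb/
 * iomd_writeb accessors defined above. Assumes IOMD_BASE is mapped.
 */
static inline void example_iomd_irqa_mask(unsigned int bit, int unmask)
{
	unsigned int mask = iomd_readb(IOMD_IRQMASKA);

	if (unmask)
		mask |= 1 << bit;	/* 1 = interrupt enabled */
	else
		mask &= ~(1 << bit);
	iomd_writeb(mask, IOMD_IRQMASKA);
}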
#endif
#endif

View file

@@ -0,0 +1,932 @@
/*
* Copyright © 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _ADMA_H
#define _ADMA_H
#include <linux/types.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/iop_adma.h>
/* Memory copy units */
#define DMA_CCR(chan) (chan->mmr_base + 0x0)
#define DMA_CSR(chan) (chan->mmr_base + 0x4)
#define DMA_DAR(chan) (chan->mmr_base + 0xc)
#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
#define DMA_PADR(chan) (chan->mmr_base + 0x14)
#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
#define DMA_BCR(chan) (chan->mmr_base + 0x20)
#define DMA_DCR(chan) (chan->mmr_base + 0x24)
/* Application accelerator unit */
#define AAU_ACR(chan) (chan->mmr_base + 0x0)
#define AAU_ASR(chan) (chan->mmr_base + 0x4)
#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan) (chan->mmr_base + 0x20)
#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
#define AAU_EDCR0_IDX 8
#define AAU_EDCR1_IDX 17
#define AAU_EDCR2_IDX 26
#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2
struct iop3xx_aau_desc_ctrl {
unsigned int int_en:1;
unsigned int blk1_cmd_ctrl:3;
unsigned int blk2_cmd_ctrl:3;
unsigned int blk3_cmd_ctrl:3;
unsigned int blk4_cmd_ctrl:3;
unsigned int blk5_cmd_ctrl:3;
unsigned int blk6_cmd_ctrl:3;
unsigned int blk7_cmd_ctrl:3;
unsigned int blk8_cmd_ctrl:3;
unsigned int blk_ctrl:2;
unsigned int dual_xor_en:1;
unsigned int tx_complete:1;
unsigned int zero_result_err:1;
unsigned int zero_result_en:1;
unsigned int dest_write_en:1;
};
struct iop3xx_aau_e_desc_ctrl {
unsigned int reserved:1;
unsigned int blk1_cmd_ctrl:3;
unsigned int blk2_cmd_ctrl:3;
unsigned int blk3_cmd_ctrl:3;
unsigned int blk4_cmd_ctrl:3;
unsigned int blk5_cmd_ctrl:3;
unsigned int blk6_cmd_ctrl:3;
unsigned int blk7_cmd_ctrl:3;
unsigned int blk8_cmd_ctrl:3;
unsigned int reserved2:7;
};
struct iop3xx_dma_desc_ctrl {
unsigned int pci_transaction:4;
unsigned int int_en:1;
unsigned int dac_cycle_en:1;
unsigned int mem_to_mem_en:1;
unsigned int crc_data_tx_en:1;
unsigned int crc_gen_en:1;
unsigned int crc_seed_dis:1;
unsigned int reserved:21;
unsigned int crc_tx_complete:1;
};
struct iop3xx_desc_dma {
u32 next_desc;
union {
u32 pci_src_addr;
u32 pci_dest_addr;
u32 src_addr;
};
union {
u32 upper_pci_src_addr;
u32 upper_pci_dest_addr;
};
union {
u32 local_pci_src_addr;
u32 local_pci_dest_addr;
u32 dest_addr;
};
u32 byte_count;
union {
u32 desc_ctrl;
struct iop3xx_dma_desc_ctrl desc_ctrl_field;
};
u32 crc_addr;
};
struct iop3xx_desc_aau {
u32 next_desc;
u32 src[4];
u32 dest_addr;
u32 byte_count;
union {
u32 desc_ctrl;
struct iop3xx_aau_desc_ctrl desc_ctrl_field;
};
union {
u32 src_addr;
u32 e_desc_ctrl;
struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
} src_edc[31];
};
struct iop3xx_aau_gfmr {
unsigned int gfmr1:8;
unsigned int gfmr2:8;
unsigned int gfmr3:8;
unsigned int gfmr4:8;
};
struct iop3xx_desc_pq_xor {
u32 next_desc;
u32 src[3];
union {
u32 data_mult1;
struct iop3xx_aau_gfmr data_mult1_field;
};
u32 dest_addr;
u32 byte_count;
union {
u32 desc_ctrl;
struct iop3xx_aau_desc_ctrl desc_ctrl_field;
};
union {
u32 src_addr;
u32 e_desc_ctrl;
struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
u32 data_multiplier;
struct iop3xx_aau_gfmr data_mult_field;
u32 reserved;
} src_edc_gfmr[19];
};
struct iop3xx_desc_dual_xor {
u32 next_desc;
u32 src0_addr;
u32 src1_addr;
u32 h_src_addr;
u32 d_src_addr;
u32 h_dest_addr;
u32 byte_count;
union {
u32 desc_ctrl;
struct iop3xx_aau_desc_ctrl desc_ctrl_field;
};
u32 d_dest_addr;
};
union iop3xx_desc {
struct iop3xx_desc_aau *aau;
struct iop3xx_desc_dma *dma;
struct iop3xx_desc_pq_xor *pq_xor;
struct iop3xx_desc_dual_xor *dual_xor;
void *ptr;
};
/* No support for p+q operations */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
BUG();
return 0;
}
static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
BUG();
}
static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
BUG();
}
static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
dma_addr_t addr, unsigned char coef)
{
BUG();
}
static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
BUG();
return 0;
}
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
BUG();
}
static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
BUG();
}
#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
dma_addr_t *src)
{
BUG();
}
static inline int iop_adma_get_max_xor(void)
{
return 32;
}
static inline int iop_adma_get_max_pq(void)
{
BUG();
return 0;
}
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
int id = chan->device->id;
switch (id) {
case DMA0_ID:
case DMA1_ID:
return __raw_readl(DMA_DAR(chan));
case AAU_ID:
return __raw_readl(AAU_ADAR(chan));
default:
BUG();
}
return 0;
}
static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
u32 next_desc_addr)
{
int id = chan->device->id;
switch (id) {
case DMA0_ID:
case DMA1_ID:
__raw_writel(next_desc_addr, DMA_NDAR(chan));
break;
case AAU_ID:
__raw_writel(next_desc_addr, AAU_ANDAR(chan));
break;
}
}
#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
u32 status = __raw_readl(DMA_CSR(chan));
return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}
static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
int num_slots)
{
/* num_slots will only ever be 1, 2, 4, or 8 */
return (desc->idx & (num_slots - 1)) ? 0 : 1;
}
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
*slots_per_op = 1;
return 1;
}
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
*slots_per_op = 1;
return 1;
}
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
int *slots_per_op)
{
static const char slot_count_table[] = {
1, 1, 1, 1, /* 01 - 04 */
2, 2, 2, 2, /* 05 - 08 */
4, 4, 4, 4, /* 09 - 12 */
4, 4, 4, 4, /* 13 - 16 */
8, 8, 8, 8, /* 17 - 20 */
8, 8, 8, 8, /* 21 - 24 */
8, 8, 8, 8, /* 25 - 28 */
8, 8, 8, 8, /* 29 - 32 */
};
*slots_per_op = slot_count_table[src_cnt - 1];
return *slots_per_op;
}
static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return iop_chan_memcpy_slot_count(0, slots_per_op);
case AAU_ID:
return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
default:
BUG();
}
return 0;
}
static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
int *slots_per_op)
{
int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
return slot_cnt;
len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
slot_cnt += *slots_per_op;
}
slot_cnt += *slots_per_op;
return slot_cnt;
}
/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
* descriptors
*/
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
int *slots_per_op)
{
int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
return slot_cnt;
len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
slot_cnt += *slots_per_op;
}
slot_cnt += *slots_per_op;
return slot_cnt;
}
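/*
 * Worked example of the slot arithmetic above (illustrative only): an
 * 8-source operation takes 2 slots per 1KB chunk, so a 40KB zero-sum
 * splits into 40 chunks and consumes 80 descriptor slots.
 */
static inline int example_zero_sum_slots(void)
{
	int slots_per_op;
	int slots = iop_chan_zero_sum_slot_count(40 * 1024, 8,
						 &slots_per_op);

	/* here slots_per_op == 2 and slots == 80 */
	return slots;
}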
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return hw_desc.dma->byte_count;
case AAU_ID:
return hw_desc.aau->byte_count;
default:
BUG();
}
return 0;
}
/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
static const int desc_idx_table[] = { 0, 0, 0, 0,
0, 1, 2, 3,
5, 6, 7, 8,
9, 10, 11, 12,
14, 15, 16, 17,
18, 19, 20, 21,
23, 24, 25, 26,
27, 28, 29, 30,
};
return desc_idx_table[src_idx];
}
static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan,
int src_idx)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return hw_desc.dma->src_addr;
case AAU_ID:
break;
default:
BUG();
}
if (src_idx < 4)
return hw_desc.aau->src[src_idx];
else
return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}
static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
int src_idx, dma_addr_t addr)
{
if (src_idx < 4)
hw_desc->src[src_idx] = addr;
else
hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}
static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop3xx_dma_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = 0;
u_desc_ctrl.field.mem_to_mem_en = 1;
u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
hw_desc->upper_pci_src_addr = 0;
hw_desc->crc_addr = 0;
}
static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop3xx_aau_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = 0;
u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
u_desc_ctrl.field.dest_write_en = 1;
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
unsigned long flags)
{
int i, shift;
u32 edcr;
union {
u32 value;
struct iop3xx_aau_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = 0;
switch (src_cnt) {
case 25 ... 32:
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
edcr = 0;
shift = 1;
for (i = 24; i < src_cnt; i++) {
edcr |= (1 << shift);
shift += 3;
}
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
src_cnt = 24;
/* fall through */
case 17 ... 24:
if (!u_desc_ctrl.field.blk_ctrl) {
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
}
edcr = 0;
shift = 1;
for (i = 16; i < src_cnt; i++) {
edcr |= (1 << shift);
shift += 3;
}
hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
src_cnt = 16;
/* fall through */
case 9 ... 16:
if (!u_desc_ctrl.field.blk_ctrl)
u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
edcr = 0;
shift = 1;
for (i = 8; i < src_cnt; i++) {
edcr |= (1 << shift);
shift += 3;
}
hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
src_cnt = 8;
/* fall through */
case 2 ... 8:
shift = 1;
for (i = 0; i < src_cnt; i++) {
u_desc_ctrl.value |= (1 << shift);
shift += 3;
}
if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
}
u_desc_ctrl.field.dest_write_en = 1;
u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
return u_desc_ctrl.value;
}
static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}
/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
union {
u32 value;
struct iop3xx_aau_desc_ctrl field;
} u_desc_ctrl;
int i, j;
hw_desc = desc->hw_desc;
for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
i += slots_per_op, j++) {
iter = iop_hw_desc_slot_idx(hw_desc, i);
u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
u_desc_ctrl.field.dest_write_en = 0;
u_desc_ctrl.field.zero_result_en = 1;
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
iter->desc_ctrl = u_desc_ctrl.value;
/* for the subsequent descriptors preserve the store queue
* and chain them together
*/
if (i) {
prev_hw_desc =
iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
prev_hw_desc->next_desc =
(u32) (desc->async_tx.phys + (i << 5));
}
}
return j;
}
static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop3xx_aau_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = 0;
switch (src_cnt) {
case 25 ... 32:
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
/* fall through */
case 17 ... 24:
if (!u_desc_ctrl.field.blk_ctrl) {
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
}
hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
/* fall through */
case 9 ... 16:
if (!u_desc_ctrl.field.blk_ctrl)
u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
/* fall through */
case 1 ... 8:
if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
}
u_desc_ctrl.field.dest_write_en = 0;
u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan,
u32 byte_count)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
hw_desc.dma->byte_count = byte_count;
break;
case AAU_ID:
hw_desc.aau->byte_count = byte_count;
break;
default:
BUG();
}
}
static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
iop_desc_init_memcpy(desc, 1);
hw_desc.dma->byte_count = 0;
hw_desc.dma->dest_addr = 0;
hw_desc.dma->src_addr = 0;
break;
case AAU_ID:
iop_desc_init_null_xor(desc, 2, 1);
hw_desc.aau->byte_count = 0;
hw_desc.aau->dest_addr = 0;
hw_desc.aau->src[0] = 0;
hw_desc.aau->src[1] = 0;
break;
default:
BUG();
}
}
static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
int slots_per_op = desc->slots_per_op;
struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
int i = 0;
if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
hw_desc->byte_count = len;
} else {
do {
iter = iop_hw_desc_slot_idx(hw_desc, i);
iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
i += slots_per_op;
} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
iter = iop_hw_desc_slot_idx(hw_desc, i);
iter->byte_count = len;
}
}
static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan,
dma_addr_t addr)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
hw_desc.dma->dest_addr = addr;
break;
case AAU_ID:
hw_desc.aau->dest_addr = addr;
break;
default:
BUG();
}
}
static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
dma_addr_t addr)
{
struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
hw_desc->src_addr = addr;
}
static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
dma_addr_t addr)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
int i;
for (i = 0; (slot_cnt -= slots_per_op) >= 0;
i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
iter = iop_hw_desc_slot_idx(hw_desc, i);
iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
}
}
static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
int src_idx, dma_addr_t addr)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
int i;
for (i = 0; (slot_cnt -= slots_per_op) >= 0;
i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
iter = iop_hw_desc_slot_idx(hw_desc, i);
iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
}
}
static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
u32 next_desc_addr)
{
/* hw_desc->next_desc is the same location for all channels */
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
iop_paranoia(hw_desc.dma->next_desc);
hw_desc.dma->next_desc = next_desc_addr;
}
static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
/* hw_desc->next_desc is the same location for all channels */
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
return hw_desc.dma->next_desc;
}
static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
/* hw_desc->next_desc is the same location for all channels */
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
hw_desc.dma->next_desc = 0;
}
static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
u32 val)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
hw_desc->src[0] = val;
}
static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
return desc_ctrl.zero_result_err << SUM_CHECK_P;
}
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
u32 dma_chan_ctrl;
dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
dma_chan_ctrl |= 0x2;
__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
return __raw_readl(DMA_CSR(chan));
}
static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
dma_chan_ctrl &= ~1;
__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
dma_chan_ctrl |= 1;
__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
u32 status = __raw_readl(DMA_CSR(chan));
status &= (1 << 9);
__raw_writel(status, DMA_CSR(chan));
}
static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
u32 status = __raw_readl(DMA_CSR(chan));
status &= (1 << 8);
__raw_writel(status, DMA_CSR(chan));
}
static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
u32 status = __raw_readl(DMA_CSR(chan));
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
break;
case AAU_ID:
status &= (1 << 5);
break;
default:
BUG();
}
__raw_writel(status, DMA_CSR(chan));
}
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
return 0;
}
static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
return 0;
}
static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
return 0;
}
static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
return test_bit(5, &status);
}
static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return test_bit(2, &status);
default:
return 0;
}
}
static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return test_bit(3, &status);
default:
return 0;
}
}
static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return test_bit(1, &status);
default:
return 0;
}
}
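/*
 * Illustrative sketch (not from the original header): handing a
 * prepared descriptor chain to the hardware with the helpers above.
 * `first_desc` is assumed to be the bus address of the first hardware
 * descriptor; the busy/append ordering is this example's reading of
 * the helpers, not a statement of the driver's exact policy.
 */
static inline void example_kick_channel(struct iop_adma_chan *chan,
					u32 first_desc)
{
	if (!iop_chan_is_busy(chan)) {
		iop_chan_set_next_descriptor(chan, first_desc);
		iop_chan_enable(chan);		/* start from NDAR/ANDAR */
	} else {
		iop_chan_append(chan);		/* resume the active chain */
	}
}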
#endif /* _ADMA_H */

View file

@@ -0,0 +1,312 @@
/*
* arch/arm/include/asm/hardware/iop3xx.h
*
* Intel IOP32X and IOP33X register definitions
*
* Author: Rory Bolt <rorybolt@pacbell.net>
* Copyright (C) 2002 Rory Bolt
* Copyright (C) 2004 Intel Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __IOP3XX_H
#define __IOP3XX_H
/*
* IOP3XX GPIO handling
*/
#define IOP3XX_GPIO_LINE(x) (x)
#ifndef __ASSEMBLY__
extern int init_atu;
extern int iop3xx_get_init_atu(void);
#endif
/*
* IOP3XX processor registers
*/
#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
#define IOP3XX_PERIPHERAL_SIZE 0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
IOP3XX_PERIPHERAL_SIZE - 1)
#define IOP3XX_PERIPHERAL_UPPER_VA (IOP3XX_PERIPHERAL_VIRT_BASE +\
IOP3XX_PERIPHERAL_SIZE - 1)
#define IOP3XX_PMMR_PHYS_TO_VIRT(addr) (u32) ((u32) (addr) -\
(IOP3XX_PERIPHERAL_PHYS_BASE\
- IOP3XX_PERIPHERAL_VIRT_BASE))
#define IOP3XX_REG_ADDR(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + (reg))
/* Address Translation Unit */
#define IOP3XX_ATUVID (volatile u16 *)IOP3XX_REG_ADDR(0x0100)
#define IOP3XX_ATUDID (volatile u16 *)IOP3XX_REG_ADDR(0x0102)
#define IOP3XX_ATUCMD (volatile u16 *)IOP3XX_REG_ADDR(0x0104)
#define IOP3XX_ATUSR (volatile u16 *)IOP3XX_REG_ADDR(0x0106)
#define IOP3XX_ATURID (volatile u8 *)IOP3XX_REG_ADDR(0x0108)
#define IOP3XX_ATUCCR (volatile u32 *)IOP3XX_REG_ADDR(0x0109)
#define IOP3XX_ATUCLSR (volatile u8 *)IOP3XX_REG_ADDR(0x010c)
#define IOP3XX_ATULT (volatile u8 *)IOP3XX_REG_ADDR(0x010d)
#define IOP3XX_ATUHTR (volatile u8 *)IOP3XX_REG_ADDR(0x010e)
#define IOP3XX_ATUBIST (volatile u8 *)IOP3XX_REG_ADDR(0x010f)
#define IOP3XX_IABAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0110)
#define IOP3XX_IAUBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0114)
#define IOP3XX_IABAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0118)
#define IOP3XX_IAUBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x011c)
#define IOP3XX_IABAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0120)
#define IOP3XX_IAUBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0124)
#define IOP3XX_ASVIR (volatile u16 *)IOP3XX_REG_ADDR(0x012c)
#define IOP3XX_ASIR (volatile u16 *)IOP3XX_REG_ADDR(0x012e)
#define IOP3XX_ERBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0130)
#define IOP3XX_ATUILR (volatile u8 *)IOP3XX_REG_ADDR(0x013c)
#define IOP3XX_ATUIPR (volatile u8 *)IOP3XX_REG_ADDR(0x013d)
#define IOP3XX_ATUMGNT (volatile u8 *)IOP3XX_REG_ADDR(0x013e)
#define IOP3XX_ATUMLAT (volatile u8 *)IOP3XX_REG_ADDR(0x013f)
#define IOP3XX_IALR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0140)
#define IOP3XX_IATVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0144)
#define IOP3XX_ERLR (volatile u32 *)IOP3XX_REG_ADDR(0x0148)
#define IOP3XX_ERTVR (volatile u32 *)IOP3XX_REG_ADDR(0x014c)
#define IOP3XX_IALR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0150)
#define IOP3XX_IALR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0154)
#define IOP3XX_IATVR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0158)
#define IOP3XX_OIOWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x015c)
#define IOP3XX_OMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0160)
#define IOP3XX_OUMWTVR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0164)
#define IOP3XX_OMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0168)
#define IOP3XX_OUMWTVR1 (volatile u32 *)IOP3XX_REG_ADDR(0x016c)
#define IOP3XX_OUDWTVR (volatile u32 *)IOP3XX_REG_ADDR(0x0178)
#define IOP3XX_ATUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0180)
#define IOP3XX_PCSR (volatile u32 *)IOP3XX_REG_ADDR(0x0184)
#define IOP3XX_ATUISR (volatile u32 *)IOP3XX_REG_ADDR(0x0188)
#define IOP3XX_ATUIMR (volatile u32 *)IOP3XX_REG_ADDR(0x018c)
#define IOP3XX_IABAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0190)
#define IOP3XX_IAUBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0194)
#define IOP3XX_IALR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0198)
#define IOP3XX_IATVR3 (volatile u32 *)IOP3XX_REG_ADDR(0x019c)
#define IOP3XX_OCCAR (volatile u32 *)IOP3XX_REG_ADDR(0x01a4)
#define IOP3XX_OCCDR (volatile u32 *)IOP3XX_REG_ADDR(0x01ac)
#define IOP3XX_PDSCR (volatile u32 *)IOP3XX_REG_ADDR(0x01bc)
#define IOP3XX_PMCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01c0)
#define IOP3XX_PMNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01c1)
#define IOP3XX_APMCR (volatile u16 *)IOP3XX_REG_ADDR(0x01c2)
#define IOP3XX_APMCSR (volatile u16 *)IOP3XX_REG_ADDR(0x01c4)
#define IOP3XX_PCIXCAPID (volatile u8 *)IOP3XX_REG_ADDR(0x01e0)
#define IOP3XX_PCIXNEXT (volatile u8 *)IOP3XX_REG_ADDR(0x01e1)
#define IOP3XX_PCIXCMD (volatile u16 *)IOP3XX_REG_ADDR(0x01e2)
#define IOP3XX_PCIXSR (volatile u32 *)IOP3XX_REG_ADDR(0x01e4)
#define IOP3XX_PCIIRSR (volatile u32 *)IOP3XX_REG_ADDR(0x01ec)
#define IOP3XX_PCSR_OUT_Q_BUSY (1 << 15)
#define IOP3XX_PCSR_IN_Q_BUSY (1 << 14)
#define IOP3XX_ATUCR_OUT_EN (1 << 1)
#define IOP3XX_INIT_ATU_DEFAULT 0
#define IOP3XX_INIT_ATU_DISABLE -1
#define IOP3XX_INIT_ATU_ENABLE 1
/* Messaging Unit */
#define IOP3XX_IMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0310)
#define IOP3XX_IMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0314)
#define IOP3XX_OMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0318)
#define IOP3XX_OMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x031c)
#define IOP3XX_IDR (volatile u32 *)IOP3XX_REG_ADDR(0x0320)
#define IOP3XX_IISR (volatile u32 *)IOP3XX_REG_ADDR(0x0324)
#define IOP3XX_IIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0328)
#define IOP3XX_ODR (volatile u32 *)IOP3XX_REG_ADDR(0x032c)
#define IOP3XX_OISR (volatile u32 *)IOP3XX_REG_ADDR(0x0330)
#define IOP3XX_OIMR (volatile u32 *)IOP3XX_REG_ADDR(0x0334)
#define IOP3XX_MUCR (volatile u32 *)IOP3XX_REG_ADDR(0x0350)
#define IOP3XX_QBAR (volatile u32 *)IOP3XX_REG_ADDR(0x0354)
#define IOP3XX_IFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0360)
#define IOP3XX_IFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0364)
#define IOP3XX_IPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0368)
#define IOP3XX_IPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x036c)
#define IOP3XX_OFHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0370)
#define IOP3XX_OFTPR (volatile u32 *)IOP3XX_REG_ADDR(0x0374)
#define IOP3XX_OPHPR (volatile u32 *)IOP3XX_REG_ADDR(0x0378)
#define IOP3XX_OPTPR (volatile u32 *)IOP3XX_REG_ADDR(0x037c)
#define IOP3XX_IAR (volatile u32 *)IOP3XX_REG_ADDR(0x0380)
/* DMA Controller */
#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
(0x400 + (chan << 6)))
#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
/* Peripheral bus interface */
#define IOP3XX_PBCR (volatile u32 *)IOP3XX_REG_ADDR(0x0680)
#define IOP3XX_PBISR (volatile u32 *)IOP3XX_REG_ADDR(0x0684)
#define IOP3XX_PBBAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0688)
#define IOP3XX_PBLR0 (volatile u32 *)IOP3XX_REG_ADDR(0x068c)
#define IOP3XX_PBBAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0690)
#define IOP3XX_PBLR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0694)
#define IOP3XX_PBBAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0698)
#define IOP3XX_PBLR2 (volatile u32 *)IOP3XX_REG_ADDR(0x069c)
#define IOP3XX_PBBAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a0)
#define IOP3XX_PBLR3 (volatile u32 *)IOP3XX_REG_ADDR(0x06a4)
#define IOP3XX_PBBAR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06a8)
#define IOP3XX_PBLR4 (volatile u32 *)IOP3XX_REG_ADDR(0x06ac)
#define IOP3XX_PBBAR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b0)
#define IOP3XX_PBLR5 (volatile u32 *)IOP3XX_REG_ADDR(0x06b4)
#define IOP3XX_PMBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x06c0)
#define IOP3XX_PMBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x06e0)
#define IOP3XX_PMBR2 (volatile u32 *)IOP3XX_REG_ADDR(0x06e4)
/* Peripheral performance monitoring unit */
#define IOP3XX_GTMR (volatile u32 *)IOP3XX_REG_ADDR(0x0700)
#define IOP3XX_ESR (volatile u32 *)IOP3XX_REG_ADDR(0x0704)
#define IOP3XX_EMISR (volatile u32 *)IOP3XX_REG_ADDR(0x0708)
#define IOP3XX_GTSR (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
/* PERCR0 DOESN'T EXIST - index from 1! */
#define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
/* Timers */
#define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
#define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004)
#define IOP3XX_TU_TCR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0008)
#define IOP3XX_TU_TCR1 (volatile u32 *)IOP3XX_TIMER_REG(0x000c)
#define IOP3XX_TU_TRR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0010)
#define IOP3XX_TU_TRR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0014)
#define IOP3XX_TU_TISR (volatile u32 *)IOP3XX_TIMER_REG(0x0018)
#define IOP3XX_TU_WDTCR (volatile u32 *)IOP3XX_TIMER_REG(0x001c)
#define IOP_TMR_EN 0x02
#define IOP_TMR_RELOAD 0x04
#define IOP_TMR_PRIVILEGED 0x08
#define IOP_TMR_RATIO_1_1 0x00
/* Watchdog timer definitions */
#define IOP_WDTCR_EN_ARM 0x1e1e1e1e
#define IOP_WDTCR_EN 0xe1e1e1e1
/* iop3xx does not support stopping the watchdog, so we just re-arm */
#define IOP_WDTCR_DIS_ARM (IOP_WDTCR_EN_ARM)
#define IOP_WDTCR_DIS (IOP_WDTCR_EN)
/* Application accelerator unit */
#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7)
/* I2C bus interface unit */
#define IOP3XX_ICR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1680)
#define IOP3XX_ISR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1684)
#define IOP3XX_ISAR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1688)
#define IOP3XX_IDBR0 (volatile u32 *)IOP3XX_REG_ADDR(0x168c)
#define IOP3XX_IBMR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1694)
#define IOP3XX_ICR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a0)
#define IOP3XX_ISR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a4)
#define IOP3XX_ISAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16a8)
#define IOP3XX_IDBR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16ac)
#define IOP3XX_IBMR1 (volatile u32 *)IOP3XX_REG_ADDR(0x16b4)
/*
* IOP3XX I/O and Mem space regions for PCI autoconfiguration
*/
#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
#define IOP3XX_PCI_LOWER_IO_BA 0x00000000
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/reboot.h>
void iop3xx_map_io(void);
void iop_init_cp6_handler(void);
void iop_init_time(unsigned long tickrate);
void iop3xx_restart(enum reboot_mode, const char *);
static inline u32 read_tmr0(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
return val;
}
static inline void write_tmr0(u32 val)
{
asm volatile("mcr p6, 0, %0, c0, c1, 0" : : "r" (val));
}
static inline void write_tmr1(u32 val)
{
asm volatile("mcr p6, 0, %0, c1, c1, 0" : : "r" (val));
}
static inline u32 read_tcr0(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c2, c1, 0" : "=r" (val));
return val;
}
static inline void write_tcr0(u32 val)
{
asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
}
static inline u32 read_tcr1(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c3, c1, 0" : "=r" (val));
return val;
}
static inline void write_tcr1(u32 val)
{
asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
}
static inline void write_trr0(u32 val)
{
asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline void write_trr1(u32 val)
{
asm volatile("mcr p6, 0, %0, c5, c1, 0" : : "r" (val));
}
static inline void write_tisr(u32 val)
{
asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
}
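/*
 * Sketch mirroring what iop_init_time() does (the reload value is an
 * assumption of this example): program timer 0 as a privileged,
 * auto-reloading tick source through the cp6 accessors above.
 */
static inline void example_iop_start_tick(u32 ticks_per_jiffy)
{
	write_trr0(ticks_per_jiffy - 1);	/* reload register */
	write_tmr0(IOP_TMR_EN | IOP_TMR_PRIVILEGED |
		   IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1);
}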
static inline u32 read_wdtcr(void)
{
u32 val;
asm volatile("mrc p6, 0, %0, c7, c1, 0":"=r" (val));
return val;
}
static inline void write_wdtcr(u32 val)
{
asm volatile("mcr p6, 0, %0, c7, c1, 0"::"r" (val));
}
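/*
 * Sketch of the two-stage arm sequence the iop_wdt driver uses: the
 * ARM pattern must be written before the EN pattern for the enable to
 * take effect. (Disabling is impossible on iop3xx, per the comment
 * next to IOP_WDTCR_DIS above.)
 */
static inline void example_iop_wdt_arm(void)
{
	write_wdtcr(IOP_WDTCR_EN_ARM);
	write_wdtcr(IOP_WDTCR_EN);
}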
extern unsigned long get_iop_tick_rate(void);
/* Only iop13xx has these registers; we define them here to present a
 * common register interface for the iop_wdt driver.
*/
#define IOP_RCSR_WDT (0)
static inline u32 read_rcsr(void)
{
return 0;
}
static inline void write_wdtsr(u32 val)
{
do { } while (0);
}
extern struct platform_device iop3xx_dma_0_channel;
extern struct platform_device iop3xx_dma_1_channel;
extern struct platform_device iop3xx_aau_channel;
extern struct platform_device iop3xx_i2c0_device;
extern struct platform_device iop3xx_i2c1_device;
#endif
#endif

View file

@@ -0,0 +1,119 @@
/*
* Copyright © 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef IOP_ADMA_H
#define IOP_ADMA_H
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#define IOP_ADMA_SLOT_SIZE 32
#define IOP_ADMA_THRESHOLD 4
#ifdef DEBUG
#define IOP_PARANOIA 1
#else
#define IOP_PARANOIA 0
#endif
#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x))
/**
* struct iop_adma_device - internal representation of an ADMA device
* @pdev: Platform device
* @id: HW ADMA Device selector
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
* @common: embedded struct dma_device
*/
struct iop_adma_device {
struct platform_device *pdev;
int id;
dma_addr_t dma_desc_pool;
void *dma_desc_pool_virt;
struct dma_device common;
};
/**
 * struct iop_adma_chan - internal representation of an ADMA channel
* @pending: allows batching of hardware operations
* @lock: serializes enqueue/dequeue operations to the slot pool
* @mmr_base: memory mapped register base
* @chain: device chain view of the descriptors
* @device: parent device
* @common: common dmaengine channel object members
* @last_used: place holder for allocation to continue from where it left off
* @all_slots: complete domain of slots usable by the channel
* @slots_allocated: records the actual size of the descriptor slot pool
* @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
*/
struct iop_adma_chan {
int pending;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
struct list_head chain;
struct iop_adma_device *device;
struct dma_chan common;
struct iop_adma_desc_slot *last_used;
struct list_head all_slots;
int slots_allocated;
struct tasklet_struct irq_tasklet;
};
/**
* struct iop_adma_desc_slot - IOP-ADMA software descriptor
* @slot_node: node on the iop_adma_chan.all_slots list
 * @chain_node: node on the iop_adma_chan.chain list
* @hw_desc: virtual address of the hardware descriptor chain
* @phys: hardware address of the hardware descriptor chain
* @group_head: first operation in a transaction
 * @slot_cnt: total slots used in a transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
* for example transfer lengths larger than the supported hw max
* @xor_check_result: result of zero sum
 * @crc32_result: result of the CRC calculation
*/
struct iop_adma_desc_slot {
struct list_head slot_node;
struct list_head chain_node;
void *hw_desc;
struct iop_adma_desc_slot *group_head;
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
u32 *xor_check_result;
u32 *crc32_result;
u32 *pq_check_result;
};
};
struct iop_adma_platform_data {
int hw_id;
dma_cap_mask_t cap_mask;
size_t pool_size;
};
#define to_iop_sw_desc(addr_hw_desc) \
container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
#define iop_hw_desc_slot_idx(hw_desc, idx) \
( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) )
#endif

View file

@@ -0,0 +1,115 @@
/*
 * arch/arm/include/asm/hardware/it8152.h
*
* Copyright Compulab Ltd., 2006,2007
* Mike Rapoport <mike@compulab.co.il>
*
* ITE 8152 companion chip register definitions
*/
#ifndef __ASM_HARDWARE_IT8152_H
#define __ASM_HARDWARE_IT8152_H
#include <mach/irqs.h>
extern void __iomem *it8152_base_address;
#define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
#define IT8152_CFGREG_BASE (it8152_base_address + 0x03f00000)
#define __REG_IT8152(x) (it8152_base_address + (x))
#define IT8152_PCI_CFG_ADDR __REG_IT8152(0x3f00800)
#define IT8152_PCI_CFG_DATA __REG_IT8152(0x3f00804)
#define IT8152_INTC_LDCNIRR __REG_IT8152(0x3f00300)
#define IT8152_INTC_LDPNIRR __REG_IT8152(0x3f00304)
#define IT8152_INTC_LDCNIMR __REG_IT8152(0x3f00308)
#define IT8152_INTC_LDPNIMR __REG_IT8152(0x3f0030C)
#define IT8152_INTC_LDNITR __REG_IT8152(0x3f00310)
#define IT8152_INTC_LDNIAR __REG_IT8152(0x3f00314)
#define IT8152_INTC_LPCNIRR __REG_IT8152(0x3f00320)
#define IT8152_INTC_LPPNIRR __REG_IT8152(0x3f00324)
#define IT8152_INTC_LPCNIMR __REG_IT8152(0x3f00328)
#define IT8152_INTC_LPPNIMR __REG_IT8152(0x3f0032C)
#define IT8152_INTC_LPNITR __REG_IT8152(0x3f00330)
#define IT8152_INTC_LPNIAR __REG_IT8152(0x3f00334)
#define IT8152_INTC_PDCNIRR __REG_IT8152(0x3f00340)
#define IT8152_INTC_PDPNIRR __REG_IT8152(0x3f00344)
#define IT8152_INTC_PDCNIMR __REG_IT8152(0x3f00348)
#define IT8152_INTC_PDPNIMR __REG_IT8152(0x3f0034C)
#define IT8152_INTC_PDNITR __REG_IT8152(0x3f00350)
#define IT8152_INTC_PDNIAR __REG_IT8152(0x3f00354)
#define IT8152_INTC_INTC_TYPER __REG_IT8152(0x3f003FC)
#define IT8152_GPIO_GPDR __REG_IT8152(0x3f00500)
/*
Interrupt controller per register summary:
---------------------------------------
LDCNIRR:
IT8152_LD_IRQ(8) PCICLK stop
IT8152_LD_IRQ(7) MCLK ready
IT8152_LD_IRQ(6) s/w
IT8152_LD_IRQ(5) UART
IT8152_LD_IRQ(4) GPIO
IT8152_LD_IRQ(3) TIMER 4
IT8152_LD_IRQ(2) TIMER 3
IT8152_LD_IRQ(1) TIMER 2
IT8152_LD_IRQ(0) TIMER 1
LPCNIRR:
IT8152_LP_IRQ(x) serial IRQ x
PDCNIRR:
IT8152_PD_IRQ(14) PCISERR
IT8152_PD_IRQ(13) CPU/PCI bridge target abort (h2pTADR)
IT8152_PD_IRQ(12) CPU/PCI bridge master abort (h2pMADR)
IT8152_PD_IRQ(11) PCI INTD
IT8152_PD_IRQ(10) PCI INTC
IT8152_PD_IRQ(9) PCI INTB
IT8152_PD_IRQ(8) PCI INTA
IT8152_PD_IRQ(7) serial INTD
IT8152_PD_IRQ(6) serial INTC
IT8152_PD_IRQ(5) serial INTB
IT8152_PD_IRQ(4) serial INTA
IT8152_PD_IRQ(3) serial IRQ IOCHK (IOCHKR)
IT8152_PD_IRQ(2) chaining DMA (CDMAR)
IT8152_PD_IRQ(1) USB (USBR)
IT8152_PD_IRQ(0) Audio controller (ACR)
*/
#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
#define IT8152_LD_IRQ_COUNT 9
#define IT8152_LP_IRQ_COUNT 16
#define IT8152_PD_IRQ_COUNT 15
/* Priorities: */
#define IT8152_PD_IRQ(i) IT8152_IRQ(i)
#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT)
#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT)
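/*
* Editor-added worked example: with the grouping above, the PD IRQs
* occupy the first 15 board IRQs, LP the next 16 and LD the last 9:
*
*	IT8152_PD_IRQ(0) == IRQ_BOARD_START + 0
*	IT8152_LP_IRQ(0) == IRQ_BOARD_START + 15
*	IT8152_LD_IRQ(0) == IRQ_BOARD_START + 31
*
* so IT8152_LD_IRQ(8) == IRQ_BOARD_START + 39, just below IT8152_LAST_IRQ.
*/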
/* frequently used interrupts */
#define IT8152_PCISERR IT8152_PD_IRQ(14)
#define IT8152_H2PTADR IT8152_PD_IRQ(13)
#define IT8152_H2PMAR IT8152_PD_IRQ(12)
#define IT8152_PCI_INTD IT8152_PD_IRQ(11)
#define IT8152_PCI_INTC IT8152_PD_IRQ(10)
#define IT8152_PCI_INTB IT8152_PD_IRQ(9)
#define IT8152_PCI_INTA IT8152_PD_IRQ(8)
#define IT8152_CDMA_INT IT8152_PD_IRQ(2)
#define IT8152_USB_INT IT8152_PD_IRQ(1)
#define IT8152_AUDIO_INT IT8152_PD_IRQ(0)
struct pci_dev;
struct pci_sys_data;
extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
extern void it8152_init_irq(void);
extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
extern struct pci_ops it8152_ops;
#endif /* __ASM_HARDWARE_IT8152_H */

View file

@@ -0,0 +1,221 @@
/*
* arch/arm/include/asm/hardware/locomo.h
*
* This file contains the definitions for the LoCoMo G/A Chip
*
* (C) Copyright 2004 John Lenz
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Based on sa1111.h
*/
#ifndef _ASM_ARCH_LOCOMO
#define _ASM_ARCH_LOCOMO
#define locomo_writel(val,addr) ({ *(volatile u16 *)(addr) = (val); })
#define locomo_readl(addr) (*(volatile u16 *)(addr))
/* LOCOMO version */
#define LOCOMO_VER 0x00
/* Pin status */
#define LOCOMO_ST 0x04
/* 32 kHz clock */
#define LOCOMO_C32K 0x08
/* Interrupt controller */
#define LOCOMO_ICR 0x0C
/* MCS decoder for boot selecting */
#define LOCOMO_MCSX0 0x10
#define LOCOMO_MCSX1 0x14
#define LOCOMO_MCSX2 0x18
#define LOCOMO_MCSX3 0x1c
/* Touch panel controller */
#define LOCOMO_ASD 0x20 /* AD start delay */
#define LOCOMO_HSD 0x28 /* HSYS delay */
#define LOCOMO_HSC 0x2c /* HSYS period */
#define LOCOMO_TADC 0x30 /* tablet ADC clock */
/* Long time timer */
#define LOCOMO_LTC 0xd8 /* LTC interrupt setting */
#define LOCOMO_LTINT 0xdc /* LTC interrupt */
/* DAC control signal for LCD (COMADJ ) */
#define LOCOMO_DAC 0xe0
/* DAC control */
#define LOCOMO_DAC_SCLOEB 0x08 /* SCL pin output data */
#define LOCOMO_DAC_TEST 0x04 /* Test bit */
#define LOCOMO_DAC_SDA 0x02 /* SDA pin level (read-only) */
#define LOCOMO_DAC_SDAOEB 0x01 /* SDA pin output data */
/* SPI interface */
#define LOCOMO_SPI 0x60
#define LOCOMO_SPIMD 0x00 /* SPI mode setting */
#define LOCOMO_SPICT 0x04 /* SPI mode control */
#define LOCOMO_SPIST 0x08 /* SPI status */
#define LOCOMO_SPI_TEND (1 << 3) /* Transfer end bit */
#define LOCOMO_SPI_REND (1 << 2) /* Receive end bit */
#define LOCOMO_SPI_RFW (1 << 1) /* write buffer bit */
#define LOCOMO_SPI_RFR (1) /* read buffer bit */
#define LOCOMO_SPIIS 0x10 /* SPI interrupt status */
#define LOCOMO_SPIWE 0x14 /* SPI interrupt status write enable */
#define LOCOMO_SPIIE 0x18 /* SPI interrupt enable */
#define LOCOMO_SPIIR 0x1c /* SPI interrupt request */
#define LOCOMO_SPITD 0x20 /* SPI transfer data write */
#define LOCOMO_SPIRD 0x24 /* SPI receive data read */
#define LOCOMO_SPITS 0x28 /* SPI transfer data shift */
#define LOCOMO_SPIRS 0x2C /* SPI receive data shift */
/* GPIO */
#define LOCOMO_GPD 0x90 /* GPIO direction */
#define LOCOMO_GPE 0x94 /* GPIO input enable */
#define LOCOMO_GPL 0x98 /* GPIO level */
#define LOCOMO_GPO 0x9c /* GPIO out data setting */
#define LOCOMO_GRIE 0xa0 /* GPIO rise detection */
#define LOCOMO_GFIE 0xa4 /* GPIO fall detection */
#define LOCOMO_GIS 0xa8 /* GPIO edge detection status */
#define LOCOMO_GWE 0xac /* GPIO status write enable */
#define LOCOMO_GIE 0xb0 /* GPIO interrupt enable */
#define LOCOMO_GIR 0xb4 /* GPIO interrupt request */
#define LOCOMO_GPIO(Nb) (0x01 << (Nb))
#define LOCOMO_GPIO_RTS LOCOMO_GPIO(0)
#define LOCOMO_GPIO_CTS LOCOMO_GPIO(1)
#define LOCOMO_GPIO_DSR LOCOMO_GPIO(2)
#define LOCOMO_GPIO_DTR LOCOMO_GPIO(3)
#define LOCOMO_GPIO_LCD_VSHA_ON LOCOMO_GPIO(4)
#define LOCOMO_GPIO_LCD_VSHD_ON LOCOMO_GPIO(5)
#define LOCOMO_GPIO_LCD_VEE_ON LOCOMO_GPIO(6)
#define LOCOMO_GPIO_LCD_MOD LOCOMO_GPIO(7)
#define LOCOMO_GPIO_DAC_ON LOCOMO_GPIO(8)
#define LOCOMO_GPIO_FL_VR LOCOMO_GPIO(9)
#define LOCOMO_GPIO_DAC_SDATA LOCOMO_GPIO(10)
#define LOCOMO_GPIO_DAC_SCK LOCOMO_GPIO(11)
#define LOCOMO_GPIO_DAC_SLOAD LOCOMO_GPIO(12)
#define LOCOMO_GPIO_CARD_DETECT LOCOMO_GPIO(13)
#define LOCOMO_GPIO_WRITE_PROT LOCOMO_GPIO(14)
#define LOCOMO_GPIO_CARD_POWER LOCOMO_GPIO(15)
/* Start the definitions of the devices. Each device has an initial
* base address and a series of offsets from that base address. */
/* Keyboard controller */
#define LOCOMO_KEYBOARD 0x40
#define LOCOMO_KIB 0x00 /* KIB level */
#define LOCOMO_KSC 0x04 /* KSTRB control */
#define LOCOMO_KCMD 0x08 /* KSTRB command */
#define LOCOMO_KIC 0x0c /* Key interrupt */
/* Front light adjustment controller */
#define LOCOMO_FRONTLIGHT 0xc8
#define LOCOMO_ALS 0x00 /* Adjust light cycle */
#define LOCOMO_ALD 0x04 /* Adjust light duty */
#define LOCOMO_ALC_EN 0x8000
/* Backlight controller: TFT signal */
#define LOCOMO_BACKLIGHT 0x38
#define LOCOMO_TC 0x00 /* TFT control signal */
#define LOCOMO_CPSD 0x04 /* CPS delay */
/* Audio controller */
#define LOCOMO_AUDIO 0x54
#define LOCOMO_ACC 0x00 /* Audio clock */
#define LOCOMO_PAIF 0xD0 /* PCM audio interface */
/* Audio clock */
#define LOCOMO_ACC_XON 0x80
#define LOCOMO_ACC_XEN 0x40
#define LOCOMO_ACC_XSEL0 0x00
#define LOCOMO_ACC_XSEL1 0x20
#define LOCOMO_ACC_MCLKEN 0x10
#define LOCOMO_ACC_64FSEN 0x08
#define LOCOMO_ACC_CLKSEL000 0x00 /* mclk 2 */
#define LOCOMO_ACC_CLKSEL001 0x01 /* mclk 3 */
#define LOCOMO_ACC_CLKSEL010 0x02 /* mclk 4 */
#define LOCOMO_ACC_CLKSEL011 0x03 /* mclk 6 */
#define LOCOMO_ACC_CLKSEL100 0x04 /* mclk 8 */
#define LOCOMO_ACC_CLKSEL101 0x05 /* mclk 12 */
/* PCM audio interface */
#define LOCOMO_PAIF_SCINV 0x20
#define LOCOMO_PAIF_SCEN 0x10
#define LOCOMO_PAIF_LRCRST 0x08
#define LOCOMO_PAIF_LRCEVE 0x04
#define LOCOMO_PAIF_LRCINV 0x02
#define LOCOMO_PAIF_LRCEN 0x01
/* LED controller */
#define LOCOMO_LED 0xe8
#define LOCOMO_LPT0 0x00
#define LOCOMO_LPT1 0x04
/* LED control */
#define LOCOMO_LPT_TOFH 0x80
#define LOCOMO_LPT_TOFL 0x08
#define LOCOMO_LPT_TOH(TOH) ((TOH & 0x7) << 4)
#define LOCOMO_LPT_TOL(TOL) ((TOL & 0x7))
extern struct bus_type locomo_bus_type;
#define LOCOMO_DEVID_KEYBOARD 0
#define LOCOMO_DEVID_FRONTLIGHT 1
#define LOCOMO_DEVID_BACKLIGHT 2
#define LOCOMO_DEVID_AUDIO 3
#define LOCOMO_DEVID_LED 4
#define LOCOMO_DEVID_UART 5
#define LOCOMO_DEVID_SPI 6
struct locomo_dev {
struct device dev;
unsigned int devid;
unsigned int irq[1];
void *mapbase;
unsigned long length;
u64 dma_mask;
};
#define LOCOMO_DEV(_d) container_of((_d), struct locomo_dev, dev)
#define locomo_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define locomo_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
struct locomo_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct locomo_dev *);
int (*remove)(struct locomo_dev *);
int (*suspend)(struct locomo_dev *, pm_message_t);
int (*resume)(struct locomo_dev *);
};
#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
int locomo_driver_register(struct locomo_driver *);
void locomo_driver_unregister(struct locomo_driver *);
/* GPIO control functions */
void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir);
int locomo_gpio_read_level(struct device *dev, unsigned int bits);
int locomo_gpio_read_output(struct device *dev, unsigned int bits);
void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set);
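/*
* Editor-added usage sketch (illustrative only; "dev" is a hypothetical
* device pointer, and treating dir == 0 as "output" is an assumption,
* since the polarity is not documented in this header):
*
*	locomo_gpio_set_dir(dev, LOCOMO_GPIO_CARD_POWER, 0);
*	locomo_gpio_write(dev, LOCOMO_GPIO_CARD_POWER, 1);
*/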
/* M62332 control function */
void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel);
/* Frontlight control */
void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
struct locomo_platform_data {
int irq_base; /* IRQ base for cascaded on-chip IRQs */
};
#endif

View file

@@ -0,0 +1,26 @@
/*
* arch/arm/include/asm/hardware/memc.h
*
* Copyright (C) Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define VDMA_ALIGNMENT PAGE_SIZE
#define VDMA_XFERSIZE 16
#define VDMA_INIT 0
#define VDMA_START 1
#define VDMA_END 2
#ifndef __ASSEMBLY__
extern void memc_write(unsigned int reg, unsigned long val);
#define video_set_dma(start,end,offset) \
do { \
memc_write (VDMA_START, (start >> 2)); \
memc_write (VDMA_END, (end - VDMA_XFERSIZE) >> 2); \
memc_write (VDMA_INIT, (offset >> 2)); \
} while (0)
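/*
* Editor-added worked example (hypothetical 32 KB frame buffer at
* screen_start): the MEMC takes word addresses, hence the >> 2 above,
* and the end register excludes the final 16-byte transfer:
*
*	video_set_dma(screen_start, screen_start + 0x8000, screen_start);
*/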
#endif

View file

@@ -0,0 +1,475 @@
/*
* arch/arm/include/asm/hardware/sa1111.h
*
* Copyright (C) 2000 John G Dorsey <john+@cs.cmu.edu>
*
* This file contains definitions for the SA-1111 Companion Chip.
* (Structure and naming borrowed from SA-1101.h, by Peter Danielsson.)
*
* Macro that calculates real address for registers in the SA-1111
*/
#ifndef _ASM_ARCH_SA1111
#define _ASM_ARCH_SA1111
#include <mach/bitfield.h>
/*
* The SA1111 is always located at virtual 0xf4000000, and is always
* "native" endian.
*/
#define SA1111_VBASE 0xf4000000
/* Don't use these! */
#define SA1111_p2v( x ) ((x) - SA1111_BASE + SA1111_VBASE)
#define SA1111_v2p( x ) ((x) - SA1111_VBASE + SA1111_BASE)
#ifndef __ASSEMBLY__
#define _SA1111(x) ((x) + sa1111->resource.start)
#endif
#define sa1111_writel(val,addr) __raw_writel(val, addr)
#define sa1111_readl(addr) __raw_readl(addr)
/*
* 26 bits of the SA-1110 address bus are available to the SA-1111.
* Use these when feeding target addresses to the DMA engines.
*/
#define SA1111_ADDR_WIDTH (26)
#define SA1111_ADDR_MASK ((1<<SA1111_ADDR_WIDTH)-1)
#define SA1111_DMA_ADDR(x) ((x)&SA1111_ADDR_MASK)
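/*
* Editor-added illustration: only the low 26 address bits reach the
* SA-1111, so a physical buffer address (buf_phys, hypothetical) must
* be reduced before being handed to a DMA engine:
*
*	dma_addr_t bus = SA1111_DMA_ADDR(buf_phys);
*/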
/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
#define SA1111_SAC_DMA_MIN_XFER (0x800)
/*
* System Bus Interface (SBI)
*
* Registers
* SKCR Control Register
* SMCR Shared Memory Controller Register
* SKID ID Register
*/
#define SA1111_SKCR 0x0000
#define SA1111_SMCR 0x0004
#define SA1111_SKID 0x0008
#define SKCR_PLL_BYPASS (1<<0)
#define SKCR_RCLKEN (1<<1)
#define SKCR_SLEEP (1<<2)
#define SKCR_DOZE (1<<3)
#define SKCR_VCO_OFF (1<<4)
#define SKCR_SCANTSTEN (1<<5)
#define SKCR_CLKTSTEN (1<<6)
#define SKCR_RDYEN (1<<7)
#define SKCR_SELAC (1<<8)
#define SKCR_OPPC (1<<9)
#define SKCR_PLLTSTEN (1<<10)
#define SKCR_USBIOTSTEN (1<<11)
/*
* Don't believe the specs! Take them, throw them outside. Leave them
* there for a week. Spit on them. Walk on them. Stamp on them.
* Pour gasoline over them and finally burn them. Now think about coding.
* - The October 1999 errata (278260-007) says it's bit 13, 1 to enable.
* - The Feb 2001 errata (278260-010) says that the previous errata
* (278260-009) is wrong, and it's actually bit 12, fixed in spec
* 278242-003.
* - The SA1111 manual (278242) says bit 12, but 0 to enable.
* - Reality is bit 13, 1 to enable.
* -- rmk
*/
#define SKCR_OE_EN (1<<13)
#define SMCR_DTIM (1<<0)
#define SMCR_MBGE (1<<1)
#define SMCR_DRAC_0 (1<<2)
#define SMCR_DRAC_1 (1<<3)
#define SMCR_DRAC_2 (1<<4)
#define SMCR_DRAC Fld(3, 2)
#define SMCR_CLAT (1<<5)
#define SKID_SIREV_MASK (0x000000f0)
#define SKID_MTREV_MASK (0x0000000f)
#define SKID_ID_MASK (0xffffff00)
#define SKID_SA1111_ID (0x690cc200)
/*
* System Controller
*
* Registers
* SKPCR Power Control Register
* SKCDR Clock Divider Register
* SKAUD Audio Clock Divider Register
* SKPMC PS/2 Mouse Clock Divider Register
* SKPTC PS/2 Track Pad Clock Divider Register
* SKPEN0 PWM0 Enable Register
* SKPWM0 PWM0 Clock Register
* SKPEN1 PWM1 Enable Register
* SKPWM1 PWM1 Clock Register
*/
#define SA1111_SKPCR 0x0200
#define SA1111_SKCDR 0x0204
#define SA1111_SKAUD 0x0208
#define SA1111_SKPMC 0x020c
#define SA1111_SKPTC 0x0210
#define SA1111_SKPEN0 0x0214
#define SA1111_SKPWM0 0x0218
#define SA1111_SKPEN1 0x021c
#define SA1111_SKPWM1 0x0220
#define SKPCR_UCLKEN (1<<0)
#define SKPCR_ACCLKEN (1<<1)
#define SKPCR_I2SCLKEN (1<<2)
#define SKPCR_L3CLKEN (1<<3)
#define SKPCR_SCLKEN (1<<4)
#define SKPCR_PMCLKEN (1<<5)
#define SKPCR_PTCLKEN (1<<6)
#define SKPCR_DCLKEN (1<<7)
#define SKPCR_PWMCLKEN (1<<8)
/* USB Host controller */
#define SA1111_USB 0x0400
/*
* Serial Audio Controller
*
* Registers
* SACR0 Serial Audio Common Control Register
* SACR1 Serial Audio Alternate Mode (I2C/MSB) Control Register
* SACR2 Serial Audio AC-link Control Register
* SASR0 Serial Audio I2S/MSB Interface & FIFO Status Register
* SASR1 Serial Audio AC-link Interface & FIFO Status Register
* SASCR Serial Audio Status Clear Register
* L3_CAR L3 Control Bus Address Register
* L3_CDR L3 Control Bus Data Register
* ACCAR AC-link Command Address Register
* ACCDR AC-link Command Data Register
* ACSAR AC-link Status Address Register
* ACSDR AC-link Status Data Register
* SADTCS Serial Audio DMA Transmit Control/Status Register
* SADTSA Serial Audio DMA Transmit Buffer Start Address A
* SADTCA Serial Audio DMA Transmit Buffer Count Register A
* SADTSB Serial Audio DMA Transmit Buffer Start Address B
* SADTCB Serial Audio DMA Transmit Buffer Count Register B
* SADRCS Serial Audio DMA Receive Control/Status Register
* SADRSA Serial Audio DMA Receive Buffer Start Address A
* SADRCA Serial Audio DMA Receive Buffer Count Register A
* SADRSB Serial Audio DMA Receive Buffer Start Address B
* SADRCB Serial Audio DMA Receive Buffer Count Register B
* SAITR Serial Audio Interrupt Test Register
* SADR Serial Audio Data Register (16 x 32-bit)
*/
#define SA1111_SERAUDIO 0x0600
/*
* These are offsets from the above base.
*/
#define SA1111_SACR0 0x00
#define SA1111_SACR1 0x04
#define SA1111_SACR2 0x08
#define SA1111_SASR0 0x0c
#define SA1111_SASR1 0x10
#define SA1111_SASCR 0x18
#define SA1111_L3_CAR 0x1c
#define SA1111_L3_CDR 0x20
#define SA1111_ACCAR 0x24
#define SA1111_ACCDR 0x28
#define SA1111_ACSAR 0x2c
#define SA1111_ACSDR 0x30
#define SA1111_SADTCS 0x34
#define SA1111_SADTSA 0x38
#define SA1111_SADTCA 0x3c
#define SA1111_SADTSB 0x40
#define SA1111_SADTCB 0x44
#define SA1111_SADRCS 0x48
#define SA1111_SADRSA 0x4c
#define SA1111_SADRCA 0x50
#define SA1111_SADRSB 0x54
#define SA1111_SADRCB 0x58
#define SA1111_SAITR 0x5c
#define SA1111_SADR 0x80
#ifndef CONFIG_ARCH_PXA
#define SACR0_ENB (1<<0)
#define SACR0_BCKD (1<<2)
#define SACR0_RST (1<<3)
#define SACR1_AMSL (1<<0)
#define SACR1_L3EN (1<<1)
#define SACR1_L3MB (1<<2)
#define SACR1_DREC (1<<3)
#define SACR1_DRPL (1<<4)
#define SACR1_ENLBF (1<<5)
#define SACR2_TS3V (1<<0)
#define SACR2_TS4V (1<<1)
#define SACR2_WKUP (1<<2)
#define SACR2_DREC (1<<3)
#define SACR2_DRPL (1<<4)
#define SACR2_ENLBF (1<<5)
#define SACR2_RESET (1<<6)
#define SASR0_TNF (1<<0)
#define SASR0_RNE (1<<1)
#define SASR0_BSY (1<<2)
#define SASR0_TFS (1<<3)
#define SASR0_RFS (1<<4)
#define SASR0_TUR (1<<5)
#define SASR0_ROR (1<<6)
#define SASR0_L3WD (1<<16)
#define SASR0_L3RD (1<<17)
#define SASR1_TNF (1<<0)
#define SASR1_RNE (1<<1)
#define SASR1_BSY (1<<2)
#define SASR1_TFS (1<<3)
#define SASR1_RFS (1<<4)
#define SASR1_TUR (1<<5)
#define SASR1_ROR (1<<6)
#define SASR1_CADT (1<<16)
#define SASR1_SADR (1<<17)
#define SASR1_RSTO (1<<18)
#define SASR1_CLPM (1<<19)
#define SASR1_CRDY (1<<20)
#define SASR1_RS3V (1<<21)
#define SASR1_RS4V (1<<22)
#define SASCR_TUR (1<<5)
#define SASCR_ROR (1<<6)
#define SASCR_DTS (1<<16)
#define SASCR_RDD (1<<17)
#define SASCR_STO (1<<18)
#define SADTCS_TDEN (1<<0)
#define SADTCS_TDIE (1<<1)
#define SADTCS_TDBDA (1<<3)
#define SADTCS_TDSTA (1<<4)
#define SADTCS_TDBDB (1<<5)
#define SADTCS_TDSTB (1<<6)
#define SADTCS_TBIU (1<<7)
#define SADRCS_RDEN (1<<0)
#define SADRCS_RDIE (1<<1)
#define SADRCS_RDBDA (1<<3)
#define SADRCS_RDSTA (1<<4)
#define SADRCS_RDBDB (1<<5)
#define SADRCS_RDSTB (1<<6)
#define SADRCS_RBIU (1<<7)
#define SAD_CS_DEN (1<<0)
#define SAD_CS_DIE (1<<1) /* Not functional on metal 1 */
#define SAD_CS_DBDA (1<<3) /* Not functional on metal 1 */
#define SAD_CS_DSTA (1<<4)
#define SAD_CS_DBDB (1<<5) /* Not functional on metal 1 */
#define SAD_CS_DSTB (1<<6)
#define SAD_CS_BIU (1<<7) /* Not functional on metal 1 */
#define SAITR_TFS (1<<0)
#define SAITR_RFS (1<<1)
#define SAITR_TUR (1<<2)
#define SAITR_ROR (1<<3)
#define SAITR_CADT (1<<4)
#define SAITR_SADR (1<<5)
#define SAITR_RSTO (1<<6)
#define SAITR_TDBDA (1<<8)
#define SAITR_TDBDB (1<<9)
#define SAITR_RDBDA (1<<10)
#define SAITR_RDBDB (1<<11)
#endif /* !CONFIG_ARCH_PXA */
/*
* General-Purpose I/O Interface
*
* Registers
* PA_DDR GPIO Block A Data Direction
* PA_DRR/PA_DWR GPIO Block A Data Value Register (read/write)
* PA_SDR GPIO Block A Sleep Direction
* PA_SSR GPIO Block A Sleep State
* PB_DDR GPIO Block B Data Direction
* PB_DRR/PB_DWR GPIO Block B Data Value Register (read/write)
* PB_SDR GPIO Block B Sleep Direction
* PB_SSR GPIO Block B Sleep State
* PC_DDR GPIO Block C Data Direction
* PC_DRR/PC_DWR GPIO Block C Data Value Register (read/write)
* PC_SDR GPIO Block C Sleep Direction
* PC_SSR GPIO Block C Sleep State
*/
#define SA1111_GPIO 0x1000
#define SA1111_GPIO_PADDR (0x000)
#define SA1111_GPIO_PADRR (0x004)
#define SA1111_GPIO_PADWR (0x004)
#define SA1111_GPIO_PASDR (0x008)
#define SA1111_GPIO_PASSR (0x00c)
#define SA1111_GPIO_PBDDR (0x010)
#define SA1111_GPIO_PBDRR (0x014)
#define SA1111_GPIO_PBDWR (0x014)
#define SA1111_GPIO_PBSDR (0x018)
#define SA1111_GPIO_PBSSR (0x01c)
#define SA1111_GPIO_PCDDR (0x020)
#define SA1111_GPIO_PCDRR (0x024)
#define SA1111_GPIO_PCDWR (0x024)
#define SA1111_GPIO_PCSDR (0x028)
#define SA1111_GPIO_PCSSR (0x02c)
#define GPIO_A0 (1 << 0)
#define GPIO_A1 (1 << 1)
#define GPIO_A2 (1 << 2)
#define GPIO_A3 (1 << 3)
#define GPIO_B0 (1 << 8)
#define GPIO_B1 (1 << 9)
#define GPIO_B2 (1 << 10)
#define GPIO_B3 (1 << 11)
#define GPIO_B4 (1 << 12)
#define GPIO_B5 (1 << 13)
#define GPIO_B6 (1 << 14)
#define GPIO_B7 (1 << 15)
#define GPIO_C0 (1 << 16)
#define GPIO_C1 (1 << 17)
#define GPIO_C2 (1 << 18)
#define GPIO_C3 (1 << 19)
#define GPIO_C4 (1 << 20)
#define GPIO_C5 (1 << 21)
#define GPIO_C6 (1 << 22)
#define GPIO_C7 (1 << 23)
/*
* Interrupt Controller
*
* Registers
* INTTEST0 Test register 0
* INTTEST1 Test register 1
* INTEN0 Interrupt Enable register 0
* INTEN1 Interrupt Enable register 1
* INTPOL0 Interrupt Polarity selection 0
* INTPOL1 Interrupt Polarity selection 1
* INTTSTSEL Interrupt source selection
* INTSTATCLR0 Interrupt Status/Clear 0
* INTSTATCLR1 Interrupt Status/Clear 1
* INTSET0 Interrupt source set 0
* INTSET1 Interrupt source set 1
* WAKE_EN0 Wake-up source enable 0
* WAKE_EN1 Wake-up source enable 1
* WAKE_POL0 Wake-up polarity selection 0
* WAKE_POL1 Wake-up polarity selection 1
*/
#define SA1111_INTC 0x1600
/*
* These are offsets from the above base.
*/
#define SA1111_INTTEST0 0x0000
#define SA1111_INTTEST1 0x0004
#define SA1111_INTEN0 0x0008
#define SA1111_INTEN1 0x000c
#define SA1111_INTPOL0 0x0010
#define SA1111_INTPOL1 0x0014
#define SA1111_INTTSTSEL 0x0018
#define SA1111_INTSTATCLR0 0x001c
#define SA1111_INTSTATCLR1 0x0020
#define SA1111_INTSET0 0x0024
#define SA1111_INTSET1 0x0028
#define SA1111_WAKEEN0 0x002c
#define SA1111_WAKEEN1 0x0030
#define SA1111_WAKEPOL0 0x0034
#define SA1111_WAKEPOL1 0x0038
/* PS/2 Trackpad and Mouse Interfaces */
#define SA1111_KBD 0x0a00
#define SA1111_MSE 0x0c00
/* PCMCIA Interface */
#define SA1111_PCMCIA 0x1800
extern struct bus_type sa1111_bus_type;
#define SA1111_DEVID_SBI (1 << 0)
#define SA1111_DEVID_SK (1 << 1)
#define SA1111_DEVID_USB (1 << 2)
#define SA1111_DEVID_SAC (1 << 3)
#define SA1111_DEVID_SSP (1 << 4)
#define SA1111_DEVID_PS2 (3 << 5)
#define SA1111_DEVID_PS2_KBD (1 << 5)
#define SA1111_DEVID_PS2_MSE (1 << 6)
#define SA1111_DEVID_GPIO (1 << 7)
#define SA1111_DEVID_INT (1 << 8)
#define SA1111_DEVID_PCMCIA (1 << 9)
struct sa1111_dev {
struct device dev;
unsigned int devid;
struct resource res;
void __iomem *mapbase;
unsigned int skpcr_mask;
unsigned int irq[6];
u64 dma_mask;
};
#define SA1111_DEV(_d) container_of((_d), struct sa1111_dev, dev)
#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
struct sa1111_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct sa1111_dev *);
int (*remove)(struct sa1111_dev *);
int (*suspend)(struct sa1111_dev *, pm_message_t);
int (*resume)(struct sa1111_dev *);
void (*shutdown)(struct sa1111_dev *);
};
#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
#define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)
/*
* These frob the SKPCR register, and call platform specific
* enable/disable functions.
*/
int sa1111_enable_device(struct sa1111_dev *);
void sa1111_disable_device(struct sa1111_dev *);
unsigned int sa1111_pll_clock(struct sa1111_dev *);
#define SA1111_AUDIO_ACLINK 0
#define SA1111_AUDIO_I2S 1
void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode);
int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate);
int sa1111_get_audio_rate(struct sa1111_dev *sadev);
int sa1111_check_dma_bug(dma_addr_t addr);
int sa1111_driver_register(struct sa1111_driver *);
void sa1111_driver_unregister(struct sa1111_driver *);
void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int dir, unsigned int sleep_dir);
void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
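/*
* Editor-added sketch of a minimal SA1111 function driver built on the
* interfaces above (names are hypothetical, error handling elided):
*
*	static int example_probe(struct sa1111_dev *sadev)
*	{
*		return sa1111_enable_device(sadev);
*	}
*
*	static int example_remove(struct sa1111_dev *sadev)
*	{
*		sa1111_disable_device(sadev);
*		return 0;
*	}
*
*	static struct sa1111_driver example_driver = {
*		.drv	= { .name = "sa1111-example" },
*		.devid	= SA1111_DEVID_USB,
*		.probe	= example_probe,
*		.remove	= example_remove,
*	};
*
* registered at module init with sa1111_driver_register(&example_driver).
*/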
struct sa1111_platform_data {
int irq_base; /* base for cascaded on-chip IRQs */
unsigned disable_devs;
void *data;
int (*enable)(void *, unsigned);
void (*disable)(void *, unsigned);
};
#endif /* _ASM_ARCH_SA1111 */

View file

@@ -0,0 +1,71 @@
/*
* Definitions for the SCOOP interface found on various Sharp PDAs
*
* Copyright (c) 2004 Richard Purdie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define SCOOP_MCR 0x00
#define SCOOP_CDR 0x04
#define SCOOP_CSR 0x08
#define SCOOP_CPR 0x0C
#define SCOOP_CCR 0x10
#define SCOOP_IRR 0x14
#define SCOOP_IRM 0x14
#define SCOOP_IMR 0x18
#define SCOOP_ISR 0x1C
#define SCOOP_GPCR 0x20
#define SCOOP_GPWR 0x24
#define SCOOP_GPRR 0x28
#define SCOOP_CPR_OUT (1 << 7)
#define SCOOP_CPR_SD_3V (1 << 2)
#define SCOOP_CPR_CF_XV (1 << 1)
#define SCOOP_CPR_CF_3V (1 << 0)
#define SCOOP_GPCR_PA22 (1 << 12)
#define SCOOP_GPCR_PA21 (1 << 11)
#define SCOOP_GPCR_PA20 (1 << 10)
#define SCOOP_GPCR_PA19 (1 << 9)
#define SCOOP_GPCR_PA18 (1 << 8)
#define SCOOP_GPCR_PA17 (1 << 7)
#define SCOOP_GPCR_PA16 (1 << 6)
#define SCOOP_GPCR_PA15 (1 << 5)
#define SCOOP_GPCR_PA14 (1 << 4)
#define SCOOP_GPCR_PA13 (1 << 3)
#define SCOOP_GPCR_PA12 (1 << 2)
#define SCOOP_GPCR_PA11 (1 << 1)
struct scoop_config {
unsigned short io_out;
unsigned short io_dir;
unsigned short suspend_clr;
unsigned short suspend_set;
int gpio_base;
};
/* Structure for linking scoop devices to PCMCIA sockets */
struct scoop_pcmcia_dev {
struct device *dev; /* Pointer to this socket's scoop device */
int irq; /* irq for socket */
int cd_irq;
const char *cd_irq_str;
unsigned char keep_vs;
unsigned char keep_rd;
};
struct scoop_pcmcia_config {
struct scoop_pcmcia_dev *devs;
int num_devs;
void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr);
};
extern struct scoop_pcmcia_config *platform_scoop_config;
void reset_scoop(struct device *dev);
unsigned short read_scoop_reg(struct device *dev, unsigned short reg);
void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data);
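/*
* Editor-added usage sketch, not part of the original interface: switch
* a socket's CF power rail to 3.3V ("scoop" is a hypothetical device
* pointer, and the CPR bit semantics are assumed from their names).
*/
static inline void example_scoop_cf_power_3v(struct device *scoop)
{
	unsigned short cpr = read_scoop_reg(scoop, SCOOP_CPR);

	write_scoop_reg(scoop, SCOOP_CPR, cpr | SCOOP_CPR_CF_3V);
}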

View file

@@ -0,0 +1,28 @@
/*
* ssp.h
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef SSP_H
#define SSP_H
struct ssp_state {
unsigned int cr0;
unsigned int cr1;
};
int ssp_write_word(u16 data);
int ssp_read_word(u16 *data);
int ssp_flush(void);
void ssp_enable(void);
void ssp_disable(void);
void ssp_save_state(struct ssp_state *ssp);
void ssp_restore_state(struct ssp_state *ssp);
int ssp_init(void);
void ssp_exit(void);
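/*
* Editor-added usage sketch, not part of the original interface: a
* full-duplex word exchange using the declarations above (minimal
* error handling, purely illustrative).
*/
static inline int example_ssp_exchange(u16 out, u16 *in)
{
	int ret;

	ssp_enable();
	ret = ssp_write_word(out);
	if (ret == 0)
		ret = ssp_read_word(in);
	ssp_disable();
	return ret;
}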
#endif

View file

@@ -0,0 +1,23 @@
struct clk;
void __sp804_clocksource_and_sched_clock_init(void __iomem *,
const char *, struct clk *, int);
void __sp804_clockevents_init(void __iomem *, unsigned int,
struct clk *, const char *);
static inline void sp804_clocksource_init(void __iomem *base, const char *name)
{
__sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
}
static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name)
{
__sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
}
static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
{
__sp804_clockevents_init(base, irq, NULL, name);
}
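/*
* Editor-added usage sketch (assumption: an SP804 dual timer has
* timer 1 at base + 0x00 and timer 2 at base + 0x20; "base" and "irq"
* are hypothetical): a board might do
*
*	sp804_clocksource_init(base + 0x20, "timer2");
*	sp804_clockevents_init(base, irq, "timer1");
*/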

View file

@@ -0,0 +1,74 @@
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
#include <asm/kmap_types.h>
#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define kmap_prot PAGE_KERNEL
#define flush_cache_kmaps() \
do { \
if (cache_is_vivt()) \
flush_cache_all(); \
} while (0)
extern pte_t *pkmap_page_table;
extern pte_t *fixmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
* page usage count does not decrease to zero while we're using its
* existing virtual mapping in an atomic context. With a VIVT cache this
* is essential to do, but with a VIPT cache this is only an optimization
* so as not to pay the price of establishing a second mapping if an existing
* one can be used. However, on platforms without hardware TLB maintenance
* broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
* the locking involved must also disable IRQs which is incompatible with
* the IPI mechanism used by global TLB operations.
*/
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif
/*
* Needed to be able to broadcast the TLB invalidation for kmap.
*/
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
static inline void *kmap_high_get(struct page *page)
{
return NULL;
}
#endif
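/*
* Editor-added usage sketch of the pattern kmap_high_get() serves:
* reuse an existing kmap from atomic context when one exists, else
* create a temporary mapping (kmap_atomic() comes from the generic
* highmem interface):
*
*	void *vaddr = kmap_high_get(page);
*	if (!vaddr)
*		vaddr = kmap_atomic(page);
*/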
/*
* The following functions are already defined by <linux/highmem.h>
* when CONFIG_HIGHMEM is not set.
*/
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif
#endif

View file

@@ -0,0 +1,71 @@
/*
* arch/arm/include/asm/hugetlb-3level.h
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
#define _ASM_ARM_HUGETLB_3LEVEL_H
/*
* If our huge pte is non-zero then mark the valid bit.
* This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
* ptes.
* (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
*/
static inline pte_t huge_ptep_get(pte_t *ptep)
{
pte_t retval = *ptep;
if (pte_val(retval))
pte_val(retval) |= L_PTE_VALID;
return retval;
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
ptep_clear_flush(vma, addr, ptep);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */

View file

@@ -0,0 +1,84 @@
/*
* arch/arm/include/asm/hugetlb.h
*
* Copyright (C) 2012 ARM Ltd.
*
* Based on arch/x86/include/asm/hugetlb.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_ARM_HUGETLB_H
#define _ASM_ARM_HUGETLB_H
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
#include <asm/hugetlb-3level.h>
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr, unsigned long len)
{
return 0;
}
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
static inline void arch_clear_hugepage_flags(struct page *page)
{
clear_bit(PG_dcache_clean, &page->flags);
}
#endif /* _ASM_ARM_HUGETLB_H */

View file

@@ -0,0 +1,139 @@
#ifndef _ARM_HW_BREAKPOINT_H
#define _ARM_HW_BREAKPOINT_H
#ifdef __KERNEL__
struct task_struct;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct arch_hw_breakpoint_ctrl {
u32 __reserved : 9,
mismatch : 1,
: 9,
len : 8,
type : 2,
privilege : 2,
enabled : 1;
};
struct arch_hw_breakpoint {
u32 address;
u32 trigger;
struct arch_hw_breakpoint_ctrl step_ctrl;
struct arch_hw_breakpoint_ctrl ctrl;
};
static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
{
return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) |
(ctrl.privilege << 1) | ctrl.enabled;
}
static inline void decode_ctrl_reg(u32 reg,
struct arch_hw_breakpoint_ctrl *ctrl)
{
ctrl->enabled = reg & 0x1;
reg >>= 1;
ctrl->privilege = reg & 0x3;
reg >>= 2;
ctrl->type = reg & 0x3;
reg >>= 2;
ctrl->len = reg & 0xff;
reg >>= 17;
ctrl->mismatch = reg & 0x1;
}
/* Debug architecture numbers. */
#define ARM_DEBUG_ARCH_RESERVED 0 /* In case of ptrace ABI updates. */
#define ARM_DEBUG_ARCH_V6 1
#define ARM_DEBUG_ARCH_V6_1 2
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
/* Watchpoints */
#define ARM_BREAKPOINT_LOAD 1
#define ARM_BREAKPOINT_STORE 2
#define ARM_FSR_ACCESS_MASK (1 << 11)
/* Privilege Levels */
#define ARM_BREAKPOINT_PRIV 1
#define ARM_BREAKPOINT_USER 2
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
#define ARM_BREAKPOINT_LEN_2 0x3
#define ARM_BREAKPOINT_LEN_4 0xf
#define ARM_BREAKPOINT_LEN_8 0xff
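/*
* Editor-added worked example: an enabled 4-byte userspace store
* watchpoint encodes via encode_ctrl_reg() above as
*
*	(ARM_BREAKPOINT_LEN_4 << 5) | (ARM_BREAKPOINT_STORE << 3) |
*	(ARM_BREAKPOINT_USER << 1) | 1
*	= (0xf << 5) | (2 << 3) | (2 << 1) | 1 = 0x1f5
*/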
/* Limits */
#define ARM_MAX_BRP 16
#define ARM_MAX_WRP 16
#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
/* DSCR method of entry bits. */
#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
#define ARM_ENTRY_BREAKPOINT 0x1
#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
/* DSCR monitor/halting bits. */
#define ARM_DSCR_HDBGEN (1 << 14)
#define ARM_DSCR_MDBGEN (1 << 15)
/* OSLSR os lock model bits */
#define ARM_OSLSR_OSLM0 (1 << 0)
/* opcode2 numbers for the co-processor instructions. */
#define ARM_OP2_BVR 4
#define ARM_OP2_BCR 5
#define ARM_OP2_WVR 6
#define ARM_OP2_WCR 7
/* Base register numbers for the debug registers. */
#define ARM_BASE_BVR 64
#define ARM_BASE_BCR 80
#define ARM_BASE_WVR 96
#define ARM_BASE_WCR 112
/* Accessor macros for the debug registers. */
#define ARM_DBG_READ(N, M, OP2, VAL) do {\
asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
} while (0)
#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
} while (0)
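/*
* Editor-added usage sketch (assumption: in the v7 debug architecture
* DBGDSCR sits at CRn = c0, CRm = c1, op2 = 0):
*
*	u32 dscr;
*	ARM_DBG_READ(c0, c1, 0, dscr);
*	if (dscr & ARM_DSCR_MDBGEN)
*		; // monitor debug mode is enabled
*/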
struct notifier_block;
struct perf_event;
struct pmu;
extern struct pmu perf_ops_bp;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
extern u8 arch_get_debug_arch(void);
extern u8 arch_get_max_wp_len(void);
extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
int hw_breakpoint_slots(int type);
#else
static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
#endif /* _ARM_HW_BREAKPOINT_H */

View file

@@ -0,0 +1,21 @@
/*
* Nothing to see here yet
*/
#ifndef _ARCH_ARM_HW_IRQ_H
#define _ARCH_ARM_HW_IRQ_H
static inline void ack_bad_irq(int irq)
{
extern unsigned long irq_err_count;
irq_err_count++;
}
void set_irq_flags(unsigned int irq, unsigned int flags);
#define IRQF_VALID (1 << 0)
#define IRQF_PROBE (1 << 1)
#define IRQF_NOAUTOEN (1 << 2)
#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
#endif

View file

@@ -0,0 +1,15 @@
#ifndef __ASMARM_HWCAP_H
#define __ASMARM_HWCAP_H
#include <uapi/asm/hwcap.h>
#if !defined(__ASSEMBLY__)
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
#define ELF_HWCAP2 (elf_hwcap2)
extern unsigned int elf_hwcap, elf_hwcap2;
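/*
* Editor-added note: userspace reads the same mask through the ELF
* auxiliary vector, e.g. with glibc's <sys/auxv.h> (HWCAP_NEON comes
* from the uapi header included above):
*
*	if (getauxval(AT_HWCAP) & HWCAP_NEON)
*		; // NEON is available
*/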
#endif
#endif

View file

@@ -0,0 +1,6 @@
#ifndef _ASM_ARM_HYPERVISOR_H
#define _ASM_ARM_HYPERVISOR_H
#include <asm/xen/hypervisor.h>
#endif

View file

@@ -0,0 +1,23 @@
/*
* arch/arm/include/asm/ide.h
*
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
/*
* This file contains the ARM architecture specific IDE code.
*/
#ifndef __ASMARM_IDE_H
#define __ASMARM_IDE_H
#ifdef __KERNEL__
#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
#define __ide_mm_outsl(port,addr,len) writesl(port,addr,len)
#endif /* __KERNEL__ */
#endif /* __ASMARM_IDE_H */

View file

@@ -0,0 +1,14 @@
#ifndef __ASM_IDMAP_H
#define __ASM_IDMAP_H
#include <linux/compiler.h>
#include <asm/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
extern pgd_t *idmap_pgd;
void setup_mm_for_reboot(void);
#endif /* __ASM_IDMAP_H */

arch/arm/include/asm/io.h
View file

@@ -0,0 +1,423 @@
/*
* arch/arm/include/asm/io.h
*
* Copyright (C) 1996-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Modifications:
* 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both
* constant addresses and variable addresses.
* 04-Dec-1997 RMK Moved a lot of this stuff to the new architecture
* specific IO header files.
* 27-Mar-1999 PJB Second parameter of memcpy_toio is const..
* 04-Apr-1999 PJB Added check_signature.
* 12-Dec-1999 RMK More cleanups
* 18-Jun-2000 RMK Removed virt_to_* and friends definitions
* 05-Oct-2004 BJD Moved memory string functions to use void __iomem
*/
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* Atomic MMIO-wide IO modify
*/
extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
/*
* Generic IO read/write. These perform native-endian accesses. Note
* that some architectures will want to re-define __raw_{read,write}w.
*/
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#if __LINUX_ARM_ARCH__ < 6
/*
* Half-word accesses are problematic with RiscPC due to limitations of
* the bus. Rather than special-case the machine, just let the compiler
* generate the access for CPUs prior to ARMv6.
*/
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
* When running under a hypervisor, we want to avoid I/O accesses with
* writeback addressing modes as these incur a significant performance
* overhead (the address generation must be emulated in software).
*/
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
: "+Q" (*(volatile u16 __force *)addr)
: "r" (val));
}
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %1, %0"
: "+Q" (*(volatile u16 __force *)addr),
"=r" (val));
return val;
}
#endif
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
: "+Qo" (*(volatile u8 __force *)addr)
: "r" (val));
}
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
: "+Qo" (*(volatile u32 __force *)addr)
: "r" (val));
}
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
asm volatile("ldrb %1, %0"
: "+Qo" (*(volatile u8 __force *)addr),
"=r" (val));
return val;
}
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile("ldr %1, %0"
: "+Qo" (*(volatile u32 __force *)addr),
"=r" (val));
return val;
}
/*
* Architecture ioremap implementation.
*/
#define MT_DEVICE 0
#define MT_DEVICE_NONSHARED 1
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_WC 3
/*
* types 4 onwards can be found in asm/mach/map.h and are undefined
* for ioremap
*/
/*
* __arm_ioremap takes CPU physical address.
* __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
* The _caller variety takes a __builtin_return_address(0) value for
* /proc/vmallocinfo to use - and should only be used in non-inline functions.
*/
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
size_t, unsigned int, void *);
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);
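/*
* Editor-added usage sketch (hypothetical device at physical address
* 0x101f1000):
*
*	void __iomem *regs = __arm_ioremap(0x101f1000, SZ_4K, MT_DEVICE);
*	if (regs) {
*		// access via the readl()/writel() accessors defined below
*		__arm_iounmap(regs);
*	}
*/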
/*
* Bad read/write accesses...
*/
extern void __readwrite_bug(const char *fn);
/*
* A typesafe __io() helper
*/
static inline void __iomem *__typesafe_io(unsigned long addr)
{
return (void __iomem *)addr;
}
#define IOMEM(x) ((void __force __iomem *)(x))
/* IO barriers */
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#include <asm/barrier.h>
#define __iormb() rmb()
#define __iowmb() wmb()
#else
#define __iormb() do { } while (0)
#define __iowmb() do { } while (0)
#endif
/* PCI fixed i/o mapping */
#define PCI_IO_VIRT_BASE 0xfee00000
#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
#if defined(CONFIG_PCI)
void pci_ioremap_set_mem_type(int mem_type);
#else
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif
extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
/*
* Now, pick up the machine-defined IO definitions
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
#elif defined(CONFIG_PCI)
#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
#endif
/*
* This is the limit of PC card/PCI/ISA IO space, which is by default
* 64K if we have PC card, PCI or ISA support. Otherwise, default to
* zero to prevent ISA/PCI drivers claiming IO space (and potentially
* oopsing.)
*
* Only set this larger if you really need inb() et al. to operate over
* a larger address space. Note that SOC_COMMON ioremaps each socket's
* IO space area, and so inb() et al. must be defined to operate as per
* readb() et al. on such platforms.
*/
#ifndef IO_SPACE_LIMIT
#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
#endif
/*
* IO port access primitives
* -------------------------
*
* The ARM doesn't have special IO access instructions; all IO is memory
* mapped. Note that these are defined to perform little endian accesses
* only. Their primary purpose is to access PCI and ISA peripherals.
*
* Note that for a big endian machine, this implies that the following
* big endian mode connectivity is in place, as described by numerous
* ARM documents:
*
* PCI: D0-D7 D8-D15 D16-D23 D24-D31
* ARM: D24-D31 D16-D23 D8-D15 D0-D7
*
* The machine specific io.h include defines __io to translate an "IO"
* address to a memory address.
*
* Note that we prevent GCC re-ordering or caching values in expressions
* by introducing sequence points into the in*() definitions. Note that
* __raw_* do not guarantee this behaviour.
*
* The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
*/
#ifdef __io
#define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); })
#define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \
cpu_to_le16(v),__io(p)); })
#define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \
cpu_to_le32(v),__io(p)); })
#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \
__raw_readw(__io(p))); __iormb(); __v; })
#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \
__raw_readl(__io(p))); __iormb(); __v; })
#define outsb(p,d,l) __raw_writesb(__io(p),d,l)
#define outsw(p,d,l) __raw_writesw(__io(p),d,l)
#define outsl(p,d,l) __raw_writesl(__io(p),d,l)
#define insb(p,d,l) __raw_readsb(__io(p),d,l)
#define insw(p,d,l) __raw_readsw(__io(p),d,l)
#define insl(p,d,l) __raw_readsl(__io(p),d,l)
#endif
#define outb_p(val,port) outb((val),(port))
#define outw_p(val,port) outw((val),(port))
#define outl_p(val,port) outl((val),(port))
#define inb_p(port) inb((port))
#define inw_p(port) inw((port))
#define inl_p(port) inl((port))
#define outsb_p(port,from,len) outsb(port,from,len)
#define outsw_p(port,from,len) outsw(port,from,len)
#define outsl_p(port,from,len) outsl(port,from,len)
#define insb_p(port,to,len) insb(port,to,len)
#define insw_p(port,to,len) insw(port,to,len)
#define insl_p(port,to,len) insl(port,to,len)
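/*
* Editor-added usage sketch (hypothetical 16550 UART at ISA port
* 0x3f8): port numbers are plain integers that __io() translates to a
* mapped address:
*
*	outb(0x80, 0x3f8 + 3);		// set DLAB
*	divisor_lo = inb(0x3f8);	// read divisor latch low byte
*/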
/*
* String version of IO memory access ops:
*/
extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);
#define mmiowb()
/*
* Memory access primitives
* ------------------------
*
* These perform PCI memory accesses via an ioremap region. They don't
* take an address as such, but a cookie.
*
* Again, these are defined to perform little endian accesses. See the
* IO port primitives for more information.
*/
#ifndef readl
#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl(c)); __r; })
#define writeb_relaxed(v,c) __raw_writeb(v,c)
#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
#define readsb(p,d,l) __raw_readsb(p,d,l)
#define readsw(p,d,l) __raw_readsw(p,d,l)
#define readsl(p,d,l) __raw_readsl(p,d,l)
#define writesb(p,d,l) __raw_writesb(p,d,l)
#define writesw(p,d,l) __raw_writesw(p,d,l)
#define writesl(p,d,l) __raw_writesl(p,d,l)
#define memset_io(c,v,l) _memset_io(c,(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
#endif /* readl */
/*
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
* Documentation/io-mapping.txt.
*
*/
#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap __arm_iounmap
/*
* io{read,write}{8,16,32} macros
*/
#ifndef ioread8
#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c)
#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c)
#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c)
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *addr);
#endif
struct pci_dev;
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
/*
* can the hardware map this into one segment or not, given no other
* constraints.
*/
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
struct bio_vec;
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2);
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
(!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
#endif
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
/*
* Register ISA memory and port locations for glibc iopl/inb/outb
* emulation.
*/
extern void register_isa_ports(unsigned int mmio, unsigned int io,
unsigned int io_shift);
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_IO_H */

View file

@@ -0,0 +1,44 @@
#ifndef __ASM_ARM_IRQ_H
#define __ASM_ARM_IRQ_H
#define NR_IRQS_LEGACY 16
#ifndef CONFIG_SPARSE_IRQ
#include <mach/irqs.h>
#else
#define NR_IRQS NR_IRQS_LEGACY
#endif
#ifndef irq_canonicalize
#define irq_canonicalize(i) (i)
#endif
/*
* Use this value to indicate lack of interrupt
* capability
*/
#ifndef NO_IRQ
#define NO_IRQ ((unsigned int)(-1))
#endif
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
extern void (*handle_arch_irq)(struct pt_regs *);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif
#endif

View file

@@ -0,0 +1,11 @@
#ifndef __ASM_ARM_IRQ_WORK_H
#define __ASM_ARM_IRQ_WORK_H
#include <asm/smp_plat.h>
static inline bool arch_irq_work_has_interrupt(void)
{
return is_smp();
}
#endif /* __ASM_ARM_IRQ_WORK_H */

View file

@@ -0,0 +1,165 @@
#ifndef __ASM_ARM_IRQFLAGS_H
#define __ASM_ARM_IRQFLAGS_H
#ifdef __KERNEL__
#include <asm/ptrace.h>
/*
* CPU interrupt mask handling.
*/
#ifdef CONFIG_CPU_V7M
#define IRQMASK_REG_NAME_R "primask"
#define IRQMASK_REG_NAME_W "primask"
#define IRQMASK_I_BIT 1
#else
#define IRQMASK_REG_NAME_R "cpsr"
#define IRQMASK_REG_NAME_W "cpsr_c"
#define IRQMASK_I_BIT PSR_I_BIT
#endif
#if __LINUX_ARM_ARCH__ >= 6
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile(
" mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n"
" cpsid i"
: "=r" (flags) : : "memory", "cc");
return flags;
}
static inline void arch_local_irq_enable(void)
{
asm volatile(
" cpsie i @ arch_local_irq_enable"
:
:
: "memory", "cc");
}
static inline void arch_local_irq_disable(void)
{
asm volatile(
" cpsid i @ arch_local_irq_disable"
:
:
: "memory", "cc");
}
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
#else
/*
* Save the current interrupt enable state & disable IRQs
*/
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags, temp;
asm volatile(
" mrs %0, cpsr @ arch_local_irq_save\n"
" orr %1, %0, #128\n"
" msr cpsr_c, %1"
: "=r" (flags), "=r" (temp)
:
: "memory", "cc");
return flags;
}
/*
* Enable IRQs
*/
static inline void arch_local_irq_enable(void)
{
unsigned long temp;
asm volatile(
" mrs %0, cpsr @ arch_local_irq_enable\n"
" bic %0, %0, #128\n"
" msr cpsr_c, %0"
: "=r" (temp)
:
: "memory", "cc");
}
/*
* Disable IRQs
*/
static inline void arch_local_irq_disable(void)
{
unsigned long temp;
asm volatile(
" mrs %0, cpsr @ arch_local_irq_disable\n"
" orr %0, %0, #128\n"
" msr cpsr_c, %0"
: "=r" (temp)
:
: "memory", "cc");
}
/*
* Enable FIQs
*/
#define local_fiq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ stf\n" \
" bic %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable FIQs
*/
#define local_fiq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ clf\n" \
" orr %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
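/*
* Editor-added note: the literals above are CPSR mask bits; #128 (0x80)
* is the IRQ-disable (I) bit and #64 (0x40) is the FIQ-disable (F) bit,
* which is also why arch_irqs_disabled_flags() below tests
* IRQMASK_I_BIT.
*/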
#endif
/*
* Save the current interrupt enable state.
*/
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile(
" mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags"
: "=r" (flags) : : "memory", "cc");
return flags;
}
/*
* restore saved IRQ & FIQ state
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile(
" msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore"
:
: "r" (flags)
: "memory", "cc");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return flags & IRQMASK_I_BIT;
}
#endif /* ifdef __KERNEL__ */
#endif /* ifndef __ASM_ARM_IRQFLAGS_H */

View file

@@ -0,0 +1,40 @@
#ifndef _ASM_ARM_JUMP_LABEL_H
#define _ASM_ARM_JUMP_LABEL_H
#ifdef __KERNEL__
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 4
#ifdef CONFIG_THUMB2_KERNEL
#define JUMP_LABEL_NOP "nop.w"
#else
#define JUMP_LABEL_NOP "nop"
#endif
static __always_inline bool arch_static_branch(struct static_key *key)
{
asm_volatile_goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i" (key) : : l_yes);
return false;
l_yes:
return true;
}
#endif /* __KERNEL__ */
typedef u32 jump_label_t;
struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
};
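/*
* Editor-added usage sketch (the static_key helpers come from
* linux/jump_label.h, not from this file):
*
*	static struct static_key key = STATIC_KEY_INIT_FALSE;
*
*	if (static_key_false(&key))	// the NOP above is patched to a
*		do_rare_thing();	// branch once the key is enabled
*/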
#endif

View file

@@ -0,0 +1,60 @@
#ifndef _ARM_KEXEC_H
#define _ARM_KEXEC_H
#ifdef CONFIG_KEXEC
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
#define KEXEC_CONTROL_PAGE_SIZE 4096
#define KEXEC_ARCH KEXEC_ARCH_ARM
#define KEXEC_ARM_ATAGS_OFFSET 0x1000
#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000
#ifndef __ASSEMBLY__
/**
* crash_setup_regs() - save registers for the panic kernel
* @newregs: registers are saved here
* @oldregs: registers to be saved (may be %NULL)
*
* Function copies machine registers from @oldregs to @newregs. If @oldregs is
* %NULL then current registers are stored there.
*/
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs) {
memcpy(newregs, oldregs, sizeof(*newregs));
} else {
__asm__ __volatile__ (
"stmia %[regs_base], {r0-r12}\n\t"
"mov %[_ARM_sp], sp\n\t"
"str lr, %[_ARM_lr]\n\t"
"adr %[_ARM_pc], 1f\n\t"
"mrs %[_ARM_cpsr], cpsr\n\t"
"1:"
: [_ARM_pc] "=r" (newregs->ARM_pc),
[_ARM_cpsr] "=r" (newregs->ARM_cpsr),
[_ARM_sp] "=r" (newregs->ARM_sp),
[_ARM_lr] "=o" (newregs->ARM_lr)
: [regs_base] "r" (&newregs->ARM_r0)
: "memory"
);
}
}
/* Function pointer to optional machine-specific reinitialization */
extern void (*kexec_reinit)(void);
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
#endif /* _ARM_KEXEC_H */

arch/arm/include/asm/kgdb.h Normal file
View file

@ -0,0 +1,106 @@
/*
* ARM KGDB support
*
* Author: Deepak Saxena <dsaxena@mvista.com>
*
* Copyright (C) 2002 MontaVista Software Inc.
*
*/
#ifndef __ARM_KGDB_H__
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
* it will send us an SWI command to write into memory as the
* debug trap. When an SWI occurs, the next instruction addr is
* placed into R14_svc before jumping to the vector trap.
* This doesn't work for kernel debugging as we are already in SVC
* mode; we would lose the kernel's LR, which is a bad thing.
*
* By doing this as an undefined instruction trap, we force a mode
* switch from SVC to UND mode, allowing us to save full kernel state.
*
* We also define a KGDB_COMPILED_BREAK which can be used to compile
* in breakpoints. This is important for things like sysrq-G and for
* the initial breakpoint from trap_init().
*
* Note to ARM HW designers: Add real trap support like SH && PPC to
* make our lives much much simpler. :)
*/
#define BREAK_INSTR_SIZE 4
#define GDB_BREAKINST 0xef9f0001
#define KGDB_BREAKINST 0xe7ffdefe
#define KGDB_COMPILED_BREAK 0xe7ffdeff
#define CACHE_FLUSH_IS_SAFE 1
#ifndef __ASSEMBLY__
static inline void arch_kgdb_breakpoint(void)
{
asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
extern int kgdb_fault_expected;
#endif /* !__ASSEMBLY__ */
/*
* From Kevin Hilman:
*
* gdb is expecting the following registers layout.
*
* r0-r15: 1 long word each
* f0-f7: unused, 3 long words each !!
* fps: unused, 1 long word
* cpsr: 1 long word
*
* Even though f0-f7 and fps are not used, they need to be
* present in the registers sent for correct processing in
* the host-side gdb.
*
* In particular, it is crucial that CPSR is in the right place,
* otherwise gdb will not be able to correctly interpret stepping over
* conditional branches.
*/
#define _GP_REGS 16
#define _FP_REGS 8
#define _EXTRA_REGS 2
#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
#define NUMCRITREGBYTES (32 << 2)
#define _R0 0
#define _R1 1
#define _R2 2
#define _R3 3
#define _R4 4
#define _R5 5
#define _R6 6
#define _R7 7
#define _R8 8
#define _R9 9
#define _R10 10
#define _FP 11
#define _IP 12
#define _SPT 13
#define _LR 14
#define _PC 15
#define _CPSR (GDB_MAX_REGS - 1)
/*
* So that we can denote the end of a frame for tracing,
* in the simple case:
*/
#define CFI_END_FRAME(func) __CFI_END_FRAME(_PC, _SPT, func)
#endif /* __ARM_KGDB_H__ */
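As a worked example of the layout described above: GDB_MAX_REGS is 16 + 8*3 + 2 = 42 words, with fps and cpsr in the two extra slots, so _CPSR lands at index 41. A sketch of packing pt_regs into that buffer (hypothetical helper, mirroring what the generic kgdb glue does):

static void example_pack_gdb_regs(unsigned long *gdb_regs,
				  struct pt_regs *regs)
{
	int i;

	for (i = 0; i < GDB_MAX_REGS; i++)
		gdb_regs[i] = 0;		/* f0-f7 and fps stay zero */
	for (i = _R0; i <= _PC; i++)
		gdb_regs[i] = regs->uregs[i];	/* r0-r15, one word each */
	gdb_regs[_CPSR] = regs->ARM_cpsr;	/* cpsr in the last slot */
}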

View file

@ -0,0 +1,9 @@
#ifndef __ARM_KMAP_TYPES_H
#define __ARM_KMAP_TYPES_H
/*
* This is the "bare minimum". AIO seems to require this.
*/
#define KM_TYPE_NR 16
#endif

View file

@ -0,0 +1,55 @@
/*
* arch/arm/include/asm/kprobes.h
*
* Copyright (C) 2006, 2007 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _ARM_KPROBES_H
#define _ARM_KPROBES_H
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
typedef u32 kprobe_opcode_t;
struct kprobe;
#include <asm/probes.h>
#define arch_specific_insn arch_probes_insn
struct prev_kprobe {
struct kprobe *kp;
unsigned int status;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
struct prev_kprobe prev_kprobe;
struct pt_regs jprobe_saved_regs;
char jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
#endif /* _ARM_KPROBES_H */

View file

@ -0,0 +1,220 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_ARM_H__
#define __ARM_KVM_ARM_H__
#include <linux/types.h>
/* Hyp Configuration Register (HCR) bits */
#define HCR_TGE (1 << 27)
#define HCR_TVM (1 << 26)
#define HCR_TTLB (1 << 25)
#define HCR_TPU (1 << 24)
#define HCR_TPC (1 << 23)
#define HCR_TSW (1 << 22)
#define HCR_TAC (1 << 21)
#define HCR_TIDCP (1 << 20)
#define HCR_TSC (1 << 19)
#define HCR_TID3 (1 << 18)
#define HCR_TID2 (1 << 17)
#define HCR_TID1 (1 << 16)
#define HCR_TID0 (1 << 15)
#define HCR_TWE (1 << 14)
#define HCR_TWI (1 << 13)
#define HCR_DC (1 << 12)
#define HCR_BSU (3 << 10)
#define HCR_BSU_IS (1 << 10)
#define HCR_FB (1 << 9)
#define HCR_VA (1 << 8)
#define HCR_VI (1 << 7)
#define HCR_VF (1 << 6)
#define HCR_AMO (1 << 5)
#define HCR_IMO (1 << 4)
#define HCR_FMO (1 << 3)
#define HCR_PTW (1 << 2)
#define HCR_SWIO (1 << 1)
#define HCR_VM 1
/*
* The bits we set in HCR:
* TAC: Trap ACTLR
* TSC: Trap SMC
* TVM: Trap VM ops (until MMU and caches are on)
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
* TWE: Trap WFE
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
/* System Control Register (SCTLR) bits */
#define SCTLR_TE (1 << 30)
#define SCTLR_EE (1 << 25)
#define SCTLR_V (1 << 13)
/* Hyp System Control Register (HSCTLR) bits */
#define HSCTLR_TE (1 << 30)
#define HSCTLR_EE (1 << 25)
#define HSCTLR_FI (1 << 21)
#define HSCTLR_WXN (1 << 19)
#define HSCTLR_I (1 << 12)
#define HSCTLR_C (1 << 2)
#define HSCTLR_A (1 << 1)
#define HSCTLR_M 1
#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
/* TTBCR and HTCR Registers bits */
#define TTBCR_EAE (1 << 31)
#define TTBCR_IMP (1 << 30)
#define TTBCR_SH1 (3 << 28)
#define TTBCR_ORGN1 (3 << 26)
#define TTBCR_IRGN1 (3 << 24)
#define TTBCR_EPD1 (1 << 23)
#define TTBCR_A1 (1 << 22)
#define TTBCR_T1SZ (7 << 16)
#define TTBCR_SH0 (3 << 12)
#define TTBCR_ORGN0 (3 << 10)
#define TTBCR_IRGN0 (3 << 8)
#define TTBCR_EPD0 (1 << 7)
#define TTBCR_T0SZ (7 << 0)
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
/* Hyp System Trap Register */
#define HSTR_T(x) (1 << x)
#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)
/* Hyp Coprocessor Trap Register */
#define HCPTR_TCP(x) (1 << x)
#define HCPTR_TCP_MASK (0x3fff)
#define HCPTR_TASE (1 << 15)
#define HCPTR_TTA (1 << 20)
#define HCPTR_TCPAC (1 << 31)
/* Hyp Debug Configuration Register bits */
#define HDCR_TDRA (1 << 11)
#define HDCR_TDOSA (1 << 10)
#define HDCR_TDA (1 << 9)
#define HDCR_TDE (1 << 8)
#define HDCR_HPME (1 << 7)
#define HDCR_TPM (1 << 6)
#define HDCR_TPMCR (1 << 5)
#define HDCR_HPMN_MASK (0x1F)
/*
* The architecture supports 40-bit IPA as input to the 2nd stage translations
* and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
* space.
*/
#define KVM_PHYS_SHIFT (40)
#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
/* Virtualization Translation Control Register (VTCR) bits */
#define VTCR_SH0 (3 << 12)
#define VTCR_ORGN0 (3 << 10)
#define VTCR_IRGN0 (3 << 8)
#define VTCR_SL0 (3 << 6)
#define VTCR_S (1 << 4)
#define VTCR_T0SZ (0xf)
#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
VTCR_S | VTCR_T0SZ)
#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
#define KVM_VTCR_SL0 VTCR_SL_L1
/* stage-2 input address range defined as 2^(32-T0SZ) */
#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
/* Virtualization Translation Table Base Register (VTTBR) bits */
#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
#define VTTBR_X (14 - KVM_T0SZ)
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT (48LLU)
#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
/* Hyp Syndrome Register (HSR) bits */
#define HSR_EC_SHIFT (26)
#define HSR_EC (0x3fU << HSR_EC_SHIFT)
#define HSR_IL (1U << 25)
#define HSR_ISS (HSR_IL - 1)
#define HSR_ISV_SHIFT (24)
#define HSR_ISV (1U << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
#define HSR_FSC (0x3f)
#define HSR_FSC_TYPE (0x3c)
#define HSR_SSE (1 << 21)
#define HSR_WNR (1 << 6)
#define HSR_CV_SHIFT (24)
#define HSR_CV (1U << HSR_CV_SHIFT)
#define HSR_COND_SHIFT (20)
#define HSR_COND (0xfU << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~0xf)
#define HSR_EC_UNKNOWN (0x00)
#define HSR_EC_WFI (0x01)
#define HSR_EC_CP15_32 (0x03)
#define HSR_EC_CP15_64 (0x04)
#define HSR_EC_CP14_MR (0x05)
#define HSR_EC_CP14_LS (0x06)
#define HSR_EC_CP_0_13 (0x07)
#define HSR_EC_CP10_ID (0x08)
#define HSR_EC_JAZELLE (0x09)
#define HSR_EC_BXJ (0x0A)
#define HSR_EC_CP14_64 (0x0C)
#define HSR_EC_SVC_HYP (0x11)
#define HSR_EC_HVC (0x12)
#define HSR_EC_SMC (0x13)
#define HSR_EC_IABT (0x20)
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
#define HSR_WFI_IS_WFE (1U << 0)
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
#define HSR_DABT_S1PTW (1U << 7)
#define HSR_DABT_CM (1U << 8)
#define HSR_DABT_EA (1U << 9)
#endif /* __ARM_KVM_ARM_H__ */
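A brief sketch of how the HSR masks above compose (hypothetical helper; HSR_EC_DABT and HSR_ISV are defined above):

static inline bool example_is_valid_dabt(u32 hsr)
{
	u8 ec = (hsr & HSR_EC) >> HSR_EC_SHIFT;	/* exception class */

	/* Data abort taken from the guest, with a valid syndrome? */
	return ec == HSR_EC_DABT && (hsr & HSR_ISV);
}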

View file

@ -0,0 +1,103 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
/* 0 is reserved as an invalid value. */
#define c0_MPIDR 1 /* MultiProcessor ID Register */
#define c0_CSSELR 2 /* Cache Size Selection Register */
#define c1_SCTLR 3 /* System Control Register */
#define c1_ACTLR 4 /* Auxiliary Control Register */
#define c1_CPACR 5 /* Coprocessor Access Control */
#define c2_TTBR0 6 /* Translation Table Base Register 0 */
#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
#define c2_TTBR1 8 /* Translation Table Base Register 1 */
#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
#define c2_TTBCR 10 /* Translation Table Base Control R. */
#define c3_DACR 11 /* Domain Access Control Register */
#define c5_DFSR 12 /* Data Fault Status Register */
#define c5_IFSR 13 /* Instruction Fault Status Register */
#define c5_ADFSR 14 /* Auxiliary Data Fault Status R */
#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
#define c6_DFAR 16 /* Data Fault Address Register */
#define c6_IFAR 17 /* Instruction Fault Address Register */
#define c7_PAR 18 /* Physical Address Register */
#define c7_PAR_high 19 /* PAR top 32 bits */
#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
#define c10_PRRR 21 /* Primary Region Remap Register */
#define c10_NMRR 22 /* Normal Memory Remap Register */
#define c12_VBAR 23 /* Vector Base Address Register */
#define c13_CID 24 /* Context ID Register */
#define c13_TID_URW 25 /* Thread ID, User R/W */
#define c13_TID_URO 26 /* Thread ID, User R/O */
#define c13_TID_PRIV 27 /* Thread ID, Privileged */
#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
#define c10_AMAIR0 29 /* Auxiliary Memory Attribute Indirection Reg0 */
#define c10_AMAIR1 30 /* Auxiliary Memory Attribute Indirection Reg1 */
#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
#define ARM_EXCEPTION_SOFTWARE 2
#define ARM_EXCEPTION_PREF_ABORT 3
#define ARM_EXCEPTION_DATA_ABORT 4
#define ARM_EXCEPTION_IRQ 5
#define ARM_EXCEPTION_FIQ 6
#define ARM_EXCEPTION_HVC 7
/*
* The rr_lo_hi macro swaps a pair of registers depending on
* current endianness. It is used in conjunction with ldrd and strd
* instructions that load/store a 64-bit value from/to memory to/from
* a pair of registers which are used with the mrrc and mcrr instructions.
* If used with the ldrd/strd instructions, the a1 parameter is the first
* source/destination register and the a2 parameter is the second
* source/destination register. Note that the ldrd/strd instructions
* already swap the bytes within the words correctly according to the
* endianness setting, but the order of the registers needs to be effectively
* swapped when used with the mrrc/mcrr instructions.
*/
#ifdef CONFIG_CPU_ENDIAN_BE8
#define rr_lo_hi(a1, a2) a2, a1
#else
#define rr_lo_hi(a1, a2) a1, a2
#endif
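To illustrate (an assembly sketch, not taken from this file; the cp15 encoding shown is the 64-bit VTTBR accessor):

/*
 *	mrrc	p15, 6, rr_lo_hi(r2, r3), c2	@ VTTBR -> {r2, r3}
 *	strd	r2, r3, [r0]			@ store the 64-bit value
 *
 * On CONFIG_CPU_ENDIAN_BE8 the macro expands to "r3, r2", so the
 * following strd still writes the doubleword with the halves in the
 * right places.
 */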
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_exit[];
extern char __kvm_hyp_exit_end[];
extern char __kvm_hyp_vector[];
extern char __kvm_hyp_code_start[];
extern char __kvm_hyp_code_end[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
#endif /* __ARM_KVM_ASM_H__ */

View file

@ -0,0 +1,47 @@
/*
* Copyright (C) 2012 Rusty Russell IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_COPROC_H__
#define __ARM_KVM_COPROC_H__
#include <linux/kvm_host.h>
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_coproc_target_table {
unsigned target;
const struct coproc_reg *table;
size_t num;
};
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
void kvm_coproc_table_init(void);
struct kvm_one_reg;
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
#endif /* __ARM_KVM_COPROC_H__ */

View file

@ -0,0 +1,235 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr = HCR_GUEST_MASK;
}
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return true;
}
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.regs.usr_regs.ARM_pc;
}
static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
return cpsr_mode > USR_MODE;
}
static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hsr;
}
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hxfar;
}
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hyp_pc;
}
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
}
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
return -EFAULT;
}
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.cp15[c0_MPIDR];
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
unsigned long data,
unsigned int len)
{
if (kvm_vcpu_is_be(vcpu)) {
switch (len) {
case 1:
return data & 0xff;
case 2:
return be16_to_cpu(data & 0xffff);
default:
return be32_to_cpu(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return le16_to_cpu(data & 0xffff);
default:
return le32_to_cpu(data);
}
}
}
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
unsigned long data,
unsigned int len)
{
if (kvm_vcpu_is_be(vcpu)) {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_be16(data & 0xffff);
default:
return cpu_to_be32(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_le16(data & 0xffff);
default:
return cpu_to_le32(data);
}
}
}
#endif /* __ARM_KVM_EMULATE_H__ */
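A hedged sketch of how the two endianness helpers pair up in MMIO emulation (a fragment; rt and len are assumed to come from the decoded HSR fields above):

/* Guest store: register value -> memory representation. */
unsigned long data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);

/* Guest load completion: memory data -> register representation. */
*vcpu_reg(vcpu, rt) = vcpu_data_host_to_guest(vcpu, data, len);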

View file

@ -0,0 +1,245 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
#define KVM_MAX_VCPUS 0
#endif
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_VCPU_MAX_FEATURES 2
#include <kvm/arm_vgic.h>
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_arch {
/* VTTBR value associated with below pgd and vmid */
u64 vttbr;
/* Timer */
struct arch_timer_kvm timer;
/*
* Anything that is not used directly from assembly code goes
* here.
*/
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
/* Stage-2 page table */
pgd_t *pgd;
/* Interrupt controller */
struct vgic_dist vgic;
};
#define KVM_NR_MEM_OBJS 40
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
*/
struct kvm_mmu_memory_cache {
int nobjs;
void *objects[KVM_NR_MEM_OBJS];
};
struct kvm_vcpu_fault_info {
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
u32 hpfar; /* Hyp IPA Fault Address Register */
u32 hyp_pc; /* PC when exception was taken from Hyp mode */
};
typedef struct vfp_hard_struct kvm_cpu_context_t;
struct kvm_vcpu_arch {
struct kvm_regs regs;
int target; /* Processor target */
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* System control coprocessor (cp15) */
u32 cp15[NR_CP15_REGS];
/* The CPU type we expose to the VM */
u32 midr;
/* HYP trapping configuration */
u32 hcr;
/* Interrupt related fields */
u32 irq_lines; /* IRQ and FIQ levels */
/* Exception Information */
struct kvm_vcpu_fault_info fault;
/* Floating point registers (VFP and Advanced SIMD/NEON) */
struct vfp_hard_struct vfp_guest;
/* Host FP context */
kvm_cpu_context_t *host_cpu_context;
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
/*
* Anything that is not used directly from assembly code goes
* here.
*/
/* dcache set/way operation pending */
int last_pcpu;
cpumask_t require_dcache_flush;
/* Don't run the guest on this vcpu */
bool pause;
/* IO related fields */
struct kvm_decode mmio_decode;
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
/* Detect first run of a vcpu */
bool has_run_once;
};
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
struct kvm_vcpu_stat {
u32 halt_wakeup;
};
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
unsigned long end)
{
return 0;
}
static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
return 0;
}
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
}
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
/*
* Call initialization code, and switch to the full blown HYP
* code. The init code doesn't need to preserve these
* registers as r0-r3 are already callee saved according to
* the AAPCS.
* Note that we slightly misuse the prototype by casting the
* stack pointer to a void *.
*
* We don't have enough registers to perform the full init in
* one go. Install the boot PGD first, and then install the
* runtime PGD, stack pointer and vectors. The PGDs are always
* passed as the third argument, in order to be passed into
* r2-r3 to the init code (yes, this is compliant with the
* PCS!).
*/
kvm_call_hyp(NULL, 0, boot_pgd_ptr);
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
}
static inline void vgic_arch_setup(const struct vgic_params *vgic)
{
BUG_ON(vgic->type != VGIC_V2);
}
int kvm_perf_init(void);
int kvm_perf_teardown(void);
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
#endif /* __ARM_KVM_HOST_H__ */

View file

@ -0,0 +1,56 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_MMIO_H__
#define __ARM_KVM_MMIO_H__
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
struct kvm_decode {
unsigned long rt;
bool sign_extend;
};
/*
* The in-kernel MMIO emulation code wants to use a copy of run->mmio,
* which is an anonymous type. Use our own type instead.
*/
struct kvm_exit_mmio {
phys_addr_t phys_addr;
u8 data[8];
u32 len;
bool is_write;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
struct kvm_exit_mmio *mmio)
{
run->mmio.phys_addr = mmio->phys_addr;
run->mmio.len = mmio->len;
run->mmio.is_write = mmio->is_write;
memcpy(run->mmio.data, mmio->data, mmio->len);
run->exit_reason = KVM_EXIT_MMIO;
}
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
#endif /* __ARM_KVM_MMIO_H__ */

View file

@ -0,0 +1,252 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__
#include <asm/memory.h>
#include <asm/page.h>
/*
* We directly use the kernel VA for the HYP, as we can directly share
* the mapping (HTTBR "covers" TTBR1).
*/
#define HYP_PAGE_OFFSET_MASK UL(~0)
#define HYP_PAGE_OFFSET PAGE_OFFSET
#define KERN_TO_HYP(kva) (kva)
/*
* Our virtual mapping for the boot-time MMU-enable code. Must be
* shared across all the page-tables. Conveniently, we use the vectors
* page, where no kernel data will ever be shared with HYP.
*/
#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
/*
* KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
*/
#define KVM_MMU_CACHE_MIN_PAGES 2
#ifndef __ASSEMBLY__
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size, bool writable);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
*pmd = new_pmd;
flush_pmd_entry(pmd);
}
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
*pte = new_pte;
/*
* flush_pmd_entry just takes a void pointer and cleans the necessary
* cache entries, so we can reuse the function for ptes.
*/
flush_pmd_entry(pte);
}
static inline void kvm_clean_pgd(pgd_t *pgd)
{
clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}
static inline void kvm_clean_pmd(pmd_t *pmd)
{
clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}
static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
clean_pmd_entry(pmd);
}
static inline void kvm_clean_pte(pte_t *pte)
{
clean_pte_table(pte);
}
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
pte_val(*pte) |= L_PTE_S2_RDWR;
}
static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
pmd_val(*pmd) |= L_PMD_S2_RDWR;
}
/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end) \
({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#define kvm_pud_addr_end(addr,end) (end)
#define kvm_pmd_addr_end(addr, end) \
({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#define kvm_pgd_index(addr) pgd_index(addr)
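These helpers are meant to drive range walks; a minimal sketch of the usual idiom (hypothetical function):

static void example_walk_range(phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next;

	do {
		next = kvm_pmd_addr_end(addr, end);
		/* ... operate on the region [addr, next) ... */
	} while (addr = next, addr != end);
}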
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
return page_count(ptr_page) == 1;
}
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)
#define KVM_PREALLOC_LEVEL 0
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
return kvm->arch.pgd;
}
static inline unsigned int kvm_get_hwpgd_size(void)
{
return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
unsigned long size,
bool ipa_uncached)
{
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
* (or another VM) may have used the same page as this guest, and we
* read incorrect data from the icache. If we're using a PIPT cache,
* we can invalidate just that page, but if we are using a VIPT cache
* we need to invalidate the entire icache - damn shame - as written
* in the ARM ARM (DDI 0406C.b - Page B3-1393).
*
* VIVT caches are tagged using both the ASID and the VMID and do not
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*
* We need to do this through a kernel mapping (using the
* user-space mapping has proved to be the wrong
* solution). For that, we need to kmap one page at a time,
* and iterate over the range.
*/
bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
VM_BUG_ON(size & ~PAGE_MASK);
if (!need_flush && !icache_is_pipt())
goto vipt_cache;
while (size) {
void *va = kmap_atomic_pfn(pfn);
if (need_flush)
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
if (icache_is_pipt())
__cpuc_coherent_user_range((unsigned long)va,
(unsigned long)va + PAGE_SIZE);
size -= PAGE_SIZE;
pfn++;
kunmap_atomic(va);
}
vipt_cache:
if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
void *va = kmap_atomic(pte_page(pte));
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
kunmap_atomic(va);
}
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
unsigned long size = PMD_SIZE;
pfn_t pfn = pmd_pfn(pmd);
while (size) {
void *va = kmap_atomic_pfn(pfn);
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
pfn++;
size -= PAGE_SIZE;
kunmap_atomic(va);
}
}
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
void stage2_flush_vm(struct kvm *kvm);
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */

Some files were not shown because too many files have changed in this diff.