Fixed MTP to work with TWRP

commit f6dfaef42e
Author: awab228
Date:   2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

@@ -0,0 +1,40 @@
generic-y += bitsperlong.h
generic-y += cputime.h
generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += fcntl.h
generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += percpu.h
generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
generic-y += sizes.h
generic-y += socket.h
generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += xor.h

arch/sh/include/asm/adc.h (new file, 13 lines)
@@ -0,0 +1,13 @@
#ifndef __ASM_ADC_H
#define __ASM_ADC_H
#ifdef __KERNEL__
/*
* Copyright (C) 2004 Andriy Skulysh
*/
#include <cpu/adc.h>
int adc_single(unsigned int channel);
#endif /* __KERNEL__ */
#endif /* __ASM_ADC_H */
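
A minimal caller sketch for the interface above: adc_single() performs one conversion on the given channel. The channel number, 10-bit width, and 3.3 V reference below are hypothetical board-specific assumptions, not part of this header.

#include <asm/adc.h>

/* Hypothetical board helper: sample channel 0 and scale the assumed
 * 10-bit reading to millivolts against an assumed 3.3 V reference. */
static int board_read_vbat_mv(void)
{
        int raw = adc_single(0);

        if (raw < 0)
                return raw;     /* propagate a (possible) error code */
        return (raw * 3300) / 1023;
}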

@@ -0,0 +1,69 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Kaz Kojima
*
* Definitions for the address spaces of the SH CPUs.
*/
#ifndef __ASM_SH_ADDRSPACE_H
#define __ASM_SH_ADDRSPACE_H
#ifdef __KERNEL__
#include <cpu/addrspace.h>
/* If this CPU supports segmentation, hook up the helpers */
#ifdef P1SEG
/*
[ P0/U0 (virtual) ] 0x00000000 <------ User space
[ P1 (fixed) cached ] 0x80000000 <------ Kernel space
[ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access
[ P3 (virtual) cached] 0xC0000000 <------ vmalloced area
[ P4 control ] 0xE0000000
*/
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
#ifdef CONFIG_29BIT
/*
* Map an address to a certain privileged segment
*/
#define P1SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))
#define P2SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG))
#define P3SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
#define P4SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
#else
/*
* These will never work in 32-bit, don't even bother.
*/
#define P1SEGADDR(a) ({ (void)(a); BUG(); NULL; })
#define P2SEGADDR(a) ({ (void)(a); BUG(); NULL; })
#define P3SEGADDR(a) ({ (void)(a); BUG(); NULL; })
#define P4SEGADDR(a) ({ (void)(a); BUG(); NULL; })
#endif
#endif /* P1SEG */
/* Check if an address can be reached in 29 bits */
#define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000)
#ifdef CONFIG_SH_STORE_QUEUES
/*
* This is a special case for the SH-4 store queues, as pages for this
* space still need to be faulted in before it's possible to flush the
* store queue cache for writeout to the remapped region.
*/
#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX P4SEG
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_ADDRSPACE_H */
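
A short sketch of the helpers above on a 29-bit (CONFIG_29BIT) part: P2SEGADDR() rebases an address into the uncached P2 segment, and PXSEG() classifies a pointer by its privileged segment. The register address is a made-up example.

/* Uncached alias of a (hypothetical) device register block. */
volatile unsigned int *regs =
        (volatile unsigned int *)P2SEGADDR(0x1f000000);

/* True if the pointer lives in the cached P1 kernel segment. */
static int in_p1(const void *p)
{
        return PXSEG(p) == P1SEG;
}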

@@ -0,0 +1,21 @@
#ifndef __ASM_SH_ALIGNMENT_H
#define __ASM_SH_ALIGNMENT_H
#include <linux/types.h>
extern void inc_unaligned_byte_access(void);
extern void inc_unaligned_word_access(void);
extern void inc_unaligned_dword_access(void);
extern void inc_unaligned_multi_access(void);
extern void inc_unaligned_user_access(void);
extern void inc_unaligned_kernel_access(void);
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
extern unsigned int unaligned_user_action(void);
extern void unaligned_fixups_notify(struct task_struct *, insn_size_t, struct pt_regs *);
#endif /* __ASM_SH_ALIGNMENT_H */
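
The UM_* flags report the configured policy for unaligned user accesses; a fault handler can combine them roughly as in this sketch (illustrative only, not the in-tree fixup path; assumes <linux/sched.h> for current and the era's two-argument force_sig()).

unsigned int action = unaligned_user_action();

if (action & UM_WARN)
        pr_warn("unaligned access in %s\n", current->comm);
if (action & UM_FIXUP)
        inc_unaligned_user_access();    /* emulate, count, continue */
if (action & UM_SIGNAL)
        force_sig(SIGBUS, current);     /* refuse to fix it up */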

@@ -0,0 +1 @@
#include <generated/asm-offsets.h>

@@ -0,0 +1,94 @@
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
__asm__ __volatile__ ( \
" .align 2 \n\t" \
" mova 1f, r0 \n\t" /* r0 = end point */ \
" mov r15, r1 \n\t" /* r1 = saved sp */ \
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
" mov.l @%1, %0 \n\t" /* load old value */ \
" " #op " %2, %0 \n\t" /* $op */ \
" mov.l %0, @%1 \n\t" /* store new value */ \
"1: mov r1, r15 \n\t" /* LOGOUT */ \
: "=&r" (tmp), \
"+r" (v) \
: "r" (i) \
: "memory" , "r0", "r1"); \
}
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
__asm__ __volatile__ ( \
" .align 2 \n\t" \
" mova 1f, r0 \n\t" /* r0 = end point */ \
" mov r15, r1 \n\t" /* r1 = saved sp */ \
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
" mov.l @%1, %0 \n\t" /* load old value */ \
" " #op " %2, %0 \n\t" /* $op */ \
" mov.l %0, @%1 \n\t" /* store new value */ \
"1: mov r1, r15 \n\t" /* LOGOUT */ \
: "=&r" (tmp), \
"+r" (v) \
: "r" (i) \
: "memory" , "r0", "r1"); \
\
return tmp; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
int tmp;
unsigned int _mask = ~mask;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" and %2, %0 \n\t" /* add */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (_mask)
: "memory" , "r0", "r1");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" or %2, %0 \n\t" /* or */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (mask)
: "memory" , "r0", "r1");
}
#endif /* __ASM_SH_ATOMIC_GRB_H */

@@ -0,0 +1,63 @@
#ifndef __ASM_SH_ATOMIC_IRQ_H
#define __ASM_SH_ATOMIC_IRQ_H
#include <linux/irqflags.h>
/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
raw_local_irq_save(flags); \
v->counter c_op i; \
raw_local_irq_restore(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
raw_local_irq_save(flags); \
temp = v->counter; \
temp c_op i; \
v->counter = temp; \
raw_local_irq_restore(flags); \
\
return temp; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter &= ~mask;
raw_local_irq_restore(flags);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter |= mask;
raw_local_irq_restore(flags);
}
#endif /* __ASM_SH_ATOMIC_IRQ_H */

@@ -0,0 +1,88 @@
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H
/*
* SH-4A note:
*
* We basically get atomic_xxx_return() for free compared with
* atomic_xxx(). movli.l/movco.l require r0 due to the instruction
* encoding, so the retval is automatically set without having to
* do any special work.
*/
/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
\
__asm__ __volatile__ ( \
"1: movli.l @%2, %0 ! atomic_" #op "\n" \
" " #op " %1, %0 \n" \
" movco.l %0, @%2 \n" \
" bf 1b \n" \
: "=&z" (tmp) \
: "r" (i), "r" (&v->counter) \
: "t"); \
}
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
__asm__ __volatile__ ( \
"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
" " #op " %1, %0 \n" \
" movco.l %0, @%2 \n" \
" bf 1b \n" \
" synco \n" \
: "=&z" (temp) \
: "r" (i), "r" (&v->counter) \
: "t"); \
\
return temp; \
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_clear_mask \n"
" and %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (~mask), "r" (&v->counter)
: "t");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_set_mask \n"
" or %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (mask), "r" (&v->counter)
: "t");
}
#endif /* __ASM_SH_ATOMIC_LLSC_H */
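
As the note above says, the movli.l/movco.l pair is load-locked/store-conditional: movco.l clears T and stores nothing if the reservation was lost, and the bf loops back to retry. Callers never see the loop; they just use the ordinary atomic_t API (ATOMIC_INIT() comes from <asm/atomic.h>):

/* Illustrative event counter: atomic_add_return() becomes one
 * movli.l/movco.l retry loop with the sum produced in r0. */
static atomic_t events = ATOMIC_INIT(0);

static int record_event(void)
{
        return atomic_add_return(1, &events);   /* new count */
}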

@@ -0,0 +1,66 @@
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
*/
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/atomic-llsc.h>
#else
#include <asm/atomic-irq.h>
#endif
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c;
}
#endif /* __ASM_SH_ATOMIC_H */
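
The kerneldoc above is clearest through the classic try-get idiom: bump a reference count only if it has not already hit zero. This is exactly the pattern behind the generic atomic_inc_not_zero() wrapper (sketch):

static inline int ref_tryget(atomic_t *refcnt)
{
        /* old value comes back; nonzero means our +1 was applied */
        return __atomic_add_unless(refcnt, 1, 0) != 0;
}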

@@ -0,0 +1,39 @@
/*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Paul Mundt
*/
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#include <asm/cache_insns.h>
#endif
/*
* A brief note on ctrl_barrier(), the control register write barrier.
*
* Legacy SH cores typically require a sequence of 8 nops after
* modification of a control register in order for the changes to take
* effect. On newer cores (like the sh4a and sh5) this is accomplished
* with icbi.
*
* Also note that on sh4a in the icbi case we can forego a synco for the
* write barrier, as it's not necessary for control registers.
*
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
#define ctrl_barrier() __icbi(PAGE_OFFSET)
#else
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#include <asm-generic/barrier.h>
#endif /* __ASM_SH_BARRIER_H */
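
A sketch of ctrl_barrier() in use, assuming a hypothetical control-register write helper (the MMUCR address macro is assumed from the CPU-specific headers): legacy parts get the 8-nop pad, SH-4A parts get the icbi, and the caller stays the same.

static inline void write_mmucr(unsigned long val)
{
        __raw_writel(val, MMUCR);       /* update the control register */
        ctrl_barrier();                 /* make the change take effect */
}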

@@ -0,0 +1,172 @@
#ifndef __ASM_SH_BITOPS_GRB_H
#define __ASM_SH_BITOPS_GRB_H
static inline void set_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" or %2, %0 \n\t" /* or */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline void clear_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = ~(1 << (nr & 0x1f));
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" and %2, %0 \n\t" /* and */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline void change_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" xor %2, %0 \n\t" /* xor */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1");
}
static inline int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-14, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%2, %0 \n\t" /* load old value */
" mov %0, %1 \n\t"
" tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
" mov #-1, %1 \n\t" /* retvat = -1 */
" negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
" or %3, %0 \n\t"
" mov.l %0, @%2 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"=&r" (retval),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1" ,"t");
return retval;
}
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval,not_mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
not_mask = ~mask;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-14, r15 \n\t" /* LOGIN */
" mov.l @%2, %0 \n\t" /* load old value */
" mov %0, %1 \n\t" /* %1 = *a */
" tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
" mov #-1, %1 \n\t" /* retvat = -1 */
" negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
" and %4, %0 \n\t"
" mov.l %0, @%2 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"=&r" (retval),
"+r" (a)
: "r" (mask),
"r" (not_mask)
: "memory" , "r0", "r1", "t");
return retval;
}
static inline int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-14, r15 \n\t" /* LOGIN */
" mov.l @%2, %0 \n\t" /* load old value */
" mov %0, %1 \n\t" /* %1 = *a */
" tst %1, %3 \n\t" /* T = ((*a & mask) == 0) */
" mov #-1, %1 \n\t" /* retvat = -1 */
" negc %1, %1 \n\t" /* retval = (mask & *a) != 0 */
" xor %3, %0 \n\t"
" mov.l %0, @%2 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"=&r" (retval),
"+r" (a)
: "r" (mask)
: "memory" , "r0", "r1", "t");
return retval;
}
#include <asm-generic/bitops/non-atomic.h>
#endif /* __ASM_SH_BITOPS_GRB_H */

@@ -0,0 +1,146 @@
#ifndef __ASM_SH_BITOPS_LLSC_H
#define __ASM_SH_BITOPS_LLSC_H
static inline void set_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! set_bit \n\t"
"or %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
: "=&z" (tmp)
: "r" (a), "r" (mask)
: "t", "memory"
);
}
static inline void clear_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! clear_bit \n\t"
"and %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
: "=&z" (tmp)
: "r" (a), "r" (~mask)
: "t", "memory"
);
}
static inline void change_bit(int nr, volatile void *addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%1, %0 ! change_bit \n\t"
"xor %2, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
: "=&z" (tmp)
: "r" (a), "r" (mask)
: "t", "memory"
);
}
static inline int test_and_set_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! test_and_set_bit \n\t"
"mov %0, %1 \n\t"
"or %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"and %3, %1 \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (a), "r" (mask)
: "t", "memory"
);
return retval != 0;
}
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! test_and_clear_bit \n\t"
"mov %0, %1 \n\t"
"and %4, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"and %3, %1 \n\t"
"synco \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (a), "r" (mask), "r" (~mask)
: "t", "memory"
);
return retval != 0;
}
static inline int test_and_change_bit(int nr, volatile void *addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long tmp;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! test_and_change_bit \n\t"
"mov %0, %1 \n\t"
"xor %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"and %3, %1 \n\t"
"synco \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (a), "r" (mask)
: "t", "memory"
);
return retval != 0;
}
#include <asm-generic/bitops/non-atomic.h>
#endif /* __ASM_SH_BITOPS_LLSC_H */

@@ -0,0 +1,142 @@
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H
/*
* The bit modifying instructions on SH-2A are only capable of working
* with a 3-bit immediate, which signifies the shift position for the bit
* being worked on.
*/
#if defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#define BYTE_NUMBER(nr) ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr) ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE)
#else
#define BYTE_NUMBER(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE)
#endif
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
__asm__ __volatile__ (
"bset.b %1, @(%O2,%0) ! __set_bit\n\t"
: "+r" (addr)
: "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr))
: "t", "memory"
);
} else {
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
__asm__ __volatile__ (
"bclr.b %1, @(%O2,%0) ! __clear_bit\n\t"
: "+r" (addr)
: "i" (BYTE_OFFSET(nr)),
"i" (BYTE_NUMBER(nr))
: "t", "memory"
);
} else {
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
__asm__ __volatile__ (
"bxor.b %1, @(%O2,%0) ! __change_bit\n\t"
: "+r" (addr)
: "i" (BYTE_OFFSET(nr)),
"i" (BYTE_NUMBER(nr))
: "t", "memory"
);
} else {
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#endif /* __ASM_SH_BITOPS_OP32_H */
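
The IS_IMMEDIATE() test above is what lets GCC pick the short encoding: a compile-time-constant bit number becomes a single SH-2A bset.b with a 3-bit immediate, while a runtime bit number falls back to the generic read-modify-write. An illustrative pair:

static unsigned long status;            /* hypothetical flag word */

static void mark_ready(void)
{
        __set_bit(2, &status);          /* constant nr: bset.b path */
}

static void mark_slot(int n)
{
        __set_bit(n, &status);          /* runtime nr: RMW fallback */
}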

@@ -0,0 +1,99 @@
#ifndef __ASM_SH_BITOPS_H
#define __ASM_SH_BITOPS_H
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
/* For __swab32 */
#include <asm/byteorder.h>
#include <asm/barrier.h>
#ifdef CONFIG_GUSA_RB
#include <asm/bitops-grb.h>
#elif defined(CONFIG_CPU_SH2A)
#include <asm-generic/bitops/atomic.h>
#include <asm/bitops-op32.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/bitops-llsc.h>
#else
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#endif
#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
__asm__("1:\n\t"
"shlr %1\n\t"
"bt/s 1b\n\t"
" add #1, %0"
: "=r" (result), "=r" (word)
: "0" (~0L), "1" (word)
: "t");
return result;
}
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
unsigned long result;
__asm__("1:\n\t"
"shlr %1\n\t"
"bf/s 1b\n\t"
" add #1, %0"
: "=r" (result), "=r" (word)
: "0" (~0L), "1" (word)
: "t");
return result;
}
#else
static inline unsigned long ffz(unsigned long word)
{
unsigned long result, __d2, __d3;
__asm__("gettr tr0, %2\n\t"
"pta $+32, tr0\n\t"
"andi %1, 1, %3\n\t"
"beq %3, r63, tr0\n\t"
"pta $+4, tr0\n"
"0:\n\t"
"shlri.l %1, 1, %1\n\t"
"addi %0, 1, %0\n\t"
"andi %1, 1, %3\n\t"
"beqi %3, 1, tr0\n"
"1:\n\t"
"ptabs %2, tr0\n\t"
: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
: "0" (0L), "1" (word));
return result;
}
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_BITOPS_H */
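
The SUPERH32 ffz() above shifts the word right one bit per iteration, counting in the bt/s delay slot until a zero bit drops into T, so it returns the index of the first clear bit. That makes a tiny free-slot allocator a few lines (sketch over a single 32-bit word):

static unsigned long used;              /* bit set => slot taken */

static int alloc_slot(void)
{
        unsigned long idx;

        if (used == ~0UL)
                return -1;              /* all slots busy */
        idx = ffz(used);                /* first clear bit */
        __set_bit(idx, &used);
        return idx;
}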

@@ -0,0 +1,10 @@
#ifndef __ASM_SH_BL_BIT_H
#define __ASM_SH_BL_BIT_H
#ifdef CONFIG_SUPERH32
# include <asm/bl_bit_32.h>
#else
# include <asm/bl_bit_64.h>
#endif
#endif /* __ASM_SH_BL_BIT_H */

@@ -0,0 +1,33 @@
#ifndef __ASM_SH_BL_BIT_32_H
#define __ASM_SH_BL_BIT_32_H
static inline void set_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or %2, %0\n\t"
"and %3, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "r" (0x10000000), "r" (0xffffff0f)
: "memory"
);
}
static inline void clear_bl_bit(void)
{
unsigned long __dummy0, __dummy1;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %2, %0\n\t"
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~0x10000000)
: "memory"
);
}
#endif /* __ASM_SH_BL_BIT_32_H */
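
set_bl_bit() ORs SR.BL (bit 28) into the status register and clear_bl_bit() masks it back out; with BL set, interrupts and exceptions are blocked outright. The intended pairing is a very short critical section (illustrative sketch):

static void touch_mmu_arrays(void)
{
        set_bl_bit();   /* SR.BL = 1: nothing may interrupt or fault */
        /* ... a handful of instructions, e.g. TLB array writes ... */
        clear_bl_bit(); /* SR.BL = 0: normal operation resumes */
}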

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_BL_BIT_64_H
#define __ASM_SH_BL_BIT_64_H
#include <asm/processor.h>
#define SR_BL_LL 0x0000000010000000LL
static inline void set_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline void clear_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
#endif /* __ASM_SH_BL_BIT_64_H */

arch/sh/include/asm/bug.h (new file, 119 lines)
@@ -0,0 +1,119 @@
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
#include <linux/linkage.h>
#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
#define BUGFLAG_UNWINDER (1 << 1)
#ifdef CONFIG_GENERIC_BUG
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
/**
* _EMIT_BUG_ENTRY
* %1 - __FILE__
* %2 - __LINE__
* %3 - trap type
* %4 - sizeof(struct bug_entry)
*
* The trapa opcode itself sits in %0.
* The %O notation is used to avoid # generation.
*
* The offending file and line are encoded in the __bug_table section.
*/
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t.long 1b, %O1\n" \
"\t.short %O2, %O3\n" \
"\t.org 2b+%O4\n" \
"\t.popsection\n"
#else
#define _EMIT_BUG_ENTRY \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t.long 1b\n" \
"\t.short %O3\n" \
"\t.org 2b+%O4\n" \
"\t.popsection\n"
#endif
#define BUG() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define __WARN_TAINT(taint) \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), \
"i" (BUGFLAG_TAINT(taint)), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
if (__ret_warn_on) \
__WARN(); \
} else { \
if (unlikely(__ret_warn_on)) \
__WARN(); \
} \
unlikely(__ret_warn_on); \
})
#define UNWINDER_BUG() \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
_EMIT_BUG_ENTRY \
: \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), \
"i" (BUGFLAG_UNWINDER), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define UNWINDER_BUG_ON(x) ({ \
int __ret_unwinder_on = !!(x); \
if (__builtin_constant_p(__ret_unwinder_on)) { \
if (__ret_unwinder_on) \
UNWINDER_BUG(); \
} else { \
if (unlikely(__ret_unwinder_on)) \
UNWINDER_BUG(); \
} \
unlikely(__ret_unwinder_on); \
})
#else
#define UNWINDER_BUG BUG
#define UNWINDER_BUG_ON BUG_ON
#endif /* CONFIG_GENERIC_BUG */
#include <asm-generic/bug.h>
struct pt_regs;
/* arch/sh/kernel/traps.c */
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
extern void die_if_kernel(const char *str, struct pt_regs *regs, long err);
extern void die_if_no_fixup(const char *str, struct pt_regs *regs, long err);
#endif /* __ASM_SH_BUG_H */
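
In short, the macros plant the trapa #0x3e opcode inline and record the file/line/flags tuple in __bug_table, keyed by the trapping PC. From C they read like the generic versions (struct widget here is hypothetical):

static int frobnicate(struct widget *w)
{
        if (WARN_ON(w == NULL))         /* trapa-backed warning, then recover */
                return -EINVAL;
        BUG_ON(w->refs < 0);            /* unrecoverable: trap and die */
        return 0;
}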

@@ -0,0 +1,77 @@
#ifndef __ASM_SH_BUGS_H
#define __ASM_SH_BUGS_H
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
/*
* I don't know of any Super-H bugs yet.
*/
#include <asm/processor.h>
extern void select_idle_routine(void);
static void __init check_bugs(void)
{
extern unsigned long loops_per_jiffy;
char *p = &init_utsname()->machine[2]; /* "sh" */
select_idle_routine();
current_cpu_data.loops_per_jiffy = loops_per_jiffy;
switch (current_cpu_data.family) {
case CPU_FAMILY_SH2:
*p++ = '2';
break;
case CPU_FAMILY_SH2A:
*p++ = '2';
*p++ = 'a';
break;
case CPU_FAMILY_SH3:
*p++ = '3';
break;
case CPU_FAMILY_SH4:
*p++ = '4';
break;
case CPU_FAMILY_SH4A:
*p++ = '4';
*p++ = 'a';
break;
case CPU_FAMILY_SH4AL_DSP:
*p++ = '4';
*p++ = 'a';
*p++ = 'l';
*p++ = '-';
*p++ = 'd';
*p++ = 's';
*p++ = 'p';
break;
case CPU_FAMILY_SH5:
*p++ = '6';
*p++ = '4';
break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than
* default:, so we're able to have the compiler whine
* about unhandled enumerations.
*/
break;
}
printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
#ifndef __LITTLE_ENDIAN__
/* 'eb' means 'Endian Big' */
*p++ = 'e';
*p++ = 'b';
#endif
*p = '\0';
}
#endif /* __ASM_SH_BUGS_H */

@@ -0,0 +1,47 @@
/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
*
* include/asm-sh/cache.h
*
* Copyright 1999 (C) Niibe Yutaka
* Copyright 2002, 2003 (C) Paul Mundt
*/
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
#ifdef __KERNEL__
#include <linux/init.h>
#include <cpu/cache.h>
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifndef __ASSEMBLY__
struct cache_info {
unsigned int ways; /* Number of cache ways */
unsigned int sets; /* Number of cache sets */
unsigned int linesz; /* Cache line size (bytes) */
unsigned int way_size; /* sets * line size */
/*
* way_incr is the address offset for accessing the next way
* in memory mapped cache array ops.
*/
unsigned int way_incr;
unsigned int entry_shift;
unsigned int entry_mask;
/*
* Compute a mask which selects the address bits which overlap between
* 1. those used to select the cache set during indexing
* 2. those in the physical page number.
*/
unsigned int alias_mask;
unsigned int n_aliases; /* Number of aliases */
unsigned long flags;
};
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */

@@ -0,0 +1,11 @@
#ifndef __ASM_SH_CACHE_INSNS_H
#define __ASM_SH_CACHE_INSNS_H
#ifdef CONFIG_SUPERH32
# include <asm/cache_insns_32.h>
#else
# include <asm/cache_insns_64.h>
#endif
#endif /* __ASM_SH_CACHE_INSNS_H */

@@ -0,0 +1,21 @@
#ifndef __ASM_SH_CACHE_INSNS_32_H
#define __ASM_SH_CACHE_INSNS_32_H
#include <linux/types.h>
#if defined(CONFIG_CPU_SH4A)
#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
#else
#define __icbi(addr) mb()
#endif
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
static inline reg_size_t register_align(void *val)
{
return (unsigned long)(signed long)val;
}
#endif /* __ASM_SH_CACHE_INSNS_32_H */
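
Each ocb* macro acts on the single operand-cache line containing the address, so a region-sized primitive is just a line-by-line walk (illustrative sketch assuming L1_CACHE_BYTES from <asm/cache.h>; real code should use the __flush_*_region() ops from <asm/cacheflush.h>):

static void purge_region(void *start, size_t len)
{
        unsigned long v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
        unsigned long end = (unsigned long)start + len;

        for (; v < end; v += L1_CACHE_BYTES)
                __ocbp(v);      /* write back and invalidate one line */
}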

@@ -0,0 +1,23 @@
/*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_CACHE_INSNS_64_H
#define __ASM_SH_CACHE_INSNS_64_H
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
static inline reg_size_t register_align(void *val)
{
return (unsigned long long)(signed long long)(signed long)val;
}
#endif /* __ASM_SH_CACHE_INSNS_64_H */

@@ -0,0 +1,104 @@
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H
#ifdef __KERNEL__
#include <linux/mm.h>
/*
* Cache flushing:
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
* - flush_cache_dup_mm(mm) handles cache flushing when forking
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
*
* - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
* - flush_icache_range(start, end) flushes(invalidates) a range for icache
* - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
* - flush_cache_sigtramp(vaddr) flushes the signal trampoline
*/
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_page)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_page)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);
static inline void cache_noop(void *args) { }
extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma,
struct page *page);
extern void flush_cache_sigtramp(unsigned long address);
struct flusher_data {
struct vm_area_struct *vma;
unsigned long addr1, addr2;
};
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vmaddr)
{
if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
__flush_anon_page(page, vmaddr);
}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
__flush_wback_region(addr, size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
__flush_invalidate_region(addr, size);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
flush_dcache_page(page);
}
extern void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
#define flush_cache_vmap(start, end) local_flush_cache_all(NULL)
#define flush_cache_vunmap(start, end) local_flush_cache_all(NULL)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);
#define PG_dcache_clean PG_arch_1
void cpu_cache_init(void);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
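
The canonical use of the icache half of this interface: whenever the kernel writes instructions (module loader, kprobes, signal trampolines), the D-cache must be written back and the I-cache invalidated over the range before the code may run. Sketch, with dst/opcodes/len illustrative:

memcpy(dst, opcodes, len);
flush_icache_range((unsigned long)dst, (unsigned long)dst + len);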

@@ -0,0 +1,5 @@
#ifdef CONFIG_SUPERH32
# include <asm/checksum_32.h>
#else
# include <asm-generic/checksum.h>
#endif

@@ -0,0 +1,215 @@
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
*/
#include <linux/in6.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
}
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int __dummy;
__asm__("swap.w %0, %1\n\t"
"extu.w %0, %0\n\t"
"extu.w %1, %1\n\t"
"add %1, %0\n\t"
"swap.w %0, %1\n\t"
"add %1, %0\n\t"
"not %0, %0\n\t"
: "=r" (sum), "=&r" (__dummy)
: "0" (sum)
: "t");
return (__force __sum16)sum;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
* for linux by Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum, __dummy0, __dummy1;
__asm__ __volatile__(
"mov.l @%1+, %0\n\t"
"mov.l @%1+, %3\n\t"
"add #-2, %2\n\t"
"clrt\n\t"
"1:\t"
"addc %3, %0\n\t"
"movt %4\n\t"
"mov.l @%1+, %3\n\t"
"dt %2\n\t"
"bf/s 1b\n\t"
" cmp/eq #1, %4\n\t"
"addc %3, %0\n\t"
"addc %2, %0" /* Here %2 is 0, add carry-bit */
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
: "1" (iph), "2" (ihl)
: "t", "memory");
return csum_fold(sum);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
unsigned long len_proto = (proto + len) << 8;
#else
unsigned long len_proto = proto + len;
#endif
__asm__("clrt\n\t"
"addc %0, %1\n\t"
"addc %2, %1\n\t"
"addc %3, %1\n\t"
"movt %0\n\t"
"add %1, %0"
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
: "t");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
unsigned int __dummy;
__asm__("clrt\n\t"
"mov.l @(0,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(4,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(8,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(12,%2), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(0,%3), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(4,%3), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(8,%3), %1\n\t"
"addc %1, %0\n\t"
"mov.l @(12,%3), %1\n\t"
"addc %1, %0\n\t"
"addc %4, %0\n\t"
"addc %5, %0\n\t"
"movt %1\n\t"
"add %1, %0\n"
: "=r" (sum), "=&r" (__dummy)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "t");
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic((__force const void *)src,
dst, len, sum, NULL, err_ptr);
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif /* __ASM_SH_CHECKSUM_H */
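
Putting the pieces together: an intact IPv4 header checksums to zero once folded, since it carries its own checksum field, so validation is a single call (sketch; iph points at the header, ihl is its length in 32-bit words):

static int ipv4_header_ok(const void *iph, unsigned int ihl)
{
        return ip_fast_csum(iph, ihl) == 0;
}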

@@ -0,0 +1,33 @@
/*
* Copyright (C) 2010 Paul Mundt <lethal@linux-sh.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __CLKDEV_H__
#define __CLKDEV_H__
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/clock.h>
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
if (!slab_is_available())
return alloc_bootmem_low_pages(size);
else
return kzalloc(size, GFP_KERNEL);
}
#ifndef CONFIG_COMMON_CLK
#define __clk_put(clk)
#define __clk_get(clk) ({ 1; })
#endif
#endif /* __CLKDEV_H__ */

@@ -0,0 +1,16 @@
#ifndef __ASM_SH_CLOCK_H
#define __ASM_SH_CLOCK_H
#include <linux/sh_clk.h>
/* Should be defined by processor-specific code */
void __deprecated arch_init_clk_ops(struct sh_clk_ops **, int type);
int __init arch_clk_init(void);
/* arch/sh/kernel/cpu/clock-cpg.c */
int __init __deprecated cpg_clk_init(void);
/* arch/sh/kernel/cpu/clock.c */
int clk_init(void);
#endif /* __ASM_SH_CLOCK_H */

@@ -0,0 +1,72 @@
#ifndef __ASM_SH_CMPXCHG_GRB_H
#define __ASM_SH_CMPXCHG_GRB_H
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-4, r15 \n\t" /* LOGIN */
" mov.l @%1, %0 \n\t" /* load old value */
" mov.l %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory", "r0", "r1");
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN */
" mov.b @%1, %0 \n\t" /* load old value */
" extu.b %0, %0 \n\t" /* extend as unsigned */
" mov.b %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory" , "r0", "r1");
return retval;
}
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
unsigned long new)
{
unsigned long retval;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-8, r15 \n\t" /* LOGIN */
" mov.l @%3, %0 \n\t" /* load old value */
" cmp/eq %0, %1 \n\t"
" bf 1f \n\t" /* if not equal */
" mov.l %2, @%3 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (old), "+r" (new) /* old or new can be r15 */
: "r" (m)
: "memory" , "r0", "r1", "t");
return retval;
}
#endif /* __ASM_SH_CMPXCHG_GRB_H */

@@ -0,0 +1,42 @@
#ifndef __ASM_SH_CMPXCHG_IRQ_H
#define __ASM_SH_CMPXCHG_IRQ_H
#include <linux/irqflags.h>
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long flags, retval;
local_irq_save(flags);
retval = *m;
*m = val;
local_irq_restore(flags);
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long flags, retval;
local_irq_save(flags);
retval = *m;
*m = val & 0xff;
local_irq_restore(flags);
return retval;
}
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
unsigned long new)
{
__u32 retval;
unsigned long flags;
local_irq_save(flags);
retval = *m;
if (retval == old)
*m = new;
local_irq_restore(flags); /* implies memory barrier */
return retval;
}
#endif /* __ASM_SH_CMPXCHG_IRQ_H */

@@ -0,0 +1,71 @@
#ifndef __ASM_SH_CMPXCHG_LLSC_H
#define __ASM_SH_CMPXCHG_LLSC_H
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! xchg_u32 \n\t"
"mov %0, %1 \n\t"
"mov %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
: "=&z"(tmp), "=&r" (retval)
: "r" (m), "r" (val)
: "t", "memory"
);
return retval;
}
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! xchg_u8 \n\t"
"mov %0, %1 \n\t"
"mov %3, %0 \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
: "=&z"(tmp), "=&r" (retval)
: "r" (m), "r" (val & 0xff)
: "t", "memory"
);
return retval;
}
static inline unsigned long
__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
"movli.l @%2, %0 ! __cmpxchg_u32 \n\t"
"mov %0, %1 \n\t"
"cmp/eq %1, %3 \n\t"
"bf 2f \n\t"
"mov %4, %0 \n\t"
"2: \n\t"
"movco.l %0, @%2 \n\t"
"bf 1b \n\t"
"synco \n\t"
: "=&z" (tmp), "=&r" (retval)
: "r" (m), "r" (old), "r" (new)
: "t", "memory"
);
return retval;
}
#endif /* __ASM_SH_CMPXCHG_LLSC_H */

@@ -0,0 +1,70 @@
#ifndef __ASM_SH_CMPXCHG_H
#define __ASM_SH_CMPXCHG_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#include <linux/compiler.h>
#include <linux/types.h>
#if defined(CONFIG_GUSA_RB)
#include <asm/cmpxchg-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/cmpxchg-llsc.h>
#else
#include <asm/cmpxchg-irq.h>
#endif
extern void __xchg_called_with_bad_pointer(void);
#define __xchg(ptr, x, size) \
({ \
unsigned long __xchg__res; \
volatile void *__xchg_ptr = (ptr); \
switch (size) { \
case 4: \
__xchg__res = xchg_u32(__xchg_ptr, x); \
break; \
case 1: \
__xchg__res = xchg_u8(__xchg_ptr, x); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
__xchg__res = x; \
break; \
} \
\
__xchg__res; \
})
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#endif /* __ASM_SH_CMPXCHG_H */
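
The size switch above makes cmpxchg() usable on any aligned 4-byte object (and xchg() also on bytes); anything else becomes a link error through the deliberately undefined helpers. Callers wrap it in the classic retry loop (illustrative saturating counter):

static int hits;                        /* hypothetical shared counter */

static void saturating_inc(int limit)
{
        int old, new;

        do {
                old = hits;
                new = (old < limit) ? old + 1 : old;
        } while (cmpxchg(&hits, old, new) != old);
}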

@@ -0,0 +1,18 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef __ASM_SH_DEVICE_H
#define __ASM_SH_DEVICE_H
#include <asm-generic/device.h>
struct platform_device;
/* allocate contiguous memory chunk and fill in struct resource */
int platform_resource_setup_memory(struct platform_device *pdev,
char *name, unsigned long memsize);
void plat_early_device_setup(void);
#endif /* __ASM_SH_DEVICE_H */

@@ -0,0 +1,100 @@
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
extern struct dma_map_ops *dma_ops;
extern void no_iommu_init(void);
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
return dma_ops;
}
#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (ops->dma_supported)
return ops->dma_supported(dev, mask);
return 1;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
if (ops->set_dma_mask)
return ops->set_dma_mask(dev, mask);
*dev->dma_mask = mask;
return 0;
}
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
return dma_addr == 0;
}
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
if (!ops->alloc)
return NULL;
memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
if (ops->free)
ops->free(dev, size, vaddr, dma_handle, attrs);
}
/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
#endif /* __ASM_SH_DMA_MAPPING_H */
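
Driver-side, the wrappers above are used as in this sketch (dev, size, and GFP flags illustrative): dma_alloc_attrs() tries the per-device coherent pool first and only then the ops->alloc hook.

dma_addr_t bus;
void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);

if (!ring)
        return -ENOMEM;
/* ... hand 'bus' to the device, touch 'ring' from the CPU ... */
dma_free_coherent(dev, PAGE_SIZE, ring, bus);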

@@ -0,0 +1,53 @@
/*
* Common header for the legacy SH DMA driver and the new dmaengine driver
*
* extracted from arch/sh/include/asm/dma-sh.h:
*
* Copyright (C) 2000 Takashi YOSHII
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef DMA_REGISTER_H
#define DMA_REGISTER_H
/* DMA registers */
#define SAR 0x00 /* Source Address Register */
#define DAR 0x04 /* Destination Address Register */
#define TCR 0x08 /* Transfer Count Register */
#define CHCR 0x0C /* Channel Control Register */
#define DMAOR 0x40 /* DMA Operation Register */
/* DMAOR definitions */
#define DMAOR_AE 0x00000004 /* Address Error Flag */
#define DMAOR_NMIF 0x00000002 /* NMI Flag */
#define DMAOR_DME 0x00000001 /* DMA Master Enable */
/* Definitions for the SuperH DMAC */
#define REQ_L 0x00000000
#define REQ_E 0x00080000
#define RACK_H 0x00000000
#define RACK_L 0x00040000
#define ACK_R 0x00000000
#define ACK_W 0x00020000
#define ACK_H 0x00000000
#define ACK_L 0x00010000
#define DM_INC 0x00004000 /* Destination addresses are incremented */
#define DM_DEC 0x00008000 /* Destination addresses are decremented */
#define DM_FIX 0x0000c000 /* Destination address is fixed */
#define SM_INC 0x00001000 /* Source addresses are incremented */
#define SM_DEC 0x00002000 /* Source addresses are decremented */
#define SM_FIX 0x00003000 /* Source address is fixed */
#define RS_IN 0x00000200
#define RS_OUT 0x00000300
#define RS_AUTO 0x00000400 /* Auto Request */
#define RS_ERS 0x00000800 /* DMA extended resource selector */
#define TS_BLK 0x00000040
#define TM_BUR 0x00000020
#define CHCR_DE 0x00000001 /* DMA Enable */
#define CHCR_TE 0x00000002 /* Transfer End Flag */
#define CHCR_IE 0x00000004 /* Interrupt Enable */
#endif
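
These bits compose directly into a channel-control value; for instance, a memory-to-memory transfer with auto request, incrementing source and destination, and an end-of-transfer interrupt might be set up as (illustrative):

u32 chcr = RS_AUTO | SM_INC | DM_INC | CHCR_IE | CHCR_DE;
/* completion shows up as CHCR_TE, by interrupt or polling */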

arch/sh/include/asm/dma.h (new file, 151 lines)
@@ -0,0 +1,151 @@
/*
* include/asm-sh/dma.h
*
* Copyright (C) 2003, 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H
#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <asm-generic/dma.h>
/*
* Read and write modes can mean drastically different things depending on the
* channel configuration. Consult your DMAC documentation and module
* implementation for further clues.
*/
#define DMA_MODE_READ 0x00
#define DMA_MODE_WRITE 0x01
#define DMA_MODE_MASK 0x01
#define DMA_AUTOINIT 0x10
/*
* DMAC (dma_info) flags
*/
enum {
DMAC_CHANNELS_CONFIGURED = 0x01,
DMAC_CHANNELS_TEI_CAPABLE = 0x02, /* Transfer end interrupt */
};
/*
* DMA channel capabilities / flags
*/
enum {
DMA_CONFIGURED = 0x01,
/*
* Transfer end interrupt, inherited from DMAC.
* wait_queue used in dma_wait_for_completion.
*/
DMA_TEI_CAPABLE = 0x02,
};
extern spinlock_t dma_spin_lock;
struct dma_channel;
struct dma_ops {
int (*request)(struct dma_channel *chan);
void (*free)(struct dma_channel *chan);
int (*get_residue)(struct dma_channel *chan);
int (*xfer)(struct dma_channel *chan);
int (*configure)(struct dma_channel *chan, unsigned long flags);
int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
};
struct dma_channel {
char dev_id[16]; /* unique name per DMAC of channel */
unsigned int chan; /* DMAC channel number */
unsigned int vchan; /* Virtual channel number */
unsigned int mode;
unsigned int count;
unsigned long sar;
unsigned long dar;
const char **caps;
unsigned long flags;
atomic_t busy;
wait_queue_head_t wait_queue;
struct device dev;
void *priv_data;
};
struct dma_info {
struct platform_device *pdev;
const char *name;
unsigned int nr_channels;
unsigned long flags;
struct dma_ops *ops;
struct dma_channel *channels;
struct list_head list;
int first_channel_nr;
int first_vchannel_nr;
};
struct dma_chan_caps {
int ch_num;
const char **caplist;
};
#define to_dma_channel(channel) container_of(channel, struct dma_channel, dev)
/* arch/sh/drivers/dma/dma-api.c */
extern int dma_xfer(unsigned int chan, unsigned long from,
unsigned long to, size_t size, unsigned int mode);
#define dma_write(chan, from, to, size) \
dma_xfer(chan, from, to, size, DMA_MODE_WRITE)
#define dma_write_page(chan, from, to) \
dma_write(chan, from, to, PAGE_SIZE)
#define dma_read(chan, from, to, size) \
dma_xfer(chan, from, to, size, DMA_MODE_READ)
#define dma_read_page(chan, from, to) \
dma_read(chan, from, to, PAGE_SIZE)
extern int request_dma_bycap(const char **dmac, const char **caps,
const char *dev_id);
extern int get_dma_residue(unsigned int chan);
extern struct dma_info *get_dma_info(unsigned int chan);
extern struct dma_channel *get_dma_channel(unsigned int chan);
extern void dma_wait_for_completion(unsigned int chan);
extern void dma_configure_channel(unsigned int chan, unsigned long flags);
extern int register_dmac(struct dma_info *info);
extern void unregister_dmac(struct dma_info *info);
extern struct dma_info *get_dma_info_by_name(const char *dmac_name);
extern int dma_extend(unsigned int chan, unsigned long op, void *param);
extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
/* arch/sh/drivers/dma/dma-sysfs.c */
extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_DMA_H */
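
/*
 * A minimal usage sketch of the API above, assuming a platform whose
 * DMAC was registered with a "mem-to-mem" capability; the DMAC name,
 * capability string and wrapper function are hypothetical.
 */
static int example_dma_copy(unsigned long src, unsigned long dst, size_t len)
{
	static const char *dmacs[] = { "example-dmac", NULL };
	static const char *caps[] = { "mem-to-mem", NULL };
	int chan = request_dma_bycap(dmacs, caps, "example");

	if (chan < 0)
		return chan;

	dma_configure_channel(chan, DMA_MODE_READ);
	if (dma_read(chan, src, dst, len) < 0)
		return -EIO;

	dma_wait_for_completion(chan);	/* sleeps on the channel's wait_queue */
	return get_dma_residue(chan);	/* 0 once the full count has moved */
}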


@@ -0,0 +1,23 @@
/*
* SH7760 DMABRG (USB/Audio) support
*/
#ifndef _DMABRG_H_
#define _DMABRG_H_
/* IRQ sources */
#define DMABRGIRQ_USBDMA 0
#define DMABRGIRQ_USBDMAERR 1
#define DMABRGIRQ_A0TXF 2
#define DMABRGIRQ_A0TXH 3
#define DMABRGIRQ_A0RXF 4
#define DMABRGIRQ_A0RXH 5
#define DMABRGIRQ_A1TXF 6
#define DMABRGIRQ_A1TXH 7
#define DMABRGIRQ_A1RXF 8
#define DMABRGIRQ_A1RXH 9
extern int dmabrg_request_irq(unsigned int, void(*)(void *), void *);
extern void dmabrg_free_irq(unsigned int);
#endif
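
/*
 * A sketch of claiming one of the IRQ sources above; the handler and
 * device cookie are illustrative. The handler is invoked from the
 * DMABRG demux when the requested source fires.
 */
static void example_usb_done(void *data)
{
	/* e.g. complete() whatever transfer was queued on this device */
}

static int example_attach(void *dev)
{
	return dmabrg_request_irq(DMABRGIRQ_USBDMA, example_usb_done, dev);
}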

421
arch/sh/include/asm/dwarf.h Normal file

@@ -0,0 +1,421 @@
/*
* Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#ifndef __ASM_SH_DWARF_H
#define __ASM_SH_DWARF_H
#ifdef CONFIG_DWARF_UNWINDER
/*
* DWARF expression operations
*/
#define DW_OP_addr 0x03
#define DW_OP_deref 0x06
#define DW_OP_const1u 0x08
#define DW_OP_const1s 0x09
#define DW_OP_const2u 0x0a
#define DW_OP_const2s 0x0b
#define DW_OP_const4u 0x0c
#define DW_OP_const4s 0x0d
#define DW_OP_const8u 0x0e
#define DW_OP_const8s 0x0f
#define DW_OP_constu 0x10
#define DW_OP_consts 0x11
#define DW_OP_dup 0x12
#define DW_OP_drop 0x13
#define DW_OP_over 0x14
#define DW_OP_pick 0x15
#define DW_OP_swap 0x16
#define DW_OP_rot 0x17
#define DW_OP_xderef 0x18
#define DW_OP_abs 0x19
#define DW_OP_and 0x1a
#define DW_OP_div 0x1b
#define DW_OP_minus 0x1c
#define DW_OP_mod 0x1d
#define DW_OP_mul 0x1e
#define DW_OP_neg 0x1f
#define DW_OP_not 0x20
#define DW_OP_or 0x21
#define DW_OP_plus 0x22
#define DW_OP_plus_uconst 0x23
#define DW_OP_shl 0x24
#define DW_OP_shr 0x25
#define DW_OP_shra 0x26
#define DW_OP_xor 0x27
#define DW_OP_skip 0x2f
#define DW_OP_bra 0x28
#define DW_OP_eq 0x29
#define DW_OP_ge 0x2a
#define DW_OP_gt 0x2b
#define DW_OP_le 0x2c
#define DW_OP_lt 0x2d
#define DW_OP_ne 0x2e
#define DW_OP_lit0 0x30
#define DW_OP_lit1 0x31
#define DW_OP_lit2 0x32
#define DW_OP_lit3 0x33
#define DW_OP_lit4 0x34
#define DW_OP_lit5 0x35
#define DW_OP_lit6 0x36
#define DW_OP_lit7 0x37
#define DW_OP_lit8 0x38
#define DW_OP_lit9 0x39
#define DW_OP_lit10 0x3a
#define DW_OP_lit11 0x3b
#define DW_OP_lit12 0x3c
#define DW_OP_lit13 0x3d
#define DW_OP_lit14 0x3e
#define DW_OP_lit15 0x3f
#define DW_OP_lit16 0x40
#define DW_OP_lit17 0x41
#define DW_OP_lit18 0x42
#define DW_OP_lit19 0x43
#define DW_OP_lit20 0x44
#define DW_OP_lit21 0x45
#define DW_OP_lit22 0x46
#define DW_OP_lit23 0x47
#define DW_OP_lit24 0x48
#define DW_OP_lit25 0x49
#define DW_OP_lit26 0x4a
#define DW_OP_lit27 0x4b
#define DW_OP_lit28 0x4c
#define DW_OP_lit29 0x4d
#define DW_OP_lit30 0x4e
#define DW_OP_lit31 0x4f
#define DW_OP_reg0 0x50
#define DW_OP_reg1 0x51
#define DW_OP_reg2 0x52
#define DW_OP_reg3 0x53
#define DW_OP_reg4 0x54
#define DW_OP_reg5 0x55
#define DW_OP_reg6 0x56
#define DW_OP_reg7 0x57
#define DW_OP_reg8 0x58
#define DW_OP_reg9 0x59
#define DW_OP_reg10 0x5a
#define DW_OP_reg11 0x5b
#define DW_OP_reg12 0x5c
#define DW_OP_reg13 0x5d
#define DW_OP_reg14 0x5e
#define DW_OP_reg15 0x5f
#define DW_OP_reg16 0x60
#define DW_OP_reg17 0x61
#define DW_OP_reg18 0x62
#define DW_OP_reg19 0x63
#define DW_OP_reg20 0x64
#define DW_OP_reg21 0x65
#define DW_OP_reg22 0x66
#define DW_OP_reg23 0x67
#define DW_OP_reg24 0x68
#define DW_OP_reg25 0x69
#define DW_OP_reg26 0x6a
#define DW_OP_reg27 0x6b
#define DW_OP_reg28 0x6c
#define DW_OP_reg29 0x6d
#define DW_OP_reg30 0x6e
#define DW_OP_reg31 0x6f
#define DW_OP_breg0 0x70
#define DW_OP_breg1 0x71
#define DW_OP_breg2 0x72
#define DW_OP_breg3 0x73
#define DW_OP_breg4 0x74
#define DW_OP_breg5 0x75
#define DW_OP_breg6 0x76
#define DW_OP_breg7 0x77
#define DW_OP_breg8 0x78
#define DW_OP_breg9 0x79
#define DW_OP_breg10 0x7a
#define DW_OP_breg11 0x7b
#define DW_OP_breg12 0x7c
#define DW_OP_breg13 0x7d
#define DW_OP_breg14 0x7e
#define DW_OP_breg15 0x7f
#define DW_OP_breg16 0x80
#define DW_OP_breg17 0x81
#define DW_OP_breg18 0x82
#define DW_OP_breg19 0x83
#define DW_OP_breg20 0x84
#define DW_OP_breg21 0x85
#define DW_OP_breg22 0x86
#define DW_OP_breg23 0x87
#define DW_OP_breg24 0x88
#define DW_OP_breg25 0x89
#define DW_OP_breg26 0x8a
#define DW_OP_breg27 0x8b
#define DW_OP_breg28 0x8c
#define DW_OP_breg29 0x8d
#define DW_OP_breg30 0x8e
#define DW_OP_breg31 0x8f
#define DW_OP_regx 0x90
#define DW_OP_fbreg 0x91
#define DW_OP_bregx 0x92
#define DW_OP_piece 0x93
#define DW_OP_deref_size 0x94
#define DW_OP_xderef_size 0x95
#define DW_OP_nop 0x96
#define DW_OP_push_object_address 0x97
#define DW_OP_call2 0x98
#define DW_OP_call4 0x99
#define DW_OP_call_ref 0x9a
#define DW_OP_form_tls_address 0x9b
#define DW_OP_call_frame_cfa 0x9c
#define DW_OP_bit_piece 0x9d
#define DW_OP_lo_user 0xe0
#define DW_OP_hi_user 0xff
/*
* Addresses used in FDE entries in the .eh_frame section may be encoded
* using one of the following encodings.
*/
#define DW_EH_PE_absptr 0x00
#define DW_EH_PE_omit 0xff
#define DW_EH_PE_uleb128 0x01
#define DW_EH_PE_udata2 0x02
#define DW_EH_PE_udata4 0x03
#define DW_EH_PE_udata8 0x04
#define DW_EH_PE_sleb128 0x09
#define DW_EH_PE_sdata2 0x0a
#define DW_EH_PE_sdata4 0x0b
#define DW_EH_PE_sdata8 0x0c
#define DW_EH_PE_signed 0x09
#define DW_EH_PE_pcrel 0x10
/*
* The architecture-specific register number that contains the return
* address in the .debug_frame table.
*/
#define DWARF_ARCH_RA_REG 17
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/module.h>
/*
* Read either the frame pointer (r14) or the stack pointer (r15).
* NOTE: this MUST be inlined.
*/
static __always_inline unsigned long dwarf_read_arch_reg(unsigned int reg)
{
unsigned long value = 0;
switch (reg) {
case 14:
__asm__ __volatile__("mov r14, %0\n" : "=r" (value));
break;
case 15:
__asm__ __volatile__("mov r15, %0\n" : "=r" (value));
break;
default:
BUG();
}
return value;
}
/**
* dwarf_cie - Common Information Entry
*/
struct dwarf_cie {
unsigned long length;
unsigned long cie_id;
unsigned char version;
const char *augmentation;
unsigned int code_alignment_factor;
int data_alignment_factor;
/* Which column in the rule table represents return addr of func. */
unsigned int return_address_reg;
unsigned char *initial_instructions;
unsigned char *instructions_end;
unsigned char encoding;
unsigned long cie_pointer;
unsigned long flags;
#define DWARF_CIE_Z_AUGMENTATION (1 << 0)
/* linked-list entry if this CIE is from a module */
struct list_head link;
struct rb_node node;
};
/**
* dwarf_fde - Frame Description Entry
*/
struct dwarf_fde {
unsigned long length;
unsigned long cie_pointer;
struct dwarf_cie *cie;
unsigned long initial_location;
unsigned long address_range;
unsigned char *instructions;
unsigned char *end;
/* linked-list entry if this FDE is from a module */
struct list_head link;
struct rb_node node;
};
/**
* dwarf_frame - DWARF information for a frame in the call stack
*/
struct dwarf_frame {
struct dwarf_frame *prev, *next;
unsigned long pc;
struct list_head reg_list;
unsigned long cfa;
/* Valid when DW_FRAME_CFA_REG_OFFSET is set in flags */
unsigned int cfa_register;
unsigned int cfa_offset;
/* Valid when DW_FRAME_CFA_REG_EXP is set in flags */
unsigned char *cfa_expr;
unsigned int cfa_expr_len;
unsigned long flags;
#define DWARF_FRAME_CFA_REG_OFFSET (1 << 0)
#define DWARF_FRAME_CFA_REG_EXP (1 << 1)
unsigned long return_addr;
};
/**
* dwarf_reg - DWARF register
* @flags: Describes how to calculate the value of this register
*/
struct dwarf_reg {
struct list_head link;
unsigned int number;
unsigned long addr;
unsigned long flags;
#define DWARF_REG_OFFSET (1 << 0)
#define DWARF_VAL_OFFSET (1 << 1)
#define DWARF_UNDEFINED (1 << 2)
};
/*
* Call Frame instruction opcodes.
*/
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
#define DW_CFA_restore 0xc0
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_lo_user 0x1c
#define DW_CFA_hi_user 0x3f
/* GNU extension opcodes */
#define DW_CFA_GNU_args_size 0x2e
#define DW_CFA_GNU_negative_offset_extended 0x2f
/*
* Some call frame instructions encode their operands in the opcode. We
* need some helper functions to extract both the opcode and operands
* from an instruction.
*/
static inline unsigned int DW_CFA_opcode(unsigned long insn)
{
return (insn & 0xc0);
}
static inline unsigned int DW_CFA_operand(unsigned long insn)
{
return (insn & 0x3f);
}
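/*
 * Example: DW_CFA_advance_loc folds its operand into the opcode byte,
 * so the single byte 0x45 decodes as advance_loc with a delta of 5.
 * (Illustrative helper, not something the unwinder itself uses.)
 */
static inline unsigned int example_advance_delta(unsigned char insn)
{
	if (DW_CFA_opcode(insn) == DW_CFA_advance_loc)
		return DW_CFA_operand(insn);	/* 0x45 -> 5 */
	return 0;
}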
#define DW_EH_FRAME_CIE 0 /* .eh_frame CIE IDs are 0 */
#define DW_CIE_ID 0xffffffff
#define DW64_CIE_ID 0xffffffffffffffffULL
/*
* DWARF FDE/CIE length field values.
*/
#define DW_EXT_LO 0xfffffff0
#define DW_EXT_HI 0xffffffff
#define DW_EXT_DWARF64 DW_EXT_HI
extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
struct dwarf_frame *);
extern void dwarf_free_frame(struct dwarf_frame *);
extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
extern void module_dwarf_cleanup(struct module *);
#endif /* !__ASSEMBLY__ */
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_REGISTER .cfi_register
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
#else
/*
* Use the asm comment character to ignore the rest of the line.
*/
#define CFI_IGNORE !
#define CFI_STARTPROC CFI_IGNORE
#define CFI_ENDPROC CFI_IGNORE
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#ifndef __ASSEMBLY__
static inline void dwarf_unwinder_init(void)
{
}
#define module_dwarf_finalize(hdr, sechdrs, me) (0)
#define module_dwarf_cleanup(mod) do { } while (0)
#endif
#endif /* CONFIG_DWARF_UNWINDER */
#endif /* __ASM_SH_DWARF_H */

235
arch/sh/include/asm/elf.h Normal file

@@ -0,0 +1,235 @@
#ifndef __ASM_SH_ELF_H
#define __ASM_SH_ELF_H
#include <linux/utsname.h>
#include <asm/auxvec.h>
#include <asm/ptrace.h>
#include <asm/user.h>
/* ELF header e_flags defines */
#define EF_SH_PIC 0x100 /* -fpic */
#define EF_SH_FDPIC 0x8000 /* -mfdpic */
/* SH (particularly SHcompact) relocation types */
#define R_SH_NONE 0
#define R_SH_DIR32 1
#define R_SH_REL32 2
#define R_SH_DIR8WPN 3
#define R_SH_IND12W 4
#define R_SH_DIR8WPL 5
#define R_SH_DIR8WPZ 6
#define R_SH_DIR8BP 7
#define R_SH_DIR8W 8
#define R_SH_DIR8L 9
#define R_SH_SWITCH16 25
#define R_SH_SWITCH32 26
#define R_SH_USES 27
#define R_SH_COUNT 28
#define R_SH_ALIGN 29
#define R_SH_CODE 30
#define R_SH_DATA 31
#define R_SH_LABEL 32
#define R_SH_SWITCH8 33
#define R_SH_GNU_VTINHERIT 34
#define R_SH_GNU_VTENTRY 35
#define R_SH_TLS_GD_32 144
#define R_SH_TLS_LD_32 145
#define R_SH_TLS_LDO_32 146
#define R_SH_TLS_IE_32 147
#define R_SH_TLS_LE_32 148
#define R_SH_TLS_DTPMOD32 149
#define R_SH_TLS_DTPOFF32 150
#define R_SH_TLS_TPOFF32 151
#define R_SH_GOT32 160
#define R_SH_PLT32 161
#define R_SH_COPY 162
#define R_SH_GLOB_DAT 163
#define R_SH_JMP_SLOT 164
#define R_SH_RELATIVE 165
#define R_SH_GOTOFF 166
#define R_SH_GOTPC 167
/* FDPIC relocs */
#define R_SH_GOT20 201
#define R_SH_GOTOFF20 202
#define R_SH_GOTFUNCDESC 203
#define R_SH_GOTFUNCDESC20 204
#define R_SH_GOTOFFFUNCDESC 205
#define R_SH_GOTOFFFUNCDESC20 206
#define R_SH_FUNCDESC 207
#define R_SH_FUNCDESC_VALUE 208
/* SHmedia relocs */
#define R_SH_IMM_LOW16 246
#define R_SH_IMM_LOW16_PCREL 247
#define R_SH_IMM_MEDLOW16 248
#define R_SH_IMM_MEDLOW16_PCREL 249
/* Keep this the last entry. */
#define R_SH_NUM 256
/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpu_struct elf_fpregset_t;
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define ELF_ARCH EM_SH
#ifdef __KERNEL__
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_SH)
#define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC)
#define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC)
/*
* Enable dump using regset.
* This covers all of general/DSP/FPU regs.
*/
#define CORE_DUMP_USE_REGSET
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_CORE_COPY_REGS(_dest,_regs) \
memcpy((char *) &_dest, (char *) _regs, \
sizeof(struct pt_regs));
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (boot_cpu_data.flags)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment this simply reports the machine string from utsname();
there are no SH-specific optimized library paths yet, but that could change... */
#define ELF_PLATFORM (utsname()->machine)
#ifdef __SH5__
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
_r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
_r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
_r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
_r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
_r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
_r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
_r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
_r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
_r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
_r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
_r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
_r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
_r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
_r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
_r->sr = SR_FD | SR_MMU; } while (0)
#else
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \
_r->sr = SR_FD; } while (0)
#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, \
_dynamic_addr) \
do { \
_r->regs[0] = 0; \
_r->regs[1] = 0; \
_r->regs[2] = 0; \
_r->regs[3] = 0; \
_r->regs[4] = 0; \
_r->regs[5] = 0; \
_r->regs[6] = 0; \
_r->regs[7] = 0; \
_r->regs[8] = _exec_map_addr; \
_r->regs[9] = _interp_map_addr; \
_r->regs[10] = _dynamic_addr; \
_r->regs[11] = 0; \
_r->regs[12] = 0; \
_r->regs[13] = 0; \
_r->regs[14] = 0; \
_r->sr = SR_FD; \
} while (0)
#endif
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
#ifdef CONFIG_VSYSCALL
/* vDSO has arch_setup_additional_pages */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
extern unsigned int vdso_enabled;
extern void __kernel_vsyscall;
#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
#define VSYSCALL_AUX_ENT \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
else \
NEW_AUX_ENT(AT_IGNORE, 0)
#else
#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif /* CONFIG_VSYSCALL */
#ifdef CONFIG_SH_FPU
#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
#else
#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif
extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
/* Optional FPU initialization */ \
FPU_AUX_ENT; \
\
/* Optional vsyscall entry */ \
VSYSCALL_AUX_ENT; \
\
/* Cache desc */ \
NEW_AUX_ENT(AT_L1I_CACHESHAPE, l1i_cache_shape); \
NEW_AUX_ENT(AT_L1D_CACHESHAPE, l1d_cache_shape); \
NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape); \
} while (0)
#endif /* __KERNEL__ */
#endif /* __ASM_SH_ELF_H */
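
/*
 * Seen from userspace: the entries published by ARCH_DLINFO and
 * VSYSCALL_AUX_ENT above land in the process aux vector. A sketch,
 * assuming an SH toolchain (<asm/auxvec.h> carries the SH-specific
 * AT_*_CACHESHAPE values; AT_SYSINFO_EHDR comes from <elf.h>):
 */
#include <elf.h>
#include <sys/auxv.h>
#include <asm/auxvec.h>
#include <stdio.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);	/* 0 if no vDSO */
	unsigned long l1d = getauxval(AT_L1D_CACHESHAPE);

	printf("vDSO: %#lx, L1D shape: %#lx\n", vdso, l1d);
	return 0;
}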


@@ -0,0 +1,122 @@
! entry.S macro define
.macro cli
stc sr, r0
or #0xf0, r0
ldc r0, sr
.endm
.macro sti
mov #0xfffffff0, r11
extu.b r11, r11
not r11, r11
stc sr, r10
and r11, r10
#ifdef CONFIG_CPU_HAS_SR_RB
stc k_g_imask, r11
or r11, r10
#endif
ldc r10, sr
.endm
.macro get_current_thread_info, ti, tmp
#ifdef CONFIG_CPU_HAS_SR_RB
stc r7_bank, \ti
#else
mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
shll8 \tmp
shll2 \tmp
mov r15, \ti
and \tmp, \ti
#endif
.endm
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
mov.l 7834f, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_on
7835:
.endm
.macro TRACE_IRQS_OFF
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
mov.l 7834f, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_off
7835:
.endm
#else
.macro TRACE_IRQS_ON
.endm
.macro TRACE_IRQS_OFF
.endm
#endif
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
# define PREF(x) pref @x
#else
# define PREF(x) nop
#endif
/*
* Macro for use within assembly. Because the DWARF unwinder
* needs to use the frame register to unwind the stack, we
* need to set up r14 with the value of the stack pointer, as
* the return address is usually on the stack somewhere.
*/
.macro setup_frame_reg
#ifdef CONFIG_DWARF_UNWINDER
mov r15, r14
#endif
.endm

19
arch/sh/include/asm/fb.h Normal file

@@ -0,0 +1,19 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */


@@ -0,0 +1,101 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <linux/kernel.h>
#include <linux/threads.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of P3 backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use __set_fixmap(idx, phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
* on UP currently we will have no trace of the fixmap mechanism,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
enum fixed_addresses {
/*
* The FIX_CMAP entries are used by kmap_coherent() to get virtual
* addresses which are of a known color, and so their values are
* important. __fix_to_virt(FIX_CMAP_END - n) must give an address
* which is the same color as a page (n<<PAGE_SHIFT).
*/
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
#endif
#ifdef CONFIG_IOREMAP_FIXED
/*
* FIX_IOREMAP entries are useful for mapping physical address
* space before ioremap() is usable, e.g. really early in boot
* before kmalloc() is working.
*/
#define FIX_N_IOREMAPS 32
FIX_IOREMAP_BEGIN,
FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
#endif
__end_of_fixed_addresses
};
extern void __set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#ifdef CONFIG_SUPERH32
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
#include <asm-generic/fixmap.h>
#endif
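
/*
 * A sketch of the address arithmetic behind the FIX_CMAP comment
 * above, using __fix_to_virt() from <asm-generic/fixmap.h>: slots are
 * carved downward from FIXADDR_TOP, one page per index, so successive
 * indices differ by exactly one page and the result below has the same
 * cache colour as page number 'colour'.
 */
static inline unsigned long example_cmap_addr(unsigned int colour)
{
	return __fix_to_virt(FIX_CMAP_END - (colour & (FIX_N_COLOURS - 1)));
}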


@@ -0,0 +1,30 @@
/*
* include/asm-sh/flat.h
*
* uClinux flat-format executables
*
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive for
* more details.
*/
#ifndef __ASM_SH_FLAT_H
#define __ASM_SH_FLAT_H
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) ({ (void)p; 0; })
#define FLAT_PLAT_INIT(_r) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \
_r->sr = SR_FD; } while (0)
#endif /* __ASM_SH_FLAT_H */

71
arch/sh/include/asm/fpu.h Normal file

@@ -0,0 +1,71 @@
#ifndef __ASM_SH_FPU_H
#define __ASM_SH_FPU_H
#ifndef __ASSEMBLY__
struct task_struct;
#ifdef CONFIG_SH_FPU
static inline void release_fpu(struct pt_regs *regs)
{
regs->sr |= SR_FD;
}
static inline void grab_fpu(struct pt_regs *regs)
{
regs->sr &= ~SR_FD;
}
extern void save_fpu(struct task_struct *__tsk);
extern void restore_fpu(struct task_struct *__tsk);
extern void fpu_state_restore(struct pt_regs *regs);
extern void __fpu_state_restore(void);
#else
#define save_fpu(tsk) do { } while (0)
#define restore_fpu(tsk) do { } while (0)
#define release_fpu(regs) do { } while (0)
#define grab_fpu(regs) do { } while (0)
#define fpu_state_restore(regs) do { } while (0)
#define __fpu_state_restore(regs) do { } while (0)
#endif
struct user_regset;
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern int init_fpu(struct task_struct *);
extern int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf);
static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
if (task_thread_info(tsk)->status & TS_USEDFPU) {
task_thread_info(tsk)->status &= ~TS_USEDFPU;
save_fpu(tsk);
release_fpu(regs);
} else
tsk->thread.fpu_counter = 0;
}
static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
preempt_disable();
__unlazy_fpu(tsk, regs);
preempt_enable();
}
static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
preempt_disable();
if (task_thread_info(tsk)->status & TS_USEDFPU) {
task_thread_info(tsk)->status &= ~TS_USEDFPU;
release_fpu(regs);
}
preempt_enable();
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_FPU_H */
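
/*
 * A sketch of the lazy-FPU lifecycle the helpers above implement: on
 * switch-out, state is saved only if the task actually used the FPU,
 * and SR.FD is set again so the next task's first FPU instruction
 * traps into fpu_state_restore(). The function name is illustrative.
 */
static inline void example_switch_out(struct task_struct *prev,
				      struct pt_regs *regs)
{
	unlazy_fpu(prev, regs);	/* save iff TS_USEDFPU, then set SR.FD */
}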


@@ -0,0 +1,18 @@
/*
* include/asm-sh/freq.h
*
* Copyright (C) 2002, 2003 Paul Mundt
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __ASM_SH_FREQ_H
#define __ASM_SH_FREQ_H
#ifdef __KERNEL__
#include <cpu/freq.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_FREQ_H */


@@ -0,0 +1,47 @@
#ifndef __ASM_SH_FTRACE_H
#define __ASM_SH_FTRACE_H
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#define FTRACE_SYSCALL_MAX NR_syscalls
#ifndef __ASSEMBLY__
extern void mcount(void);
#define MCOUNT_ADDR ((long)(mcount))
#ifdef CONFIG_DYNAMIC_FTRACE
#define CALL_ADDR ((long)(ftrace_call))
#define STUB_ADDR ((long)(ftrace_stub))
#define GRAPH_ADDR ((long)(ftrace_graph_call))
#define CALLER_ADDR ((long)(ftrace_caller))
#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4)
#define GRAPH_INSN_OFFSET ((CALLER_ADDR - GRAPH_ADDR) - 4)
struct dyn_arch_ftrace {
/* No extra data needed on sh */
};
#endif /* CONFIG_DYNAMIC_FTRACE */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/* 'addr' is the memory table address. */
return addr;
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#ifndef __ASSEMBLY__
/* arch/sh/kernel/return_address.c */
extern void *return_address(unsigned int);
#define ftrace_return_address(n) return_address(n)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_FTRACE_H */


@@ -0,0 +1,110 @@
#ifndef __ASM_SH_FUTEX_IRQ_H
#define __ASM_SH_FUTEX_IRQ_H
static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval + oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval | oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval & oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
int *oldval)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = get_user(*oldval, uaddr);
if (!ret)
ret = put_user(*oldval ^ oparg, uaddr);
local_irq_restore(flags);
return ret;
}
static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
u32 __user *uaddr,
u32 oldval, u32 newval)
{
unsigned long flags;
int ret;
u32 prev = 0;
local_irq_save(flags);
ret = get_user(prev, uaddr);
if (!ret && oldval == prev)
ret = put_user(newval, uaddr);
local_irq_restore(flags);
*uval = prev;
return ret;
}
#endif /* __ASM_SH_FUTEX_IRQ_H */


@@ -0,0 +1,78 @@
#ifndef __ASM_SH_FUTEX_H
#define __ASM_SH_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
/* XXX: UP variants, fix for SH-4A and SMP.. */
#include <asm/futex-irq.h>
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ADD:
ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
break;
case FUTEX_OP_OR:
ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
break;
case FUTEX_OP_ANDN:
ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
break;
case FUTEX_OP_XOR:
ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
break;
default:
ret = -ENOSYS;
break;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
#endif /* __KERNEL__ */
#endif /* __ASM_SH_FUTEX_H */
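
/*
 * A sketch of the encoded_op layout unpacked above, which matches the
 * FUTEX_OP() packing in <linux/futex.h>: op in bits 31..28, cmp in
 * 27..24, oparg in 23..12, cmparg in 11..0.
 */
static inline int example_decode(void)
{
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	/* futex_atomic_op_inuser() would add 1 and return (oldval > 0) */
	return ((encoded_op >> 28) & 7) == FUTEX_OP_ADD &&
	       ((encoded_op >> 24) & 15) == FUTEX_OP_CMP_GT;
}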


@@ -0,0 +1,54 @@
/*
* include/asm-sh/gpio.h
*
* Generic GPIO API and pinmux table support for SuperH.
*
* Copyright (c) 2008 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_GPIO_H
#define __ASM_SH_GPIO_H
#include <linux/kernel.h>
#include <linux/errno.h>
#if defined(CONFIG_CPU_SH3)
#include <cpu/gpio.h>
#endif
#define ARCH_NR_GPIOS 512
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
static inline int gpio_get_value(unsigned gpio)
{
return __gpio_get_value(gpio);
}
static inline void gpio_set_value(unsigned gpio, int value)
{
__gpio_set_value(gpio, value);
}
static inline int gpio_cansleep(unsigned gpio)
{
return __gpio_cansleep(gpio);
}
static inline int gpio_to_irq(unsigned gpio)
{
return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
{
return -ENOSYS;
}
#endif /* CONFIG_GPIOLIB */
#endif /* __ASM_SH_GPIO_H */
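
/*
 * A sketch of driving a board LED through the wrappers above. GPIO 42
 * is made up; real numbers come from the CPU/board pinmux tables.
 * gpio_request()/gpio_direction_output()/gpio_free() are the gpiolib
 * entry points pulled in via <asm-generic/gpio.h>.
 */
static int example_led_on(void)
{
	int err = gpio_request(42, "example-led");

	if (err)
		return err;
	err = gpio_direction_output(42, 1);
	if (err)
		gpio_free(42);
	return err;
}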


@@ -0,0 +1,16 @@
#ifndef __ASM_SH_HARDIRQ_H
#define __ASM_SH_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
extern void ack_bad_irq(unsigned int irq);
#endif /* __ASM_SH_HARDIRQ_H */


@@ -0,0 +1,251 @@
#ifndef __ASM_SH_HD64461
#define __ASM_SH_HD64461
/*
* Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2000 YAEGASHI Takeshi
*
* Hitachi HD64461 companion chip support
* (note: the manual's physical base 0x10000000 corresponds to 0xb0000000 here)
*/
/* Constants for PCMCIA mappings */
#define HD64461_PCC_WINDOW 0x01000000
/* Area 6 - Slot 0 - memory and/or IO card */
#define HD64461_IOBASE 0xb0000000
#define HD64461_IO_OFFSET(x) (HD64461_IOBASE + (x))
#define HD64461_PCC0_BASE HD64461_IO_OFFSET(0x8000000)
#define HD64461_PCC0_ATTR (HD64461_PCC0_BASE) /* 0xb8000000 */
#define HD64461_PCC0_COMM (HD64461_PCC0_BASE+HD64461_PCC_WINDOW) /* 0xb9000000 */
#define HD64461_PCC0_IO (HD64461_PCC0_BASE+2*HD64461_PCC_WINDOW) /* 0xba000000 */
/* Area 5 - Slot 1 - memory card only */
#define HD64461_PCC1_BASE HD64461_IO_OFFSET(0x4000000)
#define HD64461_PCC1_ATTR (HD64461_PCC1_BASE) /* 0xb4000000 */
#define HD64461_PCC1_COMM (HD64461_PCC1_BASE+HD64461_PCC_WINDOW) /* 0xb5000000 */
/* Standby Control Register for HD64461 */
#define HD64461_STBCR HD64461_IO_OFFSET(0x00000000)
#define HD64461_STBCR_CKIO_STBY 0x2000
#define HD64461_STBCR_SAFECKE_IST 0x1000
#define HD64461_STBCR_SLCKE_IST 0x0800
#define HD64461_STBCR_SAFECKE_OST 0x0400
#define HD64461_STBCR_SLCKE_OST 0x0200
#define HD64461_STBCR_SMIAST 0x0100
#define HD64461_STBCR_SLCDST 0x0080
#define HD64461_STBCR_SPC0ST 0x0040
#define HD64461_STBCR_SPC1ST 0x0020
#define HD64461_STBCR_SAFEST 0x0010
#define HD64461_STBCR_STM0ST 0x0008
#define HD64461_STBCR_STM1ST 0x0004
#define HD64461_STBCR_SIRST 0x0002
#define HD64461_STBCR_SURTST 0x0001
/* System Configuration Register */
#define HD64461_SYSCR HD64461_IO_OFFSET(0x02)
/* CPU Data Bus Control Register */
#define HD64461_SCPUCR HD64461_IO_OFFSET(0x04)
/* Base Address Register */
#define HD64461_LCDCBAR HD64461_IO_OFFSET(0x1000)
/* Line increment address */
#define HD64461_LCDCLOR HD64461_IO_OFFSET(0x1002)
/* Controls LCD controller */
#define HD64461_LCDCCR HD64461_IO_OFFSET(0x1004)
/* LCCDR control bits */
#define HD64461_LCDCCR_STBACK 0x0400 /* Standby Back */
#define HD64461_LCDCCR_STREQ 0x0100 /* Standby Request */
#define HD64461_LCDCCR_MOFF 0x0080 /* Memory Off */
#define HD64461_LCDCCR_REFSEL 0x0040 /* Refresh Select */
#define HD64461_LCDCCR_EPON 0x0020 /* End Power On */
#define HD64461_LCDCCR_SPON 0x0010 /* Start Power On */
/* Controls LCD (1) */
#define HD64461_LDR1 HD64461_IO_OFFSET(0x1010)
#define HD64461_LDR1_DON 0x01 /* Display On */
#define HD64461_LDR1_DINV 0x80 /* Display Invert */
/* Controls LCD (2) */
#define HD64461_LDR2 HD64461_IO_OFFSET(0x1012)
#define HD64461_LDHNCR HD64461_IO_OFFSET(0x1014) /* Number of horizontal characters */
#define HD64461_LDHNSR HD64461_IO_OFFSET(0x1016) /* Specify output start position + width of CL1 */
#define HD64461_LDVNTR HD64461_IO_OFFSET(0x1018) /* Specify total vertical lines */
#define HD64461_LDVNDR HD64461_IO_OFFSET(0x101a) /* specify number of display vertical lines */
#define HD64461_LDVSPR HD64461_IO_OFFSET(0x101c) /* specify vertical synchronization pos and AC nr */
/* Controls LCD (3) */
#define HD64461_LDR3 HD64461_IO_OFFSET(0x101e)
/* Palette Registers */
#define HD64461_CPTWAR HD64461_IO_OFFSET(0x1030) /* Color Palette Write Address Register */
#define HD64461_CPTWDR HD64461_IO_OFFSET(0x1032) /* Color Palette Write Data Register */
#define HD64461_CPTRAR HD64461_IO_OFFSET(0x1034) /* Color Palette Read Address Register */
#define HD64461_CPTRDR HD64461_IO_OFFSET(0x1036) /* Color Palette Read Data Register */
#define HD64461_GRDOR HD64461_IO_OFFSET(0x1040) /* Display Resolution Offset Register */
#define HD64461_GRSCR HD64461_IO_OFFSET(0x1042) /* Solid Color Register */
#define HD64461_GRCFGR HD64461_IO_OFFSET(0x1044) /* Accelerator Configuration Register */
#define HD64461_GRCFGR_ACCSTATUS 0x10 /* Accelerator Status */
#define HD64461_GRCFGR_ACCRESET 0x08 /* Accelerator Reset */
#define HD64461_GRCFGR_ACCSTART_BITBLT 0x06 /* Accelerator Start BITBLT */
#define HD64461_GRCFGR_ACCSTART_LINE 0x04 /* Accelerator Start Line Drawing */
#define HD64461_GRCFGR_COLORDEPTH16 0x01 /* Sets Colordepth 16 for Accelerator */
#define HD64461_GRCFGR_COLORDEPTH8 0x01 /* Sets Colordepth 8 for Accelerator */
/* Line Drawing Registers */
#define HD64461_LNSARH HD64461_IO_OFFSET(0x1046) /* Line Start Address Register (H) */
#define HD64461_LNSARL HD64461_IO_OFFSET(0x1048) /* Line Start Address Register (L) */
#define HD64461_LNAXLR HD64461_IO_OFFSET(0x104a) /* Axis Pixel Length Register */
#define HD64461_LNDGR HD64461_IO_OFFSET(0x104c) /* Diagonal Register */
#define HD64461_LNAXR HD64461_IO_OFFSET(0x104e) /* Axial Register */
#define HD64461_LNERTR HD64461_IO_OFFSET(0x1050) /* Start Error Term Register */
#define HD64461_LNMDR HD64461_IO_OFFSET(0x1052) /* Line Mode Register */
/* BitBLT Registers */
#define HD64461_BBTSSARH HD64461_IO_OFFSET(0x1054) /* Source Start Address Register (H) */
#define HD64461_BBTSSARL HD64461_IO_OFFSET(0x1056) /* Source Start Address Register (L) */
#define HD64461_BBTDSARH HD64461_IO_OFFSET(0x1058) /* Destination Start Address Register (H) */
#define HD64461_BBTDSARL HD64461_IO_OFFSET(0x105a) /* Destination Start Address Register (L) */
#define HD64461_BBTDWR HD64461_IO_OFFSET(0x105c) /* Destination Block Width Register */
#define HD64461_BBTDHR HD64461_IO_OFFSET(0x105e) /* Destination Block Height Register */
#define HD64461_BBTPARH HD64461_IO_OFFSET(0x1060) /* Pattern Start Address Register (H) */
#define HD64461_BBTPARL HD64461_IO_OFFSET(0x1062) /* Pattern Start Address Register (L) */
#define HD64461_BBTMARH HD64461_IO_OFFSET(0x1064) /* Mask Start Address Register (H) */
#define HD64461_BBTMARL HD64461_IO_OFFSET(0x1066) /* Mask Start Address Register (L) */
#define HD64461_BBTROPR HD64461_IO_OFFSET(0x1068) /* ROP Register */
#define HD64461_BBTMDR HD64461_IO_OFFSET(0x106a) /* BitBLT Mode Register */
/* PC Card Controller Registers */
/* Maps to Physical Area 6 */
#define HD64461_PCC0ISR HD64461_IO_OFFSET(0x2000) /* socket 0 interface status */
#define HD64461_PCC0GCR HD64461_IO_OFFSET(0x2002) /* socket 0 general control */
#define HD64461_PCC0CSCR HD64461_IO_OFFSET(0x2004) /* socket 0 card status change */
#define HD64461_PCC0CSCIER HD64461_IO_OFFSET(0x2006) /* socket 0 card status change interrupt enable */
#define HD64461_PCC0SCR HD64461_IO_OFFSET(0x2008) /* socket 0 software control */
/* Maps to Physical Area 5 */
#define HD64461_PCC1ISR HD64461_IO_OFFSET(0x2010) /* socket 1 interface status */
#define HD64461_PCC1GCR HD64461_IO_OFFSET(0x2012) /* socket 1 general control */
#define HD64461_PCC1CSCR HD64461_IO_OFFSET(0x2014) /* socket 1 card status change */
#define HD64461_PCC1CSCIER HD64461_IO_OFFSET(0x2016) /* socket 1 card status change interrupt enable */
#define HD64461_PCC1SCR HD64461_IO_OFFSET(0x2018) /* socket 1 software control */
/* PCC Interface Status Register */
#define HD64461_PCCISR_READY 0x80 /* card ready */
#define HD64461_PCCISR_MWP 0x40 /* card write-protected */
#define HD64461_PCCISR_VS2 0x20 /* voltage select pin 2 */
#define HD64461_PCCISR_VS1 0x10 /* voltage select pin 1 */
#define HD64461_PCCISR_CD2 0x08 /* card detect 2 */
#define HD64461_PCCISR_CD1 0x04 /* card detect 1 */
#define HD64461_PCCISR_BVD2 0x02 /* battery voltage detect 2 */
#define HD64461_PCCISR_BVD1 0x01 /* battery voltage detect 1 */
#define HD64461_PCCISR_PCD_MASK 0x0c /* card detect */
#define HD64461_PCCISR_BVD_MASK 0x03 /* battery voltage */
#define HD64461_PCCISR_BVD_BATGOOD 0x03 /* battery good */
#define HD64461_PCCISR_BVD_BATWARN 0x01 /* battery low warning */
#define HD64461_PCCISR_BVD_BATDEAD1 0x02 /* battery dead */
#define HD64461_PCCISR_BVD_BATDEAD2 0x00 /* battery dead */
/* PCC General Control Register */
#define HD64461_PCCGCR_DRVE 0x80 /* output drive */
#define HD64461_PCCGCR_PCCR 0x40 /* PC card reset */
#define HD64461_PCCGCR_PCCT 0x20 /* PC card type, 1=IO&mem, 0=mem */
#define HD64461_PCCGCR_VCC0 0x10 /* voltage control pin VCC0SEL0 */
#define HD64461_PCCGCR_PMMOD 0x08 /* memory mode */
#define HD64461_PCCGCR_PA25 0x04 /* pin A25 */
#define HD64461_PCCGCR_PA24 0x02 /* pin A24 */
#define HD64461_PCCGCR_REG 0x01 /* pin PCC0REG# */
/* PCC Card Status Change Register */
#define HD64461_PCCCSCR_SCDI 0x80 /* sw card detect intr */
#define HD64461_PCCCSCR_SRV1 0x40 /* reserved */
#define HD64461_PCCCSCR_IREQ 0x20 /* IREQ intr req */
#define HD64461_PCCCSCR_SC 0x10 /* STSCHG (status change) pin */
#define HD64461_PCCCSCR_CDC 0x08 /* CD (card detect) change */
#define HD64461_PCCCSCR_RC 0x04 /* READY change */
#define HD64461_PCCCSCR_BW 0x02 /* battery warning change */
#define HD64461_PCCCSCR_BD 0x01 /* battery dead change */
/* PCC Card Status Change Interrupt Enable Register */
#define HD64461_PCCCSCIER_CRE 0x80 /* change reset enable */
#define HD64461_PCCCSCIER_IREQE_MASK 0x60 /* IREQ enable */
#define HD64461_PCCCSCIER_IREQE_DISABLED 0x00 /* IREQ disabled */
#define HD64461_PCCCSCIER_IREQE_LEVEL 0x20 /* IREQ level-triggered */
#define HD64461_PCCCSCIER_IREQE_FALLING 0x40 /* IREQ falling-edge-trig */
#define HD64461_PCCCSCIER_IREQE_RISING 0x60 /* IREQ rising-edge-trig */
#define HD64461_PCCCSCIER_SCE 0x10 /* status change enable */
#define HD64461_PCCCSCIER_CDE 0x08 /* card detect change enable */
#define HD64461_PCCCSCIER_RE 0x04 /* ready change enable */
#define HD64461_PCCCSCIER_BWE 0x02 /* battery warn change enable */
#define HD64461_PCCCSCIER_BDE 0x01 /* battery dead change enable*/
/* PCC Software Control Register */
#define HD64461_PCCSCR_VCC1 0x02 /* voltage control pin 1 */
#define HD64461_PCCSCR_SWP 0x01 /* write protect */
/* PCC0 Output Pins Control Register */
#define HD64461_P0OCR HD64461_IO_OFFSET(0x202a)
/* PCC1 Output Pins Control Register */
#define HD64461_P1OCR HD64461_IO_OFFSET(0x202c)
/* PC Card General Control Register */
#define HD64461_PGCR HD64461_IO_OFFSET(0x202e)
/* Port Control Registers */
#define HD64461_GPACR HD64461_IO_OFFSET(0x4000) /* Port A - Handles IRDA/TIMER */
#define HD64461_GPBCR HD64461_IO_OFFSET(0x4002) /* Port B - Handles UART */
#define HD64461_GPCCR HD64461_IO_OFFSET(0x4004) /* Port C - Handles PCMCIA 1 */
#define HD64461_GPDCR HD64461_IO_OFFSET(0x4006) /* Port D - Handles PCMCIA 1 */
/* Port Control Data Registers */
#define HD64461_GPADR HD64461_IO_OFFSET(0x4010) /* A */
#define HD64461_GPBDR HD64461_IO_OFFSET(0x4012) /* B */
#define HD64461_GPCDR HD64461_IO_OFFSET(0x4014) /* C */
#define HD64461_GPDDR HD64461_IO_OFFSET(0x4016) /* D */
/* Interrupt Control Registers */
#define HD64461_GPAICR HD64461_IO_OFFSET(0x4020) /* A */
#define HD64461_GPBICR HD64461_IO_OFFSET(0x4022) /* B */
#define HD64461_GPCICR HD64461_IO_OFFSET(0x4024) /* C */
#define HD64461_GPDICR HD64461_IO_OFFSET(0x4026) /* D */
/* Interrupt Status Registers */
#define HD64461_GPAISR HD64461_IO_OFFSET(0x4040) /* A */
#define HD64461_GPBISR HD64461_IO_OFFSET(0x4042) /* B */
#define HD64461_GPCISR HD64461_IO_OFFSET(0x4044) /* C */
#define HD64461_GPDISR HD64461_IO_OFFSET(0x4046) /* D */
/* Interrupt Request Register & Interrupt Mask Register */
#define HD64461_NIRR HD64461_IO_OFFSET(0x5000)
#define HD64461_NIMR HD64461_IO_OFFSET(0x5002)
#define HD64461_IRQBASE OFFCHIP_IRQ_BASE
#define OFFCHIP_IRQ_BASE 64
#define HD64461_IRQ_NUM 16
#define HD64461_IRQ_UART (HD64461_IRQBASE+5)
#define HD64461_IRQ_IRDA (HD64461_IRQBASE+6)
#define HD64461_IRQ_TMU1 (HD64461_IRQBASE+9)
#define HD64461_IRQ_TMU0 (HD64461_IRQBASE+10)
#define HD64461_IRQ_GPIO (HD64461_IRQBASE+11)
#define HD64461_IRQ_AFE (HD64461_IRQBASE+12)
#define HD64461_IRQ_PCC1 (HD64461_IRQBASE+13)
#define HD64461_IRQ_PCC0 (HD64461_IRQBASE+14)
#define __IO_PREFIX hd64461
#include <asm/io_generic.h>
/* arch/sh/cchips/hd6446x/hd64461/setup.c */
void hd64461_register_irq_demux(int irq,
int (*demux) (int irq, void *dev), void *dev);
void hd64461_unregister_irq_demux(int irq);
#endif
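
/*
 * A sketch of poking these registers, assuming byte-wide access through
 * the __raw_* MMIO helpers; this mirrors what a PCMCIA socket driver
 * does to test for a card in socket 0 (the CD pins, and hence the ISR
 * bits, read low while a card is seated).
 */
static inline int example_pcc0_card_present(void)
{
	u8 isr = __raw_readb(HD64461_PCC0ISR);

	return (isr & HD64461_PCCISR_PCD_MASK) == 0;
}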


@@ -0,0 +1,18 @@
#ifndef __ASM_SH_HEARTBEAT_H
#define __ASM_SH_HEARTBEAT_H
#include <linux/timer.h>
#define HEARTBEAT_INVERTED (1 << 0)
struct heartbeat_data {
void __iomem *base;
unsigned char *bit_pos;
unsigned int nr_bits;
struct timer_list timer;
unsigned int regsize;
unsigned int mask;
unsigned long flags;
};
#endif /* __ASM_SH_HEARTBEAT_H */
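
/*
 * A sketch of how a board would fill this in (the bit positions are
 * made up; ARRAY_SIZE() comes from <linux/kernel.h>). The heartbeat
 * driver walks bit_pos[] to blink the LEDs, inverting the levels when
 * HEARTBEAT_INVERTED is set.
 */
static unsigned char example_led_bits[] = { 0, 1, 2, 3 };

static struct heartbeat_data example_heartbeat = {
	.bit_pos	= example_led_bits,
	.nr_bits	= ARRAY_SIZE(example_led_bits),
	.flags		= HEARTBEAT_INVERTED,
};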


@@ -0,0 +1,99 @@
#ifndef _ASM_SH_HUGETLB_H
#define _ASM_SH_HUGETLB_H
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
static inline void arch_clear_hugepage_flags(struct page *page)
{
clear_bit(PG_dcache_clean, &page->flags);
}
#endif /* _ASM_SH_HUGETLB_H */


@@ -0,0 +1,70 @@
#ifndef __ASM_SH_HW_BREAKPOINT_H
#define __ASM_SH_HW_BREAKPOINT_H
#include <uapi/asm/hw_breakpoint.h>
#define __ARCH_HW_BREAKPOINT_H
#include <linux/kdebug.h>
#include <linux/types.h>
struct arch_hw_breakpoint {
char *name; /* Contains name of the symbol to set bkpt */
unsigned long address;
u16 len;
u16 type;
};
enum {
SH_BREAKPOINT_READ = (1 << 1),
SH_BREAKPOINT_WRITE = (1 << 2),
SH_BREAKPOINT_RW = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
SH_BREAKPOINT_LEN_1 = (1 << 12),
SH_BREAKPOINT_LEN_2 = (1 << 13),
SH_BREAKPOINT_LEN_4 = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
SH_BREAKPOINT_LEN_8 = (1 << 14),
};
struct sh_ubc {
const char *name;
unsigned int num_events;
unsigned int trap_nr;
void (*enable)(struct arch_hw_breakpoint *, int);
void (*disable)(struct arch_hw_breakpoint *, int);
void (*enable_all)(unsigned long);
void (*disable_all)(void);
unsigned long (*active_mask)(void);
unsigned long (*triggered_mask)(void);
void (*clear_triggered_mask)(unsigned long);
struct clk *clk; /* optional interface clock / MSTP bit */
};
struct perf_event;
struct task_struct;
struct pmu;
/* Maximum number of UBC channels */
#define HBP_NUM 2
static inline int hw_breakpoint_slots(int type)
{
return HBP_NUM;
}
/* arch/sh/kernel/hw_breakpoint.c */
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void arch_fill_perf_breakpoint(struct perf_event *bp);
extern int register_sh_ubc(struct sh_ubc *);
extern struct pmu perf_ops_bp;
#endif /* __ASM_SH_HW_BREAKPOINT_H */
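
/*
 * A sketch of the terms above: a 4-byte write watchpoint (the watched
 * address is illustrative). The registered sh_ubc backend's ->enable()
 * is what eventually programs this into one of the HBP_NUM channels.
 */
static struct arch_hw_breakpoint example_watchpoint = {
	.address	= 0x8c001000,
	.len		= SH_BREAKPOINT_LEN_4,
	.type		= SH_BREAKPOINT_WRITE,
};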


@@ -0,0 +1,35 @@
#ifndef __ASM_SH_HW_IRQ_H
#define __ASM_SH_HW_IRQ_H
#include <linux/init.h>
#include <linux/sh_intc.h>
#include <linux/atomic.h>
extern atomic_t irq_err_count;
struct ipr_data {
unsigned char irq;
unsigned char ipr_idx; /* Index of the IPR register (into ipr_offsets) */
unsigned char shift; /* Number of bits to shift the data */
unsigned char priority; /* The priority */
};
struct ipr_desc {
unsigned long *ipr_offsets;
unsigned int nr_offsets;
struct ipr_data *ipr_data;
unsigned int nr_irqs;
struct irq_chip chip;
};
void register_ipr_controller(struct ipr_desc *);
void __init plat_irq_setup(void);
void __init plat_irq_setup_sh3(void);
void __init plat_irq_setup_pins(int mode);
enum { IRQ_MODE_IRQ, IRQ_MODE_IRQ7654, IRQ_MODE_IRQ3210,
IRQ_MODE_IRL7654_MASK, IRQ_MODE_IRL3210_MASK,
IRQ_MODE_IRL7654, IRQ_MODE_IRL3210 };
#endif /* __ASM_SH_HW_IRQ_H */
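
/*
 * A sketch of one IPR-routed interrupt in these terms (all numbers are
 * illustrative): IRQ 16 at priority 3, programmed into the IPR register
 * found at ipr_offsets[0], bit field starting at bit 4. A platform
 * collects such entries into an ipr_desc and hands the whole thing to
 * register_ipr_controller().
 */
static struct ipr_data example_ipr_data[] = {
	{ .irq = 16, .ipr_idx = 0, .shift = 4, .priority = 3 },
};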


@@ -0,0 +1,20 @@
/*
* MMIO/IRQ and platform data for SH7760 I2C channels
*/
#ifndef _I2C_SH7760_H_
#define _I2C_SH7760_H_
#define SH7760_I2C_DEVNAME "sh7760-i2c"
#define SH7760_I2C0_MMIO 0xFE140000
#define SH7760_I2C0_MMIOEND 0xFE14003B
#define SH7760_I2C1_MMIO 0xFE150000
#define SH7760_I2C1_MMIOEND 0xFE15003B
struct sh7760_i2c_platdata {
unsigned int speed_khz;
};
#endif
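
/*
 * A sketch of wiring channel 0 up as a platform device, assuming
 * <linux/platform_device.h> and <linux/ioport.h>; the bus speed is an
 * example value.
 */
static struct resource example_i2c0_res[] = {
	{
		.start	= SH7760_I2C0_MMIO,
		.end	= SH7760_I2C0_MMIOEND,
		.flags	= IORESOURCE_MEM,
	},
};

static struct sh7760_i2c_platdata example_i2c0_pdata = {
	.speed_khz	= 100,
};

static struct platform_device example_i2c0_dev = {
	.name		= SH7760_I2C_DEVNAME,
	.id		= 0,
	.resource	= example_i2c0_res,
	.num_resources	= ARRAY_SIZE(example_i2c0_res),
	.dev		= {
		.platform_data	= &example_i2c0_pdata,
	},
};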

390
arch/sh/include/asm/io.h Normal file

@@ -0,0 +1,390 @@
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
* Convention:
* read{b,w,l,q}/write{b,w,l,q} are for PCI,
* while in{b,w,l}/out{b,w,l} are for ISA
*
* In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
* and 'string' versions: ins{b,w,l}/outs{b,w,l}
*
* While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
* automatically, there are also __raw versions, which do not.
*/
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>
#ifdef __KERNEL__
#define __IO_PREFIX generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <mach/mangle-port.h>
#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a) (__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))
#define __raw_readb(a) (__chk_io_ptr(a), *(volatile u8 __force *)(a))
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
#define readb_relaxed(c) ({ u8 __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c) ({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c) ({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c) ({ u64 __v = ioswabq(__raw_readq(c)); __v; })
#define writeb_relaxed(v,c) ((void)__raw_writeb((__force u8)ioswabb(v),c))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)ioswabq(v),c))
#define readb(a) ({ u8 r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a) ({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a) ({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a) ({ u64 r_ = readq_relaxed(a); rmb(); r_; })
#define writeb(v,a) ({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a) ({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a) ({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a) ({ wmb(); writeq_relaxed((v),(a)); })
#define readsb(p,d,l) __raw_readsb(p,d,l)
#define readsw(p,d,l) __raw_readsw(p,d,l)
#define readsl(p,d,l) __raw_readsl(p,d,l)
#define writesb(p,d,l) __raw_writesb(p,d,l)
#define writesw(p,d,l) __raw_writesw(p,d,l)
#define writesl(p,d,l) __raw_writesl(p,d,l)
#define __BUILD_UNCACHED_IO(bwlq, type) \
static inline type read##bwlq##_uncached(unsigned long addr) \
{ \
type ret; \
jump_to_uncached(); \
ret = __raw_read##bwlq(addr); \
back_to_cached(); \
return ret; \
} \
\
static inline void write##bwlq##_uncached(type v, unsigned long addr) \
{ \
jump_to_uncached(); \
__raw_write##bwlq(v, addr); \
back_to_cached(); \
}
__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
#define __BUILD_MEMORY_STRING(pfx, bwlq, type) \
\
static inline void \
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr, \
unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
__raw_write##bwlq(*__addr, mem); \
__addr++; \
} \
} \
\
static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
void *addr, unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = __raw_read##bwlq(mem); \
__addr++; \
} \
}
__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif
__BUILD_MEMORY_STRING(__raw_, q, u64)
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Slowdown I/O port space accesses for antique hardware.
*/
#undef CONF_SLOWDOWN_IO
/*
* On SuperH I/O ports are memory mapped, so we access them using normal
* load/store instructions. sh_io_port_base is the virtual address to
* which all ports are being mapped.
*/
extern unsigned long sh_io_port_base;
static inline void __set_io_port_base(unsigned long pbase)
{
*(unsigned long *)&sh_io_port_base = pbase;
barrier();
}
#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif
#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
\
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
volatile type *__addr; \
\
__addr = __ioport_map(port, sizeof(type)); \
*__addr = val; \
slow; \
} \
\
static inline type pfx##in##bwlq##p(unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
__addr = __ioport_map(port, sizeof(type)); \
__val = *__addr; \
slow; \
\
return __val; \
}
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
#define BUILDIO_IOPORT(bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type)
BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)
#define __BUILD_IOPORT_STRING(bwlq, type) \
\
static inline void outs##bwlq(unsigned long port, const void *addr, \
unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
out##bwlq(*__addr, port); \
__addr++; \
} \
} \
\
static inline void ins##bwlq(unsigned long port, void *addr, \
unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = in##bwlq(port); \
__addr++; \
} \
}
__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
#else /* !CONFIG_HAS_IOPORT_MAP */
#include <asm/io_noioport.h>
#endif
#define IO_SPACE_LIMIT 0xffffffff
/* synco on SH-4A, otherwise a nop */
#define mmiowb() wmb()
/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);
/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
unsigned long long val);
#if !defined(CONFIG_MMU)
#define virt_to_phys(address) ((unsigned long)(address))
#define phys_to_virt(address) ((void *)(address))
#else
#define virt_to_phys(address) (__pa(address))
#define phys_to_virt(address) (__va(address))
#endif
/*
* On 32-bit SH, we traditionally have the whole physical address space
* mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
* not need to do anything but place the address in the proper segment.
* This is true for P1 and P2 addresses, as well as some P3 ones.
* However, most of the P3 addresses and newer cores using extended
* addressing need to map through page tables, so the ioremap()
* implementation becomes a bit more complicated.
*
* See arch/sh/mm/ioremap.c for additional notes on this.
*
* We cheat a bit and always return uncachable areas until we've fixed
* the drivers to handle caching properly.
*
* On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);
static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
phys_addr_t last_addr = offset + size - 1;
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached accesses for P1 addresses are done through P2.
* In the P3 case or for addresses outside of the 29-bit space,
* mapping must be done by the PMB or by using page tables.
*/
if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
u64 flags = pgprot_val(prot);
/*
* Anything using the legacy PTEA space attributes needs
* to be kicked down to page table mappings.
*/
if (unlikely(flags & _PAGE_PCC_MASK))
return NULL;
if (unlikely(flags & _PAGE_CACHABLE))
return (void __iomem *)P1SEGADDR(offset);
return (void __iomem *)P2SEGADDR(offset);
}
/* P4 addresses above the store queues are always mapped. */
if (unlikely(offset >= P3_ADDR_MAX))
return (void __iomem *)P4SEGADDR(offset);
#endif
return NULL;
}
static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
void __iomem *ret;
ret = __ioremap_trapped(offset, size);
if (ret)
return ret;
ret = __ioremap_29bit(offset, size, prot);
if (ret)
return ret;
return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}
static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif
#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
BUG();
return NULL;
}
static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif
#define ioremap_nocache ioremap
#define iounmap __iounmap
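/*
 * Minimal usage sketch (hypothetical driver code; the physical base and
 * register offset are invented for illustration):
 */
#if 0
void __iomem *regs = ioremap(0xfe200000, 0x100);
if (regs) {
	u32 id = __raw_readl(regs);	/* read a device ID register */
	iounmap(regs);
}
#endif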
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_IO_H */

View file

@ -0,0 +1,18 @@
/*
* Trivial I/O routine definitions, intentionally meant to be included
* multiple times. Ugly I/O routine concatenation helpers taken from
* alpha. Must be included _before_ io.h to avoid preprocessor-induced
* routine mismatch.
*/
#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
#ifndef __IO_PREFIX
#error "Don't include this header without a valid system prefix"
#endif
void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr, unsigned int size);
void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr);
void IO_CONCAT(__IO_PREFIX,mem_init)(void);
#undef __IO_PREFIX
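/*
 * Example: a user of this header first does "#define __IO_PREFIX generic"
 * (as arch/sh's io_generic.h does), so IO_CONCAT(__IO_PREFIX, ioport_map)
 * pastes to generic_ioport_map, and the declarations above become
 * generic_ioport_map(), generic_ioport_unmap() and generic_mem_init().
 */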

View file

@ -0,0 +1,63 @@
#ifndef __ASM_SH_IO_NOIOPORT_H
#define __ASM_SH_IO_NOIOPORT_H
static inline u8 inb(unsigned long addr)
{
BUG();
return -1;
}
static inline u16 inw(unsigned long addr)
{
BUG();
return -1;
}
static inline u32 inl(unsigned long addr)
{
BUG();
return -1;
}
static inline void outb(unsigned char x, unsigned long port)
{
BUG();
}
static inline void outw(unsigned short x, unsigned long port)
{
BUG();
}
static inline void outl(unsigned int x, unsigned long port)
{
BUG();
}
static inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
BUG();
return NULL;
}
static inline void ioport_unmap(void __iomem *addr)
{
BUG();
}
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
#define insb(a, b, c) BUG()
#define insw(a, b, c) BUG()
#define insl(a, b, c) BUG()
#define outsb(a, b, c) BUG()
#define outsw(a, b, c) BUG()
#define outsl(a, b, c) BUG()
#endif /* __ASM_SH_IO_NOIOPORT_H */

View file

@ -0,0 +1,58 @@
#ifndef __ASM_SH_IO_TRAPPED_H
#define __ASM_SH_IO_TRAPPED_H
#include <linux/list.h>
#include <linux/ioport.h>
#include <asm/page.h>
#define IO_TRAPPED_MAGIC 0xfeedbeef
struct trapped_io {
unsigned int magic;
struct resource *resource;
unsigned int num_resources;
unsigned int minimum_bus_width;
struct list_head list;
void __iomem *virt_base;
} __aligned(PAGE_SIZE);
#ifdef CONFIG_IO_TRAPPED
int register_trapped_io(struct trapped_io *tiop);
int handle_trapped_io(struct pt_regs *regs, unsigned long address);
void __iomem *match_trapped_io_handler(struct list_head *list,
unsigned long offset,
unsigned long size);
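/*
 * Illustrative sketch (hypothetical board code): describe a slow 16-bit
 * peripheral window and hand it to the trapped-I/O core so that wider
 * accesses get emulated via the fault path. The field usage below is an
 * assumption based on the structure definition, not a verbatim example.
 */
#if 0
static struct resource example_trap_res = DEFINE_RES_MEM(0xb4000000, 0x1000);
static struct trapped_io example_tiop = {
	.resource		= &example_trap_res,
	.num_resources		= 1,
	.minimum_bus_width	= 16,
};
/* ...in board setup: register_trapped_io(&example_tiop); */
#endif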
#ifdef CONFIG_HAS_IOMEM
extern struct list_head trapped_mem;
static inline void __iomem *
__ioremap_trapped(unsigned long offset, unsigned long size)
{
return match_trapped_io_handler(&trapped_mem, offset, size);
}
#else
#define __ioremap_trapped(offset, size) NULL
#endif
#ifdef CONFIG_HAS_IOPORT_MAP
extern struct list_head trapped_io;
static inline void __iomem *
__ioport_map_trapped(unsigned long offset, unsigned long size)
{
return match_trapped_io_handler(&trapped_io, offset, size);
}
#else
#define __ioport_map_trapped(offset, size) NULL
#endif
#else
#define register_trapped_io(tiop) (-1)
#define handle_trapped_io(tiop, address) 0
#define __ioremap_trapped(offset, size) NULL
#define __ioport_map_trapped(offset, size) NULL
#endif
#endif /* __ASM_SH_IO_TRAPPED_H */

72
arch/sh/include/asm/irq.h Normal file
View file

@ -0,0 +1,72 @@
#ifndef __ASM_SH_IRQ_H
#define __ASM_SH_IRQ_H
#include <linux/cpumask.h>
#include <asm/machvec.h>
/*
* Only legacy non-sparseirq platforms have to set a reasonably sane
* value here. sparseirq platforms allocate their irq_descs on the fly,
* so will expand automatically based on the number of registered IRQs.
*/
#ifdef CONFIG_SPARSE_IRQ
# define NR_IRQS 8
#else
# define NR_IRQS 512
#endif
/*
* This is a special IRQ number used to indicate that no IRQ has been
* triggered and that the dispatch should simply be ignored. It can
* happen with IRQ auto-distribution when multiple CPUs are woken up
* and signalled in parallel.
*/
#define NO_IRQ_IGNORE ((unsigned int)-1)
/*
* Simple Mask Register Support
*/
extern void make_maskreg_irq(unsigned int irq);
extern unsigned short *irq_mask_register;
/*
* PINT IRQs
*/
void init_IRQ_pint(void);
void make_imask_irq(unsigned int irq);
static inline int generic_irq_demux(int irq)
{
return irq;
}
#define irq_demux(irq) sh_mv.mv_irq_demux(irq)
void init_IRQ(void);
void migrate_irqs(void);
asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
#ifdef CONFIG_IRQSTACKS
extern void irq_ctx_init(int cpu);
extern void irq_ctx_exit(int cpu);
# define __ARCH_HAS_DO_SOFTIRQ
#else
# define irq_ctx_init(cpu) do { } while (0)
# define irq_ctx_exit(cpu) do { } while (0)
#endif
#ifdef CONFIG_INTC_BALANCING
extern unsigned int irq_lookup(unsigned int irq);
extern void irq_finish(unsigned int irq);
#else
#define irq_lookup(irq) (irq)
#define irq_finish(irq) do { } while (0)
#endif
#include <asm-generic/irq.h>
#ifdef CONFIG_CPU_SH5
#include <cpu/irq.h>
#endif
#endif /* __ASM_SH_IRQ_H */

View file

@ -0,0 +1,9 @@
#ifndef __ASM_SH_IRQFLAGS_H
#define __ASM_SH_IRQFLAGS_H
#define ARCH_IRQ_DISABLED 0xf0
#define ARCH_IRQ_ENABLED 0x00
#include <asm-generic/irqflags.h>
#endif /* __ASM_SH_IRQFLAGS_H */

View file

@ -0,0 +1,17 @@
#ifndef __ASM_SH_KDEBUG_H
#define __ASM_SH_KDEBUG_H
/* Grossly misnamed. */
enum die_val {
DIE_TRAP,
DIE_NMI,
DIE_OOPS,
DIE_BREAKPOINT,
DIE_SSTEP,
};
/* arch/sh/kernel/dumpstack.c */
extern void printk_address(unsigned long address, int reliable);
extern void dump_mem(const char *str, unsigned long bottom, unsigned long top);
#endif /* __ASM_SH_KDEBUG_H */

View file

@ -0,0 +1,70 @@
#ifndef __ASM_SH_KEXEC_H
#define __ASM_SH_KEXEC_H
#include <asm/ptrace.h>
#include <asm/string.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
* i.e. the maximum page that is mapped directly into kernel memory
* and for which kmap is not required.
*
* Someone correct me if FIXADDR_START - PAGE_OFFSET is not the correct
* calculation for the amount of memory directly mappable into the
* kernel memory space.
*/
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH
#ifdef CONFIG_KEXEC
/* arch/sh/kernel/machine_kexec.c */
void reserve_crashkernel(void);
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else {
__asm__ __volatile__ ("mov r0, %0" : "=r" (newregs->regs[0]));
__asm__ __volatile__ ("mov r1, %0" : "=r" (newregs->regs[1]));
__asm__ __volatile__ ("mov r2, %0" : "=r" (newregs->regs[2]));
__asm__ __volatile__ ("mov r3, %0" : "=r" (newregs->regs[3]));
__asm__ __volatile__ ("mov r4, %0" : "=r" (newregs->regs[4]));
__asm__ __volatile__ ("mov r5, %0" : "=r" (newregs->regs[5]));
__asm__ __volatile__ ("mov r6, %0" : "=r" (newregs->regs[6]));
__asm__ __volatile__ ("mov r7, %0" : "=r" (newregs->regs[7]));
__asm__ __volatile__ ("mov r8, %0" : "=r" (newregs->regs[8]));
__asm__ __volatile__ ("mov r9, %0" : "=r" (newregs->regs[9]));
__asm__ __volatile__ ("mov r10, %0" : "=r" (newregs->regs[10]));
__asm__ __volatile__ ("mov r11, %0" : "=r" (newregs->regs[11]));
__asm__ __volatile__ ("mov r12, %0" : "=r" (newregs->regs[12]));
__asm__ __volatile__ ("mov r13, %0" : "=r" (newregs->regs[13]));
__asm__ __volatile__ ("mov r14, %0" : "=r" (newregs->regs[14]));
__asm__ __volatile__ ("mov r15, %0" : "=r" (newregs->regs[15]));
__asm__ __volatile__ ("sts pr, %0" : "=r" (newregs->pr));
__asm__ __volatile__ ("sts macl, %0" : "=r" (newregs->macl));
__asm__ __volatile__ ("sts mach, %0" : "=r" (newregs->mach));
__asm__ __volatile__ ("stc gbr, %0" : "=r" (newregs->gbr));
__asm__ __volatile__ ("stc sr, %0" : "=r" (newregs->sr));
newregs->pc = (unsigned long)current_text_addr();
}
}
#else
static inline void reserve_crashkernel(void) { }
#endif /* CONFIG_KEXEC */
#endif /* __ASM_SH_KEXEC_H */

View file

@ -0,0 +1,37 @@
#ifndef __ASM_SH_KGDB_H
#define __ASM_SH_KGDB_H
#include <asm/cacheflush.h>
#include <asm/ptrace.h>
enum regnames {
GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7,
GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15,
GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR,
};
#define _GP_REGS 16
#define _EXTRA_REGS 7
#define GDB_SIZEOF_REG sizeof(u32)
#define DBG_MAX_REG_NUM (_GP_REGS + _EXTRA_REGS)
#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
static inline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ("trapa #0x3c\n");
}
#define BREAK_INSTR_SIZE 2
#define BUFMAX 2048
#ifdef CONFIG_SMP
# define CACHE_FLUSH_IS_SAFE 0
#else
# define CACHE_FLUSH_IS_SAFE 1
#endif
#define GDB_ADJUSTS_BREAK_OFFSET
#endif /* __ASM_SH_KGDB_H */

View file

@ -0,0 +1,14 @@
#ifndef __SH_KMAP_TYPES_H
#define __SH_KMAP_TYPES_H
/* Dummy header just to define km_type. */
#ifdef CONFIG_DEBUG_HIGHMEM
#define __WITH_KM_FENCE
#endif
#include <asm-generic/kmap_types.h>
#undef __WITH_KM_FENCE
#endif

View file

@ -0,0 +1,57 @@
#ifndef __ASM_SH_KPROBES_H
#define __ASM_SH_KPROBES_H
#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
typedef insn_size_t kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xc33a
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
struct kprobe;
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);
void jprobe_return_end(void);
/* Architecture-specific copy of the original instruction */
struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t insn[MAX_INSN_SIZE];
};
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long jprobe_saved_r15;
struct pt_regs jprobe_saved_regs;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_handle_illslot(unsigned long pc);
#else
#define kprobe_handle_illslot(pc) (-1)
#endif /* CONFIG_KPROBES */
#endif /* __ASM_SH_KPROBES_H */

View file

@ -0,0 +1,7 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define __ALIGN .balign 4
#define __ALIGN_STR ".balign 4"
#endif

View file

@ -0,0 +1,43 @@
/*
* include/asm-sh/machvec.h
*
* Copyright 2000 Stuart Menefy (stuart.menefy@st.com)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*/
#ifndef _ASM_SH_MACHVEC_H
#define _ASM_SH_MACHVEC_H
#include <linux/types.h>
#include <linux/time.h>
#include <generated/machtypes.h>
struct sh_machine_vector {
void (*mv_setup)(char **cmdline_p);
const char *mv_name;
int (*mv_irq_demux)(int irq);
void (*mv_init_irq)(void);
#ifdef CONFIG_HAS_IOPORT_MAP
void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
void (*mv_ioport_unmap)(void __iomem *);
#endif
int (*mv_clk_init)(void);
int (*mv_mode_pins)(void);
void (*mv_mem_init)(void);
void (*mv_mem_reserve)(void);
};
extern struct sh_machine_vector sh_mv;
#define get_system_type() sh_mv.mv_name
#define __initmv \
__used __section(.machvec.init)
#endif /* _ASM_SH_MACHVEC_H */

View file

@ -0,0 +1,7 @@
/*
* Machine dependent access functions for RTC registers.
*/
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
#endif /* _ASM_MC146818RTC_H */

106
arch/sh/include/asm/mmu.h Normal file
View file

@ -0,0 +1,106 @@
#ifndef __MMU_H
#define __MMU_H
/*
* Privileged Space Mapping Buffer (PMB) definitions
*/
#define PMB_PASCR 0xff000070
#define PMB_IRMCR 0xff000078
#define PASCR_SE 0x80000000
#define PMB_ADDR 0xf6100000
#define PMB_DATA 0xf7100000
#define NR_PMB_ENTRIES 16
#define PMB_E_MASK 0x0000000f
#define PMB_E_SHIFT 8
#define PMB_PFN_MASK 0xff000000
#define PMB_SZ_16M 0x00000000
#define PMB_SZ_64M 0x00000010
#define PMB_SZ_128M 0x00000080
#define PMB_SZ_512M 0x00000090
#define PMB_SZ_MASK PMB_SZ_512M
#define PMB_C 0x00000008
#define PMB_WT 0x00000001
#define PMB_UB 0x00000200
#define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB)
#define PMB_V 0x00000100
#define PMB_NO_ENTRY (-1)
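/*
 * Worked example, derived from the definitions above: the address-array
 * slot for PMB entry n sits at PMB_ADDR | ((n & PMB_E_MASK) << PMB_E_SHIFT),
 * so entry 3 is accessed at 0xf6100000 | (3 << 8) = 0xf6100300, with the
 * matching data-array slot at 0xf7100300.
 */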
#ifndef __ASSEMBLY__
#include <linux/errno.h>
#include <linux/threads.h>
#include <asm/page.h>
/* Default "unsigned long" context */
typedef unsigned long mm_context_id_t[NR_CPUS];
typedef struct {
#ifdef CONFIG_MMU
mm_context_id_t id;
void *vdso;
#else
unsigned long end_brk;
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
unsigned long exec_fdpic_loadmap;
unsigned long interp_fdpic_loadmap;
#endif
} mm_context_t;
#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
bool __in_29bit_mode(void);
void pmb_init(void);
int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
unsigned long size, pgprot_t prot);
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
pgprot_t prot, void *caller);
int pmb_unmap(void __iomem *addr);
#else
static inline int
pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
unsigned long size, pgprot_t prot)
{
return -EINVAL;
}
static inline void __iomem *
pmb_remap_caller(phys_addr_t phys, unsigned long size,
pgprot_t prot, void *caller)
{
return NULL;
}
static inline int pmb_unmap(void __iomem *addr)
{
return -EINVAL;
}
#define pmb_init(addr) do { } while (0)
#ifdef CONFIG_29BIT
#define __in_29bit_mode() (1)
#else
#define __in_29bit_mode() (0)
#endif
#endif /* CONFIG_PMB */
static inline void __iomem *
pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
{
return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
}
#endif /* __ASSEMBLY__ */
#endif /* __MMU_H */

View file

@ -0,0 +1,190 @@
/*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 - 2007 Paul Mundt
*
* ASID handling idea taken from MIPS implementation.
*/
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H
#ifdef __KERNEL__
#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm-generic/mm_hooks.h>
/*
* The MMU "context" consists of two things:
* (a) TLB cache version (or round, cycle, whatever expression you like)
* (b) ASID (Address Space IDentifier)
*/
#ifdef CONFIG_CPU_HAS_PTEAEX
#define MMU_CONTEXT_ASID_MASK 0x0000ffff
#else
#define MMU_CONTEXT_ASID_MASK 0x000000ff
#endif
#define MMU_CONTEXT_VERSION_MASK (~0UL & ~MMU_CONTEXT_ASID_MASK)
#define MMU_CONTEXT_FIRST_VERSION (MMU_CONTEXT_ASID_MASK + 1)
/* Impossible ASID value, to differentiate from NO_CONTEXT. */
#define MMU_NO_ASID MMU_CONTEXT_FIRST_VERSION
#define NO_CONTEXT 0UL
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
#ifdef CONFIG_MMU
#define cpu_context(cpu, mm) ((mm)->context.id[cpu])
#define cpu_asid(cpu, mm) \
(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
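/*
 * Worked example with the 8-bit ASID layout: a context value of 0x00000305
 * splits into version 0x300 (the upper bits) and ASID 0x05 (the low byte);
 * cpu_asid() masks off the version and hands only 0x05 to the hardware.
 */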
/*
* Virtual Page Number mask
*/
#define MMU_VPN_MASK 0xfffff000
#if defined(CONFIG_SUPERH32)
#include <asm/mmu_context_32.h>
#else
#include <asm/mmu_context_64.h>
#endif
/*
* Get MMU context if needed.
*/
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
unsigned long asid = asid_cache(cpu);
/* Check if we have an old version of the context. */
if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
/* It's up to date, do nothing */
return;
/* It's old; we need to get a new context with a new version. */
if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
/*
* We have exhausted the ASIDs for this version.
* Flush all TLB entries and start a new cycle.
*/
local_flush_tlb_all();
#ifdef CONFIG_SUPERH64
/*
* The SH-5 cache uses the ASIDs, requiring both the I and D
* cache to be flushed when the ASID is exhausted. Weak.
*/
flush_cache_all();
#endif
/*
* Fix the version; note that we avoid version #0
* so it can be distinguished from NO_CONTEXT.
*/
if (!asid)
asid = MMU_CONTEXT_FIRST_VERSION;
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
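/*
 * Rollover sketch for the 8-bit ASID case: incrementing an asid_cache of
 * 0x01ff gives 0x0200, whose ASID byte is zero, so the local TLB is
 * flushed and allocation continues under the new version 0x0200, starting
 * again from ASID 0.
 */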
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
int i;
for (i = 0; i < num_online_cpus(); i++)
cpu_context(i, mm) = NO_CONTEXT;
return 0;
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
get_mmu_context(mm, cpu);
set_asid(cpu_asid(cpu, mm));
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
if (likely(prev != next)) {
cpumask_set_cpu(cpu, mm_cpumask(next));
set_TTB(next->pgd);
activate_context(next, cpu);
} else
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
activate_context(next, cpu);
}
#define activate_mm(prev, next) switch_mm((prev),(next),NULL)
#define deactivate_mm(tsk,mm) do { } while (0)
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#else
#define set_asid(asid) do { } while (0)
#define get_asid() (0)
#define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid) (0)
#define set_TTB(pgd) do { } while (0)
#define get_TTB() (0)
#include <asm-generic/mmu_context.h>
#endif /* CONFIG_MMU */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
* If this processor has an MMU, we need methods to turn it off/on ..
* paging_init() will also have to be updated for the processor in
* question.
*/
static inline void enable_mmu(void)
{
unsigned int cpu = smp_processor_id();
/* Enable MMU */
__raw_writel(MMU_CONTROL_INIT, MMUCR);
ctrl_barrier();
if (asid_cache(cpu) == NO_CONTEXT)
asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}
static inline void disable_mmu(void)
{
unsigned long cr;
cr = __raw_readl(MMUCR);
cr &= ~MMU_CONTROL_INIT;
__raw_writel(cr, MMUCR);
ctrl_barrier();
}
#else
/*
* MMU control handlers for processors lacking memory
* management hardware.
*/
#define enable_mmu() do { } while (0)
#define disable_mmu() do { } while (0)
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */

View file

@ -0,0 +1,59 @@
#ifndef __ASM_SH_MMU_CONTEXT_32_H
#define __ASM_SH_MMU_CONTEXT_32_H
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Do nothing */
}
#ifdef CONFIG_CPU_HAS_PTEAEX
static inline void set_asid(unsigned long asid)
{
__raw_writel(asid, MMU_PTEAEX);
}
static inline unsigned long get_asid(void)
{
return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
}
#else
static inline void set_asid(unsigned long asid)
{
unsigned long __dummy;
__asm__ __volatile__ ("mov.l %2, %0\n\t"
"and %3, %0\n\t"
"or %1, %0\n\t"
"mov.l %0, %2"
: "=&r" (__dummy)
: "r" (asid), "m" (__m(MMU_PTEH)),
"r" (0xffffff00));
}
static inline unsigned long get_asid(void)
{
unsigned long asid;
__asm__ __volatile__ ("mov.l %1, %0"
: "=r" (asid)
: "m" (__m(MMU_PTEH)));
asid &= MMU_CONTEXT_ASID_MASK;
return asid;
}
#endif /* CONFIG_CPU_HAS_PTEAEX */
/* MMU_TTB is used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)
{
__raw_writel((unsigned long)pgd, MMU_TTB);
}
static inline pgd_t *get_TTB(void)
{
return (pgd_t *)__raw_readl(MMU_TTB);
}
#endif /* __ASM_SH_MMU_CONTEXT_32_H */

View file

@ -0,0 +1,78 @@
#ifndef __ASM_SH_MMU_CONTEXT_64_H
#define __ASM_SH_MMU_CONTEXT_64_H
/*
* sh64-specific mmu_context interface.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <cpu/registers.h>
#include <asm/cacheflush.h>
#define SR_ASID_MASK 0xffffffffff00ffffULL
#define SR_ASID_SHIFT 16
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Well, at least free TLB entries */
flush_tlb_mm(mm);
}
static inline unsigned long get_asid(void)
{
unsigned long long sr;
asm volatile ("getcon " __SR ", %0\n\t"
: "=r" (sr));
sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
unsigned long long sr, pc;
asm volatile ("getcon " __SR ", %0" : "=r" (sr));
sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
/*
* This function may be inlined, so to avoid the assembler
* reporting duplicate symbols we use the gas trick of generating
* local labels numerically and referencing them forward.
*/
asm volatile ("movi 1, %1\n\t"
"shlli %1, 28, %1\n\t"
"or %0, %1, %1\n\t"
"putcon %1, " __SR "\n\t"
"putcon %0, " __SSR "\n\t"
"movi 1f, %1\n\t"
"ori %1, 1 , %1\n\t"
"putcon %1, " __SPC "\n\t"
"rte\n"
"1:\n\t"
: "=r" (sr), "=r" (pc) : "0" (sr));
}
/* arch/sh/kernel/cpu/sh5/entry.S */
extern unsigned long switch_and_save_asid(unsigned long new_asid);
/* No spare register to twiddle, so use a software cache */
extern pgd_t *mmu_pdtp_cache;
#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
#define get_TTB() (mmu_pdtp_cache)
#endif /* __ASM_SH_MMU_CONTEXT_64_H */

View file

@ -0,0 +1,47 @@
#ifndef __ASM_SH_MMZONE_H
#define __ASM_SH_MMZONE_H
#ifdef __KERNEL__
#ifdef CONFIG_NEED_MULTIPLE_NODES
#include <linux/numa.h>
extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
static inline int pfn_to_nid(unsigned long pfn)
{
int nid;
for (nid = 0; nid < MAX_NUMNODES; nid++)
if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
break;
return nid;
}
static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
{
return NODE_DATA(pfn_to_nid(pfn));
}
/* arch/sh/mm/numa.c */
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end);
#else
static inline void
setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */
/* Platform specific mem init */
void __init plat_mem_setup(void);
/* arch/sh/kernel/setup.c */
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
/* arch/sh/mm/init.c */
void __init allocate_pgdat(unsigned int nid);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMZONE_H */

View file

@ -0,0 +1,41 @@
#ifndef _ASM_SH_MODULE_H
#define _ASM_SH_MODULE_H
#include <asm-generic/module.h>
#ifdef CONFIG_DWARF_UNWINDER
struct mod_arch_specific {
struct list_head fde_list;
struct list_head cie_list;
};
#endif
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# ifdef CONFIG_CPU_SH2
# define MODULE_PROC_FAMILY "SH2LE "
# elif defined CONFIG_CPU_SH3
# define MODULE_PROC_FAMILY "SH3LE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4LE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5LE "
# else
# error unknown processor family
# endif
#else
# ifdef CONFIG_CPU_SH2
# define MODULE_PROC_FAMILY "SH2BE "
# elif defined CONFIG_CPU_SH3
# define MODULE_PROC_FAMILY "SH3BE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4BE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5BE "
# else
# error unknown processor family
# endif
#endif
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#endif /* _ASM_SH_MODULE_H */

View file

@ -0,0 +1,109 @@
/*
* arch/sh/include/asm/mutex-llsc.h
*
* SH-4A optimized mutex locking primitives
*
* Please look into asm-generic/mutex-xchg.h for a formal definition.
*/
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H
/*
* Attempting to lock a mutex on SH4A is done as on the ARMv6+ architectures,
* with a bastardized atomic decrement (it is not a reliable atomic decrement,
* but it satisfies the defined semantics for our purpose, while being
* smaller and faster than a real atomic decrement or atomic swap).
* The idea is to attempt decrementing the lock value only once. If, once
* decremented, it isn't zero, or if its store-back fails due to a dispute
* on the exclusive store, we simply bail out immediately through the slow
* path, where the lock will be reattempted until it succeeds.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
int __done, __res;
__asm__ __volatile__ (
"movli.l @%2, %0 \n"
"add #-1, %0 \n"
"movco.l %0, @%2 \n"
"movt %1 \n"
: "=&z" (__res), "=&r" (__done)
: "r" (&(count)->counter)
: "t");
if (unlikely(!__done || __res != 0))
fail_fn(count);
}
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
int __done, __res;
__asm__ __volatile__ (
"movli.l @%2, %0 \n"
"add #-1, %0 \n"
"movco.l %0, @%2 \n"
"movt %1 \n"
: "=&z" (__res), "=&r" (__done)
: "r" (&(count)->counter)
: "t");
if (unlikely(!__done || __res != 0))
__res = -1;
return __res;
}
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
int __done, __res;
__asm__ __volatile__ (
"movli.l @%2, %0 \n\t"
"add #1, %0 \n\t"
"movco.l %0, @%2 \n\t"
"movt %1 \n\t"
: "=&z" (__res), "=&r" (__done)
: "r" (&(count)->counter)
: "t");
if (unlikely(!__done || __res <= 0))
fail_fn(count);
}
/*
* If the unlock was done on a contended lock, or if the unlock simply fails
* then the mutex remains locked.
*/
#define __mutex_slowpath_needs_to_unlock() 1
/*
* For __mutex_fastpath_trylock we do an atomic decrement, check the
* result, and put it in the __res variable.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
int __res, __orig;
__asm__ __volatile__ (
"1: movli.l @%2, %0 \n\t"
"dt %0 \n\t"
"movco.l %0,@%2 \n\t"
"bf 1b \n\t"
"cmp/eq #0,%0 \n\t"
"bt 2f \n\t"
"mov #0, %1 \n\t"
"bf 3f \n\t"
"2: mov #1, %1 \n\t"
"3: "
: "=&z" (__orig), "=&r" (__res)
: "r" (&count->counter)
: "t");
return __res;
}
#endif /* __ASM_SH_MUTEX_LLSC_H */

View file

@ -0,0 +1,12 @@
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#if defined(CONFIG_CPU_SH4A)
#include <asm/mutex-llsc.h>
#else
#include <asm-generic/mutex-dec.h>
#endif

206
arch/sh/include/asm/page.h Normal file
View file

@ -0,0 +1,206 @@
#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H
/*
* Copyright (C) 1999 Niibe Yutaka
*/
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT 13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT 14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT 16
#else
# error "Bogus kernel page size?"
#endif
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PTE_MASK PAGE_MASK
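/*
 * Example: with CONFIG_PAGE_SIZE_4KB, PAGE_SHIFT is 12, PAGE_SIZE is
 * 0x1000 and PAGE_MASK is 0xfffff000, so (addr & PAGE_MASK) rounds an
 * address down to its page base.
 */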
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT 16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT 18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT 20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT 26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#endif
#ifndef __ASSEMBLY__
#include <asm/uncached.h>
extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end, memory_limit;
static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
return (addr1 ^ addr2) & shm_align_mask;
}
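/*
 * Example (with a hypothetical shm_align_mask of 0x3fff, i.e. a 16k alias
 * window): pages_do_alias(0x10000000, 0x10002000) is non-zero because the
 * two addresses fall into different cache colours, while 0x10000000 and
 * 0x10004000 share a colour and return zero.
 */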
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
struct page;
struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage clear_user_highpage
/*
* These are used to make use of C type-checking..
*/
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#endif
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
typedef struct page *pgtable_t;
#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK)
#endif /* !__ASSEMBLY__ */
/*
* __MEMORY_START and SIZE are the physical addresses and size of RAM.
*/
#define __MEMORY_START CONFIG_MEMORY_START
#define __MEMORY_SIZE CONFIG_MEMORY_SIZE
/*
* PHYSICAL_OFFSET is the offset in physical memory where the base
* of the kernel is loaded.
*/
#ifdef CONFIG_PHYSICAL_START
#define PHYSICAL_OFFSET (CONFIG_PHYSICAL_START - __MEMORY_START)
#else
#define PHYSICAL_OFFSET 0
#endif
/*
* PAGE_OFFSET is the virtual address of the start of kernel address
* space.
*/
#define PAGE_OFFSET CONFIG_PAGE_OFFSET
/*
* Virtual to physical RAM address translation.
*
* In 29 bit mode, the physical offset of RAM from address 0 is visible in
* the kernel virtual address space, and thus we don't have to take
* this into account when translating. However in 32 bit mode this offset
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
#ifdef CONFIG_PMB
#define ___pa(x) ((x)-PAGE_OFFSET+__MEMORY_START)
#define ___va(x) ((x)+PAGE_OFFSET-__MEMORY_START)
#else
#define ___pa(x) ((x)-PAGE_OFFSET)
#define ___va(x) ((x)+PAGE_OFFSET)
#endif
#ifndef __ASSEMBLY__
#define __pa(x) ___pa((unsigned long)x)
#define __va(x) (void *)___va((unsigned long)x)
#endif /* !__ASSEMBLY__ */
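/*
 * Worked example for the non-PMB case, assuming PAGE_OFFSET is 0x80000000:
 * __pa(0x80001000) evaluates to 0x00001000, and __va(0x00001000) maps back
 * to (void *)0x80001000. With CONFIG_PMB the same translation additionally
 * adds or subtracts __MEMORY_START, as described above.
 */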
#ifdef CONFIG_UNCACHED_MAPPING
#if defined(CONFIG_29BIT)
#define UNCAC_ADDR(addr) P2SEGADDR(addr)
#define CAC_ADDR(addr) P1SEGADDR(addr)
#else
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
#endif
#else
#define UNCAC_ADDR(addr) ((addr))
#define CAC_ADDR(addr) ((addr))
#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/*
* PFN = physical frame number (ie PFN 0 == physical address 0)
* PFN_START is the PFN of the first page of RAM. By defining this we
* don't have struct page entries for the portion of address space
* between physical address 0 and the start of RAM.
*/
#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET (PFN_START)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
/*
* Some drivers need to perform DMA into kmalloc'ed buffers
* and so we have to increase the kmalloc minalign for this.
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_SUPERH64
/*
* While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
* happily generate {ld/st}.q pairs, requiring us to have 8-byte
* alignment to avoid traps. The kmalloc alignment is guaranteed by
* virtue of L1_CACHE_BYTES, requiring this to only be special cased
* for slab caches.
*/
#define ARCH_SLAB_MINALIGN 8
#endif
#endif /* __ASM_SH_PAGE_H */

131
arch/sh/include/asm/pci.h Normal file
View file

@ -0,0 +1,131 @@
#ifndef __ASM_SH_PCI_H
#define __ASM_SH_PCI_H
#ifdef __KERNEL__
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
#define pcibios_assign_all_busses() 1
/*
* A board can define one or more PCI channels that represent built-in (or
* external) PCI controllers.
*/
struct pci_channel {
struct pci_channel *next;
struct pci_bus *bus;
struct pci_ops *pci_ops;
struct resource *resources;
unsigned int nr_resources;
unsigned long io_offset;
unsigned long mem_offset;
unsigned long reg_base;
unsigned long io_map_base;
unsigned int index;
unsigned int need_domain_info;
/* Optional error handling */
struct timer_list err_timer, serr_timer;
unsigned int err_irq, serr_irq;
};
/* arch/sh/drivers/pci/pci.c */
extern raw_spinlock_t pci_config_lock;
extern int register_pci_controller(struct pci_channel *hose);
extern void pcibios_report_status(unsigned int status_mask, int warn);
/* arch/sh/drivers/pci/common.c */
extern int early_read_config_byte(struct pci_channel *hose, int top_bus,
int bus, int devfn, int offset, u8 *value);
extern int early_read_config_word(struct pci_channel *hose, int top_bus,
int bus, int devfn, int offset, u16 *value);
extern int early_read_config_dword(struct pci_channel *hose, int top_bus,
int bus, int devfn, int offset, u32 *value);
extern int early_write_config_byte(struct pci_channel *hose, int top_bus,
int bus, int devfn, int offset, u8 value);
extern int early_write_config_word(struct pci_channel *hose, int top_bus,
int bus, int devfn, int offset, u16 value);
extern int early_write_config_dword(struct pci_channel *hose, int top_bus,
int bus, int devfn, int offset, u32 value);
extern void pcibios_enable_timers(struct pci_channel *hose);
extern unsigned int pcibios_handle_status_errors(unsigned long addr,
unsigned int status, struct pci_channel *hose);
extern int pci_is_66mhz_capable(struct pci_channel *hose,
int top_bus, int current_bus);
extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
struct pci_dev;
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
extern void pcibios_set_master(struct pci_dev *dev);
/* Dynamic DMA mapping stuff.
* SuperH has everything mapped statically like x86.
*/
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
#ifdef CONFIG_PCI
/*
* None of the SH PCI controllers support MWI, it is always treated as a
* direct memory write.
*/
#define PCI_DISABLE_MWI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
unsigned long *strategy_parameter)
{
unsigned long cacheline_size;
u8 byte;
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
if (byte == 0)
cacheline_size = L1_CACHE_BYTES;
else
cacheline_size = byte << 2;
*strat = PCI_DMA_BURST_MULTIPLE;
*strategy_parameter = cacheline_size;
}
#endif
/* Board-specific fixup routines. */
int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin);
#define pci_domain_nr(bus) ((struct pci_channel *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
{
struct pci_channel *hose = bus->sysdata;
return hose->need_domain_info;
}
/* Chances are this interrupt is wired PC-style ... */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
/* generic DMA-mapping stuff */
#include <asm-generic/pci-dma-compat.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PCI_H */

View file

@ -0,0 +1,29 @@
#ifndef __ASM_SH_PERF_EVENT_H
#define __ASM_SH_PERF_EVENT_H
struct hw_perf_event;
#define MAX_HWEVENTS 2
struct sh_pmu {
const char *name;
unsigned int num_events;
void (*disable_all)(void);
void (*enable_all)(void);
void (*enable)(struct hw_perf_event *, int);
void (*disable)(struct hw_perf_event *, int);
u64 (*read)(int);
int (*event_map)(int);
unsigned int max_events;
unsigned long raw_event_mask;
const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
/* arch/sh/kernel/perf_event.c */
extern int register_sh_pmu(struct sh_pmu *);
extern int reserve_pmc_hardware(void);
extern void release_pmc_hardware(void);
#endif /* __ASM_SH_PERF_EVENT_H */

View file

@ -0,0 +1,79 @@
#ifndef __ASM_SH_PGALLOC_H
#define __ASM_SH_PGALLOC_H
#include <linux/quicklist.h>
#include <asm/page.h>
#define QUICK_PT 0 /* Other page table pages that are zero on free */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
#if PAGETABLE_LEVELS > 2
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte)
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
*/
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *page;
void *pg;
pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
if (!pg)
return NULL;
page = virt_to_page(pg);
if (!pgtable_page_ctor(page)) {
quicklist_free(QUICK_PT, NULL, pg);
return NULL;
}
return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(QUICK_PT, NULL, pte);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
quicklist_free_page(QUICK_PT, NULL, pte);
}
#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
static inline void check_pgt_cache(void)
{
quicklist_trim(QUICK_PT, NULL, 25, 16);
}
#endif /* __ASM_SH_PGALLOC_H */

View file

@ -0,0 +1,23 @@
#ifndef __ASM_SH_PGTABLE_2LEVEL_H
#define __ASM_SH_PGTABLE_2LEVEL_H
#include <asm-generic/pgtable-nopmd.h>
/*
* traditional two-level paging structure
*/
#define PAGETABLE_LEVELS 2
/* PTE bits */
#define PTE_MAGNITUDE 2 /* 32-bit PTEs */
#define PTE_SHIFT PAGE_SHIFT
#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
/* PGD bits */
#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE))
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#endif /* __ASM_SH_PGTABLE_2LEVEL_H */

View file

@ -0,0 +1,56 @@
#ifndef __ASM_SH_PGTABLE_3LEVEL_H
#define __ASM_SH_PGTABLE_3LEVEL_H
#include <asm-generic/pgtable-nopud.h>
/*
* Some cores need a 3-level page table layout, for example when using
* 64-bit PTEs and 4K pages.
*/
#define PAGETABLE_LEVELS 3
#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */
/* PGD bits */
#define PGDIR_SHIFT 30
#define PTRS_PER_PGD 4
#define USER_PTRS_PER_PGD 2
/* PMD bits */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
typedef struct { unsigned long long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return pud_val(pud);
}
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
#define pud_none(x) (!pud_val(x))
#define pud_present(x) (pud_val(x))
#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0)
#define pud_bad(x) (pud_val(x) & ~PAGE_MASK)
/*
* (puds are folded into pgds so this doesn't actually get called,
* but the define is needed for a generic inline function.)
*/
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif /* __ASM_SH_PGTABLE_3LEVEL_H */

View file

@ -0,0 +1,163 @@
/*
* This file contains the functions and defines necessary to modify and
* use the SuperH page table tree.
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*/
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H
#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>
#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
* Effective and physical address definitions, to aid with sign
* extension.
*/
#define NEFF 32
#define NEFF_SIGN (1LL << (NEFF - 1))
#define NEFF_MASK (-1LL << NEFF)
static inline unsigned long long neff_sign_extend(unsigned long val)
{
unsigned long long extended = val;
return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}
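/*
 * Example: with NEFF == 32, neff_sign_extend(0xc0000000) yields
 * 0xffffffffc0000000ULL, while neff_sign_extend(0x20000000) is returned
 * unchanged; this mirrors the sign extension applied to 32-bit effective
 * addresses.
 */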
#ifdef CONFIG_29BIT
#define NPHYS 29
#else
#define NPHYS 32
#endif
#define NPHYS_SIGN (1LL << (NPHYS - 1))
#define NPHYS_MASK (-1LL << NPHYS)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
#define FIRST_USER_ADDRESS 0
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
static inline unsigned long phys_addr_mask(void)
{
/* Is the MMU in 29bit mode? */
if (__in_29bit_mode())
return PHYS_ADDR_MASK29;
return PHYS_ADDR_MASK32;
}
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
#ifdef CONFIG_SUPERH32
#define VMALLOC_START (P3SEG)
#else
#define VMALLOC_START (0xf0000000)
#endif
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif
/*
* SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
* protection for execute, and consider it the same as a read. Also, write
* permission implies read permission. This is the closest we can get..
*
* SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
* not only supporting separate execute, read, and write bits, but having
* completely separate permission bits for user and kernel space.
*/
/*xwr*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_EXECREAD
#define __P101 PAGE_EXECREAD
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_WRITEONLY
#define __S011 PAGE_SHARED
#define __S100 PAGE_EXECREAD
#define __S101 PAGE_EXECREAD
#define __S110 PAGE_RWX
#define __S111 PAGE_RWX
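/*
 * Example of the legacy fallback: a MAP_SHARED, PROT_WRITE-only mapping
 * selects __S010 == PAGE_WRITEONLY, which pre-SH-X2 parts define as plain
 * PAGE_SHARED, i.e. the write permission implies read as noted above.
 */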
typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
/*
* Initialise the page table caches
*/
extern void pgtable_cache_init(void);
struct vm_area_struct;
struct mm_struct;
extern void __update_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
__update_cache(vma, address, pte);
__update_tlb(vma, address, pte);
}
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
pgd_t *pgd);
/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTE_SPECIAL
#include <asm-generic/pgtable.h>
#endif /* __ASM_SH_PGTABLE_H */

View file

@ -0,0 +1,498 @@
#ifndef __ASM_SH_PGTABLE_32_H
#define __ASM_SH_PGTABLE_32_H
/*
* Linux PTEL encoding.
*
* Hardware and software bit definitions for the PTEL value (see below for
* notes on SH-X2 MMUs and 64-bit PTEs):
*
* - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
*
* - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the
* hardware PTEL value can't have the SH-bit set when MMUCR.IX is set,
* which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT).
*
* In order to keep this relatively clean, do not use these for defining
* SH-3 specific flags until all of the other unused bits have been
* exhausted.
*
* - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
*
* - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
* Bit 10 is used for _PAGE_ACCESSED, and bit 11 is used for _PAGE_SPECIAL.
*
* - On 29 bit platforms, bits 31 to 29 are used for the space attributes
* and timing control which (together with bit 0) are moved into the
* old-style PTEA on the parts that support it.
*
* XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
*
* SH-X2 MMUs and extended PTEs
*
* SH-X2 supports an extended mode TLB with split data arrays due to the
* number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
* SZ bit placeholders still exist in data array 1, but are implemented as
* reserved bits, with the real logic existing in data array 2.
*
* The downside to this is that we can no longer fit everything in to a 32-bit
* PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
* side, this gives us quite a few spare bits to play with for future usage.
*/
/* Legacy and compat mode bits */
#define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */
#define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */
#define _PAGE_DIRTY 0x004 /* D-bit : page changed */
#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
#define _PAGE_PROTNONE 0x200 /* software: if not present */
#define _PAGE_ACCESSED 0x400 /* software: page referenced */
#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
#define _PAGE_SPECIAL 0x800 /* software: special page */
#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER)
/* Extended mode bits */
#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */
#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */
#define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */
#define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */
#define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */
#define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */
#define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */
#define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */
#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */
#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */
#define _PAGE_EXT_WIRED 0x4000 /* software: Wire TLB entry */
/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
#ifdef CONFIG_X2TLB
#define _PAGE_PCC_MASK 0x00000000 /* No legacy PTEA support */
#else
/* software: moves to PTEA.TC (Timing Control) */
#define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */
#define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */
/* software: moves to PTEA.SA[2:0] (Space Attributes) */
#define _PAGE_PCC_IODYN 0x00000001 /* IO space, dynamically sized bus */
#define _PAGE_PCC_IO8 0x20000000 /* IO space, 8 bit bus */
#define _PAGE_PCC_IO16 0x20000001 /* IO space, 16 bit bus */
#define _PAGE_PCC_COM8 0x40000000 /* Common Memory space, 8 bit bus */
#define _PAGE_PCC_COM16 0x40000001 /* Common Memory space, 16 bit bus */
#define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */
#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 16 bit bus */
#define _PAGE_PCC_MASK 0xe0000001
/* copy the ptea attributes */
static inline unsigned long copy_ptea_attributes(unsigned long x)
{
return ((x >> 28) & 0xe) | (x & 0x1);
}
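/*
 * Worked example: copy_ptea_attributes(_PAGE_PCC_ATR16) takes the
 * 0x60000001 encoding, moves the space-attribute bits [31:29] down to
 * bits [3:1] (giving 0x6) and keeps bit 0 (0x1), yielding 0x7 for the
 * PTEA space-attribute/timing-control field.
 */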
#endif
/* Mask which drops unused bits from the PTEL value */
#if defined(CONFIG_CPU_SH3)
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
_PAGE_FILE | _PAGE_SZ1 | \
_PAGE_HW_SHARED)
#elif defined(CONFIG_X2TLB)
/* Get rid of the legacy PR/SZ bits when using extended mode */
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
_PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
#else
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
#endif
#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))
/* Hardware flags, page size encoding */
#if !defined(CONFIG_MMU)
# define _PAGE_FLAGS_HARD 0ULL
#elif defined(CONFIG_X2TLB)
# if defined(CONFIG_PAGE_SIZE_4KB)
# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0)
# elif defined(CONFIG_PAGE_SIZE_8KB)
# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1)
# elif defined(CONFIG_PAGE_SIZE_64KB)
# define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2)
# endif
#else
# if defined(CONFIG_PAGE_SIZE_4KB)
# define _PAGE_FLAGS_HARD _PAGE_SZ0
# elif defined(CONFIG_PAGE_SIZE_64KB)
# define _PAGE_FLAGS_HARD _PAGE_SZ1
# endif
#endif
#if defined(CONFIG_X2TLB)
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
# define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
# define _PAGE_SZHUGE (_PAGE_EXT_ESZ3)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
# define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
# endif
# define _PAGE_WIRED (_PAGE_EXT(_PAGE_EXT_WIRED))
#else
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE (_PAGE_SZ1)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
# define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1)
# endif
# define _PAGE_WIRED (0)
#endif
/*
* Stub out _PAGE_SZHUGE if we don't have a good definition for it,
* to make pte_mkhuge() happy.
*/
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE (_PAGE_FLAGS_HARD)
#endif
/*
* Mask of bits that are to be preserved across pgprot changes.
*/
#define _PAGE_CHG_MASK \
(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_SPECIAL)
#ifndef __ASSEMBLY__
#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_READ | \
_PAGE_EXT_KERN_WRITE | \
_PAGE_EXT_USER_READ | \
_PAGE_EXT_USER_WRITE))
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
_PAGE_EXT_KERN_READ | \
_PAGE_EXT_USER_EXEC | \
_PAGE_EXT_USER_READ))
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_READ | \
_PAGE_EXT_USER_READ))
#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
_PAGE_EXT_USER_WRITE))
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
_PAGE_EXT_KERN_READ | \
_PAGE_EXT_KERN_EXEC | \
_PAGE_EXT_USER_WRITE | \
_PAGE_EXT_USER_READ | \
_PAGE_EXT_USER_EXEC))
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
_PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_READ | \
_PAGE_EXT_KERN_WRITE | \
_PAGE_EXT_KERN_EXEC))
#define PAGE_KERNEL_NOCACHE \
__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_HW_SHARED | \
_PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_READ | \
_PAGE_EXT_KERN_WRITE | \
_PAGE_EXT_KERN_EXEC))
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
_PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
_PAGE_EXT(_PAGE_EXT_KERN_READ | \
_PAGE_EXT_KERN_EXEC))
#define PAGE_KERNEL_PCC(slot, type) \
__pgprot(0)
#elif defined(CONFIG_MMU) /* SH-X TLB */
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_CACHABLE | _PAGE_ACCESSED | \
_PAGE_FLAGS_HARD)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_EXECREAD PAGE_READONLY
#define PAGE_RWX PAGE_SHARED
#define PAGE_WRITEONLY PAGE_SHARED
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
_PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_NOCACHE \
__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_HW_SHARED | \
_PAGE_FLAGS_HARD)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
_PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_PCC(slot, type) \
__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
(slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
(type))
#else /* no mmu */
#define PAGE_NONE __pgprot(0)
#define PAGE_SHARED __pgprot(0)
#define PAGE_COPY __pgprot(0)
#define PAGE_EXECREAD __pgprot(0)
#define PAGE_RWX __pgprot(0)
#define PAGE_READONLY __pgprot(0)
#define PAGE_WRITEONLY __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define PAGE_KERNEL_NOCACHE __pgprot(0)
#define PAGE_KERNEL_RO __pgprot(0)
#define PAGE_KERNEL_PCC(slot, type) \
__pgprot(0)
#endif
#endif /* __ASSEMBLY__ */
#ifndef __ASSEMBLY__
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
#ifdef CONFIG_X2TLB
static inline void set_pte(pte_t *ptep, pte_t pte)
{
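	/*
	 * Presumed ordering rationale (as in other 64-bit PTE ports):
	 * the present bit lives in pte_low, so the high word is written
	 * first and ordered with smp_wmb(), ensuring no observer sees a
	 * present PTE with a stale high word.
	 */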
ptep->pte_high = pte.pte_high;
smp_wmb();
ptep->pte_low = pte.pte_low;
}
#else
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#endif
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
* (pmds are folded into pgds so this doesn't actually get called,
* but the define is needed for a generic inline function.)
*/
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define pfn_pte(pfn, prot) \
__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) \
__pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_none(x) (!pte_val(x))
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) pfn_to_page(pte_pfn(x))
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
#ifdef CONFIG_X2TLB
#define pte_write(pte) \
((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
#else
#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
#endif
#define PTE_BIT_FUNC(h,fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
#ifdef CONFIG_X2TLB
/*
* We cheat a bit in the SH-X2 TLB case. As the permission bits are
* individually toggled (and user permissions are entirely decoupled from
* kernel permissions), we attempt to couple them a bit more sanely here.
*/
PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
#endif
PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
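/*
 * For illustration, the PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY)
 * instance above expands to the equivalent of:
 */
#if 0
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_DIRTY;
	return pte;
}
#endif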
/*
* Macros to mark a page protection as uncachable.
*/
#define pgprot_writecombine(prot) \
__pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
#define pgprot_noncached pgprot_writecombine
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte.pte_low &= _PAGE_CHG_MASK;
pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X2TLB
pte.pte_high |= pgprot_val(newprot) >> 32;
#endif
return pte;
}
#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define __pgd_offset(address) pgd_index(address)
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the third-level page table.. */
#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define __pte_offset(address) pte_index(address)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
&(e), (e).pte_high, (e).pte_low)
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#else
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
#endif
/*
* Encode and de-code a swap entry
*
* Constraints:
* _PAGE_FILE at bit 0
* _PAGE_PRESENT at bit 8
* _PAGE_PROTNONE at bit 9
*
* For the normal case, we encode the swap type into bits 0:7 and the
* swap offset into bits 10:30. For the 64-bit PTE case, we keep the
* preserved bits in the low 32-bits and use the upper 32 as the swap
* offset (along with a 5-bit type), following the same approach as x86
* PAE. This keeps the logic quite simple, and allows for a full 32
* PTE_FILE_MAX_BITS, as opposed to the 29 bits we're constrained to
* in the pte_low case.
*
* As is evident from the Alpha code, if we ever get a 64-bit unsigned
* long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
* much cleaner.
*
* NOTE: the bits at the positions of _PAGE_PRESENT and _PAGE_PROTNONE
* must be set to zero.
*/
#ifdef CONFIG_X2TLB
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
/*
* Encode and decode a nonlinear file mapping entry
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS 32
#else
#define __swp_type(x) ((x).val & 0xff)
#define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
/*
* Encode and decode a nonlinear file mapping entry
*/
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
#endif
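/*
 * A minimal sketch of the non-X2 round trip above; the values are
 * illustrative only:
 */
#if 0
swp_entry_t e = __swp_entry(5, 0x1234);	/* val = 5 | (0x1234 << 10) */
unsigned int type = __swp_type(e);	/* low 8 bits: 5 */
unsigned long off = __swp_offset(e);	/* bits 10 and up: 0x1234 */
pte_t pte = __swp_entry_to_pte(e);	/* val << 1, bit 0 left clear */
#endif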
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PGTABLE_32_H */


@@ -0,0 +1,317 @@
#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H
/*
* include/asm-sh/pgtable_64.h
*
* This file contains the functions and defines necessary to modify and use
* the SuperH page table tree.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* Error outputs.
*/
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* Table setting routines. Used within arch/mm only.
*/
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
unsigned long long x = ((unsigned long long) pteval.pte_low);
unsigned long long *xp = (unsigned long long *) pteptr;
/*
* Sign-extend based on NPHYS.
*/
*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
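/*
 * Illustrative sketch of the sign extension above (assumed values,
 * e.g. NPHYS == 32, so NPHYS_SIGN would be bit 31 and NPHYS_MASK the
 * upper word): a PTE with the sign bit set is stored with the top
 * bits filled with ones.
 */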
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
* PGD defines. Top level.
*/
/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
* PMD level access routines. Same notes as above.
*/
#define _PMD_EMPTY 0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_page_vaddr(pmd_entry) \
((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
#define pmd_page(pmd) \
(virt_to_page(pmd_val(pmd)))
/* PMD to PTE dereferencing */
#define pte_index(address) \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define __pte_offset(address) pte_index(address)
#define pte_offset_kernel(dir, addr) \
((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
#ifndef __ASSEMBLY__
/*
* PTEL coherent flags.
* See Chapter 17 ST50 CPU Core Volume 1, Architecture.
*/
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
positions, to avoid expensive bit shuffling on every refill. The remaining
bits are used for s/w purposes and masked out on each refill.
Note, the PTE slots are used to hold data of type swp_entry_t when a page is
swapped out. Only the _PAGE_PRESENT flag is significant when the page is
swapped out, and it must be placed so that it doesn't overlap either the
type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
[2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
into 2 pieces. That is handled by __swp_entry() and __swp_type() below. */
#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
#define _PAGE_PRESENT 0x004 /* software: page referenced */
#define _PAGE_FILE 0x004 /* software: only when !present */
#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
#define _PAGE_ACCESSED 0x800 /* software: page referenced */
/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
/*
* We can use the sign-extended bits in the PTEL to get 32 bits of
* software flags. This works for now because no implementation uses
* anything above the PPN field.
*/
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL _PAGE_EXT(0x002)
#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
/*
* HugeTLB support
*/
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE (_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE (_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
#endif
/*
* Stub out _PAGE_SZHUGE if we don't have a good definition for it,
* to make pte_mkhuge() happy.
*/
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE (0)
#endif
/*
* Default flags for a Kernel page.
* This is fundamentally also SHARED because the main use of this define
* (other than for PGD/PMD entries) is for the VMALLOC pool which is
* contextless.
*
* _PAGE_EXECUTE is required for modules
*
*/
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE | \
_PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SHARED)
/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SPECIAL)
/*
* We have full permissions (Read/Write/Execute/Shared).
*/
#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
_PAGE_SHARED)
#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
/*
* We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
* protection mode for the stack.
*/
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
#define PAGE_KERNEL_NOCACHE \
__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_SHARED)
/* Make it a device mapping for maximum safety (e.g. for mapping device
registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
/*
* PTE level access routines.
*
* Note1:
* It's the tree walk leaf. This is physical address to be stored.
*
* Note 2:
* Regarding the choice of _PTE_EMPTY:
We must choose a bit pattern that cannot be valid, whether or not the page
is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
left for us to select. If we force bit[7]==0 when swapped out, we could use
the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
we force bit[7]==1 when swapped out, we can use all zeroes to indicate
empty. This is convenient, because the page tables get cleared to zero
when they are allocated.
*/
#define _PTE_EMPTY 0x0
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
/*
* Some definitions to translate between mem_map, PTEs, and page
* addresses:
*/
/*
* Given a PTE, return the index of the mem_map[] entry corresponding
* to the page frame the PTE refers to: get the absolute physical
* address, make it relative, and translate it to an index.
*/
#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
__MEMORY_START) >> PAGE_SHIFT)
/*
* Given a PTE, return the "struct page *".
*/
#define pte_page(x) (mem_map + pte_pagenr(x))
/*
* Return number of (down rounded) MB corresponding to x pages.
*/
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
* The following only have defined behavior if pte_present() is true.
*/
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry.
*
* extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
#define mk_pte(page,pgprot) \
({ \
pte_t __pte; \
\
set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
__MEMORY_START | pgprot_val((pgprot)))); \
__pte; \
})
/*
* This takes a (absolute) physical page address that is used
* by the remapping functions
*/
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
/* Encode and decode a swap entry */
#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte))
#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
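/*
 * Illustrative round trip for the split type field (sketch only):
 * type 0x2b stores (0x2b & 3) at bits [1:0] and (0x2b & 0x3c) << 1 at
 * bits [6:3], leaving bit [2] (_PAGE_PRESENT) clear.
 */
#if 0
swp_entry_t e = __swp_entry(0x2b, 0x100);	/* val = 0x10053 */
/* __swp_type(e) == (0x10053 & 3) + ((0x10053 >> 1) & 0x3c) == 0x2b */
/* __swp_offset(e) == 0x10053 >> 8 == 0x100 */
#endif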
#endif /* !__ASSEMBLY__ */
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* __ASM_SH_PGTABLE_64_H */


@@ -0,0 +1,5 @@
# ifdef CONFIG_SUPERH32
# include <asm/posix_types_32.h>
# else
# include <asm/posix_types_64.h>
# endif


@@ -0,0 +1,184 @@
#ifndef __ASM_SH_PROCESSOR_H
#define __ASM_SH_PROCESSOR_H
#include <asm/cpu-features.h>
#include <asm/segment.h>
#include <asm/cache.h>
#ifndef __ASSEMBLY__
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
*
* Each one of these also needs a CONFIG_CPU_SUBTYPE_xxx entry
* in arch/sh/mm/Kconfig, as well as an entry in arch/sh/kernel/setup.c
* for parsing the subtype in get_cpu_subtype().
*/
enum cpu_type {
/* SH-2 types */
CPU_SH7619,
/* SH-2A types */
CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_SH7264, CPU_SH7269,
CPU_MXG,
/* SH-3 types */
CPU_SH7705, CPU_SH7706, CPU_SH7707,
CPU_SH7708, CPU_SH7708S, CPU_SH7708R,
CPU_SH7709, CPU_SH7709A, CPU_SH7710, CPU_SH7712,
CPU_SH7720, CPU_SH7721, CPU_SH7729,
/* SH-4 types */
CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R,
CPU_SH7760, CPU_SH4_202, CPU_SH4_501,
/* SH-4A types */
CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SH7786,
CPU_SH7723, CPU_SH7724, CPU_SH7757, CPU_SH7734, CPU_SHX3,
/* SH4AL-DSP types */
CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
/* SH-5 types */
CPU_SH5_101, CPU_SH5_103,
/* Unknown subtype */
CPU_SH_NONE
};
enum cpu_family {
CPU_FAMILY_SH2,
CPU_FAMILY_SH2A,
CPU_FAMILY_SH3,
CPU_FAMILY_SH4,
CPU_FAMILY_SH4A,
CPU_FAMILY_SH4AL_DSP,
CPU_FAMILY_SH5,
CPU_FAMILY_UNKNOWN,
};
/*
* TLB information structure
*
* Defined for both I and D tlb, per-processor.
*/
struct tlb_info {
unsigned long long next;
unsigned long long first;
unsigned long long last;
unsigned int entries;
unsigned int step;
unsigned long flags;
};
struct sh_cpuinfo {
unsigned int type, family;
int cut_major, cut_minor;
unsigned long loops_per_jiffy;
unsigned long asid_cache;
struct cache_info icache; /* Primary I-cache */
struct cache_info dcache; /* Primary D-cache */
struct cache_info scache; /* Secondary cache */
/* TLB info */
struct tlb_info itlb;
struct tlb_info dtlb;
unsigned int phys_bits;
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
extern struct sh_cpuinfo cpu_data[];
#define boot_cpu_data cpu_data[0]
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
void stop_this_cpu(void *);
/* Forward decl */
struct seq_operations;
struct task_struct;
extern struct pt_regs fake_swapper_regs;
extern void cpu_init(void);
extern void cpu_probe(void);
/* arch/sh/kernel/process.c */
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
/* arch/sh/mm/alignment.c */
extern int get_unalign_ctl(struct task_struct *, unsigned long addr);
extern int set_unalign_ctl(struct task_struct *, unsigned int val);
#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
/* arch/sh/mm/init.c */
extern unsigned int mem_init_done;
/* arch/sh/kernel/setup.c */
const char *get_cpu_subtype(struct sh_cpuinfo *c);
extern const struct seq_operations cpuinfo_op;
/* thread_struct flags */
#define SH_THREAD_UAC_NOPRINT (1 << 0)
#define SH_THREAD_UAC_SIGBUS (1 << 1)
#define SH_THREAD_UAC_MASK (SH_THREAD_UAC_NOPRINT | SH_THREAD_UAC_SIGBUS)
/* processor boot mode configuration */
#define MODE_PIN0 (1 << 0)
#define MODE_PIN1 (1 << 1)
#define MODE_PIN2 (1 << 2)
#define MODE_PIN3 (1 << 3)
#define MODE_PIN4 (1 << 4)
#define MODE_PIN5 (1 << 5)
#define MODE_PIN6 (1 << 6)
#define MODE_PIN7 (1 << 7)
#define MODE_PIN8 (1 << 8)
#define MODE_PIN9 (1 << 9)
#define MODE_PIN10 (1 << 10)
#define MODE_PIN11 (1 << 11)
#define MODE_PIN12 (1 << 12)
#define MODE_PIN13 (1 << 13)
#define MODE_PIN14 (1 << 14)
#define MODE_PIN15 (1 << 15)
int generic_mode_pins(void);
int test_mode_pin(int pin);
#ifdef CONFIG_VSYSCALL
int vsyscall_init(void);
#else
#define vsyscall_init() do { } while (0)
#endif
/*
* SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
#define instruction_size(insn) (2)
#else
#define instruction_size(insn) (4)
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SUPERH32
# include <asm/processor_32.h>
#else
# include <asm/processor_64.h>
#endif
#endif /* __ASM_SH_PROCESSOR_H */


@@ -0,0 +1,216 @@
/*
* include/asm-sh/processor.h
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2002, 2003 Paul Mundt
*/
#ifndef __ASM_SH_PROCESSOR_32_H
#define __ASM_SH_PROCESSOR_32_H
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/hw_breakpoint.h>
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
/* Core Processor Version Register */
#define CCN_PVR 0xff000030
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
/*
* User space process size: 2GB.
*
* Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
*/
#define TASK_SIZE 0x7c000000UL
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register
*
* FD-bit:
* When it is set, the processor has no right to use the FPU, and an
* exception is raised when a floating-point operation is executed.
*
* IMASK-bit:
* Interrupt level mask
*/
#define SR_DSP 0x00001000
#define SR_IMASK 0x000000f0
#define SR_FD 0x00008000
#define SR_MD 0x40000000
/*
* DSP structure and data
*/
struct sh_dsp_struct {
unsigned long dsp_regs[14];
long status;
};
/*
* FPU structure and data
*/
struct sh_fpu_hard_struct {
unsigned long fp_regs[16];
unsigned long xfp_regs[16];
unsigned long fpscr;
unsigned long fpul;
long status; /* software status information */
};
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
unsigned long fp_regs[16];
unsigned long xfp_regs[16];
unsigned long fpscr;
unsigned long fpul;
unsigned char lookahead;
unsigned long entry_pc;
};
union thread_xstate {
struct sh_fpu_hard_struct hardfpu;
struct sh_fpu_soft_struct softfpu;
};
struct thread_struct {
/* Saved registers when thread is descheduled */
unsigned long sp;
unsigned long pc;
/* Various thread flags, see SH_THREAD_xxx */
unsigned long flags;
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
#ifdef CONFIG_SH_DSP
/* Dsp status information */
struct sh_dsp_struct dsp_status;
#endif
/* Extended processor state */
union thread_xstate *xstate;
/*
* fpu_counter contains the number of consecutive context switches
* during which the FPU was used. If this exceeds a threshold, the
* lazy fpu saving becomes unlazy, to save the trap. This is an
* unsigned char so that after 256 iterations the counter wraps and
* the behavior turns lazy again; this deals with bursty apps that
* only use the FPU for a short time.
*/
unsigned char fpu_counter;
};
#define INIT_THREAD { \
.sp = sizeof(init_stack) + (long) &init_stack, \
.flags = 0, \
}
/* Forward declaration, a strange C thing */
struct task_struct;
extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
/*
* FPU lazy state save handling.
*/
static __inline__ void disable_fpu(void)
{
unsigned long __dummy;
/* Set FD flag in SR */
__asm__ __volatile__("stc sr, %0\n\t"
"or %1, %0\n\t"
"ldc %0, sr"
: "=&r" (__dummy)
: "r" (SR_FD));
}
static __inline__ void enable_fpu(void)
{
unsigned long __dummy;
/* Clear out FD flag in SR */
__asm__ __volatile__("stc sr, %0\n\t"
"and %1, %0\n\t"
"ldc %0, sr"
: "=&r" (__dummy)
: "r" (~SR_FD));
}
/* Double precision, NaNs as NaNs, round to nearest, no exceptions */
#define FPSCR_INIT 0x00080000
#define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */
#define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */
/*
* Return saved PC of a blocked thread.
*/
#define thread_saved_pc(tsk) (tsk->thread.pc)
void show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs);
#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
static inline void show_code(struct pt_regs *regs)
{
}
#endif
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
#define PREFETCH_STRIDE L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
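/*
 * __builtin_prefetch(addr, rw, locality): rw is 0 for a read and 1
 * for a write; locality 3 requests maximum temporal locality.
 */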
static inline void prefetch(const void *x)
{
__builtin_prefetch(x, 0, 3);
}
static inline void prefetchw(const void *x)
{
__builtin_prefetch(x, 1, 3);
}
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_32_H */


@@ -0,0 +1,234 @@
#ifndef __ASM_SH_PROCESSOR_64_H
#define __ASM_SH_PROCESSOR_64_H
/*
* include/asm-sh/processor_64.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>
#include <cpu/registers.h>
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({ \
void *pc; \
unsigned long long __dummy = 0; \
__asm__("gettr tr0, %1\n\t" \
"pta 4, tr0\n\t" \
"gettr tr0, %0\n\t" \
"ptabs %1, tr0\n\t" \
:"=r" (pc), "=r" (__dummy) \
: "1" (__dummy)); \
pc; })
#endif
/*
* User space process size: 2GB - 4k.
*/
#define TASK_SIZE 0x7ffff000UL
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register
*
* FD-bit:
* When it is set, the processor has no right to use the FPU, and an
* exception is raised when a floating-point operation is executed.
*
* IMASK-bit:
* Interrupt level mask
*
* STEP-bit:
* Single step bit
*
*/
#if defined(CONFIG_SH64_SR_WATCH)
#define SR_MMU 0x84000000
#else
#define SR_MMU 0x80000000
#endif
#define SR_IMASK 0x000000f0
#define SR_FD 0x00008000
#define SR_SSTEP 0x08000000
#ifndef __ASSEMBLY__
/*
* FPU structure and data: requires 8-byte alignment, as we need to
* access it with fld.p, fst.p
*/
struct sh_fpu_hard_struct {
unsigned long fp_regs[64];
unsigned int fpscr;
/* long status; * software status information */
};
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
unsigned long fp_regs[64];
unsigned int fpscr;
unsigned char lookahead;
unsigned long entry_pc;
};
union thread_xstate {
struct sh_fpu_hard_struct hardfpu;
struct sh_fpu_soft_struct softfpu;
/*
* The structure definitions only produce 32 bit alignment, yet we need
* to access them using 64 bit load/store as well.
*/
unsigned long long alignment_dummy;
};
struct thread_struct {
unsigned long sp;
unsigned long pc;
/* Various thread flags, see SH_THREAD_xxx */
unsigned long flags;
/* This stores the address of the pt_regs built during a context
switch, or of the register save area built for a kernel mode
exception. It is used for backtracing the stack of a sleeping task
or one that traps in kernel mode. */
struct pt_regs *kregs;
/* This stores the address of the pt_regs constructed on entry from
user mode. It is a fixed value over the lifetime of a process, or
NULL for a kernel thread. */
struct pt_regs *uregs;
unsigned long address;
/* Hardware debugging registers may come here */
/* floating point info */
union thread_xstate *xstate;
/*
* fpu_counter contains the number of consecutive context switches
* during which the FPU was used. If this exceeds a threshold, the
* lazy fpu saving becomes unlazy, to save the trap. This is an
* unsigned char so that after 256 iterations the counter wraps and
* the behavior turns lazy again; this deals with bursty apps that
* only use the FPU for a short time.
*/
unsigned char fpu_counter;
};
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
#define INIT_THREAD { \
.sp = sizeof(init_stack) + \
(long) &init_stack, \
.pc = 0, \
.kregs = &fake_swapper_regs, \
.uregs = NULL, \
.address = 0, \
.flags = 0, \
}
/*
* Do necessary setup to start up a newly executed thread.
*/
#define SR_USER (SR_MMU | SR_FD)
#define start_thread(_regs, new_pc, new_sp) \
_regs->sr = SR_USER; /* User mode. */ \
_regs->pc = new_pc - 4; /* Compensate syscall exit */ \
_regs->pc |= 1; /* Set SHmedia ! */ \
_regs->regs[18] = 0; \
_regs->regs[15] = new_sp
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
/*
* FPU lazy state save handling.
*/
static inline void disable_fpu(void)
{
unsigned long long __dummy;
/* Set FD flag in SR */
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (SR_FD));
}
static inline void enable_fpu(void)
{
unsigned long long __dummy;
/* Clear out FD flag in SR */
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (~SR_FD));
}
/* Round to nearest, no exceptions on inexact, overflow, underflow,
zero-divide, invalid. A config option selects whether to flush denorms
to zero, or to raise an exception when a denorm is encountered. */
#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
#define FPSCR_INIT 0x00040000
#else
#define FPSCR_INIT 0x00000000
#endif
#ifdef CONFIG_SH_FPU
/* Initialise the FP state of a task */
void fpinit(struct sh_fpu_hard_struct *fpregs);
#else
#define fpinit(fpregs) do { } while (0)
#endif
extern struct task_struct *last_task_used_math;
/*
* Return saved PC of a blocked thread.
*/
#define thread_saved_pc(tsk) (tsk->thread.pc)
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PROCESSOR_64_H */


@@ -0,0 +1,117 @@
/*
* Copyright (C) 1999, 2000 Niibe Yutaka
*/
#ifndef __ASM_SH_PTRACE_H
#define __ASM_SH_PTRACE_H
#include <linux/stringify.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <uapi/asm/ptrace.h>
#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
#define GET_FP(regs) ((regs)->regs[14])
#define GET_USP(regs) ((regs)->regs[15])
#define arch_has_single_step() (1)
/*
* kprobe-based event tracer support
*/
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REGS_OFFSET_NAME(num) \
{.name = __stringify(r##num), .offset = offsetof(struct pt_regs, regs[num])}
#define TREGS_OFFSET_NAME(num) \
{.name = __stringify(tr##num), .offset = offsetof(struct pt_regs, tregs[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
extern const struct pt_regs_offset regoffset_table[];
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten.
* @offset: offset number of the register.
*
* regs_get_register returns the value of a register. The @offset is the
* offset of the register within the struct pt_regs pointed to by @regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
return *(unsigned long *)((unsigned long)regs + offset);
}
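/*
 * A hypothetical pairing with regs_query_register_offset() declared
 * above (sketch only):
 */
#if 0
int off = regs_query_register_offset("r15");
unsigned long sp = regs_get_register(regs, off);
#endif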
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static inline int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
struct perf_event;
struct perf_sample_data;
extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1)
static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = regs->pc;
if (virt_addr_uncached(pc))
return CAC_ADDR(pc);
return pc;
}
#define profile_pc profile_pc
#include <asm-generic/ptrace.h>
#endif /* __ASM_SH_PTRACE_H */


@@ -0,0 +1,13 @@
#ifndef __ASM_SH_PTRACE_32_H
#define __ASM_SH_PTRACE_32_H
#include <uapi/asm/ptrace_32.h>
#define MAX_REG_OFFSET offsetof(struct pt_regs, tra)
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->regs[0];
}
#endif /* __ASM_SH_PTRACE_32_H */


@@ -0,0 +1,13 @@
#ifndef __ASM_SH_PTRACE_64_H
#define __ASM_SH_PTRACE_64_H
#include <uapi/asm/ptrace_64.h>
#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->regs[3];
}
#endif /* __ASM_SH_PTRACE_64_H */


@@ -0,0 +1,31 @@
#ifndef __ASM_SH_PUSH_SWITCH_H
#define __ASM_SH_PUSH_SWITCH_H
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
struct push_switch {
/* switch state */
unsigned int state:1;
/* debounce timer */
struct timer_list debounce;
/* workqueue */
struct work_struct work;
/* platform device, for workqueue handler */
struct platform_device *pdev;
};
struct push_switch_platform_info {
/* IRQ handler */
irqreturn_t (*irq_handler)(int irq, void *data);
/* Special IRQ flags */
unsigned int irq_flags;
/* Bit location of switch */
unsigned int bit;
/* Symbolic switch name */
const char *name;
};
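/*
 * A board would typically hand this to the push-switch driver via
 * platform data; the values below are made up for illustration:
 */
#if 0
static struct push_switch_platform_info example_psw_info = {
	.irq_flags	= IRQF_SHARED,
	.bit		= 4,
	.name		= "example-sw",
};
#endif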
#endif /* __ASM_SH_PUSH_SWITCH_H */


@@ -0,0 +1,21 @@
#ifndef __ASM_SH_REBOOT_H
#define __ASM_SH_REBOOT_H
#include <linux/kdebug.h>
struct pt_regs;
struct machine_ops {
void (*restart)(char *cmd);
void (*halt)(void);
void (*power_off)(void);
void (*shutdown)(void);
void (*crash_shutdown)(struct pt_regs *);
};
extern struct machine_ops machine_ops;
/* arch/sh/kernel/machine_kexec.c */
void native_machine_crash_shutdown(struct pt_regs *regs);
#endif /* __ASM_SH_REBOOT_H */


@@ -0,0 +1,73 @@
#ifndef __ROMIMAGE_MACRO_H
#define __ROMIMAGE_MACRO_H
/* The LIST command is used to include comments in the script */
.macro LIST comment
.endm
/* The ED command is used to write a 32-bit word */
.macro ED, addr, data
mov.l 1f, r1
mov.l 2f, r0
mov.l r0, @r1
bra 3f
nop
.align 2
1 : .long \addr
2 : .long \data
3 :
.endm
/* The EW command is used to write a 16-bit word */
.macro EW, addr, data
mov.l 1f, r1
mov.l 2f, r0
mov.w r0, @r1
bra 3f
nop
.align 2
1 : .long \addr
2 : .long \data
3 :
.endm
/* The EB command is used to write an 8-bit word */
.macro EB, addr, data
mov.l 1f, r1
mov.l 2f, r0
mov.b r0, @r1
bra 3f
nop
.align 2
1 : .long \addr
2 : .long \data
3 :
.endm
/* The WAIT command is used to delay execution (busy-loops \time * 100 times) */
.macro WAIT, time
mov.l 2f, r3
1 :
nop
tst r3, r3
bf/s 1b
dt r3
bra 3f
nop
.align 2
2 : .long \time * 100
3 :
.endm
/* The DD command is used to read a 32-bit word */
.macro DD, addr, addr2, nr
mov.l 1f, r1
mov.l @r1, r0
bra 2f
nop
.align 2
1 : .long \addr
2 :
.endm
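/*
 * Typical use in a boot page script; the addresses and data below are
 * illustrative only, not for any particular board:
 *
 *	LIST "configure bus state controller"
 *	ED 0xff800008, 0x00000087
 *	EW 0xff800010, 0xa5a5
 *	WAIT 1
 */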
#endif /* __ROMIMAGE_MACRO_H */

arch/sh/include/asm/rtc.h Normal file

@@ -0,0 +1,28 @@
#ifndef _ASM_RTC_H
#define _ASM_RTC_H
void time_init(void);
extern void (*board_time_init)(void);
extern void (*rtc_sh_get_time)(struct timespec *);
extern int (*rtc_sh_set_time)(const time_t);
/* some dummy definitions */
#define RTC_BATT_BAD 0x100 /* battery bad */
#define RTC_SQWE 0x08 /* enable square-wave output */
#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
struct rtc_time;
unsigned int get_rtc_time(struct rtc_time *);
int set_rtc_time(struct rtc_time *);
#define RTC_CAP_4_DIGIT_YEAR (1 << 0)
struct sh_rtc_platform_info {
unsigned long capabilities;
};
#include <cpu/rtc.h>
#endif /* _ASM_RTC_H */

arch/sh/include/asm/rwsem.h Normal file

@@ -0,0 +1,132 @@
/*
* include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
* in lib/rwsem.c.
*/
#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H
#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
#ifdef __KERNEL__
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
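/*
 * Illustrative count values under this scheme (sketch only):
 *
 *	0x00000000	unlocked, no waiters
 *	0x00000001	one reader active
 *	0x00000003	three readers active
 *	0xffff0001	one writer active (WAITING_BIAS + ACTIVE_BIAS)
 *	0xffff0000	no active holders, but waiters queued
 */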
/*
* lock for reading
*/
static inline void __down_read(struct rw_semaphore *sem)
{
if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
smp_wmb();
else
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
int tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
smp_wmb();
return 1;
}
}
return 0;
}
/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
{
int tmp;
tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count));
if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
smp_wmb();
else
rwsem_down_write_failed(sem);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
int tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
smp_wmb();
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
{
int tmp;
smp_wmb();
tmp = atomic_dec_return((atomic_t *)(&sem->count));
if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* unlock after writing
*/
static inline void __up_write(struct rw_semaphore *sem)
{
smp_wmb();
if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count)) < 0)
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
/*
* downgrade write lock to read lock
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
int tmp;
smp_wmb();
tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
if (tmp < 0)
rwsem_downgrade_wake(sem);
}
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
__down_write(sem);
}
/*
* implement exchange and add functionality
*/
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
smp_mb();
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */


@@ -0,0 +1,10 @@
#ifndef __ASM_SECCOMP_H
#define __ASM_SECCOMP_H
#include <linux/unistd.h>
#define __NR_seccomp_read __NR_read
#define __NR_seccomp_write __NR_write
#define __NR_seccomp_exit __NR_exit
#define __NR_seccomp_sigreturn __NR_rt_sigreturn
#endif /* __ASM_SECCOMP_H */
