Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@@ -0,0 +1,33 @@
generic-y += bitsperlong.h
generic-y += bug.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += fcntl.h
generic-y += hardirq.h
generic-y += hash.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += siginfo.h
generic-y += statfs.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h

View file

@@ -0,0 +1 @@
#include <generated/asm-offsets.h>

View file

@@ -0,0 +1,153 @@
/*
* include/asm-xtensa/asmmacro.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ASMMACRO_H
#define _XTENSA_ASMMACRO_H
#include <variant/core.h>
/*
* Some little helpers for loops. Use zero-overhead-loops
* where applicable and if supported by the processor.
*
* __loopi ar, at, size, incr
* ar register initialized with the start address
* at scratch register used by macro
* size size immediate value
* incr increment
*
* __loops ar, as, at, incr_log2[, mask_log2][, cond][, ncond]
* ar register initialized with the start address
* as register initialized with the size
* at scratch register used by macro
* incr_log2 increment [in log2]
* mask_log2 mask [in log2]
* cond true condition (used in loop'cond')
* ncond false condition (used in b'ncond')
*
* __loop as
* restart loop. 'as' register must not have been modified!
*
* __endla ar, as, incr
* ar start address (modified)
* as scratch register used by macro
* incr increment
*/
/*
* loop for given size as immediate
*/
.macro __loopi ar, at, size, incr
#if XCHAL_HAVE_LOOPS
movi \at, ((\size + \incr - 1) / (\incr))
loop \at, 99f
#else
addi \at, \ar, \size
98:
#endif
.endm
/*
* loop for given size in register
*/
.macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
#if XCHAL_HAVE_LOOPS
.ifgt \incr_log2 - 1
addi \at, \as, (1 << \incr_log2) - 1
.ifnc \mask_log2,
extui \at, \at, \incr_log2, \mask_log2
.else
srli \at, \at, \incr_log2
.endif
.endif
loop\cond \at, 99f
#else
.ifnc \mask_log2,
extui \at, \as, \incr_log2, \mask_log2
.else
.ifnc \ncond,
srli \at, \as, \incr_log2
.endif
.endif
.ifnc \ncond,
b\ncond \at, 99f
.endif
.ifnc \mask_log2,
slli \at, \at, \incr_log2
add \at, \ar, \at
.else
add \at, \ar, \as
.endif
#endif
98:
.endm
/*
* loop from ar to ax
*/
.macro __loopt ar, as, at, incr_log2
#if XCHAL_HAVE_LOOPS
sub \at, \as, \ar
.ifgt \incr_log2 - 1
addi \at, \at, (1 << \incr_log2) - 1
srli \at, \at, \incr_log2
.endif
loop \at, 99f
#else
98:
#endif
.endm
/*
* restart loop. registers must be unchanged
*/
.macro __loop as
#if XCHAL_HAVE_LOOPS
loop \as, 99f
#else
98:
#endif
.endm
/*
* end of loop with no increment of the address.
*/
.macro __endl ar, as
#if !XCHAL_HAVE_LOOPS
bltu \ar, \as, 98b
#endif
99:
.endm
/*
* end of loop with increment of the address.
*/
.macro __endla ar, as, incr
addi \ar, \ar, \incr
__endl \ar \as
.endm
#endif /* _XTENSA_ASMMACRO_H */

View file

@@ -0,0 +1,324 @@
/*
* include/asm-xtensa/atomic.h
*
* Atomic operations that C can't guarantee us. Useful for resource counting.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H
#include <linux/stringify.h>
#include <linux/types.h>
#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
/*
* This Xtensa implementation assumes that the right mechanism
* for exclusion is for locking interrupts to level EXCM_LEVEL.
*
* Locking interrupts looks like this:
*
* rsil a15, LOCKLEVEL
* <code>
* wsr a15, PS
* rsync
*
* Note that a15 is used here because the register allocation
* done by the compiler is not guaranteed and a window overflow
* may not occur between the rsil and wsr instructions. By using
* a15 in the rsil, the machine is guaranteed to be in a state
* where no register reference will cause an overflow.
*/
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ACCESS_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) ((v)->counter = (i))
#if XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
: "memory" \
); \
}
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
\
__asm__ __volatile__( \
"1: l32i %1, %3, 0\n" \
" wsr %1, scompare1\n" \
" " #op " %0, %1, %2\n" \
" s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
" " #op " %0, %0, %2\n" \
: "=&a" (result), "=&a" (tmp) \
: "a" (i), "a" (v) \
: "memory" \
); \
\
return result; \
}
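/*
 * A note on the retry loop above (a sketch of the flow, not part of
 * the original header): s32c1i implements compare-and-swap. The loop
 * loads the old value, arms the scompare1 special register with it,
 * computes old <op> i, and s32c1i then stores the result only if the
 * memory word still equals scompare1. If another CPU changed the word
 * in between, %0 comes back different from %1 and the bne retries.
 */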
#else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
__asm__ __volatile__( \
" rsil a15, "__stringify(LOCKLEVEL)"\n"\
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval) \
: "a" (i), "a" (v) \
: "a15", "memory" \
); \
}
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(LOCKLEVEL)"\n" \
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
" wsr a15, ps\n" \
" rsync\n" \
: "=&a" (vval) \
: "a" (i), "a" (v) \
: "a15", "memory" \
); \
\
return vval; \
}
#endif /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
#define atomic_inc(v) atomic_add(1,(v))
/**
* atomic_inc_return - increment atomic variable and return result
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1 and returns the new value.
*/
#define atomic_inc_return(v) atomic_add_return(1,(v))
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
#define atomic_dec(v) atomic_sub(1,(v))
/**
* atomic_dec_return - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
#define atomic_dec_return(v) atomic_sub_return(1,(v))
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c;
}
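/*
 * Illustrative sketch (not part of this header): the generic kernel
 * headers build atomic_inc_not_zero() on top of __atomic_add_unless()
 * roughly like this, incrementing only while the counter is non-zero:
 *
 * static inline int atomic_inc_not_zero(atomic_t *v)
 * {
 * return __atomic_add_unless(v, 1, 0) != 0;
 * }
 */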
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (~mask), "a" (v)
: "memory"
);
#else
unsigned int all_f = -1;
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" xor %1, %4, %3\n"
" and %0, %0, %4\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
: "a15", "memory"
);
#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (mask), "a" (v)
: "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" or %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (mask), "a" (v)
: "a15", "memory"
);
#endif
}
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */

View file

@@ -0,0 +1,21 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2012 Tensilica Inc.
*/
#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H
#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier()
#define wmb() mb()
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
#include <asm-generic/barrier.h>
#endif /* _XTENSA_SYSTEM_H */

View file

@@ -0,0 +1,237 @@
/*
* include/asm-xtensa/bitops.h
*
* Atomic operations that C can't guarantee us. Useful for resource counting etc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_BITOPS_H
#define _XTENSA_BITOPS_H
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <asm/processor.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm-generic/bitops/non-atomic.h>
#if XCHAL_HAVE_NSA
static inline unsigned long __cntlz (unsigned long x)
{
int lz;
asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
return lz;
}
/*
* ffz: Find first zero in word. Undefined if no zero exists.
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
static inline int ffz(unsigned long x)
{
return 31 - __cntlz(~x & -~x);
}
/*
* __ffs: Find first bit set in word. Return 0 for bit 0
*/
static inline int __ffs(unsigned long x)
{
return 31 - __cntlz(x & -x);
}
/*
* ffs: Find first bit set in word. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline int ffs(unsigned long x)
{
return 32 - __cntlz(x & -x);
}
/*
* fls: Find last (most-significant) bit set in word.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline int fls (unsigned int x)
{
return 32 - __cntlz(x);
}
/**
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
return 31 - __cntlz(word);
}
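/*
 * Worked example (illustrative): for x = 0x8 (binary 1000), the
 * nsau-based helpers above give
 *
 * ffz(x) = 0 (first zero bit, zero-based)
 * __ffs(x) = 3 (first set bit, zero-based)
 * ffs(x) = 4 (first set bit, one-based, libc convention)
 * fls(x) = 4 (last set bit, one-based)
 * __fls(x) = 3 (last set bit, zero-based)
 */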
#else
/* Use the generic implementation if we don't have the nsa/nsau instructions. */
# include <asm-generic/bitops/ffs.h>
# include <asm-generic/bitops/__ffs.h>
# include <asm-generic/bitops/ffz.h>
# include <asm-generic/bitops/fls.h>
# include <asm-generic/bitops/__fls.h>
#endif
#include <asm-generic/bitops/fls64.h>
#if XCHAL_HAVE_S32C1I
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
}
static inline void change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
}
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (~mask), "a" (p)
: "memory");
return tmp & mask;
}
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long tmp, value;
unsigned long mask = 1UL << (bit & 31);
p += bit >> 5;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" xor %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp), "=&a" (value)
: "a" (mask), "a" (p)
: "memory");
return tmp & mask;
}
#else
#include <asm-generic/bitops/atomic.h>
#endif /* XCHAL_HAVE_S32C1I */
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_BITOPS_H */

View file

@@ -0,0 +1,50 @@
/*
* include/asm-xtensa/bootparam.h
*
* Definition of the Linux/Xtensa boot parameter structure
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* (Concept borrowed from the 68K port)
*/
#ifndef _XTENSA_BOOTPARAM_H
#define _XTENSA_BOOTPARAM_H
#define BP_VERSION 0x0001
#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string) */
#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
#define BP_TAG_FDT 0x1006 /* flat device tree addr */
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */
#ifndef __ASSEMBLY__
/* All records are aligned to 4 bytes */
typedef struct bp_tag {
unsigned short id; /* tag id */
unsigned short size; /* size of this record excluding the structure */
unsigned long data[0]; /* data */
} bp_tag_t;
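/*
 * Usage sketch (illustrative, not from this commit): the boot loader
 * passes a chain of these tags, which can be walked like
 *
 * bp_tag_t *tag = first_tag;
 * while (tag->id != BP_TAG_LAST) {
 * ... handle tag->id, tag->size, tag->data ...
 * tag = (bp_tag_t *)((unsigned long)(tag + 1) + tag->size);
 * }
 *
 * 'first_tag' is assumed to come from the boot loader; 'size' excludes
 * the header, and records are aligned to 4 bytes.
 */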
struct bp_meminfo {
unsigned long type;
unsigned long start;
unsigned long end;
};
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
#endif
#endif

View file

@@ -0,0 +1,18 @@
/*
* include/asm-xtensa/bugs.h
*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Xtensa processors don't have any bugs. :)
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
#ifndef _XTENSA_BUGS_H
#define _XTENSA_BUGS_H
static void check_bugs(void) { }
#endif /* _XTENSA_BUGS_H */

View file

@@ -0,0 +1,34 @@
/*
* include/asm-xtensa/cache.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CACHE_H
#define _XTENSA_CACHE_H
#include <variant/core.h>
#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
/* Maximum cache size per way. */
#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
#else
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* _XTENSA_CACHE_H */

View file

@@ -0,0 +1,176 @@
/*
* include/asm-xtensa/cacheasm.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Tensilica Inc.
*/
#include <asm/cache.h>
#include <asm/asmmacro.h>
#include <linux/stringify.h>
/*
* Define cache functions as macros here so that they can be used
* by the kernel and boot loader. We should consider moving them to a
* library that can be linked by both.
*
* Locking
*
* ___unlock_dcache_all
* ___unlock_icache_all
*
* Flushing and invalidating
*
* ___flush_invalidate_dcache_{all|range|page}
* ___flush_dcache_{all|range|page}
* ___invalidate_dcache_{all|range|page}
* ___invalidate_icache_{all|range|page}
*
*/
.macro __loop_cache_all ar at insn size line_width
movi \ar, 0
__loopi \ar, \at, \size, (4 << (\line_width))
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
.macro __loop_cache_range ar as at insn line_width
extui \at, \ar, 0, \line_width
add \as, \as, \at
__loops \ar, \as, \at, \line_width
\insn \ar, 0
__endla \ar, \at, (1 << (\line_width))
.endm
.macro __loop_cache_page ar at insn line_width
__loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
#if XCHAL_DCACHE_LINE_LOCKABLE
.macro ___unlock_dcache_all ar at
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
#endif
#if XCHAL_ICACHE_LINE_LOCKABLE
.macro ___unlock_icache_all ar at
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
.endm
#endif
.macro ___flush_invalidate_dcache_all ar at
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_all ar at
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_all ar at
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_all ar at
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
.endm
.macro ___flush_invalidate_dcache_range ar as at
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_range ar as at
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_range ar as at
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_range ar as at
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
.endm
.macro ___flush_invalidate_dcache_page ar as
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_page ar as
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_page ar as
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_page ar as
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
.endm
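/*
 * Usage sketch (illustrative, not part of this file): invalidating the
 * whole instruction cache from assembly, with a2/a3 as scratch
 * registers, would look like
 *
 * ___invalidate_icache_all a2 a3
 * isync
 *
 * where the isync makes the invalidation visible to instruction fetch.
 */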

View file

@@ -0,0 +1,269 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* Low-level routines for cache flushing.
*
* invalidate data or instruction cache:
*
* __invalidate_icache_all()
* __invalidate_icache_page(adr)
* __invalidate_dcache_page(adr)
* __invalidate_icache_range(from,size)
* __invalidate_dcache_range(from,size)
*
* flush data cache:
*
* __flush_dcache_page(adr)
*
* flush and invalidate data cache:
*
* __flush_invalidate_dcache_all()
* __flush_invalidate_dcache_page(adr)
* __flush_invalidate_dcache_range(from,size)
*
* specials for cache aliasing:
*
* __flush_invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_icache_page_alias(vaddr,paddr)
*/
extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s) do { } while(0)
# define __flush_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
#endif
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
unsigned long phys) { }
#endif
/*
* We have physically tagged caches - nothing to do here -
* unless we have cache aliasing.
*
* Pages can get remapped. Because this might change the 'color' of that page,
* we have to flush the cache before the PTE is changed.
* (see also Documentation/cachetlb.txt)
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page local_flush_cache_page
#endif
#define local_flush_cache_all() \
do { \
__flush_invalidate_dcache_all(); \
__invalidate_icache_all(); \
} while (0)
#define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_vmap(start,end) flush_cache_all()
#define flush_cache_vunmap(start,end) flush_cache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
unsigned long address, unsigned long pfn);
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_vmap(start,end) do { } while (0)
#define flush_cache_vunmap(start,end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#endif
/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end) \
do { \
__flush_dcache_range(start, (end) - (start)); \
__invalidate_icache_range(start,(end) - (start)); \
} while (0)
/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
__flush_dcache_range((unsigned long) dst, len); \
__invalidate_icache_range((unsigned long) dst, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif
#define XTENSA_CACHEBLK_LOG2 29
#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
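/*
 * 1 << 29 is 512 MB: cache attributes are tracked per 512 MB region,
 * and the eight such regions cover the 4 GB address space (hence the
 * 3-bit region selector 7 << 29 in XTENSA_CACHEBLK_MASK).
 */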
#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
u32 r;
asm volatile(" rsr %0, cacheattr" : "=a"(r));
return r;
}
static inline u32 xtensa_get_dtlb1(u32 addr)
{
u32 r = addr & XTENSA_CACHEBLK_MASK;
return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
u32 r;
asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
asm volatile(" dsync");
return r;
}
static inline u32 xtensa_get_cacheattr(void)
{
u32 r = 0;
u32 a = 0;
do {
a -= XTENSA_CACHEBLK_SIZE;
r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
} while (a);
return r;
}
#endif
static inline int xtensa_need_flush_dma_source(u32 addr)
{
return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}
static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
u32 cnt;
if (size) {
cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
while (cnt--) {
asm volatile(" dhwb %0, 0" : : "a"(addr));
addr += XCHAL_DCACHE_LINESIZE;
}
asm volatile(" dsync");
}
}
static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
int cnt;
if (size) {
asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
while (cnt-- > 0) {
asm volatile(" dhi %0, %1" : : "a"(addr),
"n"(XCHAL_DCACHE_LINESIZE));
addr += XCHAL_DCACHE_LINESIZE;
}
asm volatile(" dhwbi %0, %1" : : "a"(addr),
"n"(XCHAL_DCACHE_LINESIZE));
asm volatile(" dsync");
}
}
static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
u32 cnt;
if (size) {
cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
while (cnt--) {
asm volatile(" dhwbi %0, 0" : : "a"(addr));
addr += XCHAL_DCACHE_LINESIZE;
}
asm volatile(" dsync");
}
}
#endif /* _XTENSA_CACHEFLUSH_H */

View file

@@ -0,0 +1,256 @@
/*
* include/asm-xtensa/checksum.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CHECKSUM_H
#define _XTENSA_CHECKSUM_H
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <variant/core.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the access_ok().
*/
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
}
/*
* Fold a partial checksum
*/
static __inline__ __sum16 csum_fold(__wsum sum)
{
unsigned int __dummy;
__asm__("extui %1, %0, 16, 16\n\t"
"extui %0 ,%0, 0, 16\n\t"
"add %0, %0, %1\n\t"
"slli %1, %0, 16\n\t"
"add %0, %0, %1\n\t"
"extui %0, %0, 16, 16\n\t"
"neg %0, %0\n\t"
"addi %0, %0, -1\n\t"
"extui %0, %0, 0, 16\n\t"
: "=r" (sum), "=&r" (__dummy)
: "0" (sum));
return (__force __sum16)sum;
}
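/*
 * Roughly equivalent C for the fold above (a sketch for reference):
 *
 * sum = (sum & 0xffff) + (sum >> 16); add the two halves
 * sum += sum >> 16; fold any carry back in
 * return (__force __sum16)~sum; complement, low 16 bits
 *
 * the neg/addi -1 pair in the asm computes the complement as -x - 1.
 */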
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum, tmp, endaddr;
__asm__ __volatile__(
"sub %0, %0, %0\n\t"
#if XCHAL_HAVE_LOOPS
"loopgtz %2, 2f\n\t"
#else
"beqz %2, 2f\n\t"
"slli %4, %2, 2\n\t"
"add %4, %4, %1\n\t"
"0:\t"
#endif
"l32i %3, %1, 0\n\t"
"add %0, %0, %3\n\t"
"bgeu %0, %3, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"addi %1, %1, 4\n\t"
#if !XCHAL_HAVE_LOOPS
"blt %1, %4, 0b\n\t"
#endif
"2:\t"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
"=&r" (endaddr)
: "1" (iph), "2" (ihl)
: "memory");
return csum_fold(sum);
}
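/*
 * Typical call site (a sketch, not from this file): verifying a
 * received IPv4 header, where ihl is the header length in 32-bit words
 * and the checksum field itself is included in the sum, so a valid
 * header folds to zero:
 *
 * if (ip_fast_csum(iph, iph->ihl) != 0)
 * goto drop;
 */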
static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
#ifdef __XTENSA_EL__
unsigned long len_proto = (len + proto) << 8;
#elif defined(__XTENSA_EB__)
unsigned long len_proto = len + proto;
#else
# error processor byte order undefined!
#endif
__asm__("add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %2\n\t"
"bgeu %0, %2, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %3\n\t"
"bgeu %0, %3, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
unsigned int __dummy;
__asm__("l32i %1, %2, 0\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %2, 4\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %2, 8\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %2, 12\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 0\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 4\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 8\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 12\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %4\n\t"
"bgeu %0, %4, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %5\n\t"
"bgeu %0, %5, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
: "=r" (sum), "=&r" (__dummy)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "memory");
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif

View file

@@ -0,0 +1,162 @@
/*
* Atomic xchg and cmpxchg operations.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
/*
* cmpxchg
*/
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
#if XCHAL_HAVE_S32C1I
__asm__ __volatile__(
" wsr %2, scompare1\n"
" s32c1i %0, %1, 0\n"
: "+a" (new)
: "a" (p), "a" (old)
: "memory"
);
return new;
#else
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: "a15", "memory");
return old;
#endif
}
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
case 4: return __cmpxchg_u32(ptr, old, new);
default: __cmpxchg_called_with_bad_pointer();
return old;
}
}
#define cmpxchg(ptr,o,n) \
({ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof (*(ptr))); \
})
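/*
 * Usage sketch (illustrative): atomically claim a 0 -> 1 transition of
 * a flag word; exactly one racing caller sees the old value 0 and
 * therefore wins:
 *
 * if (cmpxchg(&flag, 0, 1) == 0)
 * ... this caller performed the transition ...
 */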
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
default:
return __cmpxchg_local_generic(ptr, old, new, size);
}
return old;
}
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32
*
* Note that a15 is used here because the register allocation
* done by the compiler is not guaranteed and a window overflow
* may not occur between the rsil and wsr instructions. By using
* a15 in the rsil, the machine is guaranteed to be in a state
* where no register reference will cause an overflow.
*/
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" mov %0, %3\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (m), "a" (val)
: "memory"
);
return result;
#else
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (tmp)
: "a" (m), "a" (val)
: "a15", "memory");
return tmp;
#endif
}
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/*
* This only works if the compiler isn't horribly bad at optimizing.
* gcc-2.5.8 reportedly can't handle this, but I define that one to
* be dead anyway.
*/
extern void __xchg_called_with_bad_pointer(void);
static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return xchg_u32(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
}
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_CMPXCHG_H */

View file

@@ -0,0 +1,177 @@
/*
* include/asm-xtensa/coprocessor.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H
#include <linux/stringify.h>
#include <variant/core.h>
#include <variant/tie.h>
#include <asm/types.h>
#ifdef __ASSEMBLY__
# include <variant/tie-asm.h>
.macro xchal_sa_start a b
.set .Lxchal_pofs_, 0
.set .Lxchal_ofs_, 0
.endm
.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
.set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
.set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
.endm
#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
| XTHAL_SAS_CC \
| XTHAL_SAS_CALR | XTHAL_SAS_CALE )
.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
.if XTREGS_OPT_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
.if XTREGS_OPT_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
#undef _SELECT
#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
| XTHAL_SAS_NOCC \
| XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
.if XTREGS_USER_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
.if XTREGS_USER_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
#undef _SELECT
#endif /* __ASSEMBLY__ */
/*
* XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
*
* XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
*
*/
#define XTENSA_HAVE_COPROCESSOR(x) \
((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
#define XTENSA_HAVE_COPROCESSORS \
(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
#define XTENSA_HAVE_IO_PORT(x) \
(XCHAL_CP_PORT_MASK & (1 << (x)))
#define XTENSA_HAVE_IO_PORTS \
XCHAL_CP_PORT_MASK
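/*
 * Example (illustrative numbers): with XCHAL_CP_MASK = 0x41 and
 * XCHAL_CP_PORT_MASK = 0x40, slot 0 is a real save/restore coprocessor
 * (XTENSA_HAVE_COPROCESSOR(0) is non-zero), while slot 6 is an io-port,
 * so XTENSA_HAVE_IO_PORT(6) is non-zero and XTENSA_HAVE_COPROCESSOR(6)
 * is zero.
 */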
#ifndef __ASSEMBLY__
#if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
/*
* Additional registers.
* We define three types of additional registers:
* ext: extra registers that are used by the compiler
* cpn: optional registers that can be used by a user application
* cpX: coprocessor registers that can only be used if the corresponding
* CPENABLE bit is set.
*/
#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...) \
__REG ## list (cc, abi, type, name, size, align)
#define __REG0(cc,abi,t,name,s,a) __REG0_ ## cc (abi,name)
#define __REG1(cc,abi,t,name,s,a) __REG1_ ## cc (name)
#define __REG2(cc,abi,type,...) __REG2_ ## type (__VA_ARGS__)
#define __REG0_0(abi,name)
#define __REG0_1(abi,name) __REG0_1 ## abi (name)
#define __REG0_10(name) __u32 name;
#define __REG0_11(name) __u32 name;
#define __REG0_12(name)
#define __REG1_0(name) __u32 name;
#define __REG1_1(name)
#define __REG2_0(n,s,a) __u32 name;
#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
#if XTENSA_HAVE_COPROCESSORS
typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
extern void coprocessor_save(void*, int);
extern void coprocessor_load(void*, int);
extern void coprocessor_flush(struct thread_info*, int);
extern void coprocessor_restore(struct thread_info*, int);
extern void coprocessor_release_all(struct thread_info*);
extern void coprocessor_flush_all(struct thread_info*);
static inline void coprocessor_clear_cpenable(void)
{
unsigned long i = 0;
WSR_CPENABLE(i);
}
#endif /* XTENSA_HAVE_COPROCESSORS */
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_COPROCESSOR_H */

View file

@@ -0,0 +1,38 @@
/*
* include/asm-xtensa/current.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CURRENT_H
#define _XTENSA_CURRENT_H
#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct *get_current(void)
{
return current_thread_info()->task;
}
#define current get_current()
#else
#define CURRENT_SHIFT 13
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK
#endif
#endif /* _XTENSA_CURRENT_H */

View file

@@ -0,0 +1,75 @@
/*
* include/asm-xtensa/delay.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
*/
#ifndef _XTENSA_DELAY_H
#define _XTENSA_DELAY_H
#include <asm/timex.h>
#include <asm/param.h>
extern unsigned long loops_per_jiffy;
static inline void __delay(unsigned long loops)
{
if (__builtin_constant_p(loops) && loops < 2)
__asm__ __volatile__ ("nop");
else if (loops >= 2)
/* 2 cycles per loop. */
__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
: "+r" (loops));
}
/* Undefined function to get compile-time error */
void __bad_udelay(void);
void __bad_ndelay(void);
#define __MAX_UDELAY 30000
#define __MAX_NDELAY 30000
static inline void __udelay(unsigned long usecs)
{
unsigned long start = get_ccount();
unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;
/* Note: all variables are unsigned (can wrap around)! */
while (((unsigned long)get_ccount()) - start < cycles)
cpu_relax();
}
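/*
 * Note on the fixed-point math above (a sketch of the reasoning):
 * usecs * ccount_freq / 10^6 is approximated by
 * (usecs * (ccount_freq >> 15)) >> 5, i.e. a division by 2^20 = 1048576
 * instead of 1000000, trading about 4.6% of precision for shift-only
 * arithmetic.
 */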
static inline void udelay(unsigned long usec)
{
if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
__bad_udelay();
else
__udelay(usec);
}
static inline void __ndelay(unsigned long nsec)
{
/*
* Inner shift makes sure multiplication doesn't overflow
* for legitimate nsec values
*/
unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
__delay(cycles);
}
#define ndelay(n) ndelay(n)
static inline void ndelay(unsigned long nsec)
{
if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
__bad_ndelay();
else
__ndelay(nsec);
}
#endif

View file

@@ -0,0 +1,188 @@
/*
* include/asm-xtensa/dma-mapping.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
/*
* DMA-consistent mapping functions.
*/
extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
consistent_sync(ptr, size, direction);
return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
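/*
 * Typical driver usage (a sketch, not from this header):
 *
 * dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 * if (dma_mapping_error(dev, handle))
 * return -EIO;
 * ... start the transfer and wait for completion ...
 * dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */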
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sg++ ) {
BUG_ON(!sg_page(sg));
sg->dma_address = sg_phys(sg);
consistent_sync(sg_virt(sg), sg->length, direction);
}
return nents;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
consistent_sync(sg_virt(sg), sg->length, dir);
}
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
if(!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
consistent_sync(vaddr, size, direction);
}
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif /* _XTENSA_DMA_MAPPING_H */

View file

@@ -0,0 +1,62 @@
/*
* include/asm-xtensa/dma.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_DMA_H
#define _XTENSA_DMA_H
#include <asm/io.h> /* need byte IO */
/*
* This is only to be defined if we have PC-like DMA.
* By default this is not true on an Xtensa processor,
* however on boards with a PCI bus, such functionality
* might be emulated externally.
*
* NOTE: there still exists driver code that assumes
* this is defined, eg. drivers/sound/soundcard.c (as of 2.4).
*/
#define MAX_DMA_CHANNELS 8
/*
* The maximum virtual address to which DMA transfers
* can be performed on this platform.
*
* NOTE: This is board (platform) specific, not processor-specific!
*
* NOTE: This assumes DMA transfers can only be performed on
* the section of physical memory contiguously mapped in virtual
* space for the kernel. For the Xtensa architecture, this
* means the maximum possible size of this DMA area is
* the size of the statically mapped kernel segment
* (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), ie. 128 MB.
*
* NOTE: When the entire KSEG area is DMA capable, we subtract
* one from the max address so that the virt_to_phys() macro
* works correctly on the address (otherwise the address
* enters another area, and virt_to_phys() may not return
* the value desired).
*/
#ifndef MAX_DMA_ADDRESS
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
#endif
/* Reserve and release a DMA channel */
extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif

View file

@@ -0,0 +1,207 @@
/*
* include/asm-xtensa/elf.h
*
* ELF register definitions
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ELF_H
#define _XTENSA_ELF_H
#include <asm/ptrace.h>
#include <asm/coprocessor.h>
/* Xtensa processor ELF architecture-magic number */
#define EM_XTENSA 94
#define EM_XTENSA_OLD 0xABC7
/* Xtensa relocations defined by the ABIs */
#define R_XTENSA_NONE 0
#define R_XTENSA_32 1
#define R_XTENSA_RTLD 2
#define R_XTENSA_GLOB_DAT 3
#define R_XTENSA_JMP_SLOT 4
#define R_XTENSA_RELATIVE 5
#define R_XTENSA_PLT 6
#define R_XTENSA_OP0 8
#define R_XTENSA_OP1 9
#define R_XTENSA_OP2 10
#define R_XTENSA_ASM_EXPAND 11
#define R_XTENSA_ASM_SIMPLIFY 12
#define R_XTENSA_GNU_VTINHERIT 15
#define R_XTENSA_GNU_VTENTRY 16
#define R_XTENSA_DIFF8 17
#define R_XTENSA_DIFF16 18
#define R_XTENSA_DIFF32 19
#define R_XTENSA_SLOT0_OP 20
#define R_XTENSA_SLOT1_OP 21
#define R_XTENSA_SLOT2_OP 22
#define R_XTENSA_SLOT3_OP 23
#define R_XTENSA_SLOT4_OP 24
#define R_XTENSA_SLOT5_OP 25
#define R_XTENSA_SLOT6_OP 26
#define R_XTENSA_SLOT7_OP 27
#define R_XTENSA_SLOT8_OP 28
#define R_XTENSA_SLOT9_OP 29
#define R_XTENSA_SLOT10_OP 30
#define R_XTENSA_SLOT11_OP 31
#define R_XTENSA_SLOT12_OP 32
#define R_XTENSA_SLOT13_OP 33
#define R_XTENSA_SLOT14_OP 34
#define R_XTENSA_SLOT0_ALT 35
#define R_XTENSA_SLOT1_ALT 36
#define R_XTENSA_SLOT2_ALT 37
#define R_XTENSA_SLOT3_ALT 38
#define R_XTENSA_SLOT4_ALT 39
#define R_XTENSA_SLOT5_ALT 40
#define R_XTENSA_SLOT6_ALT 41
#define R_XTENSA_SLOT7_ALT 42
#define R_XTENSA_SLOT8_ALT 43
#define R_XTENSA_SLOT9_ALT 44
#define R_XTENSA_SLOT10_ALT 45
#define R_XTENSA_SLOT11_ALT 46
#define R_XTENSA_SLOT12_ALT 47
#define R_XTENSA_SLOT13_ALT 48
#define R_XTENSA_SLOT14_ALT 49
/* ELF register definitions. This is needed for core dump support. */
typedef unsigned long elf_greg_t;
typedef struct {
elf_greg_t pc;
elf_greg_t ps;
elf_greg_t lbeg;
elf_greg_t lend;
elf_greg_t lcount;
elf_greg_t sar;
elf_greg_t windowstart;
elf_greg_t windowbase;
elf_greg_t threadptr;
elf_greg_t reserved[7+48];
elf_greg_t a[64];
} xtensa_gregset_t;
#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#define ELF_NFPREG 18
typedef unsigned int elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_CORE_COPY_REGS(_eregs, _pregs) \
xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
( (x)->e_machine == EM_XTENSA_OLD ) )
/*
* These are used to set parameters in the core dumps.
*/
#ifdef __XTENSA_EL__
# define ELF_DATA ELFDATA2LSB
#elif defined(__XTENSA_EB__)
# define ELF_DATA ELFDATA2MSB
#else
# error processor byte order undefined!
#endif
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
* For the moment, we have only optimizations for the Intel generations,
* but that could change...
*/
#define ELF_PLATFORM (NULL)
/*
* The Xtensa processor ABI says that when the program starts, a2
* contains a pointer to a function which might be registered using
* `atexit'. This provides a means for the dynamic linker to call
* DT_FINI functions for shared libraries that have been loaded before
* the code runs.
*
* A value of 0 tells us we have no such handler.
*
* We might as well make sure everything else is cleared too (except
* for the stack pointer in a1), just to make things more
* deterministic. Also, clearing a0 terminates debugger backtraces.
*/
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
_r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
_r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
} while (0)
typedef struct {
xtregs_opt_t opt;
xtregs_user_t user;
#if XTENSA_HAVE_COPROCESSORS
xtregs_cp0_t cp0;
xtregs_cp1_t cp1;
xtregs_cp2_t cp2;
xtregs_cp3_t cp3;
xtregs_cp4_t cp4;
xtregs_cp5_t cp5;
xtregs_cp6_t cp6;
xtregs_cp7_t cp7;
#endif
} elf_xtregs_t;
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
struct task_struct;
extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
#endif /* _XTENSA_ELF_H */

View file

@ -0,0 +1,12 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#define fb_pgprotect(...) do {} while (0)
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */

View file

@ -0,0 +1,80 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the start of the consistent memory region upwards.
* Also, this lets us do fail-safe vmalloc(): we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use fixmap_set(idx, phys) to associate
* physical memory with fixmap indices.
*/
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN +
(KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
#endif
__end_of_fixed_addresses
};
#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
#ifndef __ASSEMBLY__
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
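/*
* A short example of the arithmetic above, assuming the 4 kB pages of
* this port: slot n maps at FIXADDR_START + n * 0x1000, so
* fix_to_virt(FIX_KMAP_BEGIN) == FIXADDR_START and
* fix_to_virt(FIX_KMAP_BEGIN + 1) == FIXADDR_START + 0x1000. An
* out-of-range constant index fails the BUILD_BUG_ON at compile time.
*/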
#endif
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
(vaddr) \
)
#endif

View file

@ -0,0 +1,12 @@
#ifndef __ASM_XTENSA_FLAT_H
#define __ASM_XTENSA_FLAT_H
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
#define flat_put_addr_at_rp(rp, val, relval ) put_unaligned(val, rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
#endif /* __ASM_XTENSA_FLAT_H */

View file

@ -0,0 +1,40 @@
/*
* arch/xtensa/include/asm/ftrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Tensilica Inc.
*/
#ifndef _XTENSA_FTRACE_H
#define _XTENSA_FTRACE_H
#include <asm/processor.h>
#ifndef __ASSEMBLY__
#define ftrace_return_address0 ({ unsigned long a0, a1; \
__asm__ __volatile__ ( \
"mov %0, a0\n" \
"mov %1, a1\n" \
: "=r"(a0), "=r"(a1)); \
MAKE_PC_FROM_RA(a0, a1); })
#ifdef CONFIG_FRAME_POINTER
extern unsigned long return_address(unsigned level);
#define ftrace_return_address(n) return_address(n)
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 3
#ifndef __ASSEMBLY__
extern void _mcount(void);
#define mcount _mcount
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _XTENSA_FTRACE_H */

View file

@ -0,0 +1,147 @@
/*
* Atomic futex routines
*
* Based on the PowerPC implementation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2013 TangoTec Ltd.
*
* Baruch Siach <baruch@tkos.co.il>
*/
#ifndef _ASM_XTENSA_FUTEX_H
#define _ASM_XTENSA_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile( \
"1: l32i %0, %2, 0\n" \
insn "\n" \
" wsr %0, scompare1\n" \
"2: s32c1i %1, %2, 0\n" \
" bne %1, %0, 1b\n" \
" movi %1, 0\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"4: .long 3b\n" \
"5: l32r %0, 4b\n" \
" movi %1, %3\n" \
" jx %0\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .long 1b,5b,2b,5b\n" \
" .previous\n" \
: "=&r" (oldval), "=&r" (ret) \
: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
: "memory")
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
#if !XCHAL_HAVE_S32C1I
return -ENOSYS;
#endif
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (ret)
return ret;
switch (cmp) {
case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
case FUTEX_OP_CMP_NE: return (oldval != cmparg);
case FUTEX_OP_CMP_LT: return (oldval < cmparg);
case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
case FUTEX_OP_CMP_GT: return (oldval > cmparg);
}
return -ENOSYS;
}
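/*
* A sketch of the operand encoding unpacked above: for
* FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) userspace packs the
* opcode into bits 28..31, the comparison into bits 24..27, oparg (1)
* into bits 12..23 and cmparg (0) into bits 0..11; the shift pairs
* above recover those fields, sign-extending oparg and cmparg.
*/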
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
#if !XCHAL_HAVE_S32C1I
return -ENOSYS;
#endif
__asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n"
"1: l32i %1, %3, 0\n"
" mov %0, %5\n"
" wsr %1, scompare1\n"
"2: s32c1i %0, %3, 0\n"
"3:\n"
" .section .fixup,\"ax\"\n"
" .align 4\n"
"4: .long 3b\n"
"5: l32r %1, 4b\n"
" movi %0, %6\n"
" jx %1\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
" .long 1b,5b,2b,5b\n"
" .previous\n"
: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
: "memory");
*uval = prev;
return ret;
}
#endif /* __KERNEL__ */
#endif /* _ASM_XTENSA_FUTEX_H */

View file

@ -0,0 +1,4 @@
#ifndef __LINUX_GPIO_H
#warning Include linux/gpio.h instead of asm/gpio.h
#include <linux/gpio.h>
#endif

View file

@ -0,0 +1,95 @@
/*
* include/asm-xtensa/highmem.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
* Copyright (C) 2014 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
#include <linux/wait.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
#define PKMAP_BASE ((FIXADDR_START - \
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define kmap_prot PAGE_KERNEL_EXEC
#if DCACHE_WAY_SIZE > PAGE_SIZE
#define get_pkmap_color get_pkmap_color
static inline int get_pkmap_color(struct page *page)
{
return DCACHE_ALIAS(page_to_phys(page));
}
extern unsigned int last_pkmap_nr_arr[];
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
last_pkmap_nr_arr[color] =
(last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
return last_pkmap_nr_arr[color] + color;
}
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
return pkmap_nr < DCACHE_N_COLORS;
}
static inline int get_pkmap_entries_count(unsigned int color)
{
return LAST_PKMAP / DCACHE_N_COLORS;
}
extern wait_queue_head_t pkmap_map_wait_arr[];
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
return pkmap_map_wait_arr + color;
}
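/*
* Worked example, assuming a 16 kB direct-mapped way and 4 kB pages
* (so DCACHE_N_COLORS == 4): the pkmap area is carved into four
* interleaved pools, get_next_pkmap_nr() always advances by
* DCACHE_N_COLORS within its pool, and every slot it returns has the
* same cache color as the pages it is allowed to map.
*/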
#endif
extern pte_t *pkmap_page_table;
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
static inline void flush_cache_kmaps(void)
{
flush_cache_all();
}
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void kmap_init(void);
#endif

View file

@ -0,0 +1,14 @@
/*
* include/asm-xtensa/hw_irq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HW_IRQ_H
#define _XTENSA_HW_IRQ_H
#endif

View file

@ -0,0 +1,163 @@
/*
* arch/xtensa/include/asm/initialize_mmu.h
*
* Initializes MMU:
*
* For the new V3 MMU we remap the TLB from virtual == physical
* to the standard Linux mapping used in earlier MMUs.
*
* For the MMU we also support a new configuration register that
* specifies how the S32C1I instruction operates with the cache
* controller.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2008 - 2012 Tensilica, Inc.
*
* Marc Gauthier <marc@tensilica.com>
* Pete Delaney <piet@tensilica.com>
*/
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
#include <asm/pgtable.h>
#include <asm/vectors.h>
#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#ifdef __ASSEMBLY__
#define XTENSA_HWVERSION_RC_2009_0 230000
.macro initialize_mmu
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
* We have an Atomic Operation Control (ATOMCTL) register; initialize it.
* For details see Documentation/xtensa/atomctl.txt
*/
#if XCHAL_DCACHE_IS_COHERENT
movi a3, 0x25 /* For SMP/MX -- internal for writeback,
* RCW otherwise
*/
#else
movi a3, 0x29 /* non-MX -- most cores use standard memory
* controllers, which usually can't use RCW
*/
#endif
wsr a3, atomctl
#endif /* XCHAL_HAVE_S32C1I &&
* (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
*/
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
* Have MMU v3
*/
#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif
movi a1, 0
_call0 1f
_j 2f
.align 4
1: movi a2, 0x10000000
movi a3, 0x18000000
add a2, a2, a0
9: bgeu a2, a3, 9b /* PC is out of the expected range */
/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
movi a2, 0x40000006
idtlb a2
iitlb a2
isync
/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
* and jump to the new mapping.
*/
srli a3, a0, 27
slli a3, a3, 27
addi a3, a3, CA_BYPASS
addi a7, a2, -1
wdtlb a3, a7
witlb a3, a7
isync
slli a4, a0, 5
srli a4, a4, 5
addi a5, a2, -6
add a4, a4, a5
jx a4
/* Step 3: unmap everything other than current area.
* Start at 0x60000000, wrap around, and end with 0x20000000
*/
2: movi a4, 0x20000000
add a5, a2, a4
3: idtlb a5
iitlb a5
add a5, a5, a4
bne a5, a2, 3b
/* Step 4: Setup MMU with the old V2 mappings. */
movi a6, 0x01000000
wsr a6, ITLBCFG
wsr a6, DTLBCFG
isync
movi a5, 0xd0000005
movi a4, CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
movi a5, 0xd8000005
movi a4, CA_BYPASS
wdtlb a4, a5
witlb a4, a5
movi a5, XCHAL_KIO_CACHED_VADDR + 6
movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
movi a5, XCHAL_KIO_BYPASS_VADDR + 6
movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
wdtlb a4, a5
witlb a4, a5
isync
/* Jump to self, using MMU v2 mappings. */
movi a4, 1f
jx a4
1:
movi a2, VECBASE_RESET_VADDR
wsr a2, vecbase
/* Step 5: remove temporary mapping. */
idtlb a7
iitlb a7
isync
movi a0, 0
wsr a0, ptevaddr
rsync
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
.endm
#endif /*__ASSEMBLY__*/
#endif /* _XTENSA_INITIALIZE_MMU_H */

View file

@ -0,0 +1,88 @@
/*
* include/asm-xtensa/io.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H
#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/vectors.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
#define IO_SPACE_LIMIT ~0
#ifdef CONFIG_MMU
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
{
return xtensa_kio_paddr;
}
#endif
/*
* Return the virtual address for the specified bus memory.
* Note that we currently don't support any address outside the KIO segment.
*/
static inline void __iomem *ioremap_nocache(unsigned long offset,
unsigned long size)
{
if (offset >= XCHAL_KIO_PADDR
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else
BUG();
}
static inline void __iomem *ioremap_cache(unsigned long offset,
unsigned long size)
{
if (offset >= XCHAL_KIO_PADDR
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else
BUG();
}
#define ioremap_wc ioremap_nocache
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
static inline void iounmap(volatile void __iomem *addr)
{
}
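/*
* A minimal usage sketch; the 0x50000 offset into the KIO range is a
* made-up example device, not a real address:
*/
#if 0	/* illustrative only, never built */
static u32 example_read_reg(void)
{
	void __iomem *base =
		ioremap_nocache(XCHAL_KIO_PADDR + 0x50000, 0x1000);
	u32 val = readl(base);	/* uncached (bypass) MMIO read */

	iounmap(base);		/* a no-op here, kept for portability */
	return val;
}
#endif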
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif /* CONFIG_MMU */
/*
* Generic I/O
*/
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#endif /* __KERNEL__ */
#include <asm-generic/io.h>
#endif /* _XTENSA_IO_H */

View file

@ -0,0 +1,56 @@
/*
* include/asm-xtensa/irq.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IRQ_H
#define _XTENSA_IRQ_H
#include <linux/init.h>
#include <platform/hardware.h>
#include <variant/core.h>
#ifdef CONFIG_VARIANT_IRQ_SWITCH
#include <variant/irq.h>
#else
static inline void variant_irq_enable(unsigned int irq) { }
static inline void variant_irq_disable(unsigned int irq) { }
#endif
#ifndef VARIANT_NR_IRQS
# define VARIANT_NR_IRQS 0
#endif
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
#else
void variant_init_irq(void) __init;
#endif
static __inline__ int irq_canonicalize(int irq)
{
return (irq);
}
struct irqaction;
struct irq_domain;
void migrate_irqs(void);
int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
unsigned long int_irq, unsigned long ext_irq,
unsigned long *out_hwirq, unsigned int *out_type);
int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
unsigned xtensa_map_ext_irq(unsigned ext_irq);
unsigned xtensa_get_ext_irq_no(unsigned irq);
#endif /* _XTENSA_IRQ_H */

View file

@ -0,0 +1,61 @@
/*
* Xtensa IRQ flags handling functions
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IRQFLAGS_H
#define _XTENSA_IRQFLAGS_H
#include <linux/types.h>
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile("rsr %0, ps" : "=a" (flags));
return flags;
}
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("rsil %0, "__stringify(LOCKLEVEL)
: "=a" (flags) :: "memory");
return flags;
}
static inline void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
static inline void arch_local_irq_enable(void)
{
unsigned long flags;
asm volatile("rsil %0, 0" : "=a" (flags) :: "memory");
}
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile("wsr %0, ps; rsync"
:: "a" (flags) : "memory");
}
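/*
* The canonical pairing of these helpers, which is what the generic
* local_irq_save()/local_irq_restore() macros expand to on this port:
*/
#if 0	/* illustrative only, never built */
static void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* rsil to LOCKLEVEL */
	/* ... code that must not be interrupted ... */
	arch_local_irq_restore(flags);	/* wsr ps + rsync */
}
#endif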
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
#endif
return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
}
static inline bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
#endif /* _XTENSA_IRQFLAGS_H */

View file

@ -0,0 +1,22 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_MMU_H
#define _XTENSA_MMU_H
#ifndef CONFIG_MMU
#include <asm-generic/mmu.h>
#else
typedef struct {
unsigned long asid[NR_CPUS];
unsigned int cpu;
} mm_context_t;
#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_H */

View file

@ -0,0 +1,161 @@
/*
* Switch an MMU context.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H
#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else
#include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/vectors.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>
#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif
DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
/*
* NO_CONTEXT is the invalid ASID value that we don't ever assign to
* any user or kernel context. We use the reserved values in the
* ASID_INSERT macro below.
*
* 0 invalid
* 1 kernel
* 2 reserved
* 3 reserved
* 4...255 available
*/
#define NO_CONTEXT 0
#define ASID_USER_FIRST 4
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
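/*
* Worked example, assuming 8-bit ASIDs: ASID_INSERT(ASID_USER_FIRST)
* is 0x03020001 | (4 << 8) = 0x03020401, i.e. RASID carries the
* reserved ring-3/ring-2 values and the kernel ASID 1 in the other
* bytes, and the current user ASID (here 4) in byte 1, matching the
* table above.
*/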
#ifdef CONFIG_MMU
void init_mmu(void);
#else
static inline void init_mmu(void) { }
#endif
static inline void set_rasid_register (unsigned long val)
{
__asm__ __volatile__ (" wsr %0, rasid\n\t"
" isync\n" : : "a" (val));
}
static inline unsigned long get_rasid_register (void)
{
unsigned long tmp;
__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
return tmp;
}
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
unsigned long asid = cpu_asid_cache(cpu);
if ((++asid & ASID_MASK) == 0) {
/*
* Start new asid cycle; continue counting with next
* incarnation bits, skipping over the reserved ASIDs 0, 1, 2 and 3.
*/
local_flush_tlb_all();
asid += ASID_USER_FIRST;
}
cpu_asid_cache(cpu) = asid;
mm->context.asid[cpu] = asid;
mm->context.cpu = cpu;
}
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
/*
* Check if our ASID is of an older version and thus invalid.
*/
if (mm) {
unsigned long asid = mm->context.asid[cpu];
if (asid == NO_CONTEXT ||
((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
get_new_mmu_context(mm, cpu);
}
}
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
get_mmu_context(mm, cpu);
set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
invalidate_page_directory();
}
/*
* Initialize the context related info for a new mm_struct
* instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
* to -1 says the process has never run on any core.
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
int cpu;
for_each_possible_cpu(cpu) {
mm->context.asid[cpu] = NO_CONTEXT;
}
mm->context.cpu = -1;
return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
int migrated = next->context.cpu != cpu;
/* Flush the icache if we migrated to a new core. */
if (migrated) {
__invalidate_icache_all();
next->context.cpu = cpu;
}
if (migrated || prev != next)
activate_context(next, cpu);
}
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm) do { } while (0)
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
invalidate_page_directory();
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
/* Nothing to do. */
}
#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */

View file

@ -0,0 +1,20 @@
/*
* include/asm-xtensa/module.h
*
* This file contains the module code specific to the Xtensa architecture.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_MODULE_H
#define _XTENSA_MODULE_H
#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
#include <asm-generic/module.h>
#endif /* _XTENSA_MODULE_H */

View file

@ -0,0 +1,9 @@
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#include <asm-generic/mutex-dec.h>

View file

@ -0,0 +1,46 @@
/*
* Xtensa MX interrupt distributor
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_MXREGS_H
#define _XTENSA_MXREGS_H
/*
* RER/WER at, as Read/write external register
* at: value
* as: address
*
* Address Value
* 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
* 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p
* 0180 0...0m..m Clear enable specified by mask (m)
* 0184 0...0m..m Set enable specified by mask (m)
* 0190 0...0x..x 8-bit IPI partition register
* VVVVVVVVVVPPPPUUUUUUUUUUUUUUUUUU
* V (10-bit) Release/Version
* P ( 4-bit) Number of cores - 1
* U (18-bit) ID
* 01a0 i.......i 32-bit ConfigID
* 0200 0...0m..m RunStall core 'n'
* 0220 c Cache coherency enabled
*/
#define MIROUT(irq) (0x000 + (irq))
#define MIPICAUSE(cpu) (0x100 + (cpu))
#define MIPISET(cause) (0x140 + (cause))
#define MIENG 0x180
#define MIENGSET 0x184
#define MIASG 0x188 /* Read Global Assert Register */
#define MIASGSET 0x18c /* Set Global Assert Register */
#define MIPIPART 0x190
#define SYSCFGID 0x1a0
#define MPSCORE 0x200
#define CCON 0x220
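/*
* A minimal sketch of how these offsets are used together with the
* external register helpers set_er()/get_er() from asm/processor.h;
* the IRQ and core numbers are made-up examples:
*/
#if 0	/* illustrative only, never built */
static void example_mx_setup(void)
{
	set_er(1 << 1, MIROUT(5));	/* route external IRQ 5 to core 1 */
	pr_info("ConfigID: %08lx\n", get_er(SYSCFGID));
}
#endif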
#endif /* _XTENSA_MXREGS_H */

View file

@ -0,0 +1,25 @@
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
{
return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
}
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
}
static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
{
}

View file

@ -0,0 +1,192 @@
/*
* include/asm-xtensa/page.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
/*
* Fixed TLB translations in the processor.
*/
#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
#define XCHAL_KSEG_PADDR 0x00000000
#define XCHAL_KSEG_SIZE 0x08000000
/*
* PAGE_SHIFT determines the page size
*/
#define PAGE_SHIFT 12
#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef CONFIG_MMU
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#else
#define PAGE_OFFSET 0
#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif
#define PGTABLE_START 0x80000000
/*
* Cache aliasing:
*
* If the cache size for one way is greater than the page size, we have to
* deal with cache aliasing. The cache index is wider than the page size:
*
* | |cache| cache index
* | pfn |off| virtual address
* |xxxx:X|zzz|
* | : | |
* | \ / | |
* |trans.| |
* | / \ | |
* |yyyy:Y|zzz| physical address
*
* When the page number is translated to the physical page address, the lowest
* bit(s) (X) that are part of the cache index are also translated (Y).
* If this translation changes bit(s) (X), the cache index is also affected,
* thus resulting in a different cache line than before.
* The kernel does not provide a mechanism to ensure that the page color
* (represented by this bit) remains the same when allocated or when pages
* are remapped. When user pages are mapped into kernel space, the color of
* the page might also change.
*
* We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
* to temporarily map a page so we can match the color.
*/
#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER 0
# define DCACHE_ALIAS(a) ((void)(a), 0)
#endif
#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER)
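/*
* Worked example, assuming a 16 kB cache way and the 4 kB PAGE_SIZE
* above: DCACHE_ALIAS_ORDER = 14 - 12 = 2, DCACHE_N_COLORS = 4, and
* DCACHE_ALIAS(a) extracts bits 12..13 of an address as its color.
*/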
#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER 0
#endif
#ifdef __ASSEMBLY__
#define __pgprot(x) (x)
#else
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
/*
* Pure 2^n version of get_order
* Use the 'nsau' instruction if supported by the processor, otherwise
* the generic version.
*/
#if XCHAL_HAVE_NSA
static inline __attribute_const__ int get_order(unsigned long size)
{
int lz;
asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
return 32 - lz;
}
#else
# include <asm-generic/getorder.h>
#endif
struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);
/*
* If we have cache aliasing and writeback caches, we might have to do
* some extra work
*/
#if DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
unsigned long to_paddr, unsigned long from_paddr);
#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg) clear_page(page)
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#endif
/*
* This handles the memory map. We handle pages at
* XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
* These macros are for conversion of kernel address, not user
* addresses.
*/
#define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */

View file

@ -0,0 +1,18 @@
/*
* include/asm-xtensa/param.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <uapi/asm/param.h>
# define HZ CONFIG_HZ /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
#endif /* _XTENSA_PARAM_H */

View file

@ -0,0 +1,88 @@
/*
* include/asm-xtensa/pci-bridge.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PCI_BRIDGE_H
#define _XTENSA_PCI_BRIDGE_H
#ifdef __KERNEL__
struct device_node;
struct pci_controller;
/*
* pciauto_bus_scan() enumerates the pci space.
*/
extern int pciauto_bus_scan(struct pci_controller *, int);
struct pci_space {
unsigned long start;
unsigned long end;
unsigned long base;
};
/*
* Structure of a PCI controller (host bridge)
*/
struct pci_controller {
int index; /* used for pci_controller_num */
struct pci_controller *next;
struct pci_bus *bus;
void *arch_data;
int first_busno;
int last_busno;
struct pci_ops *ops;
volatile unsigned int *cfg_addr;
volatile unsigned char *cfg_data;
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
*/
struct resource io_resource;
struct resource mem_resources[3];
int mem_resource_count;
/* Host bridge I/O and Memory space
* Used for BAR placement algorithms
*/
struct pci_space io_space;
struct pci_space mem_space;
/* Return the interrupt number for a device. */
int (*map_irq)(struct pci_dev*, u8, u8);
};
static inline void pcibios_init_resource(struct resource *res,
unsigned long start, unsigned long end, int flags, char *name)
{
res->start = start;
res->end = end;
res->flags = flags;
res->name = name;
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
}
/* These are used for config access before all the PCI probing has been done. */
int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
int early_read_config_word(struct pci_controller*, int, int, int, u16*);
int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
int early_write_config_byte(struct pci_controller*, int, int, int, u8);
int early_write_config_word(struct pci_controller*, int, int, int, u16);
int early_write_config_dword(struct pci_controller*, int, int, int, u32);
#endif /* __KERNEL__ */
#endif /* _XTENSA_PCI_BRIDGE_H */

View file

@ -0,0 +1,64 @@
/*
* linux/include/asm-xtensa/pci.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PCI_H
#define _XTENSA_PCI_H
#ifdef __KERNEL__
/* Can be used to override the logic in pci_scan_bus for skipping
* already-configured bus numbers - to be used for buggy BIOSes
* or architectures with incomplete PCI setup by the loader
*/
#define pcibios_assign_all_busses() 0
extern struct pci_controller* pcibios_alloc_controller(void);
/* Assume some values. (We should revise them, if necessary) */
#define PCIBIOS_MIN_IO 0x2000
#define PCIBIOS_MIN_MEM 0x10000000
/* Dynamic DMA mapping stuff.
* Xtensa has everything mapped statically like x86.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
struct pci_dev;
/* The PCI address space does equal the physical memory address space.
* The networking and block device layers use this boolean for bounce buffer
* decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
#endif /* __KERNEL__ */
/* Implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
/* Generic PCI */
#include <asm-generic/pci.h>
#endif /* _XTENSA_PCI_H */

View file

@ -0,0 +1,4 @@
#ifndef __ASM_XTENSA_PERF_EVENT_H
#define __ASM_XTENSA_PERF_EVENT_H
#endif /* __ASM_XTENSA_PERF_EVENT_H */

View file

@ -0,0 +1,85 @@
/*
* include/asm-xtensa/pgalloc.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001-2007 Tensilica Inc.
*/
#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H
#ifdef __KERNEL__
#include <linux/highmem.h>
#include <linux/slab.h>
/*
* Allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_populate_kernel(mm, pmdp, ptep) \
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *ptep;
int i;
ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (!ptep)
return NULL;
for (i = 0; i < 1024; i++)
pte_clear(NULL, 0, ptep + i);
return ptep;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long addr)
{
pte_t *pte;
struct page *page;
pte = pte_alloc_one_kernel(mm, addr);
if (!pte)
return NULL;
page = virt_to_page(pte);
if (!pgtable_page_ctor(page)) {
__free_page(page);
return NULL;
}
return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */

View file

@ -0,0 +1,448 @@
/*
* include/asm-xtensa/pgtable.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
/*
* We only use two ring levels, user and kernel space.
*/
#define USER_RING 1 /* user ring level */
#define KERNEL_RING 0 /* kernel ring level */
/*
* The Xtensa architecture port of Linux has a two-level page table system,
* i.e. the logical three-level Linux page table layout is folded.
* Each task has the following memory page tables:
*
* PGD table (page directory), ie. 3rd-level page table:
* One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
* (Architectures that don't have the PMD folded point to the PMD tables)
*
* The pointer to the PGD table for a given task can be retrieved from
* the task structure (struct task_struct*) t, e.g. current():
* (t->mm ? t->mm : t->active_mm)->pgd
*
* PMD tables (page middle-directory), ie. 2nd-level page tables:
* Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
*
* PTE tables (page table entry), ie. 1st-level page tables:
* One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
* invalid_pte_table for absent mappings.
*
* The individual pages are 4 kB in size, with a special page for the
* empty_zero_page.
*/
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
* Entries per page directory level: we use two-level, so
* we don't really have any PMD directory physically.
*/
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
/*
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
*/
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
#else
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif
/*
* For the Xtensa architecture, the PTE layout is as follows:
*
* 31------12 11 10-9 8-6 5-4 3-2 1-0
* +-----------------------------------------+
* | | Software | HARDWARE |
* | PPN | ADW | RI |Attribute|
* +-----------------------------------------+
* pte_none | MBZ | 01 | 11 | 00 |
* +-----------------------------------------+
* present | PPN | 0 | 00 | ADW | RI | CA | wx |
* +- - - - - - - - - - - - - - - - - - - - -+
* (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
* +-----------------------------------------+
* swap | index | type | 01 | 11 | 00 |
* +- - - - - - - - - - - - - - - - - - - - -+
* file | file offset | 01 | 11 | 10 |
* +-----------------------------------------+
*
* For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
* +-----------------------------------------+
* present | PPN | 0 | 00 | ADW | RI | CA | w1 |
* +-----------------------------------------+
* (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
* +-----------------------------------------+
*
* Legend:
* PPN Physical Page Number
* ADW software: accessed (young) / dirty / writable
* RI ring (0=privileged, 1=user, 2 and 3 are unused)
* CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
* (11 is invalid and used to mark pages that are not present)
* w page is writable (hw)
* x page is executable (hw)
* index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
* (note that the index is always non-zero)
* type swap type (5 bits -> 32 types)
* file offset 26-bit offset into the file, in increments of PAGE_SIZE
*
* Notes:
* - (PROT_NONE) is a special case of 'present' but causes an exception for
* any access (read, write, and execute).
* - 'multihit-exception' has the highest priority of all MMU exceptions,
* so the ring must be set to 'RING_USER' even for 'non-present' pages.
* - on older hardware, the executable flag was not supported and
* used as a 'valid' flag, so it needs to be always set.
* - we need to keep track of certain flags in software (dirty and young);
* to do this, we use write exceptions and have a separate software w-flag.
* - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
*/
#define _PAGE_ATTRIB_MASK 0xf
#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
#define _PAGE_CA_WB (1<<2) /* write-back */
#define _PAGE_CA_WT (2<<2) /* write-through */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_CA_INVALID (3<<2)
/* We use invalid attribute values to distinguish special pte entries */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif
#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */
#define _PAGE_USER (1<<4) /* user access (ring=1) */
/* Software */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6) /* software: page writable */
#define _PAGE_DIRTY (1<<7) /* software: page dirty */
#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
#ifdef CONFIG_MMU
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif
#else /* no mmu */
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)
#endif
/*
* On certain configurations of Xtensa MMUs (e.g. the initial Linux config),
* the MMU can't do page protection for execute, and considers that the same as
* read. Also, write permissions may imply read permissions.
* What follows is the closest we can get by reasonable means...
* See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/
#define __P000 PAGE_NONE /* private --- */
#define __P001 PAGE_READONLY /* private --r */
#define __P010 PAGE_COPY /* private -w- */
#define __P011 PAGE_COPY /* private -wr */
#define __P100 PAGE_READONLY_EXEC /* private x-- */
#define __P101 PAGE_READONLY_EXEC /* private x-r */
#define __P110 PAGE_COPY_EXEC /* private xw- */
#define __P111 PAGE_COPY_EXEC /* private xwr */
#define __S000 PAGE_NONE /* shared --- */
#define __S001 PAGE_READONLY /* shared --r */
#define __S010 PAGE_SHARED /* shared -w- */
#define __S011 PAGE_SHARED /* shared -wr */
#define __S100 PAGE_READONLY_EXEC /* shared x-- */
#define __S101 PAGE_READONLY_EXEC /* shared x-r */
#define __S110 PAGE_SHARED_EXEC /* shared xw- */
#define __S111 PAGE_SHARED_EXEC /* shared xwr */
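/*
* Example of how these rows are reached: a private PROT_READ|PROT_WRITE
* mmap() indexes protection_map[] as __P011, i.e. PAGE_COPY, so the
* page is mapped without hardware write permission and the first write
* faults, letting the kernel do copy-on-write and set the software
* dirty/writable bits.
*/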
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif
static inline void pgtable_cache_init(void) { }
/*
* The pmd contains the kernel virtual address of the pte page.
*/
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
/*
* pte status.
*/
# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte) \
(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
|| ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep) \
do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)
{ return pte; }
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
/*
* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif
}
struct mm_struct;
static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
update_pte(ptep, pteval);
}
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
update_pte(ptep, pteval);
}
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
*pmdp = pmdval;
}
struct vm_area_struct;
static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pte_t pte = *ptep;
if (!pte_young(pte))
return 0;
update_pte(ptep, pte_mkold(pte));
return 1;
}
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
pte_clear(mm, addr, ptep);
return pte;
}
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
update_pte(ptep, pte_wrprotect(pte));
}
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* to find an entry in a page-table-directory */
#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
#define pte_unmap(pte) do { } while (0)
/*
* Encode and decode a swap and file entry.
*/
#define SWP_TYPE_BITS 5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
#define __swp_offset(entry) ((entry).val >> 11)
#define __swp_entry(type,offs) \
((swp_entry_t){((type) << 6) | ((offs) << 11) | \
_PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define PTE_FILE_MAX_BITS 26
#define pte_to_pgoff(pte) (pte_val(pte) >> 6)
#define pgoff_to_pte(off) \
((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
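/*
* Worked example: __swp_entry(3, 0x10) builds (3 << 6) | (0x10 << 11) |
* _PAGE_CA_INVALID | _PAGE_USER = 0x80dc. The CA field is the invalid
* attribute, so pte_present() above rejects it, while __swp_type() and
* __swp_offset() recover the original 3 and 0x10.
*/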
#endif /* !defined (__ASSEMBLY__) */
#ifdef __ASSEMBLY__
/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
* _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
* _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
* _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
*
* Note: We require an additional temporary register which can be the same as
* the register that holds the address.
*
* ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
*
*/
#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
_PGD_INDEX(tmp, adr); \
addx4 mm, tmp, mm
#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
srli pmd, pmd, PAGE_SHIFT; \
slli pmd, pmd, PAGE_SHIFT; \
addx4 pmd, tmp, pmd
#else
#define kern_addr_valid(addr) (1)
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t *ptep);
typedef pte_t *pte_addr_t;
#endif /* !defined (__ASSEMBLY__) */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
/* We provide our own get_unmapped_area to cope with
* SHM area cache aliasing for userland.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#include <asm-generic/pgtable.h>
#endif /* _XTENSA_PGTABLE_H */

View file

@ -0,0 +1,72 @@
/*
* Platform specific functions
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PLATFORM_H
#define _XTENSA_PLATFORM_H
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/bootparam.h>
/*
* platform_init is called before the mmu is initialized to give the
* platform an early hook-up. bp_tag_t is a list of configuration tags
* passed from the boot-loader.
*/
extern void platform_init(bp_tag_t*);
/*
* platform_setup is called from setup_arch with a pointer to the command-line
* string.
*/
extern void platform_setup (char **);
/*
* platform_restart is called to restart the system.
*/
extern void platform_restart (void);
/*
* platform_halt is called to stop the system and halt.
*/
extern void platform_halt (void);
/*
* platform_power_off is called to stop the system and power it off.
*/
extern void platform_power_off (void);
/*
* platform_idle is called from the idle function.
*/
extern void platform_idle (void);
/*
* platform_heartbeat is called every HZ
*/
extern void platform_heartbeat (void);
/*
* platform_pcibios_init is called to allow the platform to setup the pci bus.
*/
extern void platform_pcibios_init (void);
/*
* platform_pcibios_fixup allows the platform to modify the PCI configuration.
*/
extern int platform_pcibios_fixup (void);
/*
* platform_calibrate_ccount calibrates the CPU clock frequency
* (CONFIG_XTENSA_CALIBRATE)
*/
extern void platform_calibrate_ccount (void);
#endif /* _XTENSA_PLATFORM_H */

View file

@ -0,0 +1,216 @@
/*
* include/asm-xtensa/processor.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H
#include <variant/core.h>
#include <platform/hardware.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>
/* Assertions. */
#if (XCHAL_HAVE_WINDOWED != 1)
# error Linux requires the Xtensa Windowed Registers Option.
#endif
#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
/*
* User space process size: 1 GB.
* Windowed call ABI requires caller and callee to be located within the same
* 1 GB region. The C compiler places trampoline code on the stack for sources
* that take the address of a nested C function (a feature used by glibc), so
* the 1 GB requirement applies to the stack as well.
*/
#ifdef CONFIG_MMU
#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
#else
#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
/*
* General exception cause assigned to debug exceptions. Debug exceptions go
* to their own vector, rather than the general exception vectors (user,
* kernel, double); and their specific causes are reported via DEBUGCAUSE
* rather than EXCCAUSE. However it is sometimes convenient to redirect debug
* exceptions to the general exception mechanism. To do this, an otherwise
* unused EXCCAUSE value was assigned to debug exceptions for this purpose.
*/
#define EXCCAUSE_MAPPED_DEBUG 63
/*
* We use DEPC also as a flag to distinguish between double and regular
* exceptions. For performance reasons, DEPC might contain the value of
* EXCCAUSE for regular exceptions, so we use this definition to mark a
* valid double exception address.
* (Note: We use it in bgeui, so it should be 64, 128, or 256)
*/
#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
/* LOCKLEVEL defines the interrupt level that masks all
* general-purpose interrupts.
*/
#define LOCKLEVEL XCHAL_EXCM_LEVEL
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
* registers
*/
#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */
#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
#ifndef __ASSEMBLY__
/* Build a valid return address for the specified call winsize.
* winsize must be 1 (call4), 2 (call8), or 3 (call12)
*/
#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
/* Convert return address to a valid pc
* Note: We assume that the stack pointer is in the same 1GB ranges as the ra
*/
#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
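/*
* Worked example: for an address 0xd0001234 and a call8 window size,
* MAKE_RA_FOR_CALL(0xd0001234, 2) = 0x10001234 | (2 << 30) =
* 0x90001234; with a stack pointer in the same 0xc0000000 GB,
* MAKE_PC_FROM_RA(0x90001234, sp) gives 0x10001234 | 0xc0000000 =
* 0xd0001234 again.
*/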
typedef struct {
unsigned long seg;
} mm_segment_t;
struct thread_struct {
/* kernel's return address and stack pointer for context switching */
unsigned long ra; /* kernel's a0: return address and window call size */
unsigned long sp; /* kernel's a1: stack pointer */
mm_segment_t current_ds; /* see uaccess.h for example uses */
/* struct xtensa_cpuinfo info; */
unsigned long bad_vaddr; /* last user fault */
unsigned long bad_uaddr; /* last kernel fault accessing user space */
unsigned long error_code;
unsigned long ibreak[XCHAL_NUM_IBREAK];
unsigned long dbreaka[XCHAL_NUM_DBREAK];
unsigned long dbreakc[XCHAL_NUM_DBREAK];
/* Make the structure 16-byte aligned. */
int align[0] __attribute__ ((aligned(16)));
};
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
#define INIT_THREAD \
{ \
ra: 0, \
sp: sizeof(init_stack) + (long) &init_stack, \
current_ds: {0}, \
/*info: {0}, */ \
bad_vaddr: 0, \
bad_uaddr: 0, \
error_code: 0, \
}
/*
* Do necessary setup to start up a newly executed thread.
* Note: We set up PS as if we did a call4 to the new pc.
* set_thread_state in signal.c depends on it.
*/
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
(1 << PS_CALLINC_SHIFT) | \
(USER_RING << PS_RING_SHIFT) | \
(1 << PS_UM_BIT) | \
(1 << PS_EXCM_BIT))
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
memset(regs, 0, sizeof(*regs)); \
regs->pc = new_pc; \
regs->ps = USER_PS_VALUE; \
regs->areg[1] = new_sp; \
regs->areg[0] = 0; \
regs->wmask = 1; \
regs->depc = 0; \
regs->windowbase = 0; \
regs->windowstart = 1;
/* Forward declaration */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
#define forget_segments() do { } while (0)
#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/* Special register access. */
#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
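/*
 * Usage sketch (illustrative): reading and writing the EXCSAVE1 special
 * register through the helpers above.
 *
 *	unsigned int tmp = get_sr(excsave1);
 *	set_sr(tmp, excsave1);
 */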
#ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0
#endif
#if XCHAL_HAVE_EXTERN_REGS
static inline void set_er(unsigned long value, unsigned long addr)
{
asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
}
static inline unsigned long get_er(unsigned long addr)
{
register unsigned long value;
asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
return value;
}
#endif /* XCHAL_HAVE_EXTERN_REGS */
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_PROCESSOR_H */

View file

@ -0,0 +1,84 @@
/*
* include/asm-xtensa/ptrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
#include <asm/coprocessor.h>
/*
* This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry.
*/
struct pt_regs {
unsigned long pc; /* 4 */
unsigned long ps; /* 8 */
unsigned long depc; /* 12 */
unsigned long exccause; /* 16 */
unsigned long excvaddr; /* 20 */
unsigned long debugcause; /* 24 */
unsigned long wmask; /* 28 */
unsigned long lbeg; /* 32 */
unsigned long lend; /* 36 */
unsigned long lcount; /* 40 */
unsigned long sar; /* 44 */
unsigned long windowbase; /* 48 */
unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */
unsigned long icountlevel; /* 60 */
unsigned long scompare1; /* 64 */
unsigned long threadptr; /* 68 */
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
/* Make sure the areg field is 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16)));
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
unsigned long areg[16];
};
#include <variant/core.h>
# define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \
(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
(regs)->areg[1]))
# ifndef CONFIG_SMP
# define profile_pc(regs) instruction_pointer(regs)
# else
# define profile_pc(regs) \
({ \
in_lock_functions(instruction_pointer(regs)) ? \
return_pointer(regs) : instruction_pointer(regs); \
})
# endif
#define user_stack_pointer(regs) ((regs)->areg[1])
#else /* __ASSEMBLY__ */
# include <asm/asm-offsets.h>
#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_PTRACE_H */

View file

@ -0,0 +1,113 @@
/*
* Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2.1 of the GNU Lesser General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
* USA.
*/
#ifndef _XTENSA_REGS_H
#define _XTENSA_REGS_H
/* Special registers. */
#define SREG_MR 32
#define SREG_IBREAKA 128
#define SREG_DBREAKA 144
#define SREG_DBREAKC 160
#define SREG_EPC 176
#define SREG_EPS 192
#define SREG_EXCSAVE 208
#define SREG_CCOMPARE 240
#define SREG_MISC 244
/* EXCCAUSE register fields */
#define EXCCAUSE_EXCCAUSE_SHIFT 0
#define EXCCAUSE_EXCCAUSE_MASK 0x3F
#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
#define EXCCAUSE_SYSTEM_CALL 1
#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
#define EXCCAUSE_LOAD_STORE_ERROR 3
#define EXCCAUSE_LEVEL1_INTERRUPT 4
#define EXCCAUSE_ALLOCA 5
#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
#define EXCCAUSE_INSTR_DATA_ERROR 12
#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
#define EXCCAUSE_INSTR_ADDR_ERROR 14
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
#define EXCCAUSE_DTLB_MISS 24
#define EXCCAUSE_DTLB_MULTIHIT 25
#define EXCCAUSE_DTLB_PRIVILEGE 26
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_COPROCESSOR0_DISABLED 32
#define EXCCAUSE_COPROCESSOR1_DISABLED 33
#define EXCCAUSE_COPROCESSOR2_DISABLED 34
#define EXCCAUSE_COPROCESSOR3_DISABLED 35
#define EXCCAUSE_COPROCESSOR4_DISABLED 36
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
/* PS register fields. */
#define PS_WOE_BIT 18
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_OWB_SHIFT 8
#define PS_OWB_WIDTH 4
#define PS_OWB_MASK 0x00000F00
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
#define PS_INTLEVEL_WIDTH 4
#define PS_INTLEVEL_MASK 0x0000000F
/* DBREAKCn register fields. */
#define DBREAKC_MASK_BIT 0
#define DBREAKC_MASK_MASK 0x0000003F
#define DBREAKC_LOAD_BIT 30
#define DBREAKC_LOAD_MASK 0x40000000
#define DBREAKC_STOR_BIT 31
#define DBREAKC_STOR_MASK 0x80000000
/* DEBUGCAUSE register fields. */
#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_REGS_H */

View file

@ -0,0 +1,131 @@
/*
* include/asm-xtensa/rwsem.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Largely copied from include/asm-ppc/rwsem.h
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H
#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
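/*
 * Worked example (illustrative), with the 32-bit count shown in hex:
 *
 *	0x00000000	lock is free (RWSEM_UNLOCKED_VALUE)
 *	0x00000003	three active readers
 *	0xffff0001	one active writer (RWSEM_ACTIVE_WRITE_BIAS)
 *
 * A negative count therefore signals a writer or waiters, and the low
 * 16 bits (RWSEM_ACTIVE_MASK) count the active lockers.
 */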
/*
* lock for reading
*/
static inline void __down_read(struct rw_semaphore *sem)
{
if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
smp_wmb();
else
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
int tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
smp_wmb();
return 1;
}
}
return 0;
}
/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
{
int tmp;
tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count));
if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
smp_wmb();
else
rwsem_down_write_failed(sem);
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
int tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
smp_wmb();
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
{
int tmp;
smp_wmb();
tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* unlock after writing
*/
static inline void __up_write(struct rw_semaphore *sem)
{
smp_wmb();
if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count)) < 0)
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
/*
* downgrade write lock to read lock
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
int tmp;
smp_wmb();
tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
if (tmp < 0)
rwsem_downgrade_wake(sem);
}
/*
* implement exchange and add functionality
*/
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
smp_mb();
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
#endif /* _XTENSA_RWSEM_H */

View file

@ -0,0 +1,16 @@
/*
* include/asm-xtensa/segment.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SEGMENT_H
#define _XTENSA_SEGMENT_H
#include <asm/uaccess.h>
#endif /* _XTENSA_SEGMENT_H */

View file

@ -0,0 +1,18 @@
/*
* include/asm-xtensa/serial.h
*
* Configuration details for 8250, 16450, 16550, etc. serial ports
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SERIAL_H
#define _XTENSA_SERIAL_H
#include <platform/serial.h>
#endif /* _XTENSA_SERIAL_H */

View file

@ -0,0 +1,21 @@
/*
* include/asm-xtensa/shmparam.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
#ifndef _XTENSA_SHMPARAM_H
#define _XTENSA_SHMPARAM_H
/*
* Xtensa can have variable size caches, and if
* the size of single way is larger than the page size,
* then we have to start worrying about cache aliasing
* problems.
*/
#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
#endif /* _XTENSA_SHMPARAM_H */

View file

@ -0,0 +1,23 @@
/*
* include/asm-xtensa/signal.h
*
* Swiped from SH.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SIGNAL_H
#define _XTENSA_SIGNAL_H
#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_SIGNAL_H */

View file

@ -0,0 +1,43 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_SMP_H
#define _XTENSA_SMP_H
#ifdef CONFIG_SMP
#define raw_smp_processor_id() (current_thread_info()->cpu)
#define cpu_logical_map(cpu) (cpu)
struct start_info {
unsigned long stack;
};
extern struct start_info start_info;
struct cpumask;
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
void arch_send_call_function_single_ipi(int cpu);
void smp_init_cpus(void);
void secondary_init_irq(void);
void ipi_init(void);
struct seq_file;
void show_ipi_list(struct seq_file *p, int prec);
#ifdef CONFIG_HOTPLUG_CPU
void __cpu_die(unsigned int cpu);
int __cpu_disable(void);
void cpu_die(void);
void cpu_restart(void);
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */
#endif /* _XTENSA_SMP_H */

View file

@ -0,0 +1,205 @@
/*
* include/asm-xtensa/spinlock.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H
/*
* spinlock
*
* There is at most one owner of a spinlock. There are no different
* types of spinlock owners as there are for rwlocks (see below).
*
* When trying to obtain a spinlock, the function "spins" forever, or busy-
* waits, until the lock is obtained. While spinning, presumably some other
* owner will soon give up the spinlock, making it available to others. Use
* the trylock functions to avoid spinning forever.
*
* possible values:
*
* 0 nobody owns the spinlock
* 1 somebody owns the spinlock
*/
#define arch_spin_is_locked(x) ((x)->slock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
"1: movi %0, 1\n"
" s32c1i %0, %1, 0\n"
" bnez %0, 1b\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
" movi %0, 1\n"
" s32c1i %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
return tmp == 0 ? 1 : 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" s32ri %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
}
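/*
 * Usage sketch (illustrative; generic code reaches these through the
 * spin_lock()/spin_unlock() wrappers rather than calling them directly):
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&l);
 *	... critical section ...
 *	arch_spin_unlock(&l);
 */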
/*
* rwlock
*
* Read-write locks are really a more flexible spinlock. They allow
* multiple readers but only one writer. Write ownership is exclusive
* (i.e., all other readers and writers are blocked from ownership while
* there is a write owner). These rwlocks are unfair to writers. Writers
* can be starved for an indefinite time by readers.
*
* possible values:
*
* 0 nobody owns the rwlock
* >0 one or more readers own the rwlock
* (the positive value is the actual number of readers)
* 0x80000000 one writer owns the rwlock, no other writers, no readers
*/
#define arch_write_can_lock(x) ((x)->lock == 0)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
"1: movi %0, 1\n"
" slli %0, %0, 31\n"
" s32c1i %0, %1, 0\n"
" bnez %0, 1b\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
" movi %0, 1\n"
" slli %0, %0, 31\n"
" s32c1i %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
return tmp == 0 ? 1 : 0;
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" s32ri %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
unsigned long result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" bltz %1, 1b\n"
" wsr %1, scompare1\n"
" addi %0, %1, 1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long result;
unsigned long tmp;
__asm__ __volatile__(
" l32i %1, %2, 0\n"
" addi %0, %1, 1\n"
" bltz %0, 1f\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" sub %0, %0, %1\n"
"1:\n"
: "=&a" (result), "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
return result == 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" addi %0, %1, -1\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp1), "=&a" (tmp2)
: "a" (&rw->lock)
: "memory");
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif /* _XTENSA_SPINLOCK_H */

View file

@ -0,0 +1,20 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif

View file

@ -0,0 +1,36 @@
/*
* arch/xtensa/include/asm/stacktrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_STACKTRACE_H
#define _XTENSA_STACKTRACE_H
#include <linux/sched.h>
struct stackframe {
unsigned long pc;
unsigned long sp;
};
static __always_inline unsigned long *stack_pointer(struct task_struct *task)
{
unsigned long *sp;
if (!task || task == current)
__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
else
sp = (unsigned long *)task->thread.sp;
return sp;
}
void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data),
void *data);
#endif /* _XTENSA_STACKTRACE_H */

View file

@ -0,0 +1,121 @@
/*
* include/asm-xtensa/string.h
*
* These trivial string functions are considered part of the public domain.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
/* We should optimize these. See arch/xtensa/lib/strncpy_user.S */
#ifndef _XTENSA_STRING_H
#define _XTENSA_STRING_H
#define __HAVE_ARCH_STRCPY
static inline char *strcpy(char *__dest, const char *__src)
{
register char *__xdest = __dest;
unsigned long __dummy;
__asm__ __volatile__("1:\n\t"
"l8ui %2, %1, 0\n\t"
"s8i %2, %0, 0\n\t"
"addi %1, %1, 1\n\t"
"addi %0, %0, 1\n\t"
"bnez %2, 1b\n\t"
: "=r" (__dest), "=r" (__src), "=&r" (__dummy)
: "0" (__dest), "1" (__src)
: "memory");
return __xdest;
}
#define __HAVE_ARCH_STRNCPY
static inline char *strncpy(char *__dest, const char *__src, size_t __n)
{
register char *__xdest = __dest;
unsigned long __dummy;
if (__n == 0)
return __xdest;
__asm__ __volatile__(
"1:\n\t"
"l8ui %2, %1, 0\n\t"
"s8i %2, %0, 0\n\t"
"addi %1, %1, 1\n\t"
"addi %0, %0, 1\n\t"
"beqz %2, 2f\n\t"
"bne %1, %5, 1b\n"
"2:"
: "=r" (__dest), "=r" (__src), "=&r" (__dummy)
: "0" (__dest), "1" (__src), "r" (__src+__n)
: "memory");
return __xdest;
}
#define __HAVE_ARCH_STRCMP
static inline int strcmp(const char *__cs, const char *__ct)
{
register int __res;
unsigned long __dummy;
__asm__ __volatile__(
"1:\n\t"
"l8ui %3, %1, 0\n\t"
"addi %1, %1, 1\n\t"
"l8ui %2, %0, 0\n\t"
"addi %0, %0, 1\n\t"
"beqz %2, 2f\n\t"
"beq %2, %3, 1b\n"
"2:\n\t"
"sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct));
return __res;
}
#define __HAVE_ARCH_STRNCMP
static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
{
register int __res;
unsigned long __dummy;
__asm__ __volatile__(
"mov %2, %3\n"
"1:\n\t"
"beq %0, %6, 2f\n\t"
"l8ui %3, %1, 0\n\t"
"addi %1, %1, 1\n\t"
"l8ui %2, %0, 0\n\t"
"addi %0, %0, 1\n\t"
"beqz %2, 2f\n\t"
"beqz %3, 2f\n\t"
"beq %2, %3, 1b\n"
"2:\n\t"
"sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct), "r" (__cs+__n));
return __res;
}
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
#endif /* _XTENSA_STRING_H */

View file

@ -0,0 +1,22 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SWITCH_TO_H
#define _XTENSA_SWITCH_TO_H
/*
* switch_to(n) should switch tasks to task nr n, first
* checking that n isn't the current task, in which case it does nothing.
*/
extern void *_switch_to(void *last, void *next);
#define switch_to(prev,next,last) \
do { \
(last) = _switch_to(prev, next); \
} while(0)
#endif /* _XTENSA_SWITCH_TO_H */

View file

@ -0,0 +1,27 @@
/*
* include/asm-xtensa/syscall.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
struct pt_regs;
asmlinkage long xtensa_ptrace(long, long, long, long);
asmlinkage long xtensa_sigreturn(struct pt_regs*);
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
/* Should probably move to linux/syscalls.h */
struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec __user *tsp,
void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
struct timespec __user *tsp,
const sigset_t __user *sigmask,
size_t sigsetsize);

View file

@ -0,0 +1,38 @@
/*
* sysmem-related prototypes.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_SYSMEM_H
#define _XTENSA_SYSMEM_H
#define SYSMEM_BANKS_MAX 31
struct meminfo {
unsigned long start;
unsigned long end;
};
/*
* Bank array is sorted by .start.
* Banks don't overlap and there's at least one page gap
* between adjacent bank entries.
*/
struct sysmem_info {
int nr_banks;
struct meminfo bank[SYSMEM_BANKS_MAX];
};
extern struct sysmem_info sysmem;
int add_sysmem_bank(unsigned long start, unsigned long end);
int mem_reserve(unsigned long, unsigned long, int);
void bootmem_init(void);
void zones_init(void);
#endif /* _XTENSA_SYSMEM_H */

View file

@ -0,0 +1,154 @@
/*
* include/asm-xtensa/thread_info.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
# include <asm/processor.h>
#endif
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants
* must also be changed
*/
#ifndef __ASSEMBLY__
#if XTENSA_HAVE_COPROCESSORS
typedef struct xtregs_coprocessor {
xtregs_cp0_t cp0;
xtregs_cp1_t cp1;
xtregs_cp2_t cp2;
xtregs_cp3_t cp3;
xtregs_cp4_t cp4;
xtregs_cp5_t cp5;
xtregs_cp6_t cp6;
xtregs_cp7_t cp7;
} xtregs_coprocessor_t;
#endif
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
mm_segment_t addr_limit; /* thread address space */
struct restart_block restart_block;
unsigned long cpenable;
/* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t xtregs_cp;
#endif
xtregs_user_t xtregs_user;
};
#else /* !__ASSEMBLY__ */
/* offsets into the thread_info struct for assembly code access */
#define TI_TASK 0x00000000
#define TI_EXEC_DOMAIN 0x00000004
#define TI_FLAGS 0x00000008
#define TI_STATUS 0x0000000C
#define TI_CPU 0x00000010
#define TI_PRE_COUNT 0x00000014
#define TI_ADDR_LIMIT 0x00000018
#define TI_RESTART_BLOCK 0x0000001C
#endif
/*
* macros/functions for gaining access to the thread information structure
*/
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("extui %0,a1,0,13\n\t"
"xor %0, a1, %0" : "=&r" (ti) : );
return ti;
}
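/*
 * Worked example (illustrative): with THREAD_SIZE == 8192 == 1 << 13, a
 * stack pointer of 0xd0012f40 has low 13 bits 0x0f40; xor-ing them away
 * yields 0xd0012000, the base of the stack, where thread_info lives.
 */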
#else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg,sp) \
extui reg, sp, 0, 13; \
xor reg, sp, reg
#endif
/*
* thread information flags
* - these are process state flags that various assembly files may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
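/*
 * Worked example (illustrative): _TIF_WORK_MASK == 0x0000FFFE leaves out
 * bit 0 (TIF_SYSCALL_TRACE), which is handled on the dedicated syscall
 * path, while _TIF_ALLWORK_MASK == 0x0000FFFF picks it up as well.
 */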
/*
* Thread-synchronous status.
*
* This is different from the flags in that nobody else
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
#define THREAD_SIZE 8192 /* (2*PAGE_SIZE) */
#define THREAD_SIZE_ORDER 1
#endif /* __KERNEL__ */
#endif /* _XTENSA_THREAD_INFO_H */

View file

@ -0,0 +1,75 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_TIMEX_H
#define _XTENSA_TIMEX_H
#include <asm/processor.h>
#include <linux/stringify.h>
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
#if XCHAL_NUM_TIMERS > 0 && \
INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
#elif XCHAL_NUM_TIMERS > 1 && \
INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
#elif XCHAL_NUM_TIMERS > 2 && \
INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
# error "Bad timer number for Linux configurations!"
#endif
extern unsigned long ccount_freq;
typedef unsigned long long cycles_t;
#define get_cycles() (0)
void local_timer_setup(unsigned cpu);
/*
* Register access.
*/
#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void)
{
unsigned long ccount;
RSR_CCOUNT(ccount);
return ccount;
}
static inline void set_ccount (unsigned long ccount)
{
WSR_CCOUNT(ccount);
}
static inline unsigned long get_linux_timer (void)
{
unsigned ccompare;
RSR_CCOMPARE(LINUX_TIMER, ccompare);
return ccompare;
}
static inline void set_linux_timer (unsigned long ccompare)
{
WSR_CCOMPARE(LINUX_TIMER, ccompare);
}
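/*
 * Illustrative sketch (not part of the original file): arming the next
 * timer event with the helpers above. The ~10 ms period is an assumption
 * for the example, and ccount_freq must already be calibrated.
 */
static inline void arm_next_tick_example(void)
{
	unsigned long delta = ccount_freq / 100;	/* ~10 ms worth of cycles */

	set_linux_timer(get_ccount() + delta);
}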
#endif /* _XTENSA_TIMEX_H */

View file

@ -0,0 +1,47 @@
/*
* include/asm-xtensa/tlb.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_TLB_H
#define _XTENSA_TLB_H
#include <asm/cache.h>
#include <asm/page.h>
#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
/* Note, read http://lkml.org/lkml/2004/1/15/6 */
# define tlb_start_vma(tlb,vma) do { } while (0)
# define tlb_end_vma(tlb,vma) do { } while (0)
#else
# define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
# define tlb_end_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
#endif
#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _XTENSA_TLB_H */

View file

@ -0,0 +1,208 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H
#include <linux/stringify.h>
#include <asm/processor.h>
#define DTLB_WAY_PGD 7
#define ITLB_ARF_WAYS 4
#define DTLB_ARF_WAYS 4
#define ITLB_HIT_BIT 3
#define DTLB_HIT_BIT 4
#ifndef __ASSEMBLY__
/* TLB flushing:
*
* - flush_tlb_all() flushes all processes TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
* - flush_tlb_page(mm, vmaddr) flushes a single page
* - flush_tlb_range(mm, start, end) flushes a range of pages
*/
void local_flush_tlb_all(void);
void local_flush_tlb_mm(struct mm_struct *mm);
void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_SMP
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *);
void flush_tlb_page(struct vm_area_struct *, unsigned long);
void flush_tlb_range(struct vm_area_struct *, unsigned long,
unsigned long);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#else /* !CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
end)
#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
end)
#endif /* CONFIG_SMP */
/* TLB operations. */
static inline unsigned long itlb_probe(unsigned long addr)
{
unsigned long tmp;
__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
return tmp;
}
static inline unsigned long dtlb_probe(unsigned long addr)
{
unsigned long tmp;
__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
return tmp;
}
static inline void invalidate_itlb_entry (unsigned long probe)
{
__asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
}
static inline void invalidate_dtlb_entry (unsigned long probe)
{
__asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
}
/* Use the .._no_isync functions with caution. Generally, these are
* handy for bulk invalidates followed by a single 'isync'. The
* caller must follow up with an 'isync', which can be relatively
* expensive on some Xtensa implementations.
*/
static inline void invalidate_itlb_entry_no_isync (unsigned entry)
{
/* Caller must follow up with 'isync'. */
__asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
}
static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
{
/* Caller must follow up with 'isync'. */
__asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
}
static inline void set_itlbcfg_register (unsigned long val)
{
__asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
: : "a" (val));
}
static inline void set_dtlbcfg_register (unsigned long val)
{
__asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
: : "a" (val));
}
static inline void set_ptevaddr_register (unsigned long val)
{
__asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
: : "a" (val));
}
static inline unsigned long read_ptevaddr_register (void)
{
unsigned long tmp;
__asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
return tmp;
}
static inline void write_dtlb_entry (pte_t entry, int way)
{
__asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
: : "r" (way), "r" (entry) );
}
static inline void write_itlb_entry (pte_t entry, int way)
{
__asm__ __volatile__("witlb %1, %0; isync\n\t"
: : "r" (way), "r" (entry) );
}
static inline void invalidate_page_directory (void)
{
invalidate_dtlb_entry (DTLB_WAY_PGD);
invalidate_dtlb_entry (DTLB_WAY_PGD+1);
invalidate_dtlb_entry (DTLB_WAY_PGD+2);
}
static inline void invalidate_itlb_mapping (unsigned address)
{
unsigned long tlb_entry;
if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
invalidate_itlb_entry(tlb_entry);
}
static inline void invalidate_dtlb_mapping (unsigned address)
{
unsigned long tlb_entry;
if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
invalidate_dtlb_entry(tlb_entry);
}
#define check_pgt_cache() do { } while (0)
/*
* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
* ISA and exist only for test purposes.
* You may find them helpful for MMU debugging, however.
*
* 'at' is the unmodified input register
* 'as' is the output register, as follows (specific to the Linux config):
*
* as[31..12] contain the virtual address
* as[11..08] are meaningless
* as[07..00] contain the asid
*/
static inline unsigned long read_dtlb_virtual (int way)
{
unsigned long tmp;
__asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
static inline unsigned long read_dtlb_translation (int way)
{
unsigned long tmp;
__asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
static inline unsigned long read_itlb_virtual (int way)
{
unsigned long tmp;
__asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
static inline unsigned long read_itlb_translation (int way)
{
unsigned long tmp;
__asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
return tmp;
}
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_TLBFLUSH_H */

View file

@ -0,0 +1,59 @@
/*
* arch/xtensa/include/asm/traps.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Tensilica Inc.
*/
#ifndef _XTENSA_TRAPS_H
#define _XTENSA_TRAPS_H
#include <asm/ptrace.h>
/*
* handler must be either of the following:
* void (*)(struct pt_regs *regs);
* void (*)(struct pt_regs *regs, unsigned long exccause);
*/
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
void secondary_trap_init(void);
static inline void spill_registers(void)
{
#if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ (
" call12 1f\n"
" _j 2f\n"
" retw\n"
" .align 4\n"
"1:\n"
" _entry a1, 48\n"
" addi a12, a0, 3\n"
#if XCHAL_NUM_AREGS > 32
" .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
" _entry a1, 48\n"
" mov a12, a0\n"
" .endr\n"
#endif
" _entry a1, 48\n"
#if XCHAL_NUM_AREGS % 12 == 0
" mov a8, a8\n"
#elif XCHAL_NUM_AREGS % 12 == 4
" mov a12, a12\n"
#elif XCHAL_NUM_AREGS % 12 == 8
" mov a4, a4\n"
#endif
" retw\n"
"2:\n"
: : : "a12", "a13", "memory");
#else
__asm__ __volatile__ (
" mov a12, a12\n"
: : : "memory");
#endif
}
#endif /* _XTENSA_TRAPS_H */

View file

@ -0,0 +1,23 @@
/*
* include/asm-xtensa/types.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H
#include <uapi/asm/types.h>
#ifndef __ASSEMBLY__
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#define BITS_PER_LONG 32
#endif
#endif /* _XTENSA_TYPES_H */

View file

@ -0,0 +1,510 @@
/*
* include/asm-xtensa/uaccess.h
*
* User space memory access functions
*
* These routines provide basic functions for accessing user memory
* space from the kernel, such as access_ok(), get_user(), put_user(),
* copy_to_user(), copy_from_user(), clear_user() and strncpy_from_user().
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H
#include <linux/errno.h>
#ifndef __ASSEMBLY__
#include <linux/prefetch.h>
#endif
#include <asm/types.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#ifdef __ASSEMBLY__
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
/*
* These assembly macros mirror the C macros that follow below. They
* should always have identical functionality. See
* arch/xtensa/kernel/sys.S for usage.
*/
#define KERNEL_DS 0
#define USER_DS 1
#define get_ds (KERNEL_DS)
/*
* get_fs reads current->thread.current_ds into a register.
* On Entry:
* <ad> anything
* <sp> stack
* On Exit:
* <ad> contains current->thread.current_ds
*/
.macro get_fs ad, sp
GET_CURRENT(\ad,\sp)
#if THREAD_CURRENT_DS > 1020
addi \ad, \ad, TASK_THREAD
l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
#else
l32i \ad, \ad, THREAD_CURRENT_DS
#endif
.endm
/*
* set_fs sets current->thread.current_ds to some value.
* On Entry:
* <at> anything (temp register)
* <av> value to write
* <sp> stack
* On Exit:
* <at> destroyed (actually, current)
* <av> preserved, value to write
*/
.macro set_fs at, av, sp
GET_CURRENT(\at,\sp)
s32i \av, \at, THREAD_CURRENT_DS
.endm
/*
* kernel_ok determines whether we should bypass addr/size checking.
* See the equivalent C-macro version below for clarity.
* On success, kernel_ok branches to a label indicated by parameter
* <success>. This implies that the macro falls through to the next
* instruction on an error.
*
* Note that while this macro can be used independently, we designed
* it for optimal use in the access_ok macro below (i.e., we fall
* through on error).
*
* On Entry:
* <at> anything (temp register)
* <success> label to branch to on success; implies
* fall-through macro on error
* <sp> stack pointer
* On Exit:
* <at> destroyed (actually, current->thread.current_ds)
*/
#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
.macro kernel_ok at, sp, success
get_fs \at, \sp
beqz \at, \success
.endm
/*
* user_ok determines whether the access to user-space memory is allowed.
* See the equivalent C-macro version below for clarity.
*
* On error, user_ok branches to a label indicated by parameter
* <error>. This implies that the macro falls through to the next
* instruction on success.
*
* Note that while this macro can be used independently, we designed
* it for optimal use in the access_ok macro below (i.e., we fall
* through on success).
*
* On Entry:
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
* <aa> preserved
* <as> preserved
* <at> destroyed (actually, (TASK_SIZE - size))
*/
.macro user_ok aa, as, at, error
movi \at, __XTENSA_UL_CONST(TASK_SIZE)
bgeu \as, \at, \error
sub \at, \at, \as
bgeu \aa, \at, \error
.endm
/*
* access_ok determines whether a memory access is allowed. See the
* equivalent C-macro version below for clarity.
*
* On error, access_ok branches to a label indicated by parameter
* <error>. This implies that the macro falls through to the next
* instruction on success.
*
* Note that we assume success is the common case, and we optimize the
* branch fall-through case on success.
*
* On Entry:
* <aa> register containing memory address
* <as> register containing memory size
* <at> temp register
* <sp>
* <error> label to branch to on error; implies fall-through
* macro on success
* On Exit:
* <aa> preserved
* <as> preserved
* <at> destroyed
*/
.macro access_ok aa, as, at, sp, error
kernel_ok \at, \sp, .Laccess_ok_\@
user_ok \aa, \as, \at, \error
.Laccess_ok_\@:
.endm
#else /* __ASSEMBLY__ not defined */
#include <linux/sched.h>
/*
* The fs value determines whether argument validity checking should
* be performed or not. If get_fs() == USER_DS, checking is
* performed; with get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons (Data Segment Register?), these macros are
* grossly misnamed.
*/
#define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { 1 })
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.current_ds)
#define set_fs(val) (current->thread.current_ds = (val))
#define segment_eq(a,b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) \
(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
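/*
 * Worked example (illustrative), with TASK_SIZE == 0x40000000: a request
 * for addr == 0x3fffff00 with size == 0x200 fails __user_ok() because
 * 0x3fffff00 > 0x40000000 - 0x200 == 0x3ffffe00, i.e. the range would
 * spill past the end of user space.
 */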
/*
* These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer
* type.
*
* This gets kind of ugly. We want to return _two_ values in
* "get_user()" and yet we don't want to do any pointers, because that
* is too much of a performance impact. Thus we have a few rather ugly
* macros here, and hide all the ugliness from the user.
*
* Be careful not to:
* (a) re-use the arguments for side effects (sizeof is ok)
* (b) require any knowledge of processes at this stage
*/
#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
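/*
 * Usage sketch (illustrative): both macros return 0 on success and
 * -EFAULT on a fault; uptr is assumed to point into user space.
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */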
/*
* The "__xxx" versions of the user access functions are versions that
* do not verify the address space, that must have been done previously
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*/
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
extern long __put_user_bad(void);
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
})
#define __put_user_size(x,ptr,size,retval) \
do { \
int __cb; \
retval = 0; \
switch (size) { \
case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \
break; \
} \
default: __put_user_bad(); \
} \
} while (0)
/*
* Consider a case of a user single load/store would cause both an
* unaligned exception and an MMU-related exception (unaligned
* exceptions happen first):
*
* User code passes a bad variable ptr to a system call.
* Kernel tries to access the variable.
* Unaligned exception occurs.
* Unaligned exception handler tries to make aligned accesses.
* Double exception occurs for MMU-related cause (e.g., page not mapped).
* do_page_fault() thinks the fault address belongs to the kernel, not the
* user, and panics.
*
* The kernel currently prohibits user unaligned accesses. We use the
* __check_align_* macros to check for unaligned addresses before
* accessing user space so we don't crash the kernel. Both
* __put_user_asm and __get_user_asm use these alignment macros, so
* macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
* sync.
*/
#define __check_align_1 ""
#define __check_align_2 \
" _bbci.l %3, 0, 1f \n" \
" movi %0, %4 \n" \
" _j 2f \n"
#define __check_align_4 \
" _bbsi.l %3, 0, 0f \n" \
" _bbci.l %3, 1, 1f \n" \
"0: movi %0, %4 \n" \
" _j 2f \n"
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
#define __put_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
"4: \n" \
" .long 2b \n" \
"5: \n" \
" l32r %1, 4b \n" \
" movi %0, %4 \n" \
" jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:"=r" (err), "=r" (cb) \
:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_nocheck(x,ptr,size) \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval) \
do { \
int __cb; \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
case 8: retval = __copy_from_user(&x,ptr,8); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
/*
* WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work.
*/
#define __get_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
"4: \n" \
" .long 2b \n" \
"5: \n" \
" l32r %1, 4b \n" \
" movi %2, 0 \n" \
" movi %0, %4 \n" \
" jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
:"=r" (err), "=r" (cb), "=r" (x) \
:"r" (addr), "i" (-EFAULT), "0" (err))
/*
* Copy to/from user space
*/
/*
* We use a generic, arbitrary-sized copy subroutine. The Xtensa
* architecture would cause heavy code bloat if we tried to inline
* these functions and provide __constant_copy_* equivalents like the
* i386 versions. __xtensa_copy_user is quite efficient. See the
* .fixup section of __xtensa_copy_user for a discussion on the
* X_zeroing equivalents for Xtensa.
*/
extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
return __copy_user(to,from,n);
}
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
return __copy_user(to,from,n);
}
static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
return __copy_user(to,from,n);
return n;
}
static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
return __copy_user(to,from,n);
else
memset(to, 0, n);
return n;
}
#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) \
__generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) \
__generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/*
* We need to return the number of bytes not cleared. Our memset()
* returns zero if a problem occurs while accessing user-space memory.
* In that event, we report that no memory was cleared (i.e., we return
* the full size). Otherwise, we return zero for success.
*/
static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
if (!memset(addr, 0, size))
return size;
return 0;
}
static inline unsigned long
clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
return __xtensa_clear_user(addr, size);
return size ? -EFAULT : 0;
}
#define __clear_user __xtensa_clear_user
extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
return __strncpy_from_user(dst, src, count);
return -EFAULT;
}
#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)
/*
* Return the size of a string (including the ending 0!)
*/
extern long __strnlen_user(const char *, long);
static inline long strnlen_user(const char *str, long len)
{
unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
if ((unsigned long)str > top)
return 0;
return __strnlen_user(str, len);
}
struct exception_table_entry
{
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup.unit otherwise. */
extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);
/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
fixup_unit; \
})
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_UACCESS_H */

View file

@ -0,0 +1,22 @@
/*
* include/asm-xtensa/ucontext.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_UCONTEXT_H
#define _XTENSA_UCONTEXT_H
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
struct sigcontext uc_mcontext;
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif /* _XTENSA_UCONTEXT_H */

View file

@ -0,0 +1,29 @@
/*
* Xtensa doesn't handle unaligned accesses efficiently.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _ASM_XTENSA_UNALIGNED_H
#define _ASM_XTENSA_UNALIGNED_H
#include <asm/byteorder.h>
#ifdef __LITTLE_ENDIAN
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif
#endif /* _ASM_XTENSA_UNALIGNED_H */
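/*
 * Usage sketch (illustrative): reading a 32-bit little-endian field from an
 * arbitrarily aligned buffer without risking an unaligned access trap:
 *
 *	u32 len = get_unaligned_le32(buf + 3);
 */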

View file

@ -0,0 +1,24 @@
#ifndef _XTENSA_UNISTD_H
#define _XTENSA_UNISTD_H
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_GETPGRP
/*
* Ignore legacy system calls in the checksyscalls.sh script
*/
#define __IGNORE_fork /* use clone */
#define __IGNORE_time
#define __IGNORE_alarm /* use setitimer */
#define __IGNORE_pause
#define __IGNORE_mmap /* use mmap2 */
#define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */
#endif /* _XTENSA_UNISTD_H */

View file

@ -0,0 +1,20 @@
/*
* include/asm-xtensa/user.h
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_USER_H
#define _XTENSA_USER_H
/* This file usually defines a 'struct user' structure. However, it is only
* used for a.out files, which are not supported on Xtensa.
*/
#endif /* _XTENSA_USER_H */

View file

@ -0,0 +1,130 @@
/*
* arch/xtensa/include/asm/xchal_vaddr_remap.h
*
* Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
* Memory Addresses from "Virtual == Physical" to their previous V2 MMU
* mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 - 2012 Tensilica Inc.
*
* Pete Delaney <piet@tensilica.com>
* Marc Gauthier <marc@tensilica.com>
*/
#ifndef _XTENSA_VECTORS_H
#define _XTENSA_VECTORS_H
#include <variant/core.h>
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#define XCHAL_KIO_SIZE 0x10000000
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
#if defined(CONFIG_MMU)
/* Will Become VECBASE */
#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
/* Image Virtual Start Address */
#define KERNELOFFSET 0xD0003000
#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
#define LOAD_MEMORY_ADDRESS 0x00003000
#else
/* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
#define LOAD_MEMORY_ADDRESS 0xD0003000
#endif
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
/* VECBASE */
#define VIRTUAL_MEMORY_ADDRESS 0x00002000
/* Location of the start of the kernel text, _start */
#define KERNELOFFSET 0x00003000
/* Loaded just above possibly live vectors */
#define LOAD_MEMORY_ADDRESS 0x00003000
#endif /* CONFIG_MMU */
#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
/* Used to set VECBASE register */
#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
VECBASE_RESET_VADDR)
#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
VECBASE_RESET_VADDR)
#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
/*
 * These XCHAL_* #defines from variant/core.h
 * are not valid to use with the V3 MMU. The non-XCHAL
 * constants defined above should be used instead.
*/
#undef XCHAL_VECBASE_RESET_VADDR
#undef XCHAL_RESET_VECTOR0_VADDR
#undef XCHAL_USER_VECTOR_VADDR
#undef XCHAL_KERNEL_VECTOR_VADDR
#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
#undef XCHAL_WINDOW_VECTORS_VADDR
#undef XCHAL_INTLEVEL2_VECTOR_VADDR
#undef XCHAL_INTLEVEL3_VECTOR_VADDR
#undef XCHAL_INTLEVEL4_VECTOR_VADDR
#undef XCHAL_INTLEVEL5_VECTOR_VADDR
#undef XCHAL_INTLEVEL6_VECTOR_VADDR
#undef XCHAL_DEBUG_VECTOR_VADDR
#undef XCHAL_NMI_VECTOR_VADDR
#undef XCHAL_INTLEVEL7_VECTOR_VADDR
#else
#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
#endif
#endif /* _XTENSA_VECTORS_H */
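
On cores with VECBASE, every vector address above is just the variant's offset added to VIRTUAL_MEMORY_ADDRESS. A compile-time sketch, assuming the CONFIG_MMU branch and using a made-up offset in place of the real XCHAL_USER_VECOFS:

/* EXAMPLE_VECOFS is hypothetical; real offsets come from variant/core.h. */
#define EXAMPLE_VECOFS 0x340
_Static_assert(XC_VADDR(EXAMPLE_VECOFS) == 0xD0000000 + EXAMPLE_VECOFS,
	       "with CONFIG_MMU, XC_VADDR() is a plain offset from VECBASE");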

View file

@ -0,0 +1,19 @@
/*
* include/asm-xtensa/vga.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_VGA_H
#define _XTENSA_VGA_H
#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
#define vga_readb(x) (*(x))
#define vga_writeb(x,y) (*(y) = (x))
#endif

View file

@ -0,0 +1,25 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
header-y += auxvec.h
header-y += byteorder.h
header-y += ioctls.h
header-y += ipcbuf.h
header-y += mman.h
header-y += msgbuf.h
header-y += param.h
header-y += poll.h
header-y += posix_types.h
header-y += ptrace.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
header-y += sigcontext.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
header-y += stat.h
header-y += swab.h
header-y += termbits.h
header-y += types.h
header-y += unistd.h

View file

@ -0,0 +1,4 @@
#ifndef __XTENSA_AUXVEC_H
#define __XTENSA_AUXVEC_H
#endif

View file

@ -0,0 +1,12 @@
#ifndef _XTENSA_BYTEORDER_H
#define _XTENSA_BYTEORDER_H
#ifdef __XTENSA_EL__
#include <linux/byteorder/little_endian.h>
#elif defined(__XTENSA_EB__)
#include <linux/byteorder/big_endian.h>
#else
# error processor byte order undefined!
#endif
#endif /* _XTENSA_BYTEORDER_H */

View file

@ -0,0 +1,126 @@
/*
* include/asm-xtensa/ioctls.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*
* Derived from "include/asm-i386/ioctls.h"
*/
#ifndef _XTENSA_IOCTLS_H
#define _XTENSA_IOCTLS_H
#include <asm/ioctl.h>
#define FIOCLEX _IO('f', 1)
#define FIONCLEX _IO('f', 2)
#define FIOASYNC _IOW('f', 125, int)
#define FIONBIO _IOW('f', 126, int)
#define FIONREAD _IOR('f', 127, int)
#define TIOCINQ FIONREAD
#define FIOQSIZE _IOR('f', 128, loff_t)
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
#define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
#define TIOCSPGRP _IOW('t', 118, int)
#define TIOCGPGRP _IOR('t', 119, int)
#define TIOCEXCL _IO('T', 12)
#define TIOCNXCL _IO('T', 13)
#define TIOCSCTTY _IO('T', 14)
#define TIOCSTI _IOW('T', 18, char)
#define TIOCMGET _IOR('T', 21, unsigned int)
#define TIOCMBIS _IOW('T', 22, unsigned int)
#define TIOCMBIC _IOW('T', 23, unsigned int)
#define TIOCMSET _IOW('T', 24, unsigned int)
# define TIOCM_LE 0x001
# define TIOCM_DTR 0x002
# define TIOCM_RTS 0x004
# define TIOCM_ST 0x008
# define TIOCM_SR 0x010
# define TIOCM_CTS 0x020
# define TIOCM_CAR 0x040
# define TIOCM_RNG 0x080
# define TIOCM_DSR 0x100
# define TIOCM_CD TIOCM_CAR
# define TIOCM_RI TIOCM_RNG
#define TIOCGSOFTCAR _IOR('T', 25, unsigned int)
#define TIOCSSOFTCAR _IOW('T', 26, unsigned int)
#define TIOCLINUX _IOW('T', 28, char)
#define TIOCCONS _IO('T', 29)
#define TIOCGSERIAL 0x803C541E /*_IOR('T', 30, struct serial_struct)*/
#define TIOCSSERIAL 0x403C541F /*_IOW('T', 31, struct serial_struct)*/
#define TIOCPKT _IOW('T', 32, int)
# define TIOCPKT_DATA 0
# define TIOCPKT_FLUSHREAD 1
# define TIOCPKT_FLUSHWRITE 2
# define TIOCPKT_STOP 4
# define TIOCPKT_START 8
# define TIOCPKT_NOSTOP 16
# define TIOCPKT_DOSTOP 32
# define TIOCPKT_IOCTL 64
#define TIOCNOTTY _IO('T', 34)
#define TIOCSETD _IOW('T', 35, int)
#define TIOCGETD _IOR('T', 36, int)
#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
#define TCGETS2 _IOR('T', 42, struct termios2)
#define TCSETS2 _IOW('T', 43, struct termios2)
#define TCSETSW2 _IOW('T', 44, struct termios2)
#define TCSETSF2 _IOW('T', 45, struct termios2)
#define TIOCGRS485 _IOR('T', 46, struct serial_rs485)
#define TIOCSRS485 _IOWR('T', 47, struct serial_rs485)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
#define TIOCVHANGUP _IO('T', 0x37)
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
#define TIOCSERCONFIG _IO('T', 83)
#define TIOCSERGWILD _IOR('T', 84, int)
#define TIOCSERSWILD _IOW('T', 85, int)
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
/* _IOR('T', 90, struct serial_multiport_struct) */
#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
/* _IOW('T', 91, struct serial_multiport_struct) */
#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#endif /* _XTENSA_IOCTLS_H */
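
Most of the TIOC* values above are consumed through ioctl(2). A userspace sketch querying the terminal size with TIOCGWINSZ (error handling trimmed):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;	/* filled in by the TIOCGWINSZ ioctl */

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
		printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
	return 0;
}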

View file

@ -0,0 +1,37 @@
/*
* include/asm-xtensa/ipcbuf.h
*
* The ipc64_perm structure for the Xtensa architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IPCBUF_H
#define _XTENSA_IPCBUF_H
/*
* Pad space is left for:
* - 32-bit mode_t and seq
* - 2 miscellaneous 32-bit values
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned long seq;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _XTENSA_IPCBUF_H */

View file

@ -0,0 +1,107 @@
/*
* include/asm-xtensa/mman.h
*
* Xtensa Processor memory-manager definitions
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 by Ralf Baechle
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_MMAN_H
#define _XTENSA_MMAN_H
/*
* Protections are chosen from these bits, OR'd together. The
* implementation does not necessarily support PROT_EXEC or PROT_WRITE
* without PROT_READ. The only guarantees are that no writing will be
* allowed without PROT_WRITE and no access will be allowed for PROT_NONE.
*/
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x10 /* page may be used for atomic ops */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
/*
* Flags for mmap
*/
#define MAP_SHARED 0x001 /* Share changes */
#define MAP_PRIVATE 0x002 /* Changes are private */
#define MAP_TYPE 0x00f /* Mask for type of mapping */
#define MAP_FIXED 0x010 /* Interpret addr exactly */
/* not used by linux, but here to make sure we don't clash with ABI defines */
#define MAP_RENAME 0x020 /* Assign page to file */
#define MAP_AUTOGROW 0x040 /* File may grow by writing */
#define MAP_LOCAL 0x080 /* Copy on fork/sproc */
#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */
/* These are linux-specific */
#define MAP_NORESERVE 0x0400 /* don't check for reservations */
#define MAP_ANONYMOUS 0x0800 /* don't use a file */
#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */
#define MAP_LOCKED 0x8000 /* pages are locked */
#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
/*
* Flags for msync
*/
#define MS_ASYNC 0x0001 /* sync memory asynchronously */
#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */
#define MS_SYNC 0x0004 /* synchronous memory sync */
/*
* Flags for mlockall
*/
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
/* common parameters: try to keep these consistent across architectures */
#define MADV_REMOVE 9 /* remove these pages & resources */
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
                            overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
/* compatibility flags */
#define MAP_FILE 0
/*
* When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
* This gives us 6 bits, which is enough until someone invents 128 bit address
* spaces.
*
* Assume these are all power of twos.
* When 0 use the default page size.
*/
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
#endif /* _XTENSA_MMAN_H */
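
The MAP_HUGE_SHIFT comment above translates directly into flag arithmetic: shift log2 of the desired huge-page size into bits [26:31]. A userspace sketch requesting a 2 MiB (log2 = 21) huge-page mapping:

#include <stddef.h>
#include <sys/mman.h>

void *map_huge_2mb(size_t len)
{
	/* 21 == log2(2 MiB); an encoded size of 0 means the default. */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    (21 << MAP_HUGE_SHIFT),
		    -1, 0);
}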

View file

@ -0,0 +1,48 @@
/*
* include/asm-xtensa/msgbuf.h
*
* The msqid64_ds structure for the Xtensa architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
#ifndef _XTENSA_MSGBUF_H
#define _XTENSA_MSGBUF_H
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifdef __XTENSA_EB__
unsigned int __unused1;
__kernel_time_t msg_stime; /* last msgsnd time */
unsigned int __unused2;
__kernel_time_t msg_rtime; /* last msgrcv time */
unsigned int __unused3;
__kernel_time_t msg_ctime; /* last change time */
#elif defined(__XTENSA_EL__)
__kernel_time_t msg_stime; /* last msgsnd time */
unsigned int __unused1;
__kernel_time_t msg_rtime; /* last msgrcv time */
unsigned int __unused2;
__kernel_time_t msg_ctime; /* last change time */
unsigned int __unused3;
#else
# error processor byte order undefined!
#endif
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused4;
unsigned long __unused5;
};
#endif /* _XTENSA_MSGBUF_H */

View file

@ -0,0 +1,30 @@
/*
* include/asm-xtensa/param.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_PARAM_H
#define _UAPI_XTENSA_PARAM_H
#ifndef __KERNEL__
# define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
#endif
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif /* _UAPI_XTENSA_PARAM_H */

View file

@ -0,0 +1,20 @@
/*
* include/asm-xtensa/poll.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_POLL_H
#define _XTENSA_POLL_H
#define POLLWRNORM POLLOUT
#define POLLWRBAND 0x0100
#define POLLREMOVE 0x0800
#include <asm-generic/poll.h>
#endif /* _XTENSA_POLL_H */

View file

@ -0,0 +1,39 @@
/*
* include/asm-xtensa/posix_types.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Largely copied from include/asm-ppc/posix_types.h
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_POSIX_TYPES_H
#define _XTENSA_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
#define __kernel_old_uid_t __kernel_old_uid_t
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
#include <asm-generic/posix_types.h>
#endif /* _XTENSA_POSIX_TYPES_H */

View file

@ -0,0 +1,77 @@
/*
* include/asm-xtensa/ptrace.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_PTRACE_H
#define _UAPI_XTENSA_PTRACE_H
/*
* Kernel stack
*
* +-----------------------+ -------- STACK_SIZE
* | register file | |
* +-----------------------+ |
* | struct pt_regs | |
* +-----------------------+ | ------ PT_REGS_OFFSET
* double : 16 bytes spill area : | ^
 * exception :- - - - - - - - - - - -: | |
* frame : struct pt_regs : | |
* :- - - - - - - - - - - -: | |
* | | | |
* | memory stack | | |
* | | | |
* ~ ~ ~ ~
* ~ ~ ~ ~
* | | | |
* | | | |
* +-----------------------+ | | --- STACK_BIAS
* | struct task_struct | | | ^
* current --> +-----------------------+ | | |
* | struct thread_info | | | |
* +-----------------------+ --------
*/
#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
#define EXC_TABLE_SIZE 0x400
/* Registers used by strace */
#define REG_A_BASE 0x0000
#define REG_AR_BASE 0x0100
#define REG_PC 0x0020
#define REG_PS 0x02e6
#define REG_WB 0x0248
#define REG_WS 0x0249
#define REG_LBEG 0x0200
#define REG_LEND 0x0201
#define REG_LCOUNT 0x0202
#define REG_SAR 0x0203
#define SYSCALL_NR 0x00ff
/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETXTREGS 18
#define PTRACE_SETXTREGS 19
#endif /* _UAPI_XTENSA_PTRACE_H */
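
The REG_* offsets above are the addresses a debugger passes to the PEEKUSER request; this is how strace reads the syscall state. A debugger-side sketch (attachment and error handling omitted, and the PTRACE_PEEKUSER convention is assumed):

#include <sys/ptrace.h>
#include <sys/types.h>

/* Read the stopped tracee's program counter; a real caller must clear
 * errno first and re-check it, since -1 is also a valid register value. */
long peek_pc(pid_t pid)
{
	return ptrace(PTRACE_PEEKUSER, pid, (void *)REG_PC, NULL);
}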

View file

@ -0,0 +1,44 @@
/*
* include/asm-xtensa/sembuf.h
*
* The semid64_ds structure for Xtensa architecture.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*
*/
#ifndef _XTENSA_SEMBUF_H
#define _XTENSA_SEMBUF_H
#include <asm/byteorder.h>
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#ifdef __XTENSA_EL__
__kernel_time_t sem_otime; /* last semop time */
unsigned long __unused1;
__kernel_time_t sem_ctime; /* last change time */
unsigned long __unused2;
#else
unsigned long __unused1;
__kernel_time_t sem_otime; /* last semop time */
unsigned long __unused2;
__kernel_time_t sem_ctime; /* last change time */
#endif
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* _XTENSA_SEMBUF_H */

View file

@ -0,0 +1,18 @@
/*
* include/asm-xtensa/setup.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SETUP_H
#define _XTENSA_SETUP_H
#define COMMAND_LINE_SIZE 256
extern void set_except_vector(int n, void *addr);
#endif

View file

@ -0,0 +1,71 @@
/*
* include/asm-xtensa/shmbuf.h
*
* The shmid64_ds structure for Xtensa architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SHMBUF_H
#define _XTENSA_SHMBUF_H
#if defined (__XTENSA_EL__)
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
unsigned long __unused1;
__kernel_time_t shm_dtime; /* last detach time */
unsigned long __unused2;
__kernel_time_t shm_ctime; /* last change time */
unsigned long __unused3;
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
unsigned long __unused4;
unsigned long __unused5;
};
#elif defined (__XTENSA_EB__)
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
unsigned long __unused1;
__kernel_time_t shm_dtime; /* last detach time */
unsigned long __unused2;
__kernel_time_t shm_ctime; /* last change time */
unsigned long __unused3;
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
unsigned long __unused4;
unsigned long __unused5;
};
#else
# error endian order not defined
#endif
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* _XTENSA_SHMBUF_H */

View file

@ -0,0 +1,28 @@
/*
* include/asm-xtensa/sigcontext.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_SIGCONTEXT_H
#define _XTENSA_SIGCONTEXT_H
struct sigcontext {
unsigned long sc_pc;
unsigned long sc_ps;
unsigned long sc_lbeg;
unsigned long sc_lend;
unsigned long sc_lcount;
unsigned long sc_sar;
unsigned long sc_acclo;
unsigned long sc_acchi;
unsigned long sc_a[16];
void *sc_xtregs;
};
#endif /* _XTENSA_SIGCONTEXT_H */

View file

@ -0,0 +1,133 @@
/*
* include/asm-xtensa/signal.h
*
* Swiped from SH.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_SIGNAL_H
#define _UAPI_XTENSA_SIGNAL_H
#define _NSIG 64
#define _NSIG_BPW 32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
#ifndef __ASSEMBLY__
#include <linux/types.h>
/* Avoid too many header ordering problems. */
struct siginfo;
typedef unsigned long old_sigset_t; /* at least 32 bits */
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#endif
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/* #define SIGLOST 29 */
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#ifndef __ASSEMBLY__
#include <asm-generic/signal-defs.h>
#ifndef __KERNEL__
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
#endif /* __KERNEL__ */
typedef struct sigaltstack {
void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_XTENSA_SIGNAL_H */
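
A userspace sketch exercising the sigaction layout above: installing a SIGINT handler with SA_SIGINFO so the three-argument form is used (the handler body is a placeholder):

#include <signal.h>
#include <stddef.h>

static void on_int(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;	/* placeholder handler */
}

int install_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_int;		/* selects the _sa_sigaction arm */
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGINT, &sa, NULL);
}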

View file

@ -0,0 +1,94 @@
/*
* include/asm-xtensa/socket.h
*
* Copied from i386.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _XTENSA_SOCKET_H
#define _XTENSA_SOCKET_H
#include <asm/sockios.h>
/* For setsockopt(2) */
#define SOL_SOCKET 1
#define SO_DEBUG 1
#define SO_REUSEADDR 2
#define SO_TYPE 3
#define SO_ERROR 4
#define SO_DONTROUTE 5
#define SO_BROADCAST 6
#define SO_SNDBUF 7
#define SO_RCVBUF 8
#define SO_SNDBUFFORCE 32
#define SO_RCVBUFFORCE 33
#define SO_KEEPALIVE 9
#define SO_OOBINLINE 10
#define SO_NO_CHECK 11
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
#define SO_SNDLOWAT 19
#define SO_RCVTIMEO 20
#define SO_SNDTIMEO 21
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 22
#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
#define SO_SECURITY_ENCRYPTION_NETWORK 24
#define SO_BINDTODEVICE 25
/* Socket filtering */
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
#define SO_TIMESTAMP 29
#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_ACCEPTCONN 30
#define SO_PEERSEC 31
#define SO_PASSSEC 34
#define SO_TIMESTAMPNS 35
#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
#define SO_MARK 36
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
#define SO_RXQ_OVFL 40
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
#define SO_PEEK_OFF 42
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
#define SO_LOCK_FILTER 44
#define SO_SELECT_ERR_QUEUE 45
#define SO_BUSY_POLL 46
#define SO_MAX_PACING_RATE 47
#define SO_BPF_EXTENSIONS 48
#endif /* _XTENSA_SOCKET_H */
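
These SO_* numbers are the option names for setsockopt(2)/getsockopt(2) at level SOL_SOCKET. A minimal userspace sketch:

#include <sys/socket.h>

/* Allow quick rebinding of a listening address after a restart. */
int enable_reuseaddr(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}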

View file

@ -0,0 +1,31 @@
/*
* include/asm-xtensa/sockios.h
*
* Socket-level I/O control calls. Copied from MIPS.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 by Ralf Baechle
* Copyright (C) 2001 Tensilica Inc.
*/
#ifndef _XTENSA_SOCKIOS_H
#define _XTENSA_SOCKIOS_H
#include <asm/ioctl.h>
/* Socket-level I/O control calls. */
#define FIOGETOWN _IOR('f', 123, int)
#define FIOSETOWN _IOW('f', 124, int)
#define SIOCATMARK _IOR('s', 7, int)
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
#endif /* _XTENSA_SOCKIOS_H */
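
A userspace sketch using SIOCGSTAMP from the list above to fetch the receive timestamp of the last packet on a socket, a common trick in packet-capture tools:

#include <sys/ioctl.h>
#include <sys/time.h>

int last_rx_time(int sockfd, struct timeval *tv)
{
	/* Valid only after at least one packet has been received. */
	return ioctl(sockfd, SIOCGSTAMP, tv);
}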

View file

@ -0,0 +1,59 @@
/*
* include/asm-xtensa/stat.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_STAT_H
#define _XTENSA_STAT_H
#define STAT_HAVE_NSEC 1
struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long st_rdev;
long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
struct stat64 {
unsigned long long st_dev; /* Device */
unsigned long long st_ino; /* File serial number */
unsigned int st_mode; /* File mode. */
unsigned int st_nlink; /* Link count. */
unsigned int st_uid; /* User ID of the file's owner. */
unsigned int st_gid; /* Group ID of the file's group. */
unsigned long long st_rdev; /* Device number, if device. */
long long st_size; /* Size of file, in bytes. */
unsigned long st_blksize; /* Optimal block size for I/O. */
unsigned long __unused2;
unsigned long long st_blocks; /* Number of 512-byte blocks allocated. */
unsigned long st_atime; /* Time of last access. */
unsigned long st_atime_nsec;
unsigned long st_mtime; /* Time of last modification. */
unsigned long st_mtime_nsec;
unsigned long st_ctime; /* Time of last status change. */
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
#endif /* _XTENSA_STAT_H */

View file

@ -0,0 +1,70 @@
/*
* include/asm-xtensa/swab.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_SWAB_H
#define _XTENSA_SWAB_H
#include <linux/types.h>
#include <linux/compiler.h>
#define __SWAB_64_THRU_32__
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__u32 res;
/* instruction sequence from Xtensa ISA release 2/2000 */
__asm__("ssai 8 \n\t"
"srli %0, %1, 16 \n\t"
"src %0, %0, %1 \n\t"
"src %0, %0, %0 \n\t"
"src %0, %1, %0 \n"
: "=&a" (res)
: "a" (x)
);
return res;
}
#define __arch_swab32 __arch_swab32
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
/* Given that 'short' values are signed (i.e., can be negative),
* we cannot assume that the upper 16-bits of the register are
* zero. We are careful to mask values after shifting.
*/
/* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
* inserts an extui instruction after putting this function inline
* to ensure that it uses only the least-significant 16 bits of
* the result. xt-xcc doesn't use an extui, but assumes the
* __asm__ macro follows convention that the upper 16 bits of an
* 'unsigned short' result are still zero. This macro doesn't
 * follow convention; indeed, it leaves garbage in the upper 16
* bits of the register.
* Declaring the temporary variables 'res' and 'tmp' to be 32-bit
* types while the return type of the function is a 16-bit type
* forces both compilers to insert exactly one extui instruction
* (or equivalent) to mask off the upper 16 bits. */
__u32 res;
__u32 tmp;
__asm__("extui %1, %2, 8, 8\n\t"
"slli %0, %2, 8 \n\t"
"or %0, %0, %1 \n"
: "=&a" (res), "=&a" (tmp)
: "a" (x)
);
return res;
}
#define __arch_swab16 __arch_swab16
#endif /* _XTENSA_SWAB_H */
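
With __arch_swab32/__arch_swab16 plugged in above, the generic swab helpers in <linux/swab.h> compile down to the SSAI/SRC (and EXTUI/SLLI) sequences instead of portable shift-and-mask code. A small in-kernel sketch (the function name is hypothetical):

#include <linux/swab.h>
#include <linux/types.h>

/* Convert a host-order word for a byte-reversed wire format; on Xtensa
 * this resolves to the inline-asm __arch_swab32() defined above. */
static inline __u32 example_to_wire(__u32 host)
{
	return swab32(host);
}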

View file

@ -0,0 +1,220 @@
/*
* include/asm-xtensa/termbits.h
*
* Copied from SH.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_TERMBITS_H
#define _XTENSA_TERMBITS_H
#include <linux/posix_types.h>
typedef unsigned char cc_t;
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
struct termios {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
};
struct termios2 {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
speed_t c_ispeed; /* input speed */
speed_t c_ospeed; /* output speed */
};
struct ktermios {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
speed_t c_ispeed; /* input speed */
speed_t c_ospeed; /* output speed */
};
/* c_cc characters */
#define VINTR 0
#define VQUIT 1
#define VERASE 2
#define VKILL 3
#define VEOF 4
#define VTIME 5
#define VMIN 6
#define VSWTC 7
#define VSTART 8
#define VSTOP 9
#define VSUSP 10
#define VEOL 11
#define VREPRINT 12
#define VDISCARD 13
#define VWERASE 14
#define VLNEXT 15
#define VEOL2 16
/* c_iflag bits */
#define IGNBRK 0000001
#define BRKINT 0000002
#define IGNPAR 0000004
#define PARMRK 0000010
#define INPCK 0000020
#define ISTRIP 0000040
#define INLCR 0000100
#define IGNCR 0000200
#define ICRNL 0000400
#define IUCLC 0001000
#define IXON 0002000
#define IXANY 0004000
#define IXOFF 0010000
#define IMAXBEL 0020000
#define IUTF8 0040000
/* c_oflag bits */
#define OPOST 0000001
#define OLCUC 0000002
#define ONLCR 0000004
#define OCRNL 0000010
#define ONOCR 0000020
#define ONLRET 0000040
#define OFILL 0000100
#define OFDEL 0000200
#define NLDLY 0000400
#define NL0 0000000
#define NL1 0000400
#define CRDLY 0003000
#define CR0 0000000
#define CR1 0001000
#define CR2 0002000
#define CR3 0003000
#define TABDLY 0014000
#define TAB0 0000000
#define TAB1 0004000
#define TAB2 0010000
#define TAB3 0014000
#define XTABS 0014000
#define BSDLY 0020000
#define BS0 0000000
#define BS1 0020000
#define VTDLY 0040000
#define VT0 0000000
#define VT1 0040000
#define FFDLY 0100000
#define FF0 0000000
#define FF1 0100000
/* c_cflag bit meaning */
#define CBAUD 0010017
#define B0 0000000 /* hang up */
#define B50 0000001
#define B75 0000002
#define B110 0000003
#define B134 0000004
#define B150 0000005
#define B200 0000006
#define B300 0000007
#define B600 0000010
#define B1200 0000011
#define B1800 0000012
#define B2400 0000013
#define B4800 0000014
#define B9600 0000015
#define B19200 0000016
#define B38400 0000017
#define EXTA B19200
#define EXTB B38400
#define CSIZE 0000060
#define CS5 0000000
#define CS6 0000020
#define CS7 0000040
#define CS8 0000060
#define CSTOPB 0000100
#define CREAD 0000200
#define PARENB 0000400
#define PARODD 0001000
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
#define BOTHER 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
#define B460800 0010004
#define B500000 0010005
#define B576000 0010006
#define B921600 0010007
#define B1000000 0010010
#define B1152000 0010011
#define B1500000 0010012
#define B2000000 0010013
#define B2500000 0010014
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate */
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
#define XCASE 0000004
#define ECHO 0000010
#define ECHOE 0000020
#define ECHOK 0000040
#define ECHONL 0000100
#define NOFLSH 0000200
#define TOSTOP 0000400
#define ECHOCTL 0001000
#define ECHOPRT 0002000
#define ECHOKE 0004000
#define FLUSHO 0010000
#define PENDIN 0040000
#define IEXTEN 0100000
#define EXTPROC 0200000
/* tcflow() and TCXONC use these */
#define TCOOFF 0
#define TCOON 1
#define TCIOFF 2
#define TCION 3
/* tcflush() and TCFLSH use these */
#define TCIFLUSH 0
#define TCOFLUSH 1
#define TCIOFLUSH 2
/* tcsetattr uses these */
#define TCSANOW 0
#define TCSADRAIN 1
#define TCSAFLUSH 2
#endif /* _XTENSA_TERMBITS_H */
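
struct termios2 plus BOTHER is what makes arbitrary baud rates possible: clear the legacy CBAUD code, set BOTHER, and write the numeric rate into c_ispeed/c_ospeed via TCSETS2. A userspace sketch (error paths trimmed):

#include <asm/ioctls.h>
#include <asm/termbits.h>
#include <sys/ioctl.h>

int set_custom_baud(int fd, unsigned int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~(tcflag_t)CBAUD;	/* drop the Bnnnn code */
	tio.c_cflag |= BOTHER;			/* "other": use c_?speed fields */
	tio.c_ispeed = baud;
	tio.c_ospeed = baud;
	return ioctl(fd, TCSETS2, &tio);
}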

View file

@ -0,0 +1,28 @@
/*
* include/asm-xtensa/types.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _UAPI_XTENSA_TYPES_H
#define _UAPI_XTENSA_TYPES_H
#include <asm-generic/int-ll64.h>
#ifdef __ASSEMBLY__
# define __XTENSA_UL(x) (x)
# define __XTENSA_UL_CONST(x) x
#else
# define __XTENSA_UL(x) ((unsigned long)(x))
# define __XTENSA_UL_CONST(x) x##UL
#endif
#ifndef __ASSEMBLY__
#endif
#endif /* _UAPI_XTENSA_TYPES_H */

View file

@ -0,0 +1,775 @@
#if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL)
#define _UAPI_XTENSA_UNISTD_H
#ifndef __SYSCALL
# define __SYSCALL(nr,func,nargs)
#endif
#define __NR_spill 0
__SYSCALL( 0, sys_ni_syscall, 0)
#define __NR_xtensa 1
__SYSCALL( 1, sys_ni_syscall, 0)
#define __NR_available4 2
__SYSCALL( 2, sys_ni_syscall, 0)
#define __NR_available5 3
__SYSCALL( 3, sys_ni_syscall, 0)
#define __NR_available6 4
__SYSCALL( 4, sys_ni_syscall, 0)
#define __NR_available7 5
__SYSCALL( 5, sys_ni_syscall, 0)
#define __NR_available8 6
__SYSCALL( 6, sys_ni_syscall, 0)
#define __NR_available9 7
__SYSCALL( 7, sys_ni_syscall, 0)
/* File Operations */
#define __NR_open 8
__SYSCALL( 8, sys_open, 3)
#define __NR_close 9
__SYSCALL( 9, sys_close, 1)
#define __NR_dup 10
__SYSCALL( 10, sys_dup, 1)
#define __NR_dup2 11
__SYSCALL( 11, sys_dup2, 2)
#define __NR_read 12
__SYSCALL( 12, sys_read, 3)
#define __NR_write 13
__SYSCALL( 13, sys_write, 3)
#define __NR_select 14
__SYSCALL( 14, sys_select, 5)
#define __NR_lseek 15
__SYSCALL( 15, sys_lseek, 3)
#define __NR_poll 16
__SYSCALL( 16, sys_poll, 3)
#define __NR__llseek 17
__SYSCALL( 17, sys_llseek, 5)
#define __NR_epoll_wait 18
__SYSCALL( 18, sys_epoll_wait, 4)
#define __NR_epoll_ctl 19
__SYSCALL( 19, sys_epoll_ctl, 4)
#define __NR_epoll_create 20
__SYSCALL( 20, sys_epoll_create, 1)
#define __NR_creat 21
__SYSCALL( 21, sys_creat, 2)
#define __NR_truncate 22
__SYSCALL( 22, sys_truncate, 2)
#define __NR_ftruncate 23
__SYSCALL( 23, sys_ftruncate, 2)
#define __NR_readv 24
__SYSCALL( 24, sys_readv, 3)
#define __NR_writev 25
__SYSCALL( 25, sys_writev, 3)
#define __NR_fsync 26
__SYSCALL( 26, sys_fsync, 1)
#define __NR_fdatasync 27
__SYSCALL( 27, sys_fdatasync, 1)
#define __NR_truncate64 28
__SYSCALL( 28, sys_truncate64, 2)
#define __NR_ftruncate64 29
__SYSCALL( 29, sys_ftruncate64, 2)
#define __NR_pread64 30
__SYSCALL( 30, sys_pread64, 6)
#define __NR_pwrite64 31
__SYSCALL( 31, sys_pwrite64, 6)
#define __NR_link 32
__SYSCALL( 32, sys_link, 2)
#define __NR_rename 33
__SYSCALL( 33, sys_rename, 2)
#define __NR_symlink 34
__SYSCALL( 34, sys_symlink, 2)
#define __NR_readlink 35
__SYSCALL( 35, sys_readlink, 3)
#define __NR_mknod 36
__SYSCALL( 36, sys_mknod, 3)
#define __NR_pipe 37
__SYSCALL( 37, sys_pipe, 1)
#define __NR_unlink 38
__SYSCALL( 38, sys_unlink, 1)
#define __NR_rmdir 39
__SYSCALL( 39, sys_rmdir, 1)
#define __NR_mkdir 40
__SYSCALL( 40, sys_mkdir, 2)
#define __NR_chdir 41
__SYSCALL( 41, sys_chdir, 1)
#define __NR_fchdir 42
__SYSCALL( 42, sys_fchdir, 1)
#define __NR_getcwd 43
__SYSCALL( 43, sys_getcwd, 2)
#define __NR_chmod 44
__SYSCALL( 44, sys_chmod, 2)
#define __NR_chown 45
__SYSCALL( 45, sys_chown, 3)
#define __NR_stat 46
__SYSCALL( 46, sys_newstat, 2)
#define __NR_stat64 47
__SYSCALL( 47, sys_stat64, 2)
#define __NR_lchown 48
__SYSCALL( 48, sys_lchown, 3)
#define __NR_lstat 49
__SYSCALL( 49, sys_newlstat, 2)
#define __NR_lstat64 50
__SYSCALL( 50, sys_lstat64, 2)
#define __NR_available51 51
__SYSCALL( 51, sys_ni_syscall, 0)
#define __NR_fchmod 52
__SYSCALL( 52, sys_fchmod, 2)
#define __NR_fchown 53
__SYSCALL( 53, sys_fchown, 3)
#define __NR_fstat 54
__SYSCALL( 54, sys_newfstat, 2)
#define __NR_fstat64 55
__SYSCALL( 55, sys_fstat64, 2)
#define __NR_flock 56
__SYSCALL( 56, sys_flock, 2)
#define __NR_access 57
__SYSCALL( 57, sys_access, 2)
#define __NR_umask 58
__SYSCALL( 58, sys_umask, 1)
#define __NR_getdents 59
__SYSCALL( 59, sys_getdents, 3)
#define __NR_getdents64 60
__SYSCALL( 60, sys_getdents64, 3)
#define __NR_fcntl64 61
__SYSCALL( 61, sys_fcntl64, 3)
#define __NR_fallocate 62
__SYSCALL( 62, sys_fallocate, 6)
#define __NR_fadvise64_64 63
__SYSCALL( 63, xtensa_fadvise64_64, 6)
#define __NR_utime 64 /* glibc 2.3.3 ?? */
__SYSCALL( 64, sys_utime, 2)
#define __NR_utimes 65
__SYSCALL( 65, sys_utimes, 2)
#define __NR_ioctl 66
__SYSCALL( 66, sys_ioctl, 3)
#define __NR_fcntl 67
__SYSCALL( 67, sys_fcntl, 3)
#define __NR_setxattr 68
__SYSCALL( 68, sys_setxattr, 5)
#define __NR_getxattr 69
__SYSCALL( 69, sys_getxattr, 4)
#define __NR_listxattr 70
__SYSCALL( 70, sys_listxattr, 3)
#define __NR_removexattr 71
__SYSCALL( 71, sys_removexattr, 2)
#define __NR_lsetxattr 72
__SYSCALL( 72, sys_lsetxattr, 5)
#define __NR_lgetxattr 73
__SYSCALL( 73, sys_lgetxattr, 4)
#define __NR_llistxattr 74
__SYSCALL( 74, sys_llistxattr, 3)
#define __NR_lremovexattr 75
__SYSCALL( 75, sys_lremovexattr, 2)
#define __NR_fsetxattr 76
__SYSCALL( 76, sys_fsetxattr, 5)
#define __NR_fgetxattr 77
__SYSCALL( 77, sys_fgetxattr, 4)
#define __NR_flistxattr 78
__SYSCALL( 78, sys_flistxattr, 3)
#define __NR_fremovexattr 79
__SYSCALL( 79, sys_fremovexattr, 2)
/* File Map / Shared Memory Operations */
#define __NR_mmap2 80
__SYSCALL( 80, sys_mmap_pgoff, 6)
#define __NR_munmap 81
__SYSCALL( 81, sys_munmap, 2)
#define __NR_mprotect 82
__SYSCALL( 82, sys_mprotect, 3)
#define __NR_brk 83
__SYSCALL( 83, sys_brk, 1)
#define __NR_mlock 84
__SYSCALL( 84, sys_mlock, 2)
#define __NR_munlock 85
__SYSCALL( 85, sys_munlock, 2)
#define __NR_mlockall 86
__SYSCALL( 86, sys_mlockall, 1)
#define __NR_munlockall 87
__SYSCALL( 87, sys_munlockall, 0)
#define __NR_mremap 88
__SYSCALL( 88, sys_mremap, 4)
#define __NR_msync 89
__SYSCALL( 89, sys_msync, 3)
#define __NR_mincore 90
__SYSCALL( 90, sys_mincore, 3)
#define __NR_madvise 91
__SYSCALL( 91, sys_madvise, 3)
#define __NR_shmget 92
__SYSCALL( 92, sys_shmget, 4)
#define __NR_shmat 93
__SYSCALL( 93, xtensa_shmat, 4)
#define __NR_shmctl 94
__SYSCALL( 94, sys_shmctl, 4)
#define __NR_shmdt 95
__SYSCALL( 95, sys_shmdt, 4)
/* Socket Operations */
#define __NR_socket 96
__SYSCALL( 96, sys_socket, 3)
#define __NR_setsockopt 97
__SYSCALL( 97, sys_setsockopt, 5)
#define __NR_getsockopt 98
__SYSCALL( 98, sys_getsockopt, 5)
#define __NR_shutdown 99
__SYSCALL( 99, sys_shutdown, 2)
#define __NR_bind 100
__SYSCALL(100, sys_bind, 3)
#define __NR_connect 101
__SYSCALL(101, sys_connect, 3)
#define __NR_listen 102
__SYSCALL(102, sys_listen, 2)
#define __NR_accept 103
__SYSCALL(103, sys_accept, 3)
#define __NR_getsockname 104
__SYSCALL(104, sys_getsockname, 3)
#define __NR_getpeername 105
__SYSCALL(105, sys_getpeername, 3)
#define __NR_sendmsg 106
__SYSCALL(106, sys_sendmsg, 3)
#define __NR_recvmsg 107
__SYSCALL(107, sys_recvmsg, 3)
#define __NR_send 108
__SYSCALL(108, sys_send, 4)
#define __NR_recv 109
__SYSCALL(109, sys_recv, 4)
#define __NR_sendto 110
__SYSCALL(110, sys_sendto, 6)
#define __NR_recvfrom 111
__SYSCALL(111, sys_recvfrom, 6)
#define __NR_socketpair 112
__SYSCALL(112, sys_socketpair, 4)
#define __NR_sendfile 113
__SYSCALL(113, sys_sendfile, 4)
#define __NR_sendfile64 114
__SYSCALL(114, sys_sendfile64, 4)
#define __NR_sendmmsg 115
__SYSCALL(115, sys_sendmmsg, 4)
/* Process Operations */
#define __NR_clone 116
__SYSCALL(116, sys_clone, 5)
#define __NR_execve 117
__SYSCALL(117, sys_execve, 3)
#define __NR_exit 118
__SYSCALL(118, sys_exit, 1)
#define __NR_exit_group 119
__SYSCALL(119, sys_exit_group, 1)
#define __NR_getpid 120
__SYSCALL(120, sys_getpid, 0)
#define __NR_wait4 121
__SYSCALL(121, sys_wait4, 4)
#define __NR_waitid 122
__SYSCALL(122, sys_waitid, 5)
#define __NR_kill 123
__SYSCALL(123, sys_kill, 2)
#define __NR_tkill 124
__SYSCALL(124, sys_tkill, 2)
#define __NR_tgkill 125
__SYSCALL(125, sys_tgkill, 3)
#define __NR_set_tid_address 126
__SYSCALL(126, sys_set_tid_address, 1)
#define __NR_gettid 127
__SYSCALL(127, sys_gettid, 0)
#define __NR_setsid 128
__SYSCALL(128, sys_setsid, 0)
#define __NR_getsid 129
__SYSCALL(129, sys_getsid, 1)
#define __NR_prctl 130
__SYSCALL(130, sys_prctl, 5)
#define __NR_personality 131
__SYSCALL(131, sys_personality, 1)
#define __NR_getpriority 132
__SYSCALL(132, sys_getpriority, 2)
#define __NR_setpriority 133
__SYSCALL(133, sys_setpriority, 3)
#define __NR_setitimer 134
__SYSCALL(134, sys_setitimer, 3)
#define __NR_getitimer 135
__SYSCALL(135, sys_getitimer, 2)
#define __NR_setuid 136
__SYSCALL(136, sys_setuid, 1)
#define __NR_getuid 137
__SYSCALL(137, sys_getuid, 0)
#define __NR_setgid 138
__SYSCALL(138, sys_setgid, 1)
#define __NR_getgid 139
__SYSCALL(139, sys_getgid, 0)
#define __NR_geteuid 140
__SYSCALL(140, sys_geteuid, 0)
#define __NR_getegid 141
__SYSCALL(141, sys_getegid, 0)
#define __NR_setreuid 142
__SYSCALL(142, sys_setreuid, 2)
#define __NR_setregid 143
__SYSCALL(143, sys_setregid, 2)
#define __NR_setresuid 144
__SYSCALL(144, sys_setresuid, 3)
#define __NR_getresuid 145
__SYSCALL(145, sys_getresuid, 3)
#define __NR_setresgid 146
__SYSCALL(146, sys_setresgid, 3)
#define __NR_getresgid 147
__SYSCALL(147, sys_getresgid, 3)
#define __NR_setpgid 148
__SYSCALL(148, sys_setpgid, 2)
#define __NR_getpgid 149
__SYSCALL(149, sys_getpgid, 1)
#define __NR_getppid 150
__SYSCALL(150, sys_getppid, 0)
#define __NR_getpgrp 151
__SYSCALL(151, sys_getpgrp, 0)
#define __NR_reserved152 152 /* set_thread_area */
__SYSCALL(152, sys_ni_syscall, 0)
#define __NR_reserved153 153 /* get_thread_area */
__SYSCALL(153, sys_ni_syscall, 0)
#define __NR_times 154
__SYSCALL(154, sys_times, 1)
#define __NR_acct 155
__SYSCALL(155, sys_acct, 1)
#define __NR_sched_setaffinity 156
__SYSCALL(156, sys_sched_setaffinity, 3)
#define __NR_sched_getaffinity 157
__SYSCALL(157, sys_sched_getaffinity, 3)
#define __NR_capget 158
__SYSCALL(158, sys_capget, 2)
#define __NR_capset 159
__SYSCALL(159, sys_capset, 2)
#define __NR_ptrace 160
__SYSCALL(160, sys_ptrace, 4)
#define __NR_semtimedop 161
__SYSCALL(161, sys_semtimedop, 5)
#define __NR_semget 162
__SYSCALL(162, sys_semget, 4)
#define __NR_semop 163
__SYSCALL(163, sys_semop, 4)
#define __NR_semctl 164
__SYSCALL(164, sys_semctl, 4)
#define __NR_available165 165
__SYSCALL(165, sys_ni_syscall, 0)
#define __NR_msgget 166
__SYSCALL(166, sys_msgget, 4)
#define __NR_msgsnd 167
__SYSCALL(167, sys_msgsnd, 4)
#define __NR_msgrcv 168
__SYSCALL(168, sys_msgrcv, 4)
#define __NR_msgctl 169
__SYSCALL(169, sys_msgctl, 4)
#define __NR_available170 170
__SYSCALL(170, sys_ni_syscall, 0)
/* File System */
#define __NR_umount2 171
__SYSCALL(171, sys_umount, 2)
#define __NR_mount 172
__SYSCALL(172, sys_mount, 5)
#define __NR_swapon 173
__SYSCALL(173, sys_swapon, 2)
#define __NR_chroot 174
__SYSCALL(174, sys_chroot, 1)
#define __NR_pivot_root 175
__SYSCALL(175, sys_pivot_root, 2)
#define __NR_umount 176
__SYSCALL(176, sys_oldumount, 1)
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __NR_swapoff 177
__SYSCALL(177, sys_swapoff, 1)
#define __NR_sync 178
__SYSCALL(178, sys_sync, 0)
#define __NR_syncfs 179
__SYSCALL(179, sys_syncfs, 1)
#define __NR_setfsuid 180
__SYSCALL(180, sys_setfsuid, 1)
#define __NR_setfsgid 181
__SYSCALL(181, sys_setfsgid, 1)
#define __NR_sysfs 182
__SYSCALL(182, sys_sysfs, 3)
#define __NR_ustat 183
__SYSCALL(183, sys_ustat, 2)
#define __NR_statfs 184
__SYSCALL(184, sys_statfs, 2)
#define __NR_fstatfs 185
__SYSCALL(185, sys_fstatfs, 2)
#define __NR_statfs64 186
__SYSCALL(186, sys_statfs64, 3)
#define __NR_fstatfs64 187
__SYSCALL(187, sys_fstatfs64, 3)
/* System */
#define __NR_setrlimit 188
__SYSCALL(188, sys_setrlimit, 2)
#define __NR_getrlimit 189
__SYSCALL(189, sys_getrlimit, 2)
#define __NR_getrusage 190
__SYSCALL(190, sys_getrusage, 2)
#define __NR_futex 191
__SYSCALL(191, sys_futex, 5)
#define __NR_gettimeofday 192
__SYSCALL(192, sys_gettimeofday, 2)
#define __NR_settimeofday 193
__SYSCALL(193, sys_settimeofday, 2)
#define __NR_adjtimex 194
__SYSCALL(194, sys_adjtimex, 1)
#define __NR_nanosleep 195
__SYSCALL(195, sys_nanosleep, 2)
#define __NR_getgroups 196
__SYSCALL(196, sys_getgroups, 2)
#define __NR_setgroups 197
__SYSCALL(197, sys_setgroups, 2)
#define __NR_sethostname 198
__SYSCALL(198, sys_sethostname, 2)
#define __NR_setdomainname 199
__SYSCALL(199, sys_setdomainname, 2)
#define __NR_syslog 200
__SYSCALL(200, sys_syslog, 3)
#define __NR_vhangup 201
__SYSCALL(201, sys_vhangup, 0)
#define __NR_uselib 202
__SYSCALL(202, sys_uselib, 1)
#define __NR_reboot 203
__SYSCALL(203, sys_reboot, 3)
#define __NR_quotactl 204
__SYSCALL(204, sys_quotactl, 4)
#define __NR_nfsservctl 205
__SYSCALL(205, sys_ni_syscall, 0) /* old nfsservctl */
#define __NR__sysctl 206
__SYSCALL(206, sys_sysctl, 1)
#define __NR_bdflush 207
__SYSCALL(207, sys_bdflush, 2)
#define __NR_uname 208
__SYSCALL(208, sys_newuname, 1)
#define __NR_sysinfo 209
__SYSCALL(209, sys_sysinfo, 1)
#define __NR_init_module 210
__SYSCALL(210, sys_init_module, 2)
#define __NR_delete_module 211
__SYSCALL(211, sys_delete_module, 1)
#define __NR_sched_setparam 212
__SYSCALL(212, sys_sched_setparam, 2)
#define __NR_sched_getparam 213
__SYSCALL(213, sys_sched_getparam, 2)
#define __NR_sched_setscheduler 214
__SYSCALL(214, sys_sched_setscheduler, 3)
#define __NR_sched_getscheduler 215
__SYSCALL(215, sys_sched_getscheduler, 1)
#define __NR_sched_get_priority_max 216
__SYSCALL(216, sys_sched_get_priority_max, 1)
#define __NR_sched_get_priority_min 217
__SYSCALL(217, sys_sched_get_priority_min, 1)
#define __NR_sched_rr_get_interval 218
__SYSCALL(218, sys_sched_rr_get_interval, 2)
#define __NR_sched_yield 219
__SYSCALL(219, sys_sched_yield, 0)
#define __NR_available222 222
__SYSCALL(222, sys_ni_syscall, 0)
/* Signal Handling */
#define __NR_restart_syscall 223
__SYSCALL(223, sys_restart_syscall, 0)
#define __NR_sigaltstack 224
__SYSCALL(224, sys_sigaltstack, 2)
#define __NR_rt_sigreturn 225
__SYSCALL(225, xtensa_rt_sigreturn, 1)
#define __NR_rt_sigaction 226
__SYSCALL(226, sys_rt_sigaction, 4)
#define __NR_rt_sigprocmask 227
__SYSCALL(227, sys_rt_sigprocmask, 4)
#define __NR_rt_sigpending 228
__SYSCALL(228, sys_rt_sigpending, 2)
#define __NR_rt_sigtimedwait 229
__SYSCALL(229, sys_rt_sigtimedwait, 4)
#define __NR_rt_sigqueueinfo 230
__SYSCALL(230, sys_rt_sigqueueinfo, 3)
#define __NR_rt_sigsuspend 231
__SYSCALL(231, sys_rt_sigsuspend, 2)
/* Message */
#define __NR_mq_open 232
__SYSCALL(232, sys_mq_open, 4)
#define __NR_mq_unlink 233
__SYSCALL(233, sys_mq_unlink, 1)
#define __NR_mq_timedsend 234
__SYSCALL(234, sys_mq_timedsend, 5)
#define __NR_mq_timedreceive 235
__SYSCALL(235, sys_mq_timedreceive, 5)
#define __NR_mq_notify 236
__SYSCALL(236, sys_mq_notify, 2)
#define __NR_mq_getsetattr 237
__SYSCALL(237, sys_mq_getsetattr, 3)
#define __NR_available238 238
__SYSCALL(238, sys_ni_syscall, 0)
/* IO */
#define __NR_io_setup 239
__SYSCALL(239, sys_io_setup, 2)
#define __NR_io_destroy 240
__SYSCALL(240, sys_io_destroy, 1)
#define __NR_io_submit 241
__SYSCALL(241, sys_io_submit, 3)
#define __NR_io_getevents 242
__SYSCALL(242, sys_io_getevents, 5)
#define __NR_io_cancel 243
__SYSCALL(243, sys_io_cancel, 3)
#define __NR_clock_settime 244
__SYSCALL(244, sys_clock_settime, 2)
#define __NR_clock_gettime 245
__SYSCALL(245, sys_clock_gettime, 2)
#define __NR_clock_getres 246
__SYSCALL(246, sys_clock_getres, 2)
#define __NR_clock_nanosleep 247
__SYSCALL(247, sys_clock_nanosleep, 4)
/* Timer */
#define __NR_timer_create 248
__SYSCALL(248, sys_timer_create, 3)
#define __NR_timer_delete 249
__SYSCALL(249, sys_timer_delete, 1)
#define __NR_timer_settime 250
__SYSCALL(250, sys_timer_settime, 4)
#define __NR_timer_gettime 251
__SYSCALL(251, sys_timer_gettime, 2)
#define __NR_timer_getoverrun 252
__SYSCALL(252, sys_timer_getoverrun, 1)
/* System */
#define __NR_reserved253 253
__SYSCALL(253, sys_ni_syscall, 0)
#define __NR_lookup_dcookie 254
__SYSCALL(254, sys_lookup_dcookie, 4)
#define __NR_available255 255
__SYSCALL(255, sys_ni_syscall, 0)
#define __NR_add_key 256
__SYSCALL(256, sys_add_key, 5)
#define __NR_request_key 257
__SYSCALL(257, sys_request_key, 5)
#define __NR_keyctl 258
__SYSCALL(258, sys_keyctl, 5)
#define __NR_available259 259
__SYSCALL(259, sys_ni_syscall, 0)
#define __NR_readahead 260
__SYSCALL(260, sys_readahead, 5)
#define __NR_remap_file_pages 261
__SYSCALL(261, sys_remap_file_pages, 5)
#define __NR_migrate_pages 262
__SYSCALL(262, sys_migrate_pages, 0)
#define __NR_mbind 263
__SYSCALL(263, sys_mbind, 6)
#define __NR_get_mempolicy 264
__SYSCALL(264, sys_get_mempolicy, 5)
#define __NR_set_mempolicy 265
__SYSCALL(265, sys_set_mempolicy, 3)
#define __NR_unshare 266
__SYSCALL(266, sys_unshare, 1)
#define __NR_move_pages 267
__SYSCALL(267, sys_move_pages, 0)
#define __NR_splice 268
__SYSCALL(268, sys_splice, 0)
#define __NR_tee 269
__SYSCALL(269, sys_tee, 0)
#define __NR_vmsplice 270
__SYSCALL(270, sys_vmsplice, 0)
#define __NR_available271 271
__SYSCALL(271, sys_ni_syscall, 0)
#define __NR_pselect6 272
__SYSCALL(272, sys_pselect6, 0)
#define __NR_ppoll 273
__SYSCALL(273, sys_ppoll, 0)
#define __NR_epoll_pwait 274
__SYSCALL(274, sys_epoll_pwait, 0)
#define __NR_epoll_create1 275
__SYSCALL(275, sys_epoll_create1, 1)
#define __NR_inotify_init 276
__SYSCALL(276, sys_inotify_init, 0)
#define __NR_inotify_add_watch 277
__SYSCALL(277, sys_inotify_add_watch, 3)
#define __NR_inotify_rm_watch 278
__SYSCALL(278, sys_inotify_rm_watch, 2)
#define __NR_inotify_init1 279
__SYSCALL(279, sys_inotify_init1, 1)
#define __NR_getcpu 280
__SYSCALL(280, sys_getcpu, 0)
#define __NR_kexec_load 281
__SYSCALL(281, sys_ni_syscall, 0)
#define __NR_ioprio_set 282
__SYSCALL(282, sys_ioprio_set, 2)
#define __NR_ioprio_get 283
__SYSCALL(283, sys_ioprio_get, 3)
#define __NR_set_robust_list 284
__SYSCALL(284, sys_set_robust_list, 3)
#define __NR_get_robust_list 285
__SYSCALL(285, sys_get_robust_list, 3)
#define __NR_available286 286
__SYSCALL(286, sys_ni_syscall, 0)
#define __NR_available287 287
__SYSCALL(287, sys_ni_syscall, 0)
/* Relative File Operations */
#define __NR_openat 288
__SYSCALL(288, sys_openat, 4)
#define __NR_mkdirat 289
__SYSCALL(289, sys_mkdirat, 3)
#define __NR_mknodat 290
__SYSCALL(290, sys_mknodat, 4)
#define __NR_unlinkat 291
__SYSCALL(291, sys_unlinkat, 3)
#define __NR_renameat 292
__SYSCALL(292, sys_renameat, 4)
#define __NR_linkat 293
__SYSCALL(293, sys_linkat, 5)
#define __NR_symlinkat 294
__SYSCALL(294, sys_symlinkat, 3)
#define __NR_readlinkat 295
__SYSCALL(295, sys_readlinkat, 4)
#define __NR_utimensat 296
__SYSCALL(296, sys_utimensat, 0)
#define __NR_fchownat 297
__SYSCALL(297, sys_fchownat, 5)
#define __NR_futimesat 298
__SYSCALL(298, sys_futimesat, 4)
#define __NR_fstatat64 299
__SYSCALL(299, sys_fstatat64, 0)
#define __NR_fchmodat 300
__SYSCALL(300, sys_fchmodat, 4)
#define __NR_faccessat 301
__SYSCALL(301, sys_faccessat, 4)
#define __NR_available302 302
__SYSCALL(302, sys_ni_syscall, 0)
#define __NR_available303 303
__SYSCALL(303, sys_ni_syscall, 0)
#define __NR_signalfd 304
__SYSCALL(304, sys_signalfd, 3)
/* 305 was __NR_timerfd */
__SYSCALL(305, sys_ni_syscall, 0)
#define __NR_eventfd 306
__SYSCALL(306, sys_eventfd, 1)
#define __NR_recvmmsg 307
__SYSCALL(307, sys_recvmmsg, 5)
#define __NR_setns 308
__SYSCALL(308, sys_setns, 2)
#define __NR_signalfd4 309
__SYSCALL(309, sys_signalfd4, 4)
#define __NR_dup3 310
__SYSCALL(310, sys_dup3, 3)
#define __NR_pipe2 311
__SYSCALL(311, sys_pipe2, 2)
#define __NR_timerfd_create 312
__SYSCALL(312, sys_timerfd_create, 2)
#define __NR_timerfd_settime 313
__SYSCALL(313, sys_timerfd_settime, 4)
#define __NR_timerfd_gettime 314
__SYSCALL(314, sys_timerfd_gettime, 2)
#define __NR_available315 315
__SYSCALL(315, sys_ni_syscall, 0)
#define __NR_eventfd2 316
__SYSCALL(316, sys_eventfd2, 2)
#define __NR_preadv 317
__SYSCALL(317, sys_preadv, 5)
#define __NR_pwritev 318
__SYSCALL(318, sys_pwritev, 5)
#define __NR_available319 319
__SYSCALL(319, sys_ni_syscall, 0)
#define __NR_fanotify_init 320
__SYSCALL(320, sys_fanotify_init, 2)
#define __NR_fanotify_mark 321
__SYSCALL(321, sys_fanotify_mark, 6)
#define __NR_process_vm_readv 322
__SYSCALL(322, sys_process_vm_readv, 6)
#define __NR_process_vm_writev 323
__SYSCALL(323, sys_process_vm_writev, 6)
#define __NR_name_to_handle_at 324
__SYSCALL(324, sys_name_to_handle_at, 5)
#define __NR_open_by_handle_at 325
__SYSCALL(325, sys_open_by_handle_at, 3)
#define __NR_sync_file_range2 326
__SYSCALL(326, sys_sync_file_range2, 6)
#define __NR_perf_event_open 327
__SYSCALL(327, sys_perf_event_open, 5)
#define __NR_rt_tgsigqueueinfo 328
__SYSCALL(328, sys_rt_tgsigqueueinfo, 4)
#define __NR_clock_adjtime 329
__SYSCALL(329, sys_clock_adjtime, 2)
#define __NR_prlimit64 330
__SYSCALL(330, sys_prlimit64, 4)
#define __NR_kcmp 331
__SYSCALL(331, sys_kcmp, 5)
#define __NR_finit_module 332
__SYSCALL(332, sys_finit_module, 3)
#define __NR_accept4 333
__SYSCALL(333, sys_accept4, 4)
#define __NR_sched_setattr 334
__SYSCALL(334, sys_sched_setattr, 2)
#define __NR_sched_getattr 335
__SYSCALL(335, sys_sched_getattr, 3)
#define __NR_renameat2 336
__SYSCALL(336, sys_renameat2, 5)
#define __NR_seccomp 337
__SYSCALL(337, sys_seccomp, 3)
#define __NR_getrandom 338
__SYSCALL(338, sys_getrandom, 3)
#define __NR_memfd_create 339
__SYSCALL(339, sys_memfd_create, 2)
#define __NR_syscall_count 340
/*
* sysxtensa syscall handler
*
* int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
* int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
* int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
* int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
* a2 a6 a3 a4 a5
*/
#define SYS_XTENSA_RESERVED 0 /* don't use this */
#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
#define SYS_XTENSA_COUNT 5 /* count */
#undef __SYSCALL
#endif /* _UAPI_XTENSA_UNISTD_H */
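
The sysxtensa comment above documents the register layout; from userspace the same entry point is reachable through syscall(2) with __NR_xtensa. A sketch of the compare-and-swap operation (the return-value convention is assumed, not documented here):

#include <sys/syscall.h>
#include <unistd.h>

/* Atomically: if (*ptr == oldval) then *ptr = newval. */
static inline long atomic_cmp_swp(int *ptr, int oldval, int newval)
{
	return syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
		       ptr, oldval, newval);
}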