Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-10-29 07:18:51 +01:00)

Commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions

arch/tile/include/asm/Kbuild (Normal file, 44 lines)
@@ -0,0 +1,44 @@
header-y += ../arch/

generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += socket.h
generic-y += sockios.h
generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += xor.h

arch/tile/include/asm/asm-offsets.h (Normal file, 1 line)
@@ -0,0 +1 @@
#include <generated/asm-offsets.h>

arch/tile/include/asm/atomic.h (Normal file, 208 lines)
@@ -0,0 +1,208 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Atomic primitives.
 */

#ifndef _ASM_TILE_ATOMIC_H
#define _ASM_TILE_ATOMIC_H

#include <asm/cmpxchg.h>

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE(v->counter);
}

/**
 * atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
#define atomic_sub_return(i, v)		atomic_add_return((int)(-(i)), (v))

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
#define atomic_sub(i, v)		atomic_add((int)(-(i)), (v))

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)

/**
 * atomic_inc_return - increment memory and return
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v)		atomic_add_return(1, (v))

/**
 * atomic_dec_return - decrement memory and return
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v)		atomic_sub_return(1, (v))

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v)			atomic_add(1, (v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v)			atomic_sub(1, (v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0.
 */
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is 0.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @i: integer value to store in memory
 *
 * Atomically sets @v to @i and returns old @v
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	return xchg(&v->counter, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return cmpxchg(&v->counter, o, n);
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true if the result is
 * negative, or false when result is greater than or equal to zero.
 */
#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)

#endif /* __ASSEMBLY__ */

#ifndef __tilegx__
#include <asm/atomic_32.h>
#else
#include <asm/atomic_64.h>
#endif

#ifndef __ASSEMBLY__

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @i: integer value to store in memory
 *
 * Atomically sets @v to @i and returns old @v
 */
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
	return xchg64(&v->counter, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
					  long long n)
{
	return cmpxchg64(&v->counter, o, n);
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_H */
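Editor's note: the cmpxchg retry loop in atomic64_dec_if_positive() above is the standard lock-free pattern: re-read the current value on failure and retry. A minimal userspace sketch of the same loop, with GCC __atomic builtins standing in for the kernel's atomic64_cmpxchg() (illustrative only, not part of the commit):

/* Userspace analogue of the dec-if-positive retry loop above. */
#include <stdio.h>

static long long dec_if_positive(long long *p)
{
	long long c = __atomic_load_n(p, __ATOMIC_SEQ_CST);
	for (;;) {
		long long dec = c - 1;
		if (dec < 0)
			return dec;	/* would go negative: store nothing */
		/* On failure this reloads c with the current value,
		 * exactly like "c = old" in the kernel loop. */
		if (__atomic_compare_exchange_n(p, &c, dec, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return dec;
	}
}

int main(void)
{
	long long v = 2;
	printf("%lld\n", dec_if_positive(&v));	/* 1 */
	printf("%lld\n", dec_if_positive(&v));	/* 0 */
	printf("%lld\n", dec_if_positive(&v));	/* -1: already zero, untouched */
	return 0;
}

Note that the loop returns dec even when nothing was stored; that negative return is how the kernel variant signals that @v was already non-positive.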

arch/tile/include/asm/atomic_32.h (Normal file, 243 lines)
@@ -0,0 +1,243 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}

/* A 64bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					    long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)


#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT	(PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE	(1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to keep
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */
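Editor's note: the ATOMIC_HASH sizing comment above pins the lock table at PAGE_SIZE / 8 entries of 4 bytes each, i.e. half a page, for any page size. A quick check of that arithmetic for two example page sizes (the PAGE_SHIFT values here are assumptions for illustration; the real value comes from the kernel config):

/* Sanity-check the ATOMIC_HASH sizing rule for two sample page sizes. */
#include <assert.h>

int main(void)
{
	unsigned int page_shifts[] = { 12, 16 };	/* 4 KB and 64 KB pages */
	for (unsigned int i = 0; i < 2; i++) {
		unsigned long page_size = 1UL << page_shifts[i];
		unsigned long hash_size = 1UL << (page_shifts[i] - 3);
		assert(hash_size <= page_size / 8);	/* index bits available */
		assert(hash_size * 4 <= page_size);	/* 4-byte entries, one page */
	}
	return 0;
}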

arch/tile/include/asm/atomic_64.h (Normal file, 113 lines)
@@ -0,0 +1,113 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v, i)	((v)->counter = (i))

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;	/* "int" in the original truncates the 64-bit result */
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */
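Editor's note: atomic64_add_unless() above returns a truth value (oldval != u), unlike the 32-bit __atomic_add_unless(), which returns the old value itself. A userspace sketch of the same compare-and-swap retry loop, with __sync_val_compare_and_swap standing in for the kernel's cmpxchg() (illustrative only):

/* Userspace analogue of the add-unless retry loop above. */
#include <stdio.h>

static long add_unless(long *p, long a, long u)
{
	long guess, oldval = *p;
	do {
		if (oldval == u)
			break;			/* refuse to add */
		guess = oldval;
		oldval = __sync_val_compare_and_swap(p, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;			/* nonzero iff the add happened */
}

int main(void)
{
	long v = 0;
	printf("%ld v=%ld\n", add_unless(&v, 1, 0), v);	/* 0 v=0: blocked */
	v = 5;
	printf("%ld v=%ld\n", add_unless(&v, 1, 0), v);	/* 1 v=6 */
	return 0;
}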

arch/tile/include/asm/backtrace.h (Normal file, 162 lines)
@@ -0,0 +1,162 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BACKTRACE_H
#define _ASM_TILE_BACKTRACE_H

#include <linux/types.h>

/* Reads 'size' bytes from 'address' and writes the data to 'result'.
 * Returns true if successful, else false (e.g. memory not readable).
 */
typedef bool (*BacktraceMemoryReader)(void *result,
				      unsigned long address,
				      unsigned int size,
				      void *extra);

typedef struct {
	/* Current PC. */
	unsigned long pc;

	/* Current stack pointer value. */
	unsigned long sp;

	/* Current frame pointer value (i.e. caller's stack pointer) */
	unsigned long fp;

	/* Internal use only: caller's PC for first frame. */
	unsigned long initial_frame_caller_pc;

	/* Internal use only: callback to read memory. */
	BacktraceMemoryReader read_memory_func;

	/* Internal use only: arbitrary argument to read_memory_func. */
	void *read_memory_func_extra;

} BacktraceIterator;


typedef enum {

	/* We have no idea what the caller's pc is. */
	PC_LOC_UNKNOWN,

	/* The caller's pc is currently in lr. */
	PC_LOC_IN_LR,

	/* The caller's pc can be found by dereferencing the caller's sp. */
	PC_LOC_ON_STACK

} CallerPCLocation;


typedef enum {

	/* We have no idea what the caller's sp is. */
	SP_LOC_UNKNOWN,

	/* The caller's sp is currently in r52. */
	SP_LOC_IN_R52,

	/* The caller's sp can be found by adding a certain constant
	 * to the current value of sp.
	 */
	SP_LOC_OFFSET

} CallerSPLocation;


/* Bit values ORed into CALLER_* values for info ops. */
enum {
	/* Setting the low bit on any of these values means the info op
	 * applies only to one bundle ago.
	 */
	ONE_BUNDLE_AGO_FLAG = 1,

	/* Setting this bit on a CALLER_SP_* value means the PC is in LR.
	 * If not set, PC is on the stack.
	 */
	PC_IN_LR_FLAG = 2,

	/* This many of the low bits of a CALLER_SP_* value are for the
	 * flag bits above.
	 */
	NUM_INFO_OP_FLAGS = 2,

	/* We cannot have one in the memory pipe so this is the maximum. */
	MAX_INFO_OPS_PER_BUNDLE = 2
};


/* Internal constants used to define 'info' operands. */
enum {
	/* 0 and 1 are reserved, as are all negative numbers. */

	CALLER_UNKNOWN_BASE = 2,

	CALLER_SP_IN_R52_BASE = 4,

	CALLER_SP_OFFSET_BASE = 8,
};


/* Current backtracer state describing where it thinks the caller is. */
typedef struct {
	/*
	 * Public fields
	 */

	/* How do we find the caller's PC? */
	CallerPCLocation pc_location : 8;

	/* How do we find the caller's SP? */
	CallerSPLocation sp_location : 8;

	/* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
	 * loc->sp_offset. Else this field is undefined.
	 */
	uint16_t sp_offset;

	/* Is the most recently visited bundle a terminating bundle? */
	bool at_terminating_bundle;

	/*
	 * Private fields
	 */

	/* Will the forward scanner see someone clobbering sp
	 * (i.e. changing it with something other than addi sp, sp, N?)
	 */
	bool sp_clobber_follows;

	/* Operand to next "visible" info op (no more than one bundle past
	 * the next terminating bundle), or -32768 if none.
	 */
	int16_t next_info_operand;

	/* Is the info op in next_info_operand in the very next bundle? */
	bool is_next_info_operand_adjacent;

} CallerLocation;

extern void backtrace_init(BacktraceIterator *state,
			   BacktraceMemoryReader read_memory_func,
			   void *read_memory_func_extra,
			   unsigned long pc, unsigned long lr,
			   unsigned long sp, unsigned long r52);


extern bool backtrace_next(BacktraceIterator *state);

#endif /* _ASM_TILE_BACKTRACE_H */
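Editor's note: a sketch of how a client might drive this iterator, in the spirit of the kernel's own stack dumper. Everything here is hypothetical: my_read() is a made-up callback (a real one must validate addresses instead of assuming they are mapped), and the register arguments are assumed to come from a captured pt_regs:

/* Hypothetical kernel-context caller of the backtrace API above. */
static bool my_read(void *result, unsigned long address,
		    unsigned int size, void *extra)
{
	memcpy(result, (void *)address, size);	/* assumes address is mapped */
	return true;
}

static void dump_stack_from(unsigned long pc, unsigned long lr,
			    unsigned long sp, unsigned long r52)
{
	BacktraceIterator it;

	/* Seed the iterator with the captured registers, then walk up. */
	backtrace_init(&it, my_read, NULL, pc, lr, sp, r52);
	do
		printk(" frame: pc %#lx sp %#lx\n", it.pc, it.sp);
	while (backtrace_next(&it));
}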

arch/tile/include/asm/barrier.h (Normal file, 92 lines)
@@ -0,0 +1,92 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BARRIER_H
#define _ASM_TILE_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <arch/chip.h>
#include <arch/spr_def.h>
#include <asm/timex.h>

#define __sync()	__insn_mf()

#include <hv/syscall_public.h>
/*
 * Issue an uncacheable load to each memory controller, then
 * wait until those loads have completed.
 */
static inline void __mb_incoherent(void)
{
	long clobber_r10;
	asm volatile("swint2"
		     : "=R10" (clobber_r10)
		     : "R10" (HV_SYS_fence_incoherent)
		     : "r0", "r1", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
}

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
	__insn_mf();

	{
#if CHIP_HAS_TILE_WRITE_PENDING()
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		(void) __mb_incoherent();
	}
}

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#ifndef __tilegx__ /* 32 bit */
/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then do a read/modify/write of the word.
 * But after the word is updated, the routine issues an "mf" before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic()	smp_mb()
#define smp_mb__after_atomic()	do { } while (0)
#else /* 64 bit */
#define smp_mb__before_atomic()	smp_mb()
#define smp_mb__after_atomic()	smp_mb()
#endif

#include <asm-generic/barrier.h>

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_BARRIER_H */
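Editor's note: mb_incoherent() above has a bounded-spin-then-fallback shape: poll the cheap SPR for a fixed cycle budget, and only then pay for the hypervisor fence. The generic pattern, with every name below hypothetical (nothing here is a kernel API):

/* Shape of the bounded-spin-then-fallback pattern, names illustrative. */
int writes_pending(void);	/* cheap check, like the SPR read above */
void expensive_fence(void);	/* guaranteed slow path, like the hv fence */
unsigned long cycles(void);	/* free-running counter, like get_cycles_low() */

static void wait_for_drain(void)
{
	const unsigned long TIMEOUT = 400;	/* cycle budget, as above */
	unsigned long start = cycles();

	do {
		if (writes_pending() == 0)
			return;			/* drained on the fast path */
	} while (cycles() - start < TIMEOUT);

	expensive_fence();			/* give up and pay full cost */
}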

arch/tile/include/asm/bitops.h (Normal file, 94 lines)
@@ -0,0 +1,94 @@
/*
 * Copyright 1992, Linus Torvalds.
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_H
#define _ASM_TILE_BITOPS_H

#include <linux/types.h>
#include <asm/barrier.h>

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifdef __tilegx__
#include <asm/bitops_64.h>
#else
#include <asm/bitops_32.h>
#endif

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __builtin_ctzl(~word);
}

static inline int fls64(__u64 w)
{
	return (sizeof(__u64) * 8) - __builtin_clzll(w);
}

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	return fls64((unsigned int) x);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __builtin_popcount(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __builtin_popcount(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __builtin_popcount(w & 0xff);
}

static inline unsigned long __arch_hweight64(__u64 w)
{
	return __builtin_popcountll(w);
}

#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>

#endif /* _ASM_TILE_BITOPS_H */
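Editor's note: a quick userspace check of the fls()/ffz() conventions documented above, using the same compiler builtins the header does. The zero guard is added here because __builtin_clzll(0) is undefined in portable C; the header's unguarded fls64() presumably relies on the target's clz returning the word width for zero:

/* Check fls/ffz conventions with the underlying builtins. */
#include <assert.h>

static int my_fls(unsigned long long w)	/* mirrors fls64() above */
{
	return w ? 64 - __builtin_clzll(w) : 0;
}

int main(void)
{
	assert(my_fls(0) == 0);			/* fls(0) is defined as 0 */
	assert(my_fls(1) == 1);			/* lowest bit is position 1 */
	assert(my_fls(0x80000000u) == 32);	/* MSB of a 32-bit word */
	assert(__builtin_ctzl(~0xffUL) == 8);	/* ffz(0xff): first zero bit */
	return 0;
}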

arch/tile/include/asm/bitops_32.h (Normal file, 126 lines)
@@ -0,0 +1,126 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_32_H
#define _ASM_TILE_BITOPS_32_H

#include <linux/compiler.h>
#include <asm/barrier.h>

/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.
 * See __set_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.
 * See __clear_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * clear_bit() may not contain a memory barrier, so if it is used for
 * locking purposes, you should call smp_mb__before_atomic() and/or
 * smp_mb__after_atomic() to ensure changes are visible on other cpus.
 */
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * See __change_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_or(addr, mask) & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_andn(addr, mask) & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_xor(addr, mask) & mask) != 0;
}

#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_TILE_BITOPS_32_H */
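Editor's note: a typical use of these primitives is test_and_set_bit() as a one-bit try-lock, with clear_bit() as the release. Per the comment on clear_bit() above, the release side needs an explicit barrier. The flag word and bit number are illustrative, not from this kernel:

/* One-bit try-lock sketched on the API above (kernel context). */
static unsigned long flags;		/* bit 0 = "busy" */

static int try_start(void)
{
	if (test_and_set_bit(0, &flags))
		return -EBUSY;		/* bit was already set: lost the race */
	/* ... critical work ... */
	smp_mb__before_atomic();	/* order our stores before the release */
	clear_bit(0, &flags);		/* release; no barrier implied */
	return 0;
}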

arch/tile/include/asm/bitops_64.h (Normal file, 95 lines)
@@ -0,0 +1,95 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <asm/cmpxchg.h>

/* See <asm/bitops.h> for API comments. */

static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}

static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}

static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = cmpxchg(addr, guess, guess ^ mask);
	} while (guess != oldval);
}


/*
 * The test_and_xxx_bit() routines require a memory fence before we
 * start the operation, and after the operation completes.  We use
 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
 * barrier(), to block until the atomic op is complete.
 */

static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
	       & mask) != 0;
	barrier();
	return val;
}


static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
	       & mask) != 0;
	barrier();
	return val;
}


static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = cmpxchg(addr, guess, guess ^ mask);
	} while (guess != oldval);
	return (oldval & mask) != 0;
}

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _ASM_TILE_BITOPS_64_H */
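Editor's note: all the routines above share the same word/mask arithmetic: bit nr lives in long number nr / BITS_PER_LONG, at mask 1UL << (nr % BITS_PER_LONG). A worked example, assuming 64-bit longs as on tilegx:

/* Word/mask arithmetic for bit 71 in an array of 64-bit longs. */
#include <assert.h>

int main(void)
{
	unsigned int nr = 71;
	assert(nr / 64 == 1);			/* second long in the array */
	assert((1UL << (nr % 64)) == 0x80UL);	/* bit 7 within that long */
	return 0;
}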

arch/tile/include/asm/cache.h (Normal file, 64 lines)
@@ -0,0 +1,64 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CACHE_H
#define _ASM_TILE_CACHE_H

#include <arch/chip.h>

/* bytes per L1 data cache line */
#define L1_CACHE_SHIFT		CHIP_L1D_LOG_LINE_SIZE()
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/* bytes per L2 cache line */
#define L2_CACHE_SHIFT		CHIP_L2_LOG_LINE_SIZE()
#define L2_CACHE_BYTES		(1 << L2_CACHE_SHIFT)
#define L2_CACHE_ALIGN(x)	(((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)

/*
 * TILEPro I/O is not always coherent (networking typically uses coherent
 * I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the
 * L2 cacheline size helps ensure that kernel heap allocations are aligned.
 * TILE-Gx I/O is always coherent when used on hash-for-home pages.
 *
 * However, it's possible at runtime to request not to use hash-for-home
 * for the kernel heap, in which case the kernel will use flush-and-inval
 * to manage coherence.  As a result, we use L2_CACHE_BYTES for the
 * DMA minimum alignment to avoid false sharing in the kernel heap.
 */
#define ARCH_DMA_MINALIGN	L2_CACHE_BYTES

/* use the cache line size for the L2, which is where it counts */
#define SMP_CACHE_BYTES_SHIFT	L2_CACHE_SHIFT
#define SMP_CACHE_BYTES		L2_CACHE_BYTES
#define INTERNODE_CACHE_SHIFT	L2_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES	L2_CACHE_BYTES

/* Group together read-mostly things to avoid cache false sharing */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

/*
 * Originally we used small TLB pages for kernel data and grouped some
 * things together as "write once", enforcing the property at the end
 * of initialization by making those pages read-only and non-coherent.
 * This allowed better cache utilization since cache inclusion did not
 * need to be maintained.  However, to do this requires an extra TLB
 * entry, which on balance is more of a performance hit than the
 * non-coherence is a performance gain, so we now just make "read
 * mostly" and "write once" be synonyms.  We keep the attribute
 * separate in case we change our minds at a future date.
 */
#define __write_once __read_mostly

#endif /* _ASM_TILE_CACHE_H */
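Editor's note: the rounding trick behind L2_CACHE_ALIGN(x) above: add line size minus one, then mask with the negated line size (an all-ones value with the low bits cleared) to round up to the next line boundary. Checked here with an assumed 64-byte line; the real size comes from <arch/chip.h>:

/* Round-up-to-cache-line arithmetic, assuming a 64-byte line. */
#include <assert.h>

#define LINE 64
#define ALIGN_UP(x) (((x) + (LINE - 1)) & -LINE)

int main(void)
{
	assert(ALIGN_UP(0) == 0);
	assert(ALIGN_UP(1) == 64);
	assert(ALIGN_UP(100) == 128);	/* rounds up to the next line */
	assert(ALIGN_UP(128) == 128);	/* already aligned: unchanged */
	return 0;
}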

arch/tile/include/asm/cacheflush.h (Normal file, 160 lines)
@@ -0,0 +1,160 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CACHEFLUSH_H
#define _ASM_TILE_CACHEFLUSH_H

#include <arch/chip.h>

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <linux/cache.h>
#include <arch/icache.h>

/* Caches are physically-indexed and so don't need special treatment */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)

/* Flush the icache just on this cpu */
extern void __flush_icache_range(unsigned long start, unsigned long end);

/* Flush the entire icache on this cpu. */
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())

#ifdef CONFIG_SMP
/*
 * When the kernel writes to its own text we need to do an SMP
 * broadcast to make the L1I coherent everywhere.  This includes
 * module load and single step.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#else
#define flush_icache_range __flush_icache_range
#endif

/*
 * An update to an executable user page requires icache flushing.
 * We could carefully update only tiles that are running this process,
 * and rely on the fact that we flush the icache on every context
 * switch to avoid doing extra work here.  But for now, I'll be
 * conservative and just do a global icache flush.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		flush_icache_range((unsigned long) dst,
				   (unsigned long) dst + len);
	}
}

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy((dst), (src), (len))

/* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_flush(next);
		next += CHIP_FLUSH_STRIDE();
	}
}

/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __finv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_finv(next);
		next += CHIP_FINV_STRIDE();
	}
}


/*
 * Flush a locally-homecached VA range and wait for the evicted
 * cachelines to hit memory.
 */
static inline void flush_buffer_local(void *buffer, size_t size)
{
	__flush_buffer(buffer, size);
	mb_incoherent();
}

/*
 * Flush and invalidate a locally-homecached VA range and wait for the
 * evicted cachelines to hit memory.
 */
static inline void finv_buffer_local(void *buffer, size_t size)
{
	__finv_buffer(buffer, size);
	mb_incoherent();
}

#ifdef __tilepro__
/* Invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __inv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_inv(next);
		next += CHIP_INV_STRIDE();
	}
}

/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
	__inv_buffer(buffer, size);
	mb();
}
#endif

/*
 * Flush and invalidate a VA range that is homed remotely, waiting
 * until the memory controller holds the flushed values.  If "hfh" is
 * true, we will do a more expensive flush involving additional loads
 * to make sure we have touched all the possible home cpus of a buffer
 * that is homed with "hash for home".
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#endif /* _ASM_TILE_CACHEFLUSH_H */
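Editor's note: __flush_buffer() and __finv_buffer() above pad their loop bounds to whole L2 lines: the start address rounds down and the end rounds up, so partial lines at either edge are still covered. A worked example of the bound arithmetic with an assumed 64-byte line:

/* Loop-bound padding as in __flush_buffer(), with a 64-byte line. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const long LINE = 64;
	uintptr_t buffer = 0x1010, size = 0x30;		/* example VA range */
	uintptr_t next = buffer & -LINE;		/* round start down */
	uintptr_t finish = (buffer + size + LINE - 1) & -LINE; /* round end up */
	assert(next == 0x1000);
	assert(finish == 0x1040);	/* covers 0x1010..0x103f entirely */
	return 0;
}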

arch/tile/include/asm/checksum.h (Normal file, 42 lines)
@@ -0,0 +1,42 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CHECKSUM_H
#define _ASM_TILE_CHECKSUM_H

#include <asm-generic/checksum.h>

/* Allow us to provide a more optimized do_csum(). */
__wsum do_csum(const unsigned char *buff, int len);
#define do_csum do_csum

/*
 * Return the sum of all the 16-bit subwords in a long.
 * This sums two subwords on a 32-bit machine, and four on 64 bits.
 * The implementation does two vector adds to capture any overflow.
 */
static inline unsigned int csum_long(unsigned long x)
{
	unsigned long ret;
#ifdef __tilegx__
	ret = __insn_v2sadu(x, 0);
	ret = __insn_v2sadu(ret, 0);
#else
	ret = __insn_sadh_u(x, 0);
	ret = __insn_sadh_u(ret, 0);
#endif
	return ret;
}

#endif /* _ASM_TILE_CHECKSUM_H */
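Editor's note: a plain-C equivalent of the csum_long() folding above, for reference: the first pass sums the 16-bit subwords of the long, and the second pass (like the second v2sadu/sadh_u) adds the halfwords of that partial sum to capture the carries:

/* Portable equivalent of the two-pass halfword sum in csum_long(). */
#include <assert.h>

static unsigned int csum_long_c(unsigned long x)
{
	unsigned long sum = 0;
	for (unsigned int i = 0; i < sizeof(x) * 8; i += 16)
		sum += (x >> i) & 0xffff;	/* first pass: sum subwords */
	/* second pass: fold the halfwords of the partial sum */
	return (sum & 0xffff) + ((sum >> 16) & 0xffff);
}

int main(void)
{
	assert(csum_long_c(0x00010002UL) == 3);
	assert(csum_long_c(0xffffffffUL) == 0xffff);	/* 0x1fffe folded */
	return 0;
}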

arch/tile/include/asm/cmpxchg.h (Normal file, 134 lines)
@@ -0,0 +1,134 @@
/*
 * cmpxchg.h -- forked from asm/atomic.h with this copyright:
 *
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 */

#ifndef _ASM_TILE_CMPXCHG_H
#define _ASM_TILE_CMPXCHG_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>

/* Nonexistent functions intended to cause compile errors. */
extern void __xchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for cmpxchg");

#ifndef __tilegx__

/* Note the _atomic_xxx() routines include a final mb(). */
int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
long long _atomic64_xchg(long long *v, long long n);
long long _atomic64_xchg_add(long long *v, long long i);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
long long _atomic64_cmpxchg(long long *v, long long o, long long n);

#define xchg(ptr, n)						\
	({							\
		if (sizeof(*(ptr)) != 4)			\
			__xchg_called_with_bad_pointer();	\
		smp_mb();					\
		(typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
	})

#define cmpxchg(ptr, o, n)					\
	({							\
		if (sizeof(*(ptr)) != 4)			\
			__cmpxchg_called_with_bad_pointer();	\
		smp_mb();					\
		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
						(int)n);	\
	})

#define xchg64(ptr, n)						\
	({							\
		if (sizeof(*(ptr)) != 8)			\
			__xchg_called_with_bad_pointer();	\
		smp_mb();					\
		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
						(long long)(n)); \
	})

#define cmpxchg64(ptr, o, n)					\
	({							\
		if (sizeof(*(ptr)) != 8)			\
			__cmpxchg_called_with_bad_pointer();	\
		smp_mb();					\
		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
					(long long)o, (long long)n); \
	})

#else

#define xchg(ptr, n)						\
	({							\
		typeof(*(ptr)) __x;				\
		smp_mb();					\
		switch (sizeof(*(ptr))) {			\
		case 4:						\
			__x = (typeof(__x))(unsigned long)	\
				__insn_exch4((ptr),		\
					(u32)(unsigned long)(n)); \
			break;					\
		case 8:						\
			__x = (typeof(__x))			\
				__insn_exch((ptr), (unsigned long)(n)); \
			break;					\
		default:					\
			__xchg_called_with_bad_pointer();	\
			break;					\
		}						\
		smp_mb();					\
		__x;						\
	})

#define cmpxchg(ptr, o, n)					\
	({							\
		typeof(*(ptr)) __x;				\
		__insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
		smp_mb();					\
		switch (sizeof(*(ptr))) {			\
		case 4:						\
			__x = (typeof(__x))(unsigned long)	\
				__insn_cmpexch4((ptr),		\
					(u32)(unsigned long)(n)); \
			break;					\
		case 8:						\
			__x = (typeof(__x))__insn_cmpexch((ptr), \
						(long long)(n)); \
			break;					\
		default:					\
			__cmpxchg_called_with_bad_pointer();	\
			break;					\
		}						\
		smp_mb();					\
		__x;						\
	})

#define xchg64 xchg
#define cmpxchg64 cmpxchg

#endif

#define tas(ptr) xchg((ptr), 1)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_CMPXCHG_H */
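Editor's note: tas() above (xchg(ptr, 1)) is the classic test-and-set building block. A minimal spinlock sketched on the same idea, with GCC builtins standing in for the kernel's xchg() (illustrative only; the real kernel uses its own spinlock implementation):

/* Test-and-set spinlock sketch on the tas() pattern above. */
static int lock_word;	/* 0 = free, 1 = held */

static void spin_lock_tas(int *lock)
{
	/* Spin until the previous value was 0, i.e. we flipped it
	 * free -> held ourselves; exactly "while (tas(ptr))". */
	while (__sync_lock_test_and_set(lock, 1) != 0)
		;
}

static void spin_unlock_tas(int *lock)
{
	__sync_lock_release(lock);	/* store 0 with release semantics */
}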
296
arch/tile/include/asm/compat.h
Normal file
@@ -0,0 +1,296 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_COMPAT_H
#define _ASM_TILE_COMPAT_H

/*
 * Architecture specific compatibility types
 */
#include <linux/types.h>
#include <linux/sched.h>

#define COMPAT_USER_HZ	100

/* "long" and pointer-based types are different. */
typedef s32 compat_long_t;
typedef u32 compat_ulong_t;
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_off_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef u32 compat_ino_t;
typedef u32 compat_caddr_t;
typedef u32 compat_uptr_t;

/* Many types are "int" or otherwise the same. */
typedef __kernel_pid_t compat_pid_t;
typedef __kernel_uid_t __compat_uid_t;
typedef __kernel_gid_t __compat_gid_t;
typedef __kernel_uid32_t __compat_uid32_t;
typedef __kernel_uid32_t __compat_gid32_t;
typedef __kernel_mode_t compat_mode_t;
typedef __kernel_dev_t compat_dev_t;
typedef __kernel_loff_t compat_loff_t;
typedef __kernel_ipc_pid_t compat_ipc_pid_t;
typedef __kernel_daddr_t compat_daddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef __kernel_timer_t compat_timer_t;
typedef __kernel_key_t compat_key_t;
typedef int compat_int_t;
typedef s64 compat_s64;
typedef uint compat_uint_t;
typedef u64 compat_u64;

/* We use the same register dump format in 32-bit images. */
typedef unsigned long compat_elf_greg_t;
#define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t))
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];

struct compat_timespec {
	compat_time_t	tv_sec;
	s32		tv_nsec;
};

struct compat_timeval {
	compat_time_t	tv_sec;
	s32		tv_usec;
};

#define compat_stat stat
#define compat_statfs statfs

struct compat_sysctl {
	unsigned int	name;
	int		nlen;
	unsigned int	oldval;
	unsigned int	oldlenp;
	unsigned int	newval;
	unsigned int	newlen;
	unsigned int	__unused[4];
};

struct compat_flock {
	short		l_type;
	short		l_whence;
	compat_off_t	l_start;
	compat_off_t	l_len;
	compat_pid_t	l_pid;
};

#define F_GETLK64	12	/* using 'struct flock64' */
#define F_SETLK64	13
#define F_SETLKW64	14

struct compat_flock64 {
	short		l_type;
	short		l_whence;
	compat_loff_t	l_start;
	compat_loff_t	l_len;
	compat_pid_t	l_pid;
};

#define COMPAT_RLIM_INFINITY	0xffffffff

#define _COMPAT_NSIG		64
#define _COMPAT_NSIG_BPW	32

typedef u32 compat_sigset_word;

typedef union compat_sigval {
	compat_int_t	sival_int;
	compat_uptr_t	sival_ptr;
} compat_sigval_t;

#define COMPAT_SI_PAD_SIZE	(128/sizeof(int) - 3)

typedef struct compat_siginfo {
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[COMPAT_SI_PAD_SIZE];

		/* kill() */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			compat_timer_t _tid;	/* timer id */
			int _overrun;		/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
			int _overrun_incr;	/* amount to add to overrun */
		} _timer;

		/* POSIX.1b signals */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			unsigned int _pid;	/* which child */
			unsigned int _uid;	/* sender's uid */
			int _status;		/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
		struct {
			unsigned int _addr;	/* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
			int _trapno;	/* TRAP # which caused the signal */
#endif
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
} compat_siginfo_t;

#define COMPAT_OFF_T_MAX	0x7fffffff
#define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL

struct compat_ipc64_perm {
	compat_key_t key;
	__compat_uid32_t uid;
	__compat_gid32_t gid;
	__compat_uid32_t cuid;
	__compat_gid32_t cgid;
	unsigned short mode;
	unsigned short __pad1;
	unsigned short seq;
	unsigned short __pad2;
	compat_ulong_t unused1;
	compat_ulong_t unused2;
};

struct compat_semid64_ds {
	struct compat_ipc64_perm sem_perm;
	compat_time_t  sem_otime;
	compat_ulong_t __unused1;
	compat_time_t  sem_ctime;
	compat_ulong_t __unused2;
	compat_ulong_t sem_nsems;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

struct compat_msqid64_ds {
	struct compat_ipc64_perm msg_perm;
	compat_time_t  msg_stime;
	compat_ulong_t __unused1;
	compat_time_t  msg_rtime;
	compat_ulong_t __unused2;
	compat_time_t  msg_ctime;
	compat_ulong_t __unused3;
	compat_ulong_t msg_cbytes;
	compat_ulong_t msg_qnum;
	compat_ulong_t msg_qbytes;
	compat_pid_t   msg_lspid;
	compat_pid_t   msg_lrpid;
	compat_ulong_t __unused4;
	compat_ulong_t __unused5;
};

struct compat_shmid64_ds {
	struct compat_ipc64_perm shm_perm;
	compat_size_t  shm_segsz;
	compat_time_t  shm_atime;
	compat_ulong_t __unused1;
	compat_time_t  shm_dtime;
	compat_ulong_t __unused2;
	compat_time_t  shm_ctime;
	compat_ulong_t __unused3;
	compat_pid_t   shm_cpid;
	compat_pid_t   shm_lpid;
	compat_ulong_t shm_nattch;
	compat_ulong_t __unused4;
	compat_ulong_t __unused5;
};

/*
 * A pointer passed in from user mode. This should not
 * be used for syscall parameters, just declare them
 * as pointers because the syscall entry code will have
 * appropriately converted them already.
 */

static inline void __user *compat_ptr(compat_uptr_t uptr)
{
	return (void __user *)(long)(s32)uptr;
}

static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
	return (u32)(unsigned long)uptr;
}

/* Sign-extend when storing a kernel pointer to a user's ptregs. */
static inline unsigned long ptr_to_compat_reg(void __user *uptr)
{
	return (long)(int)(long __force)uptr;
}

static inline void __user *arch_compat_alloc_user_space(long len)
{
	struct pt_regs *regs = task_pt_regs(current);
	return (void __user *)regs->sp - len;
}

static inline int is_compat_task(void)
{
	return current_thread_info()->status & TS_COMPAT;
}

extern int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
				 struct pt_regs *regs);

/* Compat syscalls. */
struct compat_siginfo;
struct compat_sigaltstack;
long compat_sys_rt_sigreturn(void);
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
			u32 dummy, u32 low, u32 high);
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
			 u32 dummy, u32 low, u32 high);
long compat_sys_sync_file_range2(int fd, unsigned int flags,
				 u32 offset_lo, u32 offset_hi,
				 u32 nbytes_lo, u32 nbytes_hi);
long compat_sys_fallocate(int fd, int mode,
			  u32 offset_lo, u32 offset_hi,
			  u32 len_lo, u32 len_hi);
long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
		       unsigned int offset_low, loff_t __user *result,
		       unsigned int origin);

/* Assembly trampoline to avoid clobbering r0. */
long _compat_sys_rt_sigreturn(void);

#endif /* _ASM_TILE_COMPAT_H */
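To make the pointer-conversion helpers above concrete, here is a minimal sketch (not in this tree; the syscall name and arguments are hypothetical) of how a compat syscall would widen a 32-bit user value with compat_ptr() before dereferencing it:

/* Hypothetical compat handler: compat_uptr_t arrives as a plain u32
 * and must be widened into a real user pointer before use. */
long compat_sys_example(compat_uptr_t ubuf, compat_size_t len)
{
	char __user *buf = compat_ptr(ubuf);	/* sign-extend per above */
	char first;

	if (len == 0 || copy_from_user(&first, buf, 1))
		return -EFAULT;
	return first;	/* ptr_to_compat() performs the reverse narrowing */
}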
31
arch/tile/include/asm/current.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CURRENT_H
#define _ASM_TILE_CURRENT_H

#include <linux/thread_info.h>

struct task_struct;

static inline struct task_struct *get_current(void)
{
	return current_thread_info()->task;
}
#define current get_current()

/* Return a usable "task_struct" pointer even if the real one is corrupt. */
struct task_struct *validate_current(void);

#endif /* _ASM_TILE_CURRENT_H */
34
arch/tile/include/asm/delay.h
Normal file
@@ -0,0 +1,34 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DELAY_H
#define _ASM_TILE_DELAY_H

/* Undefined functions to get compile-time errors. */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);

extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __delay(unsigned long loops);

#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \
	__udelay(n))

#define ndelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \
	__ndelay(n))

#endif /* _ASM_TILE_DELAY_H */
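The two macros above rely on a link-time trick: __bad_udelay() and __bad_ndelay() are declared but never defined, so a constant delay above 20000 leaves an unresolved reference and the build fails instead of silently spinning too long. A short illustration (not in this tree; the function name is hypothetical):

static void example_delays(int n)
{
	udelay(10);	/* constant <= 20000: folds to __ndelay(10000) */
	udelay(n);	/* non-constant: calls __udelay(n) at run time */
	/* udelay(30000); would reference the undefined __bad_udelay()
	 * and fail at link time, catching the bug during the build. */
}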
36
arch/tile/include/asm/device.h
Normal file
@@ -0,0 +1,36 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 * Arch specific extensions to struct device
 */

#ifndef _ASM_TILE_DEVICE_H
#define _ASM_TILE_DEVICE_H

struct dev_archdata {
	/* DMA operations on that device */
	struct dma_map_ops	*dma_ops;

	/* Offset of the DMA address from the PA. */
	dma_addr_t		dma_offset;

	/*
	 * Highest DMA address that can be generated by devices that
	 * have limited DMA capability, i.e. non 64-bit capable.
	 */
	dma_addr_t		max_direct_dma_addr;
};

struct pdev_archdata {
};

#endif /* _ASM_TILE_DEVICE_H */
158
arch/tile/include/asm/dma-mapping.h
Normal file
@@ -0,0 +1,158 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DMA_MAPPING_H
#define _ASM_TILE_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/cache.h>
#include <linux/io.h>

#ifdef __tilegx__
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif

extern struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return tile_dma_map_ops;
}

static inline dma_addr_t get_dma_offset(struct device *dev)
{
	return dev->archdata.dma_offset;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	dev->archdata.dma_offset = off;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

#include <asm-generic/dma-mapping-common.h>

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->dma_supported(dev, mask);
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/*
	 * For PCI devices with 64-bit DMA addressing capability, promote
	 * the dma_ops to hybrid, with the consistent memory DMA space limited
	 * to 32-bit. For 32-bit capable devices, limit the streaming DMA
	 * address range to max_direct_dma_addr.
	 */
	if (dma_ops == gx_pci_dma_map_ops ||
	    dma_ops == gx_hybrid_pci_dma_map_ops ||
	    dma_ops == gx_legacy_pci_dma_map_ops) {
		if (mask == DMA_BIT_MASK(64) &&
		    dma_ops == gx_legacy_pci_dma_map_ops)
			set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
		else if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	void *cpu_addr;

	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);

	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)

/*
 * dma_alloc_noncoherent() is #defined to return coherent memory,
 * so there's no need to do any flushing here.
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction direction)
{
}

#endif /* _ASM_TILE_DMA_MAPPING_H */
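A driver exercises this header in a fixed order: negotiate a mask with dma_set_mask() (which may swap the dma_ops as shown above), then allocate coherent memory. A minimal sketch of that sequence (not in this tree; the probe function is hypothetical):

/* Hypothetical driver fragment using the API declared above. */
static int example_probe(struct pci_dev *pdev)
{
	dma_addr_t bus;
	void *ring;

	/* May promote legacy ops to hybrid, per dma_set_mask() above. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return -EIO;

	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* ... program "bus" into the device, use "ring" from the cpu ... */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, bus);
	return 0;
}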
25
arch/tile/include/asm/dma.h
Normal file
@@ -0,0 +1,25 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DMA_H
#define _ASM_TILE_DMA_H

#include <asm-generic/dma.h>

/* Needed by drivers/pci/quirks.c */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#endif

#endif /* _ASM_TILE_DMA_H */
29
arch/tile/include/asm/edac.h
Normal file
@@ -0,0 +1,29 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_EDAC_H
#define _ASM_TILE_EDAC_H

/* ECC atomic, DMA, SMP and interrupt safe scrub function */

static inline void atomic_scrub(void *va, u32 size)
{
	/*
	 * There is nothing to be done here because CE is
	 * corrected by the mshim.
	 */
	return;
}

#endif /* _ASM_TILE_EDAC_H */
183
arch/tile/include/asm/elf.h
Normal file
@@ -0,0 +1,183 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_ELF_H
#define _ASM_TILE_ELF_H

/*
 * ELF register definitions.
 */

#include <arch/chip.h>

#include <linux/ptrace.h>
#include <asm/byteorder.h>
#include <asm/page.h>

typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

#define EM_TILEPRO 188
#define EM_TILEGX  191

/* Provide a nominal data structure. */
#define ELF_NFPREG	0
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

#ifdef __tilegx__
#define ELF_CLASS	ELFCLASS64
#else
#define ELF_CLASS	ELFCLASS32
#endif
#ifdef __BIG_ENDIAN__
#define ELF_DATA	ELFDATA2MSB
#else
#define ELF_DATA	ELFDATA2LSB
#endif

/*
 * There seems to be a bug in how compat_binfmt_elf.c works: it
 * #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info().
 * Hack around this by providing an enum value of ELF_ARCH.
 */
enum { ELF_ARCH = CHIP_ELF_TYPE() };
#define ELF_ARCH ELF_ARCH

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) \
	((x)->e_ident[EI_CLASS] == ELF_CLASS && \
	 (x)->e_ident[EI_DATA] == ELF_DATA && \
	 (x)->e_machine == CHIP_ELF_TYPE())

/* The module loader only handles a few relocation types. */
#ifndef __tilegx__
#define R_TILE_32		1
#define R_TILE_JOFFLONG_X1	15
#define R_TILE_IMM16_X0_LO	25
#define R_TILE_IMM16_X1_LO	26
#define R_TILE_IMM16_X0_HA	29
#define R_TILE_IMM16_X1_HA	30
#else
#define R_TILEGX_64			1
#define R_TILEGX_JUMPOFF_X1		21
#define R_TILEGX_IMM16_X0_HW0		36
#define R_TILEGX_IMM16_X1_HW0		37
#define R_TILEGX_IMM16_X0_HW1		38
#define R_TILEGX_IMM16_X1_HW1		39
#define R_TILEGX_IMM16_X0_HW2_LAST	48
#define R_TILEGX_IMM16_X1_HW2_LAST	49
#endif

/* Use standard page size for core dumps. */
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/*
 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
 * use of this is to invoke "./ld.so someprog" to test out a new version of
 * the loader. We need to make sure that it is out of the way of the program
 * that it will "exec", and that there is sufficient room for the brk.
 */
#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)

#define ELF_CORE_COPY_REGS(_dest, _regs) \
	memcpy((char *) &_dest, (char *) _regs, \
	       sizeof(struct pt_regs));

/* No additional FP registers to copy. */
#define ELF_CORE_COPY_FPREGS(t, fpu) 0

/*
 * This yields a mask that user programs can use to figure out what
 * instruction set this CPU supports. This could be done in user space,
 * but it's not easy, and we've already done it here.
 */
#define ELF_HWCAP	(0)

/*
 * This yields a string that ld.so will use to load implementation
 * specific libraries for optimization. This is more specific in
 * intent than poking at uname or /proc/cpuinfo.
 */
#define ELF_PLATFORM	(NULL)

extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr);

#define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr)

extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)

/* Tilera Linux has no personalities currently, so no need to do anything. */
#define SET_PERSONALITY(ex) do { } while (0)

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
/* Support auto-mapping of the user interrupt vectors. */
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int executable_stack);
#define ARCH_DLINFO \
do { \
	NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
} while (0)

struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk

#ifdef CONFIG_COMPAT

#define COMPAT_ELF_PLATFORM "tilegx-m32"

/*
 * "Compat" binaries have the same machine type, but 32-bit class,
 * since they're not a separate machine type, but just a 32-bit
 * variant of the standard 64-bit architecture.
 */
#define compat_elf_check_arch(x) \
	((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
	 (x)->e_machine == CHIP_ELF_TYPE())

#define compat_start_thread(regs, ip, usp) do { \
		regs->pc = ptr_to_compat_reg((void *)(ip)); \
		regs->sp = ptr_to_compat_reg((void *)(usp)); \
		single_step_execve(); \
	} while (0)

/*
 * Use SET_PERSONALITY to indicate compatibility via TS_COMPAT.
 */
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex) \
do { \
	set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
	current_thread_info()->status &= ~TS_COMPAT; \
} while (0)
#define COMPAT_SET_PERSONALITY(ex) \
do { \
	set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
	current_thread_info()->status |= TS_COMPAT; \
} while (0)

#define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2)

#endif /* CONFIG_COMPAT */

#define CORE_DUMP_USE_REGSET

#endif /* _ASM_TILE_ELF_H */
87
arch/tile/include/asm/fixmap.h
Normal file
@@ -0,0 +1,87 @@
/*
 * Copyright (C) 1998 Ingo Molnar
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_FIXMAP_H
#define _ASM_TILE_FIXMAP_H

#include <asm/page.h>

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif

/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process. We allocate these special addresses
 * from the end of supervisor virtual memory backwards.
 * Also this lets us do fail-safe vmalloc(), we
 * can guarantee that these special addresses and
 * vmalloc()-ed addresses never overlap.
 *
 * these 'compile-time allocated' memory buffers are
 * fixed-size 4k pages. (or larger if used with an increment
 * higher than 1) use fixmap_set(idx,phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
enum fixed_addresses {
#ifdef __tilegx__
	/*
	 * TILEPro has unmapped memory above so the hole isn't needed,
	 * and in any case the hole pushes us over a single 16MB pmd.
	 */
	FIX_HOLE,
#endif
#ifdef CONFIG_HIGHMEM
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
#ifdef __tilegx__	/* see homecache.c */
	FIX_HOMECACHE_BEGIN,
	FIX_HOMECACHE_END = FIX_HOMECACHE_BEGIN+(NR_CPUS)-1,
#endif
	__end_of_permanent_fixed_addresses,

	/*
	 * Temporary boot-time mappings, used before ioremap() is functional.
	 * Not currently needed by the Tile architecture.
	 */
#define NR_FIX_BTMAPS	0
#if NR_FIX_BTMAPS
	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
	__end_of_fixed_addresses
#else
	__end_of_fixed_addresses = __end_of_permanent_fixed_addresses
#endif
};

#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
#define FIXADDR_BOOT_START	(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)

#include <asm-generic/fixmap.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FIXMAP_H */
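The enum indices above become constant virtual addresses through the generic fix_to_virt() helper pulled in by <asm-generic/fixmap.h>: index idx maps to FIXADDR_TOP - (idx << PAGE_SHIFT). A sketch of that arithmetic (illustrative only; the per-cpu slot layout shown is an assumption modeled on the usual kmap_atomic scheme, not taken from this tree):

#ifdef CONFIG_HIGHMEM
/* Hypothetical helper: the virtual address of one kmap_atomic slot. */
static unsigned long example_kmap_slot_vaddr(int type)
{
	/* Assumed layout: KM_TYPE_NR slots per cpu from FIX_KMAP_BEGIN. */
	int idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();

	return fix_to_virt(idx);	/* FIXADDR_TOP - (idx << PAGE_SHIFT) */
}
#endif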
40
arch/tile/include/asm/ftrace.h
Normal file
@@ -0,0 +1,40 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_FTRACE_H
#define _ASM_TILE_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER

#define MCOUNT_ADDR ((unsigned long)(__mcount))
#define MCOUNT_INSN_SIZE 8	/* sizeof mcount call */

#ifndef __ASSEMBLY__
extern void __mcount(void);

#ifdef CONFIG_DYNAMIC_FTRACE
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}

struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_FUNCTION_TRACER */

#endif /* _ASM_TILE_FTRACE_H */
196
arch/tile/include/asm/futex.h
Normal file
@@ -0,0 +1,196 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/atomic.h>

/*
 * Support macros for futex operations. Do not use these macros directly.
 * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
 * __futex_cmpxchg() additionally assumes "oldval".
 */

#ifdef __tilegx__

#define __futex_asm(OP) \
	asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
	    ".pushsection .fixup,\"ax\"\n" \
	    "0: { movei %0, %5; j 9f }\n" \
	    ".section __ex_table,\"a\"\n" \
	    ".align 8\n" \
	    ".quad 1b, 0b\n" \
	    ".popsection\n" \
	    "9:" \
	    : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
	    : "r" (uaddr), "r" (oparg), "i" (-EFAULT))

#define __futex_set() __futex_asm(exch4)
#define __futex_add() __futex_asm(fetchadd4)
#define __futex_or() __futex_asm(fetchor4)
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
#define __futex_cmpxchg() \
	({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })

#define __futex_xor() \
({ \
	u32 oldval, n = oparg; \
	if ((ret = __get_user(oldval, uaddr)) == 0) { \
		do { \
			oparg = oldval ^ n; \
			__futex_cmpxchg(); \
		} while (ret == 0 && oldval != val); \
	} \
})

/* No need to prefetch, since the atomic ops go to the home cache anyway. */
#define __futex_prolog()

#else

#define __futex_call(FN) \
{ \
	struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
	val = gu.val; \
	ret = gu.err; \
}

#define __futex_set() __futex_call(__atomic_xchg)
#define __futex_add() __futex_call(__atomic_xchg_add)
#define __futex_or() __futex_call(__atomic_or)
#define __futex_andn() __futex_call(__atomic_andn)
#define __futex_xor() __futex_call(__atomic_xor)

#define __futex_cmpxchg() \
{ \
	struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
						lock, oldval, oparg); \
	val = gu.val; \
	ret = gu.err; \
}

/*
 * Find the lock pointer for the atomic calls to use, and issue a
 * prefetch to the user address to bring it into cache. Similar to
 * __atomic_setup(), but we can't do a read into the L1 since it might
 * fault; instead we do a prefetch into the L2.
 */
#define __futex_prolog() \
	int *lock; \
	__insn_prefetch(uaddr); \
	lock = __atomic_hashed_lock((int __force *)uaddr)
#endif

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int uninitialized_var(val), ret;

	__futex_prolog();

	/* The 32-bit futex code makes this assumption, so validate it here. */
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		__futex_set();
		break;
	case FUTEX_OP_ADD:
		__futex_add();
		break;
	case FUTEX_OP_OR:
		__futex_or();
		break;
	case FUTEX_OP_ANDN:
		__futex_andn();
		break;
	case FUTEX_OP_XOR:
		__futex_xor();
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	pagefault_enable();

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 oparg)
{
	int ret, val;

	__futex_prolog();

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	__futex_cmpxchg();

	*uval = val;
	return ret;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */
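The shift arithmetic at the top of futex_atomic_op_inuser() unpacks the 32-bit encoded_op word: op in bits 28..31, cmp in bits 24..27, then two 12-bit sign-extended immediates. A standalone decoder mirroring it (not in this tree; the function is purely illustrative):

/* Hypothetical decoder matching futex_atomic_op_inuser() above.
 * Example: FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) encodes to
 * 0x10001000 and decodes to op=1 (ADD), cmp=0 (EQ), oparg=1, cmparg=0. */
static void decode_futex_op(int encoded_op)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extends 12 bits */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extends 12 bits */

	pr_info("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}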
45
arch/tile/include/asm/hardirq.h
Normal file
@@ -0,0 +1,45 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_HARDIRQ_H
#define _ASM_TILE_HARDIRQ_H

#include <linux/threads.h>
#include <linux/cache.h>

#include <asm/irq.h>

typedef struct {
	unsigned int __softirq_pending;
	long idle_timestamp;

	/* Hard interrupt statistics. */
	unsigned int irq_timer_count;
	unsigned int irq_syscall_count;
	unsigned int irq_resched_count;
	unsigned int irq_hv_flush_count;
	unsigned int irq_call_count;
	unsigned int irq_hv_msg_count;
	unsigned int irq_dev_intr_count;

} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

#endif /* _ASM_TILE_HARDIRQ_H */
30
arch/tile/include/asm/hardwall.h
Normal file
@@ -0,0 +1,30 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Provide methods for access control of per-cpu resources like
 * UDN, IDN, or IPI.
 */
#ifndef _ASM_TILE_HARDWALL_H
#define _ASM_TILE_HARDWALL_H

#include <uapi/asm/hardwall.h>

/* /proc hooks for hardwall. */
struct proc_dir_entry;
#ifdef CONFIG_HARDWALL
void proc_tile_hardwall_init(struct proc_dir_entry *root);
int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *task);
#else
static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
#endif
#endif /* _ASM_TILE_HARDWALL_H */
72
arch/tile/include/asm/highmem.h
Normal file
@@ -0,0 +1,72 @@
/*
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 */

#ifndef _ASM_TILE_HIGHMEM_H
#define _ASM_TILE_HIGHMEM_H

#include <linux/interrupt.h>
#include <linux/threads.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *pkmap_page_table;

/*
 * Ordering is:
 *
 * FIXADDR_TOP
 *	fixed_addresses
 * FIXADDR_START
 *	temp fixed addresses
 * FIXADDR_BOOT_START
 *	Persistent kmap area
 * PKMAP_BASE
 * VMALLOC_END
 *	Vmalloc area
 * VMALLOC_START
 * high_memory
 */
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_fix_kpte(struct page *page, int finished);

/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)

void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);

#define flush_cache_kmaps()	do { } while (0)

#endif /* _ASM_TILE_HIGHMEM_H */
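The declarations above support the usual highmem access pattern: take a short-lived per-cpu mapping with kmap_atomic(), touch the page, and release it. A minimal sketch of that pattern (illustrative only; the helper is hypothetical):

/* Hypothetical helper: zero a possibly-highmem page through a
 * temporary per-cpu mapping. */
static void zero_page_contents(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* implicitly non-sleeping */

	memset(vaddr, 0, PAGE_SIZE);
	__kunmap_atomic(vaddr);			/* release the slot */
}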
123
arch/tile/include/asm/homecache.h
Normal file
@@ -0,0 +1,123 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Handle issues around the Tile "home cache" model of coherence.
 */

#ifndef _ASM_TILE_HOMECACHE_H
#define _ASM_TILE_HOMECACHE_H

#include <asm/page.h>
#include <linux/cpumask.h>

struct page;
struct task_struct;
struct vm_area_struct;
struct zone;

/*
 * Coherence point for the page is its memory controller.
 * It is not present in any cache (L1 or L2).
 */
#define PAGE_HOME_UNCACHED -1

/*
 * Is this page immutable (unwritable) and thus able to be cached more
 * widely than would otherwise be possible? This means we have "nc" set.
 */
#define PAGE_HOME_IMMUTABLE -2

/*
 * Each cpu considers its own cache to be the home for the page,
 * which makes it incoherent.
 */
#define PAGE_HOME_INCOHERENT -3

/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4

/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
			 const struct cpumask *cache_cpumask,
			 HV_VirtAddr tlb_va, unsigned long tlb_length,
			 unsigned long tlb_pgsize,
			 const struct cpumask *tlb_cpumask,
			 HV_Remote_ASID *asids, int asidcount);

/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
extern pte_t pte_set_home(pte_t pte, int home);

/* Do a cache eviction on the specified cpus. */
extern void homecache_evict(const struct cpumask *mask);

/*
 * Change a kernel page's homecache. It must not be mapped in user space.
 * If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when
 * no other cpu can reference the page, and causes a full-chip cache/TLB flush.
 */
extern void homecache_change_page_home(struct page *, int order, int home);

/*
 * Flush a page out of whatever cache(s) it is in.
 * This is more than just finv, since it properly handles waiting
 * for the data to reach memory, but it can be quite
 * heavyweight, particularly on incoherent or immutable memory.
 */
extern void homecache_finv_page(struct page *);

/*
 * Flush a page out of the specified home cache.
 * Note that the specified home need not be the actual home of the page,
 * as for example might be the case when coordinating with I/O devices.
 */
extern void homecache_finv_map_page(struct page *, int home);

/*
 * Allocate a page with the given GFP flags, home, and optionally
 * node. These routines are actually just wrappers around the normal
 * alloc_pages() / alloc_pages_node() functions, which set and clear
 * a per-cpu variable to communicate with homecache_new_kernel_page().
 * If !CONFIG_HOMECACHE, uses homecache_change_page_home().
 */
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
					  unsigned int order, int home);
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					       unsigned int order, int home);
#define homecache_alloc_page(gfp_mask, home) \
	homecache_alloc_pages(gfp_mask, 0, home)

/*
 * These routines are just pass-throughs to free_pages() when
 * we support full homecaching. If !CONFIG_HOMECACHE, then these
 * routines use homecache_change_page_home() to reset the home
 * back to the default before returning the page to the allocator.
 */
void __homecache_free_pages(struct page *, unsigned int order);
void homecache_free_pages(unsigned long addr, unsigned int order);
#define __homecache_free_page(page) __homecache_free_pages((page), 0)
#define homecache_free_page(page) homecache_free_pages((page), 0)


/*
 * Report the page home for LOWMEM pages by examining their kernel PTE,
 * or for highmem pages as the default home.
 */
extern int page_home(struct page *);

#define homecache_migrate_kthread() do {} while (0)

#define homecache_kpte_lock() 0
#define homecache_kpte_unlock(flags) do {} while (0)


#endif /* _ASM_TILE_HOMECACHE_H */
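Putting the allocation wrappers above together, a caller can home a kernel page on a particular cpu's cache and later return it to the allocator, which resets the home. A minimal sketch (not in this tree; the function is hypothetical):

/* Hypothetical fragment: allocate a page homed on the current cpu. */
static int example_homecache_use(void)
{
	struct page *page;

	/* Home the page on this cpu's L2 rather than the default. */
	page = homecache_alloc_page(GFP_KERNEL, smp_processor_id());
	if (!page)
		return -ENOMEM;
	memset(page_address(page), 0, PAGE_SIZE);
	__homecache_free_page(page);	/* resets the home before freeing */
	return 0;
}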
135
arch/tile/include/asm/hugetlb.h
Normal file
@@ -0,0 +1,135 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>


static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#ifdef CONFIG_HUGETLB_SUPER_PAGES
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));
	if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
		entry = pte_mksuper(entry);
	return entry;
}
#define arch_make_huge_pte arch_make_huge_pte

/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
enum {
	HUGE_SHIFT_PGDIR = 0,
	HUGE_SHIFT_PMD = 1,
	HUGE_SHIFT_PAGE = 2,
	HUGE_SHIFT_ENTRIES
};
extern int huge_shift[HUGE_SHIFT_ENTRIES];
#endif

#endif /* _ASM_TILE_HUGETLB_H */
60
arch/tile/include/asm/hv_driver.h
Normal file
@@ -0,0 +1,60 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * This header defines a wrapper interface for managing hypervisor
 * device calls that will result in an interrupt at some later time.
 * In particular, this provides wrappers for hv_dev_preada() and
 * hv_dev_pwritea().
 */

#ifndef _ASM_TILE_HV_DRIVER_H
#define _ASM_TILE_HV_DRIVER_H

#include <hv/hypervisor.h>

struct hv_driver_cb;

/* A callback to be invoked when an operation completes. */
typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result);

/*
 * A structure to hold information about an outstanding call.
 * The driver must allocate a separate structure for each call.
 */
struct hv_driver_cb {
	hv_driver_callback_t *callback;	/* Function to call on interrupt. */
	void *dev;			/* Driver-specific state variable. */
};

/* Wrapper for invoking hv_dev_preada(). */
static inline int
tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len,
		   HV_SGL sgl[/* sgl_len */], __hv64 offset,
		   struct hv_driver_cb *callback)
{
	return hv_dev_preada(devhdl, flags, sgl_len, sgl,
			     offset, (HV_IntArg)callback);
}

/* Wrapper for invoking hv_dev_pwritea(). */
static inline int
tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len,
		    HV_SGL sgl[/* sgl_len */], __hv64 offset,
		    struct hv_driver_cb *callback)
{
	return hv_dev_pwritea(devhdl, flags, sgl_len, sgl,
			      offset, (HV_IntArg)callback);
}


#endif /* _ASM_TILE_HV_DRIVER_H */
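The intended usage is to embed the hv_driver_cb in per-request driver state, so the completion interrupt can recover that state from the cb pointer. A short hypothetical sketch (the struct, callback, and function names below are illustrative, not part of this tree):

struct example_dev {
	struct hv_driver_cb cb;
	int devhdl;
};

static void example_done(struct hv_driver_cb *cb, __hv32 result)
{
	struct example_dev *dev = cb->dev;	/* recover driver state */
	/* ... complete the request using "result" and "dev" ... */
}

static int example_start_read(struct example_dev *dev, HV_SGL *sgl,
			      __hv32 sgl_len, __hv64 offset)
{
	dev->cb.callback = example_done;
	dev->cb.dev = dev;
	return tile_hv_dev_preada(dev->devhdl, 0, sgl_len, sgl,
				  offset, &dev->cb);
}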
25
arch/tile/include/asm/ide.h
Normal file
@@ -0,0 +1,25 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IDE_H
#define _ASM_TILE_IDE_H

/* For IDE on PCI */
#define MAX_HWIFS	10

#define ide_default_io_ctl(base)	(0)

#include <asm-generic/ide_iops.h>

#endif /* _ASM_TILE_IDE_H */
505
arch/tile/include/asm/io.h
Normal file
@@ -0,0 +1,505 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IO_H
#define _ASM_TILE_IO_H

#include <linux/kernel.h>
#include <linux/bug.h>
#include <asm/page.h>

/* Maximum PCI I/O space address supported. */
#define IO_SPACE_LIMIT 0xffffffff

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
 * long before casting it to a pointer to avoid compiler warnings.
 */
#if CHIP_HAS_MMIO()
extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				  pgprot_t pgprot);
extern void iounmap(volatile void __iomem *addr);
#else
#define ioremap(physaddr, size)	((void __iomem *)(unsigned long)(physaddr))
#define iounmap(addr)		((void)0)
#endif

#define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
#define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
#define ioremap_writethrough(physaddr, size)	ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)

#define mmiowb()

/* Conversion between virtual and physical mappings. */
#define mm_ptov(addr)		((void *)phys_to_virt(addr))
#define mm_vtop(addr)		((unsigned long)virt_to_phys(addr))

#if CHIP_HAS_MMIO()

/*
 * We use inline assembly to guarantee that the compiler does not
 * split an access into multiple byte-sized accesses as it might
 * sometimes do if a register data structure is marked "packed".
 * Obviously on tile we can't tolerate such an access being
 * actually unaligned, but we want to avoid the case where the
 * compiler conservatively would generate multiple accesses even
 * for an aligned read or write.
 */

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 ret;
	asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le16_to_cpu(ret);
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 ret;
	/* Sign-extend to conform to u32 ABI sign-extension convention. */
	asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le32_to_cpu(ret);
}

static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 ret;
	asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le64_to_cpu(ret);
}

static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = val;
}

static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val)));
}

static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val)));
}

static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val)));
}

/*
 * The on-chip I/O hardware on tilegx is configured with VA=PA for the
 * kernel's PA range. The low-level APIs and field names use "va" and
 * "void *" nomenclature, to be consistent with the general notion
 * that the addresses in question are virtualizable, but in the kernel
 * context we are actually manipulating PA values. (In other contexts,
 * e.g. access from user space, we do in fact use real virtual addresses
 * in the va fields.) To allow readers of the code to understand what's
 * happening, we direct their attention to this comment by using the
 * following two functions that just duplicate __va() and __pa().
 */
typedef unsigned long tile_io_addr_t;
static inline tile_io_addr_t va_to_tile_io_addr(void *va)
{
	BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t));
	return __pa(va);
}
static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr)
{
	return __va(tile_io_addr);
}

#else /* CHIP_HAS_MMIO() */

#ifdef CONFIG_PCI

extern u8 _tile_readb(unsigned long addr);
extern u16 _tile_readw(unsigned long addr);
extern u32 _tile_readl(unsigned long addr);
extern u64 _tile_readq(unsigned long addr);
extern void _tile_writeb(u8 val, unsigned long addr);
extern void _tile_writew(u16 val, unsigned long addr);
extern void _tile_writel(u32 val, unsigned long addr);
extern void _tile_writeq(u64 val, unsigned long addr);

#define __raw_readb(addr) _tile_readb((unsigned long)addr)
#define __raw_readw(addr) _tile_readw((unsigned long)addr)
#define __raw_readl(addr) _tile_readl((unsigned long)addr)
#define __raw_readq(addr) _tile_readq((unsigned long)addr)
#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr)
#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr)
#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr)

#else /* CONFIG_PCI */

/*
 * The tilepro architecture does not support IOMEM unless PCI is enabled.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.
 */

static inline int iomem_panic(void)
{
	panic("readb/writeb and friends do not exist on tile without PCI");
	return 0;
}

static inline u8 readb(unsigned long addr)
{
	return iomem_panic();
}

static inline u16 readw(unsigned long addr)
{
	return iomem_panic();
}

static inline u32 readl(unsigned long addr)
{
	return iomem_panic();
}

static inline u64 readq(unsigned long addr)
{
	return iomem_panic();
}

static inline void writeb(u8 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writew(u16 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writel(u32 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writeq(u64 val, unsigned long addr)
{
	iomem_panic();
}

#endif /* CONFIG_PCI */

#endif /* CHIP_HAS_MMIO() */

#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
#define readq __raw_readq
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq

#define ioread8 readb
#define ioread16 readw
#define ioread32 readl
#define ioread64 readq
#define iowrite8 writeb
#define iowrite16 writew
#define iowrite32 writel
#define iowrite64 writeq

#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)

static inline void memset_io(volatile void *dst, int val, size_t len)
{
	size_t x;
	BUG_ON((unsigned long)dst & 0x3);
	val = (val & 0xff) * 0x01010101;
	for (x = 0; x < len; x += 4)
		writel(val, dst + x);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
				 size_t len)
{
	size_t x;
	BUG_ON((unsigned long)src & 0x3);
	for (x = 0; x < len; x += 4)
		*(u32 *)(dst + x) = readl(src + x);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
			       size_t len)
{
	size_t x;
	BUG_ON((unsigned long)dst & 0x3);
	for (x = 0; x < len; x += 4)
		writel(*(u32 *)(src + x), dst + x);
}

#endif

#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)

static inline u8 inb(unsigned long addr)
{
	return readb((volatile void __iomem *) addr);
}

static inline u16 inw(unsigned long addr)
{
	return readw((volatile void __iomem *) addr);
}

static inline u32 inl(unsigned long addr)
{
	return readl((volatile void __iomem *) addr);
}

static inline void outb(u8 b, unsigned long addr)
{
	writeb(b, (volatile void __iomem *) addr);
}

static inline void outw(u16 b, unsigned long addr)
{
	writew(b, (volatile void __iomem *) addr);
}

static inline void outl(u32 b, unsigned long addr)
{
	writel(b, (volatile void __iomem *) addr);
}

static inline void insb(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u8 *buf = buffer;
		do {
			u8 x = inb(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u16 *buf = buffer;
		do {
			u16 x = inw(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u32 *buf = buffer;
		do {
			u32 x = inl(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u8 *buf = buffer;
		do {
			outb(*buf++, addr);
		} while (--count);
	}
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u16 *buf = buffer;
		do {
			outw(*buf++, addr);
		} while (--count);
	}
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u32 *buf = buffer;
		do {
			outl(*buf++, addr);
		} while (--count);
	}
}

extern void __iomem *ioport_map(unsigned long port, unsigned int len);
extern void ioport_unmap(void __iomem *addr);

#else

/*
 * The TilePro architecture does not support IOPORT, even with PCI.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.
 */

static inline long ioport_panic(void)
{
#ifdef __tilegx__
	panic("PCI IO space support is disabled. Configure the kernel with"
	      " CONFIG_TILE_PCI_IO to enable it");
#else
	panic("inb/outb and friends do not exist on tile");
#endif
	return 0;
}

static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
	pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
	return NULL;
}

static inline void ioport_unmap(void __iomem *addr)
{
	ioport_panic();
}

static inline u8 inb(unsigned long addr)
{
	return ioport_panic();
}

static inline u16 inw(unsigned long addr)
{
	return ioport_panic();
}

static inline u32 inl(unsigned long addr)
{
	return ioport_panic();
}

static inline void outb(u8 b, unsigned long addr)
{
	ioport_panic();
}

static inline void outw(u16 b, unsigned long addr)
{
	ioport_panic();
}

static inline void outl(u32 b, unsigned long addr)
{
	ioport_panic();
}

static inline void insb(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */

#define inb_p(addr)	inb(addr)
#define inw_p(addr)	inw(addr)
#define inl_p(addr)	inl(addr)
#define outb_p(x, addr)	outb((x), (addr))
#define outw_p(x, addr)	outw((x), (addr))
#define outl_p(x, addr)	outl((x), (addr))

#define ioread16be(addr)	be16_to_cpu(ioread16(addr))
#define ioread32be(addr)	be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr)	iowrite16(be16_to_cpu(v), (addr))
#define iowrite32be(v, addr)	iowrite32(be32_to_cpu(v), (addr))

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))

#define virt_to_bus	virt_to_phys
#define bus_to_virt	phys_to_virt

#endif /* _ASM_TILE_IO_H */
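A short sketch of how these accessors are typically used from a driver; the physical address, mapping length, and register offsets below are illustrative, not from this tree.

/* Hypothetical MMIO usage sketch; REG_CTRL/REG_STATUS offsets are made up. */
#include <linux/errno.h>
#include <linux/io.h>

#define REG_CTRL	0x00
#define REG_STATUS	0x04

static int poke_device(resource_size_t phys)
{
	void __iomem *base = ioremap(phys, 0x1000);
	u32 status;

	if (!base)
		return -ENOMEM;
	writel(1, base + REG_CTRL);	/* one st4 bundle, never byte-split */
	status = readl(base + REG_STATUS);
	iounmap(base);
	return status ? 0 : -EIO;
}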
81
arch/tile/include/asm/irq.h
Normal file
@@ -0,0 +1,81 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IRQ_H
#define _ASM_TILE_IRQ_H

#include <linux/hardirq.h>

/* The hypervisor interface provides 32 IRQs. */
#define NR_IRQS 32

/* IRQ numbers used for linux IPIs. */
#define IRQ_RESCHEDULE 0
/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0. */
#define NR_IRQS_LEGACY 1

#define irq_canonicalize(irq)	(irq)

void ack_bad_irq(unsigned int irq);

/*
 * Different ways of handling interrupts. Tile interrupts are always
 * per-cpu; there is no global interrupt controller to implement
 * enable/disable. Most onboard devices can send their interrupts to
 * many tiles at the same time, and Tile-specific drivers know how to
 * deal with this.
 *
 * However, generic devices (usually PCIE based, sometimes GPIO)
 * expect that interrupts will fire on a single core at a time and
 * that the irq can be enabled or disabled from any core at any time.
 * We implement this by directing such interrupts to a single core.
 *
 * One added wrinkle is that PCI interrupts can be either
 * hardware-cleared (legacy interrupts) or software cleared (MSI).
 * Other generic device systems (GPIO) are always software-cleared.
 *
 * The enums below are used by drivers for onboard devices, including
 * the internals of PCI root complex and GPIO. They allow the driver
 * to tell the generic irq code what kind of interrupt is mapped to a
 * particular IRQ number.
 */
enum {
	/* per-cpu interrupt; use enable/disable_percpu_irq() to mask */
	TILE_IRQ_PERCPU,
	/* global interrupt, hardware responsible for clearing. */
	TILE_IRQ_HW_CLEAR,
	/* global interrupt, software responsible for clearing. */
	TILE_IRQ_SW_CLEAR,
};


/*
 * Paravirtualized drivers should call this when they dynamically
 * allocate a new IRQ or discover an IRQ that was pre-allocated by the
 * hypervisor for use with their particular device. This gives the
 * IRQ subsystem an opportunity to do interrupt-type-specific
 * initialization.
 *
 * ISSUE: We should modify this API so that registering anything
 * except percpu interrupts also requires providing callback methods
 * for enabling and disabling the interrupt. This would allow the
 * generic IRQ code to proxy enable/disable_irq() calls back into the
 * PCI subsystem, which in turn could enable or disable the interrupt
 * at the PCI shim.
 */
void tile_irq_activate(unsigned int irq, int tile_irq_type);

void setup_irq_regs(void);

#endif /* _ASM_TILE_IRQ_H */
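As a sketch of the intended flow, a driver handling a generic (software-cleared) interrupt would declare the IRQ type before wiring in a handler; the IRQ number and the names below are hypothetical.

/* Hypothetical sketch: wiring up a software-cleared device interrupt. */
#include <linux/interrupt.h>
#include <asm/irq.h>

static irqreturn_t my_handler(int irq, void *data)
{
	/* For TILE_IRQ_SW_CLEAR, software is responsible for clearing. */
	return IRQ_HANDLED;
}

static int my_setup_irq(int irq)
{
	tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);	/* declare the type */
	return request_irq(irq, my_handler, 0, "mydev", NULL);
}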
311
arch/tile/include/asm/irqflags.h
Normal file
@@ -0,0 +1,311 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

/*
 * The set of interrupts we want to allow when interrupts are nominally
 * disabled. The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code. Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#define LINUX_MASKABLE_INTERRUPTS \
	(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))

#if CHIP_HAS_SPLIT_INTR_MASK()
/* The same macro, but for the two 32-bit SPRs separately. */
#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
#define LINUX_MASKABLE_INTERRUPTS_HI \
	(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
#endif

#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/*
 * Set and clear kernel interrupt masks.
 *
 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
 * clobber. We rely on it being equivalent to a compiler barrier in
 * this code since arch_local_irq_save() and friends must act as
 * compiler barriers. This compiler semantic is baked into enough
 * places that the compiler will maintain it going forward.
 */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
	int __n = (n); \
	(((__n < 32) ? \
	 __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
	 __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
	  >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_save_mask() \
	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
	 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
#define interrupt_mask_restore_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#define interrupt_mask_save_mask() \
	__insn_mfspr(SPR_INTERRUPT_MASK_K)
#define interrupt_mask_restore_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
#endif

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)

#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif

/* Disable interrupts. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
	interrupt_mask_set_mask(-1ULL)

/*
 * Read the set of maskable interrupts.
 * We avoid the preemption warning here via raw_cpu_ptr since even
 * if irqs are already enabled, it's harmless to read the wrong cpu's
 * enabled mask.
 */
#define arch_local_irqs_enabled() \
	(*raw_cpu_ptr(&interrupts_enabled_mask))

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(arch_local_irqs_enabled())

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
	if (disabled) \
		arch_local_irq_disable(); \
	else \
		arch_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()

/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
	unsigned long __flags = arch_local_save_flags(); \
	arch_local_irq_disable(); \
	__flags; })

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
	this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
	arch_local_irq_mask(interrupt); \
	interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
	this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
	arch_local_irq_unmask(interrupt); \
	if (!irqs_disabled()) \
		interrupt_mask_reset(interrupt); \
} while (0)

#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp)					\
	mfspr	tmp, SPR_INTERRUPT_MASK_K;			\
	andi	tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
	moveli	reg, hw2_last(interrupts_enabled_mask);		\
	shl16insli reg, reg, hw1(interrupts_enabled_mask);	\
	shl16insli reg, reg, hw0(interrupts_enabled_mask);	\
	add	reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1)					\
	moveli	tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);	\
	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);	\
	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);	\
	mtspr	SPR_INTERRUPT_MASK_SET_K, tmp0

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)					\
	movei	tmp, -1;					\
	mtspr	SPR_INTERRUPT_MASK_SET_K, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1)				\
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
	ld	tmp0, tmp0
#define IRQ_ENABLE_APPLY(tmp0, tmp1)				\
	mtspr	SPR_INTERRUPT_MASK_RESET_K, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp)					\
	mfspr	tmp, SPR_INTERRUPT_MASK_K_0;			\
	shri	tmp, tmp, INT_MEM_ERROR;			\
	andi	tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
	moveli	reg, lo16(interrupts_enabled_mask);		\
	auli	reg, reg, ha16(interrupts_enabled_mask);	\
	add	reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1)					\
	{							\
	 movei	tmp0, LINUX_MASKABLE_INTERRUPTS_LO;		\
	 moveli	tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)	\
	};							\
	{							\
	 mtspr	SPR_INTERRUPT_MASK_SET_K_0, tmp0;		\
	 auli	tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI)	\
	};							\
	mtspr	SPR_INTERRUPT_MASK_SET_K_1, tmp1

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)					\
	movei	tmp, -1;					\
	mtspr	SPR_INTERRUPT_MASK_SET_K_0, tmp;		\
	mtspr	SPR_INTERRUPT_MASK_SET_K_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1)				\
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
	{							\
	 lw	tmp0, tmp0;					\
	 addi	tmp1, tmp0, 4					\
	};							\
	lw	tmp1, tmp1
#define IRQ_ENABLE_APPLY(tmp0, tmp1)				\
	mtspr	SPR_INTERRUPT_MASK_RESET_K_0, tmp0;		\
	mtspr	SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif

#define IRQ_ENABLE(tmp0, tmp1)					\
	IRQ_ENABLE_LOAD(tmp0, tmp1);				\
	IRQ_ENABLE_APPLY(tmp0, tmp1)

/*
 * Do the CPU's IRQ-state tracing from assembly code. We call a
 * C function, but almost everywhere we do, we don't mind clobbering
 * all the caller-saved registers.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON	jal trace_hardirqs_on
# define TRACE_IRQS_OFF	jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_IRQFLAGS_H */
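To make the flags contract above concrete: arch_local_irq_save() returns just the "disabled" bit (the INT_MEM_ERROR sentinel), so the generic save/restore pairs work unchanged. A minimal sketch; the counter and function name are illustrative.

/* Sketch: the generic irqflags API resolves to the arch_* macros above. */
#include <linux/irqflags.h>

static void critical_update(unsigned long long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch_local_irq_save(): save + mask */
	(*counter)++;			/* no maskable interrupt can intervene */
	local_irq_restore(flags);	/* re-enables only if previously enabled */
}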
28
arch/tile/include/asm/kdebug.h
Normal file
@@ -0,0 +1,28 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_KDEBUG_H
#define _ASM_TILE_KDEBUG_H

#include <linux/notifier.h>

enum die_val {
	DIE_OOPS = 1,
	DIE_BREAK,
	DIE_SSTEPBP,
	DIE_PAGE_FAULT,
	DIE_COMPILED_BPT
};

#endif /* _ASM_TILE_KDEBUG_H */
65
arch/tile/include/asm/kexec.h
Normal file
@@ -0,0 +1,65 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * based on kexec.h from other architectures in linux-2.6.18
 */

#ifndef _ASM_TILE_KEXEC_H
#define _ASM_TILE_KEXEC_H

#include <asm/page.h>

#ifndef __tilegx__
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#else
/*
 * We need to limit the memory below PGDIR_SIZE, since we only set up
 * the page table for [0, PGDIR_SIZE) before the final kexec.
 */
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
#endif

#define KEXEC_CONTROL_PAGE_SIZE	PAGE_SIZE

/*
 * We don't bother to provide a unique identifier, since we can only
 * reboot with a single type of kernel image anyway.
 */
#define KEXEC_ARCH KEXEC_ARCH_DEFAULT

/* Use the tile override for the page allocator. */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
#define kimage_alloc_pages_arch kimage_alloc_pages_arch

#define MAX_NOTE_BYTES 1024

/* Defined in arch/tile/kernel/relocate_kernel.S */
extern const unsigned char relocate_new_kernel[];
extern const unsigned long relocate_new_kernel_size;
extern void relocate_new_kernel_end(void);

/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o)
{
}

#endif /* _ASM_TILE_KEXEC_H */
71
arch/tile/include/asm/kgdb.h
Normal file
@@ -0,0 +1,71 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE-Gx KGDB support.
 */

#ifndef __TILE_KGDB_H__
#define __TILE_KGDB_H__

#include <linux/kdebug.h>
#include <arch/opcode.h>

#define GDB_SIZEOF_REG sizeof(unsigned long)

/*
 * TILE-Gx gdb expects the following register layout:
 * 56 GPRs (R0 - R52, TP, SP, LR), 8 special GPRs (networks and ZERO),
 * plus the PC and the faultnum.
 *
 * Even though the kernel does not use the 8 special GPRs, they need to
 * be present in the registers sent so that the host-side gdb can
 * process them correctly.
 */
#define DBG_MAX_REG_NUM (56+8+2)
#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)

/*
 * BUFMAX defines the maximum number of characters in the
 * inbound/outbound buffers; at least NUMREGBYTES*2 are needed for
 * register packets, and a longer buffer is needed to list all threads.
 */
#define BUFMAX 2048

#define BREAK_INSTR_SIZE TILEGX_BUNDLE_SIZE_IN_BYTES

/*
 * A cache flush is required when setting or clearing a software
 * breakpoint, or when writing memory.
 */
#define CACHE_FLUSH_IS_SAFE 1

/*
 * The compiled-in breakpoint instruction can be used to "break" into
 * the debugger via the magic system request key (sysrq-G).
 */
static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;

enum tilegx_regnum {
	TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
	TILEGX_FAULTNUM_REGNUM,
};

/*
 * Generate a breakpoint exception to "break" into the debugger.
 */
static inline void arch_kgdb_breakpoint(void)
{
	asm volatile (".quad %0\n\t"
		      ::""(compiled_bpt));
}

#endif /* __TILE_KGDB_H__ */
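For illustration, the compiled-in breakpoint above is what a kernel-side debug hook would emit to drop into the debugger; the hook name below is hypothetical.

/* Hypothetical sketch: breaking into kgdb from kernel code. */
#include <asm/kgdb.h>

static void my_debug_hook(void)
{
	arch_kgdb_breakpoint();	/* emits the TILEGX_BPT_BUNDLE bundle inline */
}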
28
arch/tile/include/asm/kmap_types.h
Normal file
@@ -0,0 +1,28 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_KMAP_TYPES_H
#define _ASM_TILE_KMAP_TYPES_H

/*
 * In 32-bit TILE Linux we have to balance the desire to have a lot of
 * nested atomic mappings with the fact that large page sizes and many
 * processors chew up address space quickly. In a typical
 * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
 * adds 4MB of required address-space. For now we leave KM_TYPE_NR
 * set to depth 8.
 */
#define KM_TYPE_NR 8

#endif /* _ASM_TILE_KMAP_TYPES_H */
79
arch/tile/include/asm/kprobes.h
Normal file
@@ -0,0 +1,79 @@
/*
 * arch/tile/include/asm/kprobes.h
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_KPROBES_H
#define _ASM_TILE_KPROBES_H

#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>

#include <arch/opcode.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2

#define kretprobe_blacklist_size 0

typedef tile_bundle_bits kprobe_opcode_t;

#define flush_insn_slot(p)						\
	flush_icache_range((unsigned long)p->addr,			\
			   (unsigned long)p->addr +			\
			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe;

/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
	kprobe_opcode_t *insn;
};

struct prev_kprobe {
	struct kprobe *kp;
	unsigned long status;
	unsigned long saved_pc;
};

#define MAX_JPROBES_STACK_SIZE 128
#define MAX_JPROBES_STACK_ADDR \
	(((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
		- sizeof(struct pt_regs))

#define MIN_JPROBES_STACK_SIZE(ADDR)					\
	((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR)	\
		? MAX_JPROBES_STACK_ADDR - (ADDR)			\
		: MAX_JPROBES_STACK_SIZE)

/* per-cpu kprobe control block. */
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	unsigned long kprobe_saved_pc;
	unsigned long jprobe_saved_sp;
	struct prev_kprobe prev_kprobe;
	struct pt_regs jprobe_saved_regs;
	char jprobes_stack[MAX_JPROBES_STACK_SIZE];
};

extern tile_bundle_bits breakpoint2_insn;
extern tile_bundle_bits breakpoint_insn;

void arch_remove_kprobe(struct kprobe *);

extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);

#endif /* _ASM_TILE_KPROBES_H */
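A minimal registration sketch against this arch support; the probed symbol and all handler names below are illustrative only.

/* Hypothetical sketch: registering a kprobe on tile. */
#include <linux/kprobes.h>
#include <linux/printk.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at pc %#lx\n", regs->pc);	/* tile pt_regs has .pc */
	return 0;	/* let the probed instruction execute */
}

static struct kprobe my_kp = {
	.symbol_name = "do_exit",	/* hypothetical target symbol */
	.pre_handler = my_pre,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_kp);	/* single-steps via the insn slot above */
}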
51
arch/tile/include/asm/linkage.h
Normal file
@@ -0,0 +1,51 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_LINKAGE_H
#define _ASM_TILE_LINKAGE_H

#include <feedback.h>

#define __ALIGN .align 8

/*
 * The STD_ENTRY and STD_ENDPROC macros put the function in a
 * self-named .text.foo section, and if linker feedback collection
 * is enabled, add a suitable call to the feedback collection code.
 * STD_ENTRY_SECTION lets you specify a non-standard section name.
 */

#define STD_ENTRY(name) \
	.pushsection .text.##name, "ax"; \
	ENTRY(name); \
	FEEDBACK_ENTER(name)

#define STD_ENTRY_SECTION(name, section) \
	.pushsection section, "ax"; \
	ENTRY(name); \
	FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name)

#define STD_ENDPROC(name) \
	ENDPROC(name); \
	.Lend_##name:; \
	.popsection

/* Create a file-static function entry set up for feedback gathering. */
#define STD_ENTRY_LOCAL(name) \
	.pushsection .text.##name, "ax"; \
	ALIGN; \
	name:; \
	FEEDBACK_ENTER(name)

#endif /* _ASM_TILE_LINKAGE_H */
32
arch/tile/include/asm/mmu.h
Normal file
@@ -0,0 +1,32 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMU_H
#define _ASM_TILE_MMU_H

/* Capture any arch- and mm-specific information. */
struct mm_context {
	/*
	 * Written under the mmap_sem semaphore; read without the
	 * semaphore, but atomically; it is conservatively set.
	 */
	unsigned long priority_cached;
	unsigned long vdso_base;
};

typedef struct mm_context mm_context_t;

void leave_mm(int cpu);

#endif /* _ASM_TILE_MMU_H */
135
arch/tile/include/asm/mmu_context.h
Normal file
@@ -0,0 +1,135 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

/*
 * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
 * also call hv_install_context().
 */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
	/* FIXME: DIRECTIO should not always be set. FIXME. */
	int rc = hv_install_context(__pa(pgdir), prot, asid,
				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}

static inline void install_page_table(pgd_t *pgdir, int asid)
{
	pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
	__install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task. The goal of this
 * optimization is to avoid having to install a new page table. On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page table install for invalidation, so
 * handling a remote TLB invalidate required doing a page table
 * re-install. Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization involves
 * installing the swapper page table instead, the first time one
 * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
 * the kernel task doesn't need to take any more interrupts. At that
 * point it's then necessary to explicitly reinstall it when context
 * switching back to the original mm.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway. And more generally,
 * we have efficient per-page TLB shootdown, and don't expect to spend
 * that much time in kernel tasks in general, so just leaving the
 * kernel task borrowing the old page table, but handling TLB
 * shootdowns, is a reasonable thing to do. And importantly, this
 * lets us use the hypervisor's internal APIs for TLB shootdown, which
 * means we don't have to worry about having TLB shootdowns blocked
 * when Linux is disabling interrupts; see the page migration code for
 * an example of where it's important for TLB shootdowns to complete
 * even when interrupts are disabled at the Linux level.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
	/*
	 * We have to do an "identity" page table switch in order to
	 * clear any pending DMA interrupts.
	 */
	if (current->thread.tile_dma_state.enabled)
		install_page_table(mm->pgd, __this_cpu_read(current_asid));
#endif
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (likely(prev != next)) {

		int cpu = smp_processor_id();

		/* Pick new ASID. */
		int asid = __this_cpu_read(current_asid) + 1;
		if (asid > max_asid) {
			asid = min_asid;
			local_flush_tlb();
		}
		__this_cpu_write(current_asid, asid);

		/* Clear cpu from the old mm, and set it in the new one. */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		install_page_table(next->pgd, asid);

		/* See how we should set the red/black cache info */
		check_mm_caching(prev, next);

		/*
		 * Since we're changing to a new mm, we have to flush
		 * the icache in case some physical page now being mapped
		 * has subsequently been repurposed and has new code.
		 */
		__flush_icache();

	}
}

static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, NULL);
}

#define destroy_context(mm)		do { } while (0)
#define deactivate_mm(tsk, mm)		do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */
70
arch/tile/include/asm/mmzone.h
Normal file
@@ -0,0 +1,70 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMZONE_H
#define _ASM_TILE_MMZONE_H

extern struct pglist_data node_data[];
#define NODE_DATA(nid)	(&node_data[nid])

extern void get_memcfg_numa(void);

#ifdef CONFIG_DISCONTIGMEM

#include <asm/page.h>

/*
 * Generally, memory ranges are always doled out by the hypervisor in
 * fixed-size, power-of-two increments. That would make computing the node
 * very easy. We could just take a couple high bits of the PA, which
 * denote the memory shim, and we'd be done. However, when we're doing
 * memory striping, this may not be true; PAs with different high bit
 * values might be in the same node. Thus, we keep a lookup table to
 * translate the high bits of the PFN to the node number.
 */
extern int highbits_to_node[];

static inline int pfn_to_nid(unsigned long pfn)
{
	return highbits_to_node[__pfn_to_highbits(pfn)];
}

#define kern_addr_valid(kaddr)	virt_addr_valid((void *)kaddr)

static inline int pfn_valid(unsigned long pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return (pfn < node_end_pfn(nid));
	return 0;
}

/* Information on the NUMA nodes that we compute early */
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_memmap_pfn[];
extern unsigned long node_percpu_pfn[];
extern unsigned long node_free_pfn[];
#ifdef CONFIG_HIGHMEM
extern unsigned long node_lowmem_end_pfn[];
#endif
#ifdef CONFIG_PCI
extern unsigned long pci_reserve_start_pfn;
extern unsigned long pci_reserve_end_pfn;
#endif

#endif /* CONFIG_DISCONTIGMEM */

#endif /* _ASM_TILE_MMZONE_H */
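A small sketch of the PFN-to-node lookup these helpers provide; it assumes a CONFIG_DISCONTIGMEM build, and the function name is illustrative.

/* Hypothetical sketch: resolving the NUMA node of a page via the table above. */
#include <linux/mm.h>
#include <asm/mmzone.h>

static int page_node(struct page *pg)
{
	unsigned long pfn = page_to_pfn(pg);

	/* highbits_to_node[] folds memory striping into the node lookup. */
	return pfn_valid(pfn) ? pfn_to_nid(pfn) : -1;
}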
40
arch/tile/include/asm/module.h
Normal file
@@ -0,0 +1,40 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MODULE_H
#define _ASM_TILE_MODULE_H

#include <arch/chip.h>

#include <asm-generic/module.h>

/* We can't use modules built with different page sizes. */
#if defined(CONFIG_PAGE_SIZE_16KB)
# define MODULE_PGSZ " 16KB"
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define MODULE_PGSZ " 64KB"
#else
# define MODULE_PGSZ ""
#endif

/* We don't really support no-SMP, so tag the module if someone tries. */
#ifdef CONFIG_SMP
#define MODULE_NOSMP ""
#else
#define MODULE_NOSMP " nosmp"
#endif

#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP

#endif /* _ASM_TILE_MODULE_H */
333
arch/tile/include/asm/page.h
Normal file
333
arch/tile/include/asm/page.h
Normal file
|
|
@ -0,0 +1,333 @@
|
|||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PAGE_H
#define _ASM_TILE_PAGE_H

#include <linux/const.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#if defined(CONFIG_PAGE_SIZE_16KB)
#define PAGE_SHIFT 14
#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PAGE_SHIFT 16
#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
#else
#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
#define CTX_PAGE_FLAG 0
#endif
#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE

#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)

#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

/*
 * If the Kconfig doesn't specify, set a maximum zone order that
 * is enough so that we can create huge pages from small pages given
 * the respective sizes of the two page types. See <linux/mmzone.h>.
 */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/string.h>

struct page;

static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}

static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}

/*
 * Hypervisor page tables are made of the same basic structure.
 */

typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;

/*
 * User L2 page tables are managed as one L2 page table per page,
 * because we use the page allocator for them. This keeps the allocation
 * simple, but it's also inefficient, since L2 page tables are much smaller
 * than pages (currently 2KB vs 64KB). So we should revisit this.
 */
typedef struct page *pgtable_t;

/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)

/* Rarely-used initializers, typically with a "zero" value. */
#define __pte(x) hv_pte(x)
#define __pgd(x) hv_pte(x)

static inline u64 pgprot_val(pgprot_t pgprot)
{
	return hv_pte_val(pgprot);
}

static inline u64 pte_val(pte_t pte)
{
	return hv_pte_val(pte);
}

static inline u64 pgd_val(pgd_t pgd)
{
	return hv_pte_val(pgd);
}

#ifdef __tilegx__

typedef HV_PTE pmd_t;

#define __pmd(x) hv_pte(x)

static inline u64 pmd_val(pmd_t pmd)
{
	return hv_pte_val(pmd);
}

#endif

static inline __attribute_const__ int get_order(unsigned long size)
{
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#define HUGE_MAX_HSTATE 6

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

/* Allow overriding how much VA or PA the kernel will use. */
#define MAX_PA_WIDTH CHIP_PA_WIDTH()
#define MAX_VA_WIDTH CHIP_VA_WIDTH()

/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))

#ifdef __tilegx__

/*
 * We reserve the lower half of memory for user-space programs, and the
 * upper half for system code. We re-map all of physical memory in the
 * upper half, which takes a quarter of our VA space. Then we have
 * the vmalloc regions. The supervisor code lives at the highest address,
 * with the hypervisor above that.
 *
 * Loadable kernel modules are placed immediately after the static
 * supervisor code, with each being allocated a 256MB region of
 * address space, so we don't have to worry about the range of "jal"
 * and other branch instructions.
 *
 * For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
 * Similarly, for now we don't play any struct page mapping games.
 */

#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
# error Too much PA to map with the VA available!
#endif

#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
#define _VMALLOC_START FIXADDR_TOP
#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))

#else /* !__tilegx__ */

/*
 * A PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 768MB.
 * If you want more physical memory than this then see the CONFIG_HIGHMEM
 * option in the kernel configuration.
 *
 * The top 16MB chunk in the table below is unavailable to Linux. Since
 * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
 * (depending on whether the kernel is at PL2 or PL1), we map all of the
 * bottom of RAM at this address with a huge page table entry to minimize
 * its ITLB footprint (as well as at PAGE_OFFSET). The last architected
 * requirement is that user interrupt vectors live at 0xfc000000, so we
 * make that range of memory available to user processes. The remaining
 * regions are sized as shown; the first four addresses use the PL 1
 * values, and after that, we show "typical" values, since the actual
 * addresses depend on kernel #defines.
 *
 *   MEM_HV_START                    0xfe000000
 *   MEM_SV_START  (kernel code)     0xfd000000
 *   MEM_USER_INTRPT (user vector)   0xfc000000
 *   FIX_KMAP_xxx                    0xfa000000 (via NR_CPUS * KM_TYPE_NR)
 *   PKMAP_BASE                      0xf9000000 (via LAST_PKMAP)
 *   VMALLOC_START                   0xf7000000 (via VMALLOC_RESERVE)
 *   mapped LOWMEM                   0xc0000000
 */

#define MEM_USER_INTRPT _AC(0xfc000000, UL)
#define MEM_SV_START _AC(0xfd000000, UL)
#define MEM_HV_START _AC(0xfe000000, UL)

#define INTRPT_SIZE 0x4000

/* Tolerate page size larger than the architecture interrupt region size. */
#if PAGE_SIZE > INTRPT_SIZE
#undef INTRPT_SIZE
#define INTRPT_SIZE PAGE_SIZE
#endif

#define KERNEL_HIGH_VADDR MEM_USER_INTRPT
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE)

#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)

/* On 32-bit architectures we mix kernel modules in with other vmaps. */
#define MEM_MODULE_START VMALLOC_START
#define MEM_MODULE_END VMALLOC_END

#endif /* __tilegx__ */

#if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)

#ifdef CONFIG_HIGHMEM

/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
extern unsigned long pbase_map[];
extern void *vbase_map[];

static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
	unsigned long kaddr = (unsigned long)_kaddr;
	return pbase_map[kaddr >> HPAGE_SHIFT] +
		((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
	return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
	unsigned long pfn = kaddr_to_pfn(kaddr);
	return ((phys_addr_t)pfn << PAGE_SHIFT) +
		((unsigned long)kaddr & (PAGE_SIZE-1));
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
	return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
}

/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
static inline int virt_addr_valid(const volatile void *kaddr)
{
	extern void *high_memory;  /* copied from <linux/mm.h> */
	return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}

#else /* !CONFIG_HIGHMEM */

static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
{
	return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
	return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
	return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
	return (void *)((unsigned long)paddr + PAGE_OFFSET);
}

/* Check that the given address is within some mapped range of PAs. */
#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))

#endif /* !CONFIG_HIGHMEM */

/* All callers are not consistent in how they call these functions. */
#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))

extern int devmem_is_allowed(unsigned long pagenr);

#ifdef CONFIG_FLATMEM
static inline int pfn_valid(unsigned long pfn)
{
	return pfn < max_mapnr;
}
#endif

/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
extern pte_t *virt_to_kpte(unsigned long kaddr);

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>

#endif /* _ASM_TILE_PAGE_H */
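The get_order() helper above turns a byte count into a buddy-allocator order with a single count-leading-zeros. A small user-space check of the arithmetic, assuming a PAGE_SHIFT of 14 (16KB pages) and a 64-bit long, values the real header takes from Kconfig and the ABI:

#include <stdio.h>

#define PAGE_SHIFT 14			/* assumed: CONFIG_PAGE_SIZE_16KB */
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define BITS_PER_LONG 64		/* assumed 64-bit build */

static int get_order(unsigned long size)
{
	/* For size <= PAGE_SIZE the shifted operand is 0, and the result
	 * then relies on the hardware treating clz(0) as BITS_PER_LONG;
	 * we only exercise the well-defined cases here. */
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d %d %d\n",
	       get_order(PAGE_SIZE + 1),	/* 1: just over one page */
	       get_order(2 * PAGE_SIZE),	/* 1: exactly two pages */
	       get_order(8 * PAGE_SIZE));	/* 3 */
	return 0;
}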
232
arch/tile/include/asm/pci.h
Normal file
@@ -0,0 +1,232 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm-generic/pci_iomap.h>

#ifndef __tilegx__

/*
 * Structure of a PCI controller (host bridge)
 */
struct pci_controller {
	int index;		/* PCI domain number */
	struct pci_bus *root_bus;

	int last_busno;

	int hv_cfg_fd[2];	/* config{0,1} fds for this PCIe controller */
	int hv_mem_fd;		/* fd to Hypervisor for MMIO operations */

	struct pci_ops *ops;

	int irq_base;		/* Base IRQ from the Hypervisor */
	int plx_gen1;		/* flag for PLX Gen 1 configuration */

	/* Address ranges that are routed to this controller/bridge. */
	struct resource mem_resources[3];
};

/*
 * This flag indicates whether the platform is a TILEmpower board,
 * which needs special configuration for the PLX switch chip.
 */
extern int tile_plx_gen1;

static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}

#define TILE_NUM_PCIE 2

/*
 * The hypervisor maps the entirety of CPA-space as bus addresses, so
 * bus addresses are physical addresses. The networking and block
 * device layers use this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS 1

/* generic pci stuff */
#include <asm-generic/pci.h>

#else

#include <asm/page.h>
#include <gxio/trio.h>

/*
 * We reserve the hugepage-size address range at the top of the 64-bit address
 * space to serve as the PCI window, emulating the BAR0 space of an endpoint
 * device. This window is used by the chip-to-chip applications running on
 * the RC node. The reason for carving out this window is that Mem-Maps that
 * back up this window will not overlap with those that map the real physical
 * memory.
 */
#define PCIE_HOST_BAR0_SIZE HPAGE_SIZE
#define PCIE_HOST_BAR0_START HPAGE_MASK

/*
 * The first PAGE_SIZE of the above "BAR" window is mapped to the
 * gxpci_host_regs structure.
 */
#define PCIE_HOST_REGS_SIZE PAGE_SIZE

/*
 * This is the PCI address where the Mem-Map interrupt regions start.
 * We use the 2nd to the last huge page of the 64-bit address space.
 * The last huge page is used for the rootcomplex "bar", for C2C purposes.
 */
#define MEM_MAP_INTR_REGIONS_BASE (HPAGE_MASK - HPAGE_SIZE)

/*
 * Each Mem-Map interrupt region occupies 4KB.
 */
#define MEM_MAP_INTR_REGION_SIZE (1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT)

/*
 * Allocate the PCI BAR window right below 4GB.
 */
#define TILE_PCI_BAR_WINDOW_TOP (1ULL << 32)

/*
 * Allocate 1GB for the PCI BAR window.
 */
#define TILE_PCI_BAR_WINDOW_SIZE (1 << 30)

/*
 * This is the highest bus address targeting the host memory that
 * can be generated by legacy PCI devices with 32-bit or less
 * DMA capability, dictated by the BAR window size and location.
 */
#define TILE_PCI_MAX_DIRECT_DMA_ADDRESS \
	(TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1)

/*
 * We shift the PCI bus range for all the physical memory up by the whole PA
 * range. The corresponding CPA of an incoming PCI request will be the PCI
 * address minus TILE_PCI_MEM_MAP_BASE_OFFSET. This also implies
 * that the 64-bit capable devices will be given DMA addresses as
 * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
 * devices, we create a separate map region that handles the low
 * 4GB.
 *
 * This design lets us avoid the "PCI hole" problem where the host bridge
 * won't pass DMA traffic with target addresses that happen to fall within the
 * BAR space. This enables us to use all the physical memory for DMA, instead
 * of wasting the same amount of physical memory as the BAR window size.
 */
#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())

/*
 * Start of the PCI memory resource, which starts at the end of the
 * maximum system physical RAM address.
 */
#define TILE_PCI_MEM_START (1ULL << CHIP_PA_WIDTH())

/*
 * Structure of a PCI controller (host bridge) on Gx.
 */
struct pci_controller {

	/* Pointer back to the TRIO that this PCIe port is connected to. */
	gxio_trio_context_t *trio;
	int mac;		/* PCIe mac index on the TRIO shim */
	int trio_index;		/* Index of TRIO shim that contains the MAC. */

	int pio_mem_index;	/* PIO region index for memory access */

#ifdef CONFIG_TILE_PCI_IO
	int pio_io_index;	/* PIO region index for I/O space access */
#endif

	/*
	 * Mem-Map regions for all the memory controllers so that Linux can
	 * map all of its physical memory space to the PCI bus.
	 */
	int mem_maps[MAX_NUMNODES];

	int index;		/* PCI domain number */
	struct pci_bus *root_bus;

	/* PCI I/O space resource for this controller. */
	struct resource io_space;
	char io_space_name[32];

	/* PCI memory space resource for this controller. */
	struct resource mem_space;
	char mem_space_name[32];

	uint64_t mem_offset;	/* cpu->bus memory mapping offset. */

	int first_busno;

	struct pci_ops *ops;

	/* Table that maps the INTx numbers to Linux irq numbers. */
	int irq_intx_table[4];
};

extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
extern int num_trio_shims;

extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

/*
 * The PCI address space does not equal the physical memory address
 * space (we have an IOMMU). The IDE and SCSI device layers use this
 * boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS 0

#endif /* __tilegx__ */

int __init tile_pci_init(void);
int __init pcibios_init(void);

void pcibios_fixup_bus(struct pci_bus *bus);

#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)

/*
 * This decides whether to display the domain number in /proc.
 */
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 1;
}

/*
 * pcibios_assign_all_busses() tells whether or not the bus numbers
 * should be reassigned, in case the BIOS didn't do it correctly, or
 * in case we don't have a BIOS and we want to let Linux do it.
 */
static inline int pcibios_assign_all_busses(void)
{
	return 1;
}

#define PCIBIOS_MIN_MEM 0
/* Minimum PCI I/O address, starting at the page boundary. */
#define PCIBIOS_MIN_IO PAGE_SIZE

/* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask

/* Implement the pci_ DMA API in terms of the generic device dma_ one. */
#include <asm-generic/pci-dma-compat.h>

#endif /* _ASM_TILE_PCI_H */
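The TILE_PCI_MEM_MAP_BASE_OFFSET scheme above is plain offset arithmetic: 64-bit-capable devices see all of host memory shifted up by 1 << CHIP_PA_WIDTH(), so DMA targets can never collide with the BAR window below 4GB. A hedged sketch of the round trip, with a 40-bit PA width assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define CHIP_PA_WIDTH() 40	/* assumed width, for illustration only */
#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())

/* DMA address handed to a 64-bit capable device for a given CPA. */
static uint64_t cpa_to_bus(uint64_t cpa)
{
	return cpa + TILE_PCI_MEM_MAP_BASE_OFFSET;
}

/* CPA recovered from an incoming 64-bit PCI request. */
static uint64_t bus_to_cpa(uint64_t bus)
{
	return bus - TILE_PCI_MEM_MAP_BASE_OFFSET;
}

int main(void)
{
	uint64_t cpa = 0x12345000ULL;
	uint64_t bus = cpa_to_bus(cpa);
	/* The round trip is exact, and the bus address sits far above
	 * the 1GB BAR window that ends at 4GB. */
	printf("cpa=%#llx bus=%#llx back=%#llx\n",
	       (unsigned long long)cpa, (unsigned long long)bus,
	       (unsigned long long)bus_to_cpa(bus));
	return 0;
}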
52
arch/tile/include/asm/percpu.h
Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H

register unsigned long my_cpu_offset_reg asm("tp");

#ifdef CONFIG_PREEMPT
/*
 * For full preemption, we can't just use the register variable
 * directly, since we need barrier() to hazard against it, causing the
 * compiler to reload anything computed from a previous "tp" value.
 * But we also don't want to use volatile asm, since we'd like the
 * compiler to be able to cache the value across multiple percpu reads.
 * So we use a fake stack read as a hazard against barrier().
 * The 'U' constraint is like 'm' but disallows postincrement.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long tp;
	register unsigned long *sp asm("sp");
	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
	return tp;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/*
 * We don't need to hazard against barrier() since "tp" doesn't ever
 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
 * changes at function call points, at which we are already re-reading
 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
 */
#define __my_cpu_offset my_cpu_offset_reg
#endif

#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))

#include <asm-generic/percpu.h>

#endif /* _ASM_TILE_PERCPU_H */
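The CONFIG_PREEMPT comment above hinges on a subtlety: barrier() only clobbers memory, so it naturally orders reads of a memory-backed variable but says nothing about a value living in a dedicated register like "tp"; the fake stack read gives the asm a memory operand for barrier() to hazard against. A user-space sketch of the memory-backed half of that contrast (the register-variable half is inherently arch-specific):

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static unsigned long my_cpu_offset;	/* stand-in, ordinary memory */

int main(void)
{
	my_cpu_offset = 0x1000;
	unsigned long a = my_cpu_offset;	/* may be cached in a register */
	barrier();				/* memory clobber... */
	unsigned long b = my_cpu_offset;	/* ...forces a re-read here */
	printf("%lx %lx\n", a, b);
	return 0;
}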
22
arch/tile/include/asm/perf_event.h
Normal file
@@ -0,0 +1,22 @@
/*
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PERF_EVENT_H
#define _ASM_TILE_PERF_EVENT_H

#include <linux/percpu.h>
DECLARE_PER_CPU(u64, perf_irqs);

unsigned long handle_syscall_link_address(void);
#endif /* _ASM_TILE_PERF_EVENT_H */
164
arch/tile/include/asm/pgalloc.h
Normal file
@@ -0,0 +1,164 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
	set_pte(pmdp, pmd);
#else
	set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
#endif
}

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *ptep)
{
	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
			      __pgprot(_PAGE_PRESENT)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t page)
{
	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
			      __pgprot(_PAGE_PRESENT)));
}

/*
 * Allocate and free page tables.
 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
				   int order);
extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
}

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	pte_free(mm, virt_to_page(pte));
}

extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
			       unsigned long address, int order);
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
}

#define check_pgt_cache() do { } while (0)

/*
 * Get the small-page pte_t lowmem entry for a given pfn.
 * This may or may not be in use, depending on whether the initial
 * huge-page entry for the page has already been shattered.
 */
pte_t *get_prealloc_pte(unsigned long pfn);

/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);

/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);

#ifdef __tilegx__

#define pud_populate(mm, pud, pmd) \
	pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))

/* Bits for the size of the L1 (intermediate) page table. */
#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)

/* How big is a kernel L1 page table? */
#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)

/* We currently allocate L1 page tables by page. */
#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for an L1 page table? */
#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
	return (pmd_t *)page_to_virt(p);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long address)
{
	__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
			   L1_USER_PGTABLE_ORDER);
}

#endif /* __tilegx__ */

#endif /* _ASM_TILE_PGALLOC_H */
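The L2_USER_PGTABLE_ORDER arithmetic above simply rounds a table's size up to whole pages: order = max(table_shift, PAGE_SHIFT) - PAGE_SHIFT. A quick sketch with an assumed geometry of 64KB pages, 16MB huge pages, and 8-byte PTEs (mirroring, not reproducing, the real _HV_LOG2_L2_SIZE computation):

#include <stdio.h>

/* Assumed geometry for illustration only. */
#define PAGE_SHIFT 16
#define HPAGE_SHIFT 24
#define L2_KERNEL_PGTABLE_SHIFT (HPAGE_SHIFT - PAGE_SHIFT + 3) /* log2(entries * 8 bytes) */

#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)

int main(void)
{
	/* A kernel L2 table here is 2KB (256 entries * 8 bytes), far
	 * smaller than the 64KB page it lives in, so the order is 0. */
	printf("l2 table size=%d bytes, order=%d\n",
	       1 << L2_KERNEL_PGTABLE_SHIFT, L2_USER_PGTABLE_ORDER);
	return 0;
}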
541
arch/tile/include/asm/pgtable.h
Normal file
@@ -0,0 +1,541 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * This file contains the functions and defines necessary to modify and use
 * the TILE page table tree.
 */

#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H

#include <hv/hypervisor.h>

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The very last slots in the pgd_t are for addresses unusable by Linux
 * (pgd_addr_invalid() returns true). So we use them for the list structure.
 * The x86 code we are modelled on uses the page->private/index fields
 * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
 * our pgds are so much smaller than a page, it seems a waste to
 * spend a whole page on each pgd.
 */
#define PGD_LIST_OFFSET \
	((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
	((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
	((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))

extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS 0

#define _PAGE_PRESENT HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE HV_PTE_PAGE
#define _PAGE_SUPER_PAGE HV_PTE_SUPER
#define _PAGE_READABLE HV_PTE_READABLE
#define _PAGE_WRITABLE HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED HV_PTE_ACCESSED
#define _PAGE_DIRTY HV_PTE_DIRTY
#define _PAGE_GLOBAL HV_PTE_GLOBAL
#define _PAGE_USER HV_PTE_USER

/*
 * All the "standard" bits. Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
	_PAGE_PRESENT | \
	_PAGE_HUGE_PAGE | \
	_PAGE_SUPER_PAGE | \
	_PAGE_READABLE | \
	_PAGE_WRITABLE | \
	_PAGE_EXECUTABLE | \
	_PAGE_ACCESSED | \
	_PAGE_DIRTY | \
	_PAGE_GLOBAL | \
	_PAGE_USER \
)

#define PAGE_NONE \
	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
	(_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY	/* this is write-only, which we won't support */
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
 * and PAGE_HUGE_PAGE, which must be one and zero, respectively.
 * We set the ignored bits to zero.
 */
#define _PAGE_TABLE _PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
	(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pa((x), 0)

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
	ptep->val = 0;
#else
	u32 *tmp = (u32 *)ptep;
	tmp[0] = 0;
	barrier();
	tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present hv_pte_get_present
#define pte_mknotpresent hv_pte_clear_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_super hv_pte_get_super
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
#define pte_mksuper hv_pte_set_super

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)

/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);

/*
 * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
 * the right order on 32-bit platforms and also allows us to write
 * hooks to check valid PTEs, etc., if we want.
 */
void __set_pte(pte_t *ptep, pte_t pte);

/*
 * set_pte() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching. Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return PFN_DOWN(hv_pte_get_pa(pte));
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
 * file offset in the 32 high bits.
 */
#define _PAGE_FILE HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS 32
#define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte) ((pte).val >> 32)
#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp) ((swp).val & 0x1f)
#define __swp_offset(swp) ((swp).val >> 5)
#define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return pfn_pte(pte_pfn(pte), newprot);
}

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr) \
do { \
	pte_clear(&init_mm, (vaddr), (ptep)); \
	local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

/*
 * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
}

static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
}

#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)

/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
	return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
}

/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table). Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	phys_addr_t pa =
		(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
	return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages. However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))

static inline void pmd_clear(pmd_t *pmdp)
{
	__pte_clear(pmdp_ptep(pmdp));
}

#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define __HAVE_ARCH_PMD_WRITE

#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pfn_pmd(pmd_pfn(pmd), newprot);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#define pmd_trans_huge pmd_huge_page

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return hv_pte_get_client2(pmd_pte(pmd));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
			   unsigned long vaddr, unsigned long pagesize,
			   pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */
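The swap-entry macros above pack a 5-bit type and the offset into the high 32 bits of the PTE, leaving the low bits (including _PAGE_PRESENT) clear so the entry never looks like a live mapping. A standalone round-trip of that encoding on a plain 64-bit value:

#include <stdint.h>
#include <stdio.h>

/* Type in the low 5 bits of the entry, offset above, the whole entry
 * stored in the high 32 bits of the PTE, as in the macros above. */
static uint64_t swp_entry(unsigned type, uint64_t off)
{
	return (type & 0x1f) | (off << 5);
}

int main(void)
{
	uint64_t swp = swp_entry(3, 0x1234);
	uint64_t pte_val = swp << 32;	/* __swp_entry_to_pte */
	uint64_t back = pte_val >> 32;	/* __pte_to_swp_entry */
	/* Prints "type=3 offset=0x1234"; the low 32 PTE bits stay zero. */
	printf("type=%llu offset=%#llx\n",
	       (unsigned long long)(back & 0x1f),
	       (unsigned long long)(back >> 5));
	return 0;
}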
121
arch/tile/include/asm/pgtable_32.h
Normal file
@@ -0,0 +1,121 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PGTABLE_32_H
#define _ASM_TILE_PGTABLE_32_H

/*
 * The level-1 index is defined by the huge page size. A PGD is composed
 * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
 */
#define PGDIR_SHIFT HPAGE_SHIFT
#define PGDIR_SIZE HPAGE_SIZE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)

/*
 * The level-2 index is defined by the difference between the huge
 * page size and the normal page size. A PTE is composed of
 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
 * Note that the hypervisor docs use PTE for what we call pte_t, so
 * this nomenclature is somewhat confusing.
 */
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

#ifndef __ASSEMBLY__

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 *
 * HOWEVER, if we are using an allocation scheme with slop after the
 * end of the page table (e.g. where our L2 page tables are 2KB but
 * our pages are 64KB and we are allocating via the page allocator)
 * we can't extend it easily.
 */
#define LAST_PKMAP PTRS_PER_PTE

#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)

#ifdef CONFIG_HIGHMEM
# define _VMALLOC_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
#else
# define _VMALLOC_END (FIXADDR_START & ~(HPAGE_SIZE-1))
#endif

/*
 * Align the vmalloc area to an L2 page table, and leave a guard page
 * at the beginning and end. The vmalloc code also puts in an internal
 * guard page between each allocation.
 */
#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
#define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE)
#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)

/* This is the maximum possible amount of lowmem. */
#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)

/* We have no pmd or pud since we are strictly a two-level page table. */
#include <asm-generic/pgtable-nopmd.h>

static inline int pud_huge_page(pud_t pud) { return 0; }

/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
	return addr >= MEM_HV_START;
}

/*
 * Provide versions of these routines that can be used safely when
 * the hypervisor may be asynchronously modifying dirty/accessed bits.
 * ptep_get_and_clear() matches the generic one but we provide it to
 * be parallel with the 64-bit code.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

extern int ptep_test_and_clear_young(struct vm_area_struct *,
				     unsigned long addr, pte_t *);
extern void ptep_set_wrprotect(struct mm_struct *,
			       unsigned long addr, pte_t *);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

/*
 * pmds are wrappers around pgds, which are the same as ptes.
 * It's often convenient to "cast" back and forth and use the pte methods,
 * which are the methods supplied by the hypervisor.
 */
#define pmd_pte(pmd) ((pmd).pud.pgd)
#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
#define pte_pmd(pte) ((pmd_t){ { (pte) } })

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_32_H */
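The PGD_INDEX/PTE_INDEX macros above reduce, for a concrete geometry, to simple shift-and-mask arithmetic on the virtual address. A sketch assuming 64KB pages and 16MB huge pages, so the PGD indexes VA bits [31:24] and the PTE indexes bits [23:16] (the real macros come from <hv/hypervisor.h>):

#include <stdio.h>

/* Assumed geometry for illustration only. */
#define PAGE_SHIFT 16
#define HPAGE_SHIFT 24

#define PGD_INDEX(va) ((unsigned long)(va) >> HPAGE_SHIFT)
#define PTE_INDEX(va) (((unsigned long)(va) >> PAGE_SHIFT) & \
		       ((1UL << (HPAGE_SHIFT - PAGE_SHIFT)) - 1))

int main(void)
{
	unsigned long va = 0xc1230000UL;
	/* 0xc1230000: PGD slot 0xc1, then PTE slot 0x23 within that
	 * 16MB span; prints "pgd=0xc1 pte=0x23". */
	printf("pgd=%#lx pte=%#lx\n", PGD_INDEX(va), PTE_INDEX(va));
	return 0;
}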
171
arch/tile/include/asm/pgtable_64.h
Normal file
@@ -0,0 +1,171 @@
/*
|
||||
* Copyright 2011 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_PGTABLE_64_H
|
||||
#define _ASM_TILE_PGTABLE_64_H
|
||||
|
||||
/* The level-0 page table breaks the address space into 32-bit chunks. */
|
||||
#define PGDIR_SHIFT HV_LOG2_L1_SPAN
|
||||
#define PGDIR_SIZE HV_L1_SPAN
|
||||
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
||||
#define PTRS_PER_PGD HV_L0_ENTRIES
|
||||
#define PGD_INDEX(va) HV_L0_INDEX(va)
|
||||
#define SIZEOF_PGD HV_L0_SIZE
|
||||
|
||||
/*
|
||||
* The level-1 index is defined by the huge page size. A PMD is composed
|
||||
* of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
|
||||
*/
|
||||
#define PMD_SHIFT HPAGE_SHIFT
|
||||
#define PMD_SIZE HPAGE_SIZE
|
||||
#define PMD_MASK (~(PMD_SIZE-1))
|
||||
#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
|
||||
#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
|
||||
#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
|
||||
|
||||
/*
|
||||
* The level-2 index is defined by the difference between the huge
|
||||
* page size and the normal page size. A PTE is composed of
|
||||
* PTRS_PER_PTE pte_t's and is the bottom level of the page table.
|
||||
* Note that the hypervisor docs use PTE for what we call pte_t, so
|
||||
* this nomenclature is somewhat confusing.
|
||||
*/
|
||||
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
|
||||
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
|
||||
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
|
||||
|
||||
/*
|
||||
* Align the vmalloc area to an L2 page table. Omit guard pages at
|
||||
* the beginning and end for simplicity (particularly in the per-cpu
|
||||
* memory allocation code). The vmalloc code puts in an internal
|
||||
* guard page between each allocation.
|
||||
*/
|
||||
#define _VMALLOC_END MEM_SV_START
|
||||
#define VMALLOC_END _VMALLOC_END
|
||||
#define VMALLOC_START _VMALLOC_START
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* We have no pud since we are a three-level page table. */
|
||||
#include <asm-generic/pgtable-nopud.h>
|
||||
|
||||
/*
|
||||
* pmds are the same as pgds and ptes, so converting is a no-op.
|
||||
*/
|
||||
#define pmd_pte(pmd) (pmd)
|
||||
#define pmdp_ptep(pmdp) (pmdp)
|
||||
#define pte_pmd(pte) (pte)
|
||||
|
||||
#define pud_pte(pud) ((pud).pgd)
|
||||
|
||||
static inline int pud_none(pud_t pud)
|
||||
{
|
||||
return pud_val(pud) == 0;
|
||||
}
|
||||
|
||||
static inline int pud_present(pud_t pud)
|
||||
{
|
||||
return pud_val(pud) & _PAGE_PRESENT;
|
||||
}
|
||||
|
||||
static inline int pud_huge_page(pud_t pud)
|
||||
{
|
||||
return pud_val(pud) & _PAGE_HUGE_PAGE;
|
||||
}
|
||||
|
||||
#define pmd_ERROR(e) \
|
||||
pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
|
||||
|
||||
static inline void pud_clear(pud_t *pudp)
|
||||
{
|
||||
__pte_clear(&pudp->pgd);
|
||||
}
|
||||
|
||||
static inline int pud_bad(pud_t pud)
|
||||
{
|
||||
return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
|
||||
}
|
||||
|
||||
/* Return the page-table frame number (ptfn) that a pud_t points at. */
|
||||
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
|
||||
|
||||
/* Return the page frame number (pfn) that a pud_t points at. */
|
||||
#define pud_pfn(pud) pte_pfn(pud_pte(pud))
|
||||
|
||||
/*
|
||||
* A given kernel pud_t maps to a kernel pmd_t table at a specific
|
||||
* virtual address. Since kernel pmd_t tables can be aligned at
|
||||
* sub-page granularity, this macro can return non-page-aligned
|
||||
* pointers, despite its name.
|
||||
*/
|
||||
#define pud_page_vaddr(pud) \
|
||||
(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
|
||||
|
||||
/*
|
||||
* A pud_t points to a pmd_t array. Since we can have multiple per
|
||||
* page, we don't have a one-to-one mapping of pud_t's to pages.
|
||||
*/
|
||||
#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
|
||||
|
||||
static inline unsigned long pud_index(unsigned long address)
|
||||
{
|
||||
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
|
||||
}
|
||||
|
||||
#define pmd_offset(pud, address) \
|
||||
((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
|
||||
|
||||
/* Normalize an address to having the correct high bits set. */
|
||||
#define pgd_addr_normalize pgd_addr_normalize
|
||||
static inline unsigned long pgd_addr_normalize(unsigned long addr)
|
||||
{
|
||||
return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
|
||||
(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
|
||||
}
|
||||
|
||||
/* We don't define any pgds for these addresses. */
|
||||
static inline int pgd_addr_invalid(unsigned long addr)
|
||||
{
|
||||
return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Use atomic instructions to provide atomicity against the hypervisor.
|
||||
*/
|
||||
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
|
||||
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
|
||||
HV_PTE_INDEX_ACCESSED) & 0x1;
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
|
||||
static inline void ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
return hv_pte(__insn_exch(&ptep->val, 0UL));
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_TILE_PGTABLE_64_H */
|
||||
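Aside (illustration, not part of the commit): pgd_addr_normalize() above canonicalizes an address by shifting the top valid VA bit into the sign position and arithmetic-shifting it back, so that bit fills the unused high bits. A minimal user-space sketch of the same trick, assuming a 64-bit word and a hypothetical 42-bit VA width (the real values come from CHIP_WORD_SIZE() and CHIP_VA_WIDTH()):

#include <stdio.h>
#include <stdint.h>

#define WORD_SIZE 64
#define VA_WIDTH  42	/* hypothetical stand-in for CHIP_VA_WIDTH() */

/* Sign-extend bit VA_WIDTH-1 into the high bits, like pgd_addr_normalize().
 * Relies on arithmetic right shift of signed values, as the kernel does. */
static uint64_t normalize(uint64_t addr)
{
	return (uint64_t)(((int64_t)addr << (WORD_SIZE - VA_WIDTH)) >>
			  (WORD_SIZE - VA_WIDTH));
}

int main(void)
{
	uint64_t low  = 0x12345678ULL;	/* bit 41 clear: comes back unchanged */
	uint64_t high = 1ULL << 41;	/* bit 41 set: high bits fill with ones */

	printf("%016llx -> %016llx\n",
	       (unsigned long long)low, (unsigned long long)normalize(low));
	printf("%016llx -> %016llx\n",
	       (unsigned long long)high, (unsigned long long)normalize(high));
	return 0;
}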
64
arch/tile/include/asm/pmc.h
Normal file
@@ -0,0 +1,64 @@
/*
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PMC_H
#define _ASM_TILE_PMC_H

#include <linux/ptrace.h>

#define TILE_BASE_COUNTERS	2

/* Bitfields below are derived from SPR PERF_COUNT_CTL */
#ifndef __tilegx__
/* PERF_COUNT_CTL on TILEPro */
#define TILE_CTL_EXCL_USER	(1 << 7)	/* exclude user level */
#define TILE_CTL_EXCL_KERNEL	(1 << 8)	/* exclude kernel level */
#define TILE_CTL_EXCL_HV	(1 << 9)	/* exclude hypervisor level */

#define TILE_SEL_MASK		0x7f	/* 7 bits for event SEL,
					COUNT_0_SEL */
#define TILE_PLM_MASK		0x780	/* 4 bits priv level msks,
					COUNT_0_MASK */
#define TILE_EVENT_MASK	(TILE_SEL_MASK | TILE_PLM_MASK)

#else /* __tilegx__ */
/* PERF_COUNT_CTL on TILEGx */
#define TILE_CTL_EXCL_USER	(1 << 10)	/* exclude user level */
#define TILE_CTL_EXCL_KERNEL	(1 << 11)	/* exclude kernel level */
#define TILE_CTL_EXCL_HV	(1 << 12)	/* exclude hypervisor level */

#define TILE_SEL_MASK		0x3f	/* 6 bits for event SEL,
					COUNT_0_SEL */
#define TILE_BOX_MASK		0x1c0	/* 3 bits box msks,
					COUNT_0_BOX */
#define TILE_PLM_MASK		0x3c00	/* 4 bits priv level msks,
					COUNT_0_MASK */
#define TILE_EVENT_MASK	(TILE_SEL_MASK | TILE_BOX_MASK | TILE_PLM_MASK)
#endif /* __tilegx__ */

/* Takes register and fault number.  Returns error to disable the interrupt. */
typedef int (*perf_irq_t)(struct pt_regs *, int);

int userspace_perf_handler(struct pt_regs *regs, int fault);

perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq);
void release_pmc_hardware(void);

unsigned long pmc_get_overflow(void);
void pmc_ack_overflow(unsigned long status);

void unmask_pmc_interrupts(void);
void mask_pmc_interrupts(void);

#endif /* _ASM_TILE_PMC_H */
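Aside (illustration, not part of the commit): a hedged sketch of how a perf-style client would typically claim the PMC through this interface. The tile_pmu_* names here are hypothetical; the sketch assumes reserve_pmc_hardware() returns the previously installed handler, so a non-NULL result means another client already owns the counters.

/* Hypothetical caller, for illustration only. */
static int tile_pmu_irq(struct pt_regs *regs, int fault)
{
	unsigned long status = pmc_get_overflow();	/* which counters wrapped */

	pmc_ack_overflow(status);	/* clear the overflow bits we saw */
	return 0;	/* nonzero would ask the caller to disable the interrupt */
}

static int tile_pmu_init(void)
{
	if (reserve_pmc_hardware(tile_pmu_irq))
		return -EBUSY;		/* someone else owns the PMC */
	unmask_pmc_interrupts();
	return 0;
}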
379
arch/tile/include/asm/processor.h
Normal file
@@ -0,0 +1,379 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PROCESSOR_H
#define _ASM_TILE_PROCESSOR_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/*
 * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
 * normally would, due to #include dependencies.
 */
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/percpu.h>

#include <arch/spr_def.h>

struct task_struct;
struct thread_struct;

typedef struct {
	unsigned long seg;
} mm_segment_t;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
void *current_text_addr(void);

#if CHIP_HAS_TILE_DMA()
/* Capture the state of a suspended DMA. */
struct tile_dma_state {
	int enabled;
	unsigned long src;
	unsigned long dest;
	unsigned long strides;
	unsigned long chunk_size;
	unsigned long src_chunk;
	unsigned long dest_chunk;
	unsigned long byte;
	unsigned long status;
};

/*
 * A mask of the DMA status register for selecting only the 'running'
 * and 'done' bits.
 */
#define DMA_STATUS_MASK \
	(SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
#endif

/*
 * Track asynchronous TLB events (faults and access violations)
 * that occur while we are in kernel mode from DMA or the SN processor.
 */
struct async_tlb {
	short fault_num;	/* original fault number; 0 if none */
	char is_fault;		/* was it a fault (vs an access violation) */
	char is_write;		/* for fault: was it caused by a write? */
	unsigned long address;	/* what address faulted? */
};

#ifdef CONFIG_HARDWALL
struct hardwall_info;
struct hardwall_task {
	/* Which hardwall is this task tied to? (or NULL if none) */
	struct hardwall_info *info;
	/* Chains this task into the list at info->task_head. */
	struct list_head list;
};
#ifdef __tilepro__
#define HARDWALL_TYPES 1	/* udn */
#else
#define HARDWALL_TYPES 3	/* udn, idn, and ipi */
#endif
#endif

struct thread_struct {
	/* kernel stack pointer */
	unsigned long ksp;
	/* kernel PC */
	unsigned long pc;
	/* starting user stack pointer (for page migration) */
	unsigned long usp0;
	/* pid of process that created this one */
	pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
	/* DMA info for suspended threads (byte == 0 means no DMA state) */
	struct tile_dma_state tile_dma_state;
#endif
	/* User EX_CONTEXT registers */
	unsigned long ex_context[2];
	/* User SYSTEM_SAVE registers */
	unsigned long system_save[4];
	/* User interrupt mask */
	unsigned long long interrupt_mask;
	/* User interrupt-control 0 state */
	unsigned long intctrl_0;
	/* Is this task currently doing a backtrace? */
	bool in_backtrace;
	/* Any other miscellaneous processor state bits */
	unsigned long proc_status;
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	/* Interrupt base for PL0 interrupts */
	unsigned long interrupt_vector_base;
#endif
	/* Tile cache retry fifo high-water mark */
	unsigned long tile_rtf_hwm;
#if CHIP_HAS_DSTREAM_PF()
	/* Data stream prefetch control */
	unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
	/* Hardwall information for various resources. */
	struct hardwall_task hardwall[HARDWALL_TYPES];
#endif
#if CHIP_HAS_TILE_DMA()
	/* Async DMA TLB fault information */
	struct async_tlb dma_async_tlb;
#endif
};

#endif /* !__ASSEMBLY__ */

/*
 * Start with "sp" this many bytes below the top of the kernel stack.
 * This allows us to be cache-aware when handling the initial save
 * of the pt_regs value to the stack.
 */
#define STACK_TOP_DELTA 64

/*
 * When entering the kernel via a fault, start with the top of the
 * pt_regs structure this many bytes below the top of the page.
 * This aligns the pt_regs structure optimally for cache-line access.
 */
#ifdef __tilegx__
#define KSTK_PTREGS_GAP  48
#else
#define KSTK_PTREGS_GAP  56
#endif

#ifndef __ASSEMBLY__

#ifdef __tilegx__
#define TASK_SIZE_MAX		(_AC(1, UL) << (MAX_VA_WIDTH - 1))
#else
#define TASK_SIZE_MAX		PAGE_OFFSET
#endif

/* TASK_SIZE and related variables are always checked in "current" context. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE	(1UL << 31)
#define TASK_SIZE		((current_thread_info()->status & TS_COMPAT) ? \
				 COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE		TASK_SIZE_MAX
#endif

#define VDSO_BASE	((unsigned long)current->active_mm->context.vdso_base)
#define VDSO_SYM(x)	(VDSO_BASE + (unsigned long)(x))

#define STACK_TOP	TASK_SIZE

/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX	TASK_SIZE_MAX

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's, if it is using bottom-up mapping.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#define INIT_THREAD { \
	.ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
	.interrupt_mask = -1ULL \
}

/* Kernel stack top for the task that first boots on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_sp);

/* PC to boot from on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_pc);

/* Do necessary setup to start up a newly executed thread. */
static inline void start_thread(struct pt_regs *regs,
				unsigned long pc, unsigned long usp)
{
	regs->pc = pc;
	regs->sp = usp;
	single_step_execve();
}

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
	/* Nothing for now */
}

extern int do_work_pending(struct pt_regs *regs, u32 flags);


/*
 * Return saved (kernel) PC of a blocked thread.
 * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
 */
#define thread_saved_pc(t)   ((t)->thread.pc)

unsigned long get_wchan(struct task_struct *p);

/* Return initial ksp value for given task. */
#define task_ksp0(task) \
	((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)

/* Return some info about the user process TASK. */
#define task_pt_regs(task) \
	((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define current_pt_regs() \
	((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
			    STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
#define task_sp(task)	(task_pt_regs(task)->sp)
#define task_pc(task)	(task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task)	task_pc(task)
#define KSTK_ESP(task)	task_sp(task)

/* Fine-grained unaligned JIT support */
#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif

/*
 * Do some slow action (e.g. read a slow SPR).
 * Note that this must also have compiler-barrier semantics since
 * it may be used in a busy loop reading memory.
 */
static inline void cpu_relax(void)
{
	__insn_mfspr(SPR_PASS);
	barrier();
}

#define cpu_relax_lowlatency() cpu_relax()

/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;

/* Provide information about the chip model. */
extern char chip_model[64];

/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];

/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;

/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default


/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

/* Support standard Linux prefetching. */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()

/* Bring a value into the L1D, faulting the TLB if necessary. */
#ifdef __tilegx__
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
#else
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
#endif

#else /* __ASSEMBLY__ */

/* Do some slow action (e.g. read a slow SPR). */
#define CPU_RELAX	mfspr zero, SPR_PASS

#endif /* !__ASSEMBLY__ */

/* Assembly code assumes that the PL is in the low bits. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif

/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
    SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
    SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
    SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif

/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
#define EX1_PL(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
	(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
	 ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))

/*
 * Provide symbolic constants for PLs.
 */
#define USER_PL 0
#if CONFIG_KERNEL_PL == 2
#define GUEST_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL

/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#ifdef __tilegx__
#define CPU_SHIFT 48
#if CHIP_VA_WIDTH() > CPU_SHIFT
# error Too many VA bits!
#endif
#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
#define raw_smp_processor_id() \
	((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
#define get_current_ksp0() \
	((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
			  (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
	unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
	__ksp0 | __cpu; \
})
#else
#define LOG2_NR_CPU_IDS 6
#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
#define raw_smp_processor_id() \
	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
#define get_current_ksp0() \
	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task); \
	int __cpu = raw_smp_processor_id(); \
	BUG_ON(__ksp0 & MAX_CPU_ID); \
	__ksp0 | __cpu; \
})
#endif
#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
# error Too many cpus!
#endif

#endif /* _ASM_TILE_PROCESSOR_H */
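Aside (illustration, not part of the commit): the tilegx SYSTEM_SAVE_K_0 packing above keeps the cpu number in the top 16 bits and the low 48 bits of ksp0 below it; unpacking sign-extends bit 47 to restore the high bits of the (negative) kernel address. A user-space sketch of the same arithmetic, with a hypothetical cpu number and stack address:

#include <stdio.h>
#include <stdint.h>

#define CPU_SHIFT 48	/* tilegx layout from the header above */

int main(void)
{
	uint64_t ksp0 = 0xfffffe0123456000ULL;	/* hypothetical kernel stack top */
	int cpu = 37;

	/* Pack, as next_current_ksp0() does: low 48 bits of ksp0, cpu on top. */
	uint64_t spr = (ksp0 & ((1ULL << CPU_SHIFT) - 1)) |
		       ((uint64_t)cpu << CPU_SHIFT);

	/* Unpack, as raw_smp_processor_id() and get_current_ksp0() do; the
	 * arithmetic shift sign-extends bit 47 back into the high bits. */
	int cpu_out = (int)(spr >> CPU_SHIFT);
	uint64_t ksp0_out = (uint64_t)(((int64_t)spr << (64 - CPU_SHIFT)) >>
				       (64 - CPU_SHIFT));

	printf("cpu=%d ksp0=%016llx\n", cpu_out, (unsigned long long)ksp0_out);
	return 0;
}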
97
arch/tile/include/asm/ptrace.h
Normal file
@@ -0,0 +1,97 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_PTRACE_H
#define _ASM_TILE_PTRACE_H

#include <linux/compiler.h>

#ifndef __ASSEMBLY__
/* Benefit from consistent use of "long" on all chips. */
typedef unsigned long pt_reg_t;
#endif

#include <uapi/asm/ptrace.h>

#define PTRACE_O_MASK_TILE	(PTRACE_O_TRACEMIGRATE)
#define PT_TRACE_MIGRATE	PT_EVENT_FLAG(PTRACE_EVENT_MIGRATE)

/* Flag bits in pt_regs.flags */
#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */

#ifndef __ASSEMBLY__

#define regs_return_value(regs) ((regs)->regs[0])
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
#define user_stack_pointer(regs) ((regs)->sp)

/* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) < KERNEL_PL)

/* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *);

/* Trace the current syscall. */
extern int do_syscall_trace_enter(struct pt_regs *regs);
extern void do_syscall_trace_exit(struct pt_regs *regs);

#define arch_has_single_step()	(1)

/*
 * A structure for all single-stepper state.
 *
 * Also update defines in assembler section if it changes
 */
struct single_step_state {
	/* the page to which we will write hacked-up bundles */
	void __user *buffer;

	union {
		int flags;
		struct {
			unsigned long is_enabled:1, update:1, update_reg:6;
		};
	};

	unsigned long orig_pc;		/* the original PC */
	unsigned long next_pc;		/* return PC if no branch (PC + 1) */
	unsigned long branch_next_pc;	/* return PC if we did branch/jump */
	unsigned long update_value;	/* value to restore to update_target */
};

/* Single-step the instruction at regs->pc */
extern void single_step_once(struct pt_regs *regs);

/* Clean up after execve(). */
extern void single_step_execve(void);

struct task_struct;

extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);

#ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
#define __ARCH_WANT_COMPAT_SYS_PTRACE
#endif

#endif /* !__ASSEMBLY__ */

#define SINGLESTEP_STATE_MASK_IS_ENABLED      0x1
#define SINGLESTEP_STATE_MASK_UPDATE          0x2
#define SINGLESTEP_STATE_TARGET_LB              2
#define SINGLESTEP_STATE_TARGET_UB              7

#endif /* _ASM_TILE_PTRACE_H */
47
arch/tile/include/asm/sections.h
Normal file
@@ -0,0 +1,47 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SECTIONS_H
#define _ASM_TILE_SECTIONS_H

#define arch_is_kernel_data arch_is_kernel_data

#include <asm-generic/sections.h>

/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char vdso32_start[], vdso32_end[];
#endif

/* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[];
#ifdef __tilegx__
extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
#else
extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
#endif

/* Handle the discontiguity between _sdata and _text. */
static inline int arch_is_kernel_data(unsigned long addr)
{
	return addr >= (unsigned long)_sdata &&
		addr < (unsigned long)_end;
}

#endif /* _ASM_TILE_SECTIONS_H */
52
arch/tile/include/asm/setup.h
Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_SETUP_H
#define _ASM_TILE_SETUP_H


#include <linux/pfn.h>
#include <linux/init.h>
#include <uapi/asm/setup.h>

/*
 * Reserved space for vmalloc and iomap - defined in asm/page.h
 */
#define MAXMEM_PFN	PFN_DOWN(MAXMEM)

int tile_console_write(const char *buf, int count);
void early_panic(const char *fmt, ...);

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
struct task_struct;
void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
void hardwall_deactivate_all(struct task_struct *task);
int hardwall_ipi_valid(int cpu);

/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
	if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
		hardwall_deactivate_all(p); \
} while (0)
#endif

#endif /* _ASM_TILE_SETUP_H */
33
arch/tile/include/asm/sigframe.h
Normal file
@@ -0,0 +1,33 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SIGFRAME_H
#define _ASM_TILE_SIGFRAME_H

/* Indicate that syscall return should not examine r0 */
#define INT_SWINT_1_SIGRETURN (~0)

#ifndef __ASSEMBLY__

#include <arch/abi.h>

struct rt_sigframe {
	unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
	struct siginfo info;
	struct ucontext uc;
};

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SIGFRAME_H */
29
arch/tile/include/asm/signal.h
Normal file
@@ -0,0 +1,29 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_SIGNAL_H
#define _ASM_TILE_SIGNAL_H

#include <uapi/asm/signal.h>

#if !defined(__ASSEMBLY__)
struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
void signal_fault(const char *type, struct pt_regs *,
		  void __user *frame, int sig);
void trace_unhandled_signal(const char *type, struct pt_regs *regs,
			    unsigned long address, int signo);
#endif
#endif /* _ASM_TILE_SIGNAL_H */
138
arch/tile/include/asm/smp.h
Normal file
@@ -0,0 +1,138 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SMP_H
#define _ASM_TILE_SMP_H

#ifdef CONFIG_SMP

#include <asm/processor.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <hv/hypervisor.h>

/* Set up this tile to support receiving hypervisor messages */
void init_messaging(void);

/* Set up this tile to support receiving device interrupts and IPIs. */
void init_per_tile_IRQs(void);

/* Send a message to processors specified in mask */
void send_IPI_many(const struct cpumask *mask, int tag);

/* Send a message to all but the sending processor */
void send_IPI_allbutself(int tag);

/* Send a message to a specific processor */
void send_IPI_single(int dest, int tag);

/* Process an IPI message */
void evaluate_message(int tag);

/* Boot a secondary cpu */
void online_secondary(void);

/* Topology of the supervisor tile grid, and coordinates of boot processor */
extern HV_Topology smp_topology;

/* Accessors for grid size */
#define smp_height		(smp_topology.height)
#define smp_width		(smp_topology.width)

/* Convenience functions for converting cpu <-> coords. */
static inline int cpu_x(int cpu)
{
	return cpu % smp_width;
}
static inline int cpu_y(int cpu)
{
	return cpu / smp_width;
}
static inline int xy_to_cpu(int x, int y)
{
	return y * smp_width + x;
}

/* Hypervisor message tags sent via the tile send_IPI*() routines. */
#define MSG_TAG_START_CPU		1
#define MSG_TAG_STOP_CPU		2
#define MSG_TAG_CALL_FUNCTION_MANY	3
#define MSG_TAG_CALL_FUNCTION_SINGLE	4

/* Hook for the generic smp_call_function_many() routine. */
static inline void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY);
}

/* Hook for the generic smp_call_function_single() routine. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE);
}

/* Print out the boot string describing which cpus were disabled. */
void print_disabled_cpus(void);

#else /* !CONFIG_SMP */

#define smp_master_cpu		0
#define smp_height		1
#define smp_width		1
#define cpu_x(cpu)		0
#define cpu_y(cpu)		0
#define xy_to_cpu(x, y)		0

#endif /* !CONFIG_SMP */


/* Which cpus may be used as the lotar in a page table entry. */
extern struct cpumask cpu_lotar_map;
#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)

/* Which processors are used for hash-for-home mapping */
extern struct cpumask hash_for_home_map;

/* Which cpus can have their cache flushed by hv_flush_remote(). */
extern struct cpumask cpu_cacheable_map;
#define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map)

/* Convert an HV_LOTAR value into a cpu. */
static inline int hv_lotar_to_cpu(HV_LOTAR lotar)
{
	return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width);
}

/*
 * Extension of <linux/cpumask.h> functionality when you just want
 * to express a mask or suppression or inclusion region without
 * being too concerned about exactly which cpus are valid in that region.
 */
int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits);

#define cpulist_parse_crop(buf, dst) \
			__cpulist_parse_crop((buf), (dst), NR_CPUS)
static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp,
					int nbits)
{
	return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits);
}

/* Initialize the IPI subsystem. */
void ipi_init(void);

/* Function for start-cpu message to cause us to jump to. */
extern unsigned long start_cpu_function_addr;

#endif /* _ASM_TILE_SMP_H */
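Aside (illustration, not part of the commit): the cpu_x()/cpu_y()/xy_to_cpu() accessors above are a plain row-major mapping over the tile grid. A self-contained sketch with a hypothetical 8x8 grid (the real dimensions come from smp_topology) showing the round trip:

#include <assert.h>
#include <stdio.h>

/* Hypothetical fixed grid; in the kernel these come from smp_topology. */
static const int smp_width = 8, smp_height = 8;

static int cpu_x(int cpu)          { return cpu % smp_width; }
static int cpu_y(int cpu)          { return cpu / smp_width; }
static int xy_to_cpu(int x, int y) { return y * smp_width + x; }

int main(void)
{
	/* Every cpu id survives a coords round trip. */
	for (int cpu = 0; cpu < smp_width * smp_height; cpu++)
		assert(xy_to_cpu(cpu_x(cpu), cpu_y(cpu)) == cpu);
	printf("cpu 19 -> (%d,%d)\n", cpu_x(19), cpu_y(19));	/* (3,2) */
	return 0;
}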
24
arch/tile/include/asm/spinlock.h
Normal file
@@ -0,0 +1,24 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SPINLOCK_H
#define _ASM_TILE_SPINLOCK_H

#ifdef __tilegx__
#include <asm/spinlock_64.h>
#else
#include <asm/spinlock_32.h>
#endif

#endif /* _ASM_TILE_SPINLOCK_H */
129
arch/tile/include/asm/spinlock_32.h
Normal file
@@ -0,0 +1,129 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * 32-bit SMP spinlocks.
 */

#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag.
 */
#define TICKET_QUANTUM 2


/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Note that even if a new ticket is in the process of being
	 * acquired, so lock->next_ticket is 1, it's still reasonable
	 * to claim the lock is held, since it will be momentarily
	 * if not already.  There's no need to wait for a "valid"
	 * lock->next_ticket to become available.
	 */
	return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* For efficiency, overlap fetching the old ticket with the wmb(). */
	int old_ticket = lock->current_ticket;
	wmb();  /* guarantee anything modified under the lock is visible */
	lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT	8
#define _WR_CURR_SHIFT  16
#define _WR_WIDTH       8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8

/**
 * arch_read_can_lock() - would read_trylock() succeed?
 */
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

/**
 * arch_write_can_lock() - would write_trylock() succeed?
 */
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

/**
 * arch_read_lock() - acquire a read lock.
 */
void arch_read_lock(arch_rwlock_t *rwlock);

/**
 * arch_write_lock() - acquire a write lock.
 */
void arch_write_lock(arch_rwlock_t *rwlock);

/**
 * arch_read_trylock() - try to acquire a read lock.
 */
int arch_read_trylock(arch_rwlock_t *rwlock);

/**
 * arch_write_trylock() - try to acquire a write lock.
 */
int arch_write_trylock(arch_rwlock_t *rwlock);

/**
 * arch_read_unlock() - release a read lock.
 */
void arch_read_unlock(arch_rwlock_t *rwlock);

/**
 * arch_write_unlock() - release a write lock.
 */
void arch_write_unlock(arch_rwlock_t *rwlock);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_32_H */
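Aside (illustration, not part of the commit): a single-threaded user-space model of the ticket scheme above. Tickets advance in steps of TICKET_QUANTUM (2), so the odd '1' that a tns instruction writes can never collide with a valid ticket; the lock is held whenever next_ticket and current_ticket differ.

#include <stdio.h>

#define TICKET_QUANTUM 2

struct model_lock { int next_ticket, current_ticket; };

static int model_is_locked(const struct model_lock *l)
{
	return l->next_ticket != l->current_ticket;
}

int main(void)
{
	struct model_lock l = { 0, 0 };

	int my_ticket = l.next_ticket;		/* lock: take a ticket... */
	l.next_ticket += TICKET_QUANTUM;	/* ...and advance "next" */
	printf("locked=%d (ticket %d)\n", model_is_locked(&l), my_ticket);

	l.current_ticket += TICKET_QUANTUM;	/* unlock: serve the next ticket */
	printf("locked=%d\n", model_is_locked(&l));
	return 0;
}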
161
arch/tile/include/asm/spinlock_64.h
Normal file
@@ -0,0 +1,161 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H

/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000

/*
 * Return the "current" portion of a ticket lock value,
 * i.e. the number that currently owns the lock.
 */
static inline u32 arch_spin_current(u32 val)
{
	return val >> __ARCH_SPIN_CURRENT_SHIFT;
}

/*
 * Return the "next" portion of a ticket lock value,
 * i.e. the number that the next task to try to acquire the lock will get.
 */
static inline u32 arch_spin_next(u32 val)
{
	return val & __ARCH_SPIN_NEXT_MASK;
}

/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 val = lock->lock;
	return arch_spin_current(val) != arch_spin_next(val);
}

/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	wmb();  /* guarantee anything modified under the lock is visible */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);

/* Grab the "next" ticket number and bump it atomically.
 * If the current ticket is not ours, go to the slow path.
 * We also take the slow path if the "next" value overflows.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = __insn_fetchadd4(&lock->lock, 1);
	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
	if (unlikely(arch_spin_current(val) != ticket))
		arch_spin_lock_slow(lock, ticket);
}

/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use fetchadd() for readers, and fetchor() with the sign bit
 * for writers.
 */

#define __WRITE_LOCK_BIT (1 << 31)

static inline int arch_write_val_locked(int val)
{
	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
}

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(rw->lock);
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	return rw->lock == 0;
}

extern void __read_lock_failed(arch_rwlock_t *rw);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
	if (unlikely(arch_write_val_locked(val)))
		__read_lock_failed(rw);
}

extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (unlikely(val != 0))
		__write_lock_failed(rw, val);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_fetchadd4(&rw->lock, -1);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (likely(val == 0))
		return 1;
	if (!arch_write_val_locked(val))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */
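Aside (illustration, not part of the commit): the tilegx spinlock packs both ticket fields into one 32-bit word, with "next" in the low 15 bits and "current" from bit 17 up, exactly as the shifts and masks above describe. A tiny user-space decode of a hand-built value:

#include <stdio.h>
#include <stdint.h>

#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff

static uint32_t spin_current(uint32_t v) { return v >> __ARCH_SPIN_CURRENT_SHIFT; }
static uint32_t spin_next(uint32_t v)    { return v & __ARCH_SPIN_NEXT_MASK; }

int main(void)
{
	/* current=5, next=7: two waiters are queued, so the lock is held. */
	uint32_t val = (5u << __ARCH_SPIN_CURRENT_SHIFT) | 7u;

	printf("current=%u next=%u locked=%d\n",
	       spin_current(val), spin_next(val),
	       spin_current(val) != spin_next(val));
	return 0;
}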
60
arch/tile/include/asm/spinlock_types.h
Normal file
@@ -0,0 +1,60 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SPINLOCK_TYPES_H
#define _ASM_TILE_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

#ifdef __tilegx__

/* Low 15 bits are "next"; high 15 bits are "current". */
typedef struct arch_spinlock {
	unsigned int lock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

/* High bit is "writer owns"; low 31 bits are a count of readers. */
typedef struct arch_rwlock {
	unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

#else

typedef struct arch_spinlock {
	/* Next ticket number to hand out. */
	int next_ticket;
	/* The ticket number that currently owns this lock. */
	int current_ticket;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0, 0 }

/*
 * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next",
 * byte 2 for ticket-lock "current", byte 3 for reader count.
 */
typedef struct arch_rwlock {
	unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

#endif
#endif /* _ASM_TILE_SPINLOCK_TYPES_H */
74
arch/tile/include/asm/stack.h
Normal file
@@ -0,0 +1,74 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_STACK_H
#define _ASM_TILE_STACK_H

#include <linux/types.h>
#include <linux/sched.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/* Everything we need to keep track of a backtrace iteration */
struct KBacktraceIterator {
	BacktraceIterator it;
	struct task_struct *task;     /* task we are backtracing */
	int end;                      /* iteration complete. */
	int new_context;              /* new context is starting */
	int profile;                  /* profiling, so stop on async intrpt */
	int verbose;                  /* printk extra info (don't want to
				       * do this for profiling) */
	int is_current;               /* backtracing current task */
};

/* Iteration methods for kernel backtraces */

/*
 * Initialize a KBacktraceIterator from a task_struct, and optionally from
 * a set of registers.  If the registers are omitted, the process is
 * assumed to be descheduled, and registers are read from the process's
 * thread_struct and stack.  "verbose" means to printk some additional
 * information about fault handlers as we pass them on the stack.
 */
extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
				    struct task_struct *, struct pt_regs *);

/* Initialize iterator based on current stack. */
extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt);

/* Helper method for above. */
extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt,
				ulong pc, ulong lr, ulong sp, ulong r52);

/* No more frames? */
extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);

/* Advance to the next frame. */
extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);

/*
 * Dump stack given complete register info. Use only from the
 * architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
extern void tile_show_stack(struct KBacktraceIterator *, int headers);

/* Dump stack of current process, with registers to seed the backtrace. */
extern void dump_stack_regs(struct pt_regs *);

/* Helper method for assembly dump_stack(). */
extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);

#endif /* _ASM_TILE_STACK_H */
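Aside (illustration, not part of the commit): a hedged sketch of the intended iteration pattern. print_current_stack() is hypothetical, and the pc field is assumed to live in the embedded BacktraceIterator; initialize from the current stack, then step until the iterator reports the end.

/* Hypothetical walker, for illustration only. */
static void print_current_stack(void)
{
	struct KBacktraceIterator kbt;

	KBacktraceIterator_init_current(&kbt);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		pr_info("  frame pc %#lx\n", kbt.it.pc);	/* assumed field */
}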
34
arch/tile/include/asm/string.h
Normal file
@@ -0,0 +1,34 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_STRING_H
#define _ASM_TILE_STRING_H

#define __HAVE_ARCH_MEMCHR
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRLEN
#define __HAVE_ARCH_STRNLEN

extern __kernel_size_t strlen(const char *);
extern __kernel_size_t strnlen(const char *, __kernel_size_t);
extern char *strchr(const char *s, int c);
extern void *memchr(const void *s, int c, size_t n);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);

#endif /* _ASM_TILE_STRING_H */
79
arch/tile/include/asm/switch_to.h
Normal file
@@ -0,0 +1,79 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SWITCH_TO_H
#define _ASM_TILE_SWITCH_TO_H

#include <arch/sim_def.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#define CALLEE_SAVED_FIRST_REG 30
#define CALLEE_SAVED_REGS_COUNT 24   /* r30 to r52, plus an empty to align */

#ifndef __ASSEMBLY__

struct task_struct;

/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);

struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
				      struct task_struct *next);

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_k_0);

/* Address that switched-away from tasks are at. */
extern unsigned long get_switch_to_pc(void);

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 */
#define finish_arch_switch(prev) do {                                     \
	if (unlikely((prev)->state == TASK_DEAD))                         \
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
	if (current->mm == NULL && !kstack_hash &&                        \
	    current_thread_info()->homecache_cpu != smp_processor_id())   \
		homecache_migrate_kthread();                              \
} while (0)

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Support function for forking a new kernel thread. */
void ret_from_kernel_thread(void *fn, void *arg);

/* Called from ret_from_xxx() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SWITCH_TO_H */
85
arch/tile/include/asm/syscall.h
Normal file

@@ -0,0 +1,85 @@
/*
 * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * See asm-generic/syscall.h for descriptions of what we must do here.
 */

#ifndef _ASM_TILE_SYSCALL_H
#define _ASM_TILE_SYSCALL_H

#include <linux/sched.h>
#include <linux/err.h>
#include <arch/abi.h>

/* The array of function pointers for syscalls. */
extern void *sys_call_table[];
#ifdef CONFIG_COMPAT
extern void *compat_sys_call_table[];
#endif

/*
 * Only the low 32 bits of orig_r0 are meaningful, so we return int.
 * This importantly ignores the high bits on 64-bit, so comparisons
 * sign-extend the low 32 bits.
 */
static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs)
{
	return regs->regs[TREG_SYSCALL_NR];
}

static inline void syscall_rollback(struct task_struct *task,
				    struct pt_regs *regs)
{
	regs->regs[0] = regs->orig_r0;
}

static inline long syscall_get_error(struct task_struct *task,
				     struct pt_regs *regs)
{
	unsigned long error = regs->regs[0];
	return IS_ERR_VALUE(error) ? error : 0;
}

static inline long syscall_get_return_value(struct task_struct *task,
					    struct pt_regs *regs)
{
	return regs->regs[0];
}

static inline void syscall_set_return_value(struct task_struct *task,
					    struct pt_regs *regs,
					    int error, long val)
{
	regs->regs[0] = (long) error ?: val;
}

static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	BUG_ON(i + n > 6);
	memcpy(args, &regs[i], n * sizeof(args[0]));
}

static inline void syscall_set_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 const unsigned long *args)
{
	BUG_ON(i + n > 6);
	memcpy(&regs[i], args, n * sizeof(args[0]));
}

#endif /* _ASM_TILE_SYSCALL_H */
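[Editorial note: the accessors above are the arch half of the asm-generic/syscall.h tracing interface. As a minimal user-space sketch of the same idea (illustrative only, not part of the mirrored tree; the regs[] array here is a hypothetical stand-in for struct pt_regs):]

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the first six argument registers. */
static unsigned long regs[6] = { 10, 11, 12, 13, 14, 15 };

/* Copy syscall arguments i..i+n-1 out of the register file,
 * mirroring the memcpy() in syscall_get_arguments() above. */
static void get_args(unsigned int i, unsigned int n, unsigned long *args)
{
	memcpy(args, &regs[i], n * sizeof(args[0]));
}

int main(void)
{
	unsigned long args[3];
	get_args(1, 3, args);   /* fetch what would be arguments 1..3 */
	printf("%lu %lu %lu\n", args[0], args[1], args[2]);
	return 0;
}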
70
arch/tile/include/asm/syscalls.h
Normal file

@@ -0,0 +1,70 @@
/*
 * syscalls.h - Linux syscall interfaces (arch-specific)
 *
 * Copyright (c) 2008 Jaswinder Singh Rajput
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SYSCALLS_H
#define _ASM_TILE_SYSCALLS_H

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/compat.h>

/*
 * Note that by convention, any syscall which requires the current
 * register set takes an additional "struct pt_regs *" pointer; a
 * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
 * jumps to sys_xxx().
 */

/* kernel/sys.c */
ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count);
long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
		     u32 len, int advice);
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
		       u32 len_lo, u32 len_hi, int advice);
long sys_cacheflush(unsigned long addr, unsigned long len,
		    unsigned long flags);
#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
#define sys_mmap sys_mmap
#endif

#ifndef __tilegx__
/* mm/fault.c */
long sys_cmpxchg_badaddr(unsigned long address);
#endif

#ifdef CONFIG_COMPAT
/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
long sys_truncate64(const char __user *path, loff_t length);
long sys_ftruncate64(unsigned int fd, loff_t length);
#endif

/* Provide versions of standard syscalls that use current_pt_regs(). */
long sys_rt_sigreturn(void);
#define sys_rt_sigreturn sys_rt_sigreturn

/* These are the intvec*.S trampolines. */
long _sys_rt_sigreturn(void);
long _sys_clone(unsigned long clone_flags, unsigned long newsp,
		void __user *parent_tid, void __user *child_tid);

#include <asm-generic/syscalls.h>

#endif /* _ASM_TILE_SYSCALLS_H */
195
arch/tile/include/asm/thread_info.h
Normal file

@@ -0,0 +1,195 @@
/*
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_THREAD_INFO_H
#define _ASM_TILE_THREAD_INFO_H

#include <asm/processor.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__

/*
 * Low level task data that assembly code needs immediate access to.
 * The structure is placed at the bottom of the supervisor stack.
 */
struct thread_info {
	struct task_struct *task;		/* main task structure */
	struct exec_domain *exec_domain;	/* execution domain */
	unsigned long flags;			/* low level flags */
	unsigned long status;			/* thread-synchronous flags */
	__u32 homecache_cpu;			/* CPU we are homecached on */
	__u32 cpu;				/* current CPU */
	int preempt_count;			/* 0 => preemptable,
						   <0 => BUG */

	mm_segment_t addr_limit;		/* thread address space
						   (KERNEL_DS or USER_DS) */
	struct restart_block restart_block;
	struct single_step_state *step_state;	/* single step state
						   (if non-zero) */
	int align_ctl;				/* controls unaligned access */
#ifdef __tilegx__
	unsigned long unalign_jit_tmp[4];	/* temp r0..r3 storage */
	void __user *unalign_jit_base;		/* unalign fixup JIT base */
#endif
};

/*
 * macros/functions for gaining access to the thread information structure.
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
	.step_state	= NULL,			\
	.align_ctl	= 0,			\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

#endif /* !__ASSEMBLY__ */

#if PAGE_SIZE < 8192
#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER (0)
#endif
#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)

#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)

#define STACK_WARN (THREAD_SIZE/8)

#ifndef __ASSEMBLY__

void arch_release_thread_info(struct thread_info *info);

/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");

#define current_thread_info() \
	((struct thread_info *)(stack_pointer & -THREAD_SIZE))

/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);

/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
extern void _cpu_idle(void);

#else /* __ASSEMBLY__ */

/*
 * How to get the thread information struct from assembly.
 * Note that we use different macros since different architectures
 * have different semantics in their "mm" instruction and we would
 * like to guarantee that the macro expands to exactly one instruction.
 */
#ifdef __tilegx__
#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif

#endif /* !__ASSEMBLY__ */

/*
 * Thread information flags that various assembly files may need to access.
 * Keep flags accessed frequently in low bits, particularly since it makes
 * it easier to build constants in assembly.
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SINGLESTEP		2	/* restore singlestep on return to
					   user mode */
#define TIF_ASYNC_TLB		3	/* got an async TLB fault in kernel */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_MEMDIE		7	/* OOM killer at work */
#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */
#define TIF_POLLING_NRFLAG	10	/* idle is polling for TIF_NEED_RESCHED */

#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_ASYNC_TLB		(1<<TIF_ASYNC_TLB)
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_MEMDIE		(1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)

/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
	(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
	 _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)

/* Work to do at syscall entry. */
#define _TIF_SYSCALL_ENTRY_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

/* Work to do at syscall exit. */
#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#ifdef __tilegx__
#define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
#endif
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
}
static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_THREAD_INFO_H */
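[Editorial note: current_thread_info() above works because each kernel stack is THREAD_SIZE bytes and THREAD_SIZE-aligned, so masking the stack pointer down recovers the thread_info at the stack base. A minimal user-space sketch of that arithmetic (illustrative only, not part of the mirrored tree; the sp value is made up):]

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192   /* an 8 KB, 8 KB-aligned stack, as with 4 KB pages */

int main(void)
{
	/* Pretend this is a stack pointer somewhere inside the stack. */
	uintptr_t sp = 0x12346789;
	/* Same arithmetic as current_thread_info(): clear the low bits.
	 * (The kernel writes this as sp & -THREAD_SIZE, which is equivalent.) */
	uintptr_t ti = sp & ~(uintptr_t)(THREAD_SIZE - 1);
	printf("sp=%#lx thread_info=%#lx\n", (unsigned long)sp,
	       (unsigned long)ti);
	return 0;
}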
19
arch/tile/include/asm/tile-desc.h
Normal file

@@ -0,0 +1,19 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef __tilegx__
#include <asm/tile-desc_32.h>
#else
#include <asm/tile-desc_64.h>
#endif
553
arch/tile/include/asm/tile-desc_32.h
Normal file

@@ -0,0 +1,553 @@
/* TILEPro opcode information.
 *
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 *
 *
 *
 *
 */

#ifndef opcode_tilepro_h
#define opcode_tilepro_h

#include <arch/opcode.h>


enum
{
  TILEPRO_MAX_OPERANDS = 5 /* mm */
};

typedef enum
{
  TILEPRO_OPC_BPT,
  TILEPRO_OPC_INFO,
  TILEPRO_OPC_INFOL,
  TILEPRO_OPC_J,
  TILEPRO_OPC_JAL,
  TILEPRO_OPC_MOVE,
  TILEPRO_OPC_MOVE_SN,
  TILEPRO_OPC_MOVEI,
  TILEPRO_OPC_MOVEI_SN,
  TILEPRO_OPC_MOVELI,
  TILEPRO_OPC_MOVELI_SN,
  TILEPRO_OPC_MOVELIS,
  TILEPRO_OPC_PREFETCH,
  TILEPRO_OPC_RAISE,
  TILEPRO_OPC_ADD,
  TILEPRO_OPC_ADD_SN,
  TILEPRO_OPC_ADDB,
  TILEPRO_OPC_ADDB_SN,
  TILEPRO_OPC_ADDBS_U,
  TILEPRO_OPC_ADDBS_U_SN,
  TILEPRO_OPC_ADDH,
  TILEPRO_OPC_ADDH_SN,
  TILEPRO_OPC_ADDHS,
  TILEPRO_OPC_ADDHS_SN,
  TILEPRO_OPC_ADDI,
  TILEPRO_OPC_ADDI_SN,
  TILEPRO_OPC_ADDIB,
  TILEPRO_OPC_ADDIB_SN,
  TILEPRO_OPC_ADDIH,
  TILEPRO_OPC_ADDIH_SN,
  TILEPRO_OPC_ADDLI,
  TILEPRO_OPC_ADDLI_SN,
  TILEPRO_OPC_ADDLIS,
  TILEPRO_OPC_ADDS,
  TILEPRO_OPC_ADDS_SN,
  TILEPRO_OPC_ADIFFB_U,
  TILEPRO_OPC_ADIFFB_U_SN,
  TILEPRO_OPC_ADIFFH,
  TILEPRO_OPC_ADIFFH_SN,
  TILEPRO_OPC_AND,
  TILEPRO_OPC_AND_SN,
  TILEPRO_OPC_ANDI,
  TILEPRO_OPC_ANDI_SN,
  TILEPRO_OPC_AULI,
  TILEPRO_OPC_AVGB_U,
  TILEPRO_OPC_AVGB_U_SN,
  TILEPRO_OPC_AVGH,
  TILEPRO_OPC_AVGH_SN,
  TILEPRO_OPC_BBNS,
  TILEPRO_OPC_BBNS_SN,
  TILEPRO_OPC_BBNST,
  TILEPRO_OPC_BBNST_SN,
  TILEPRO_OPC_BBS,
  TILEPRO_OPC_BBS_SN,
  TILEPRO_OPC_BBST,
  TILEPRO_OPC_BBST_SN,
  TILEPRO_OPC_BGEZ,
  TILEPRO_OPC_BGEZ_SN,
  TILEPRO_OPC_BGEZT,
  TILEPRO_OPC_BGEZT_SN,
  TILEPRO_OPC_BGZ,
  TILEPRO_OPC_BGZ_SN,
  TILEPRO_OPC_BGZT,
  TILEPRO_OPC_BGZT_SN,
  TILEPRO_OPC_BITX,
  TILEPRO_OPC_BITX_SN,
  TILEPRO_OPC_BLEZ,
  TILEPRO_OPC_BLEZ_SN,
  TILEPRO_OPC_BLEZT,
  TILEPRO_OPC_BLEZT_SN,
  TILEPRO_OPC_BLZ,
  TILEPRO_OPC_BLZ_SN,
  TILEPRO_OPC_BLZT,
  TILEPRO_OPC_BLZT_SN,
  TILEPRO_OPC_BNZ,
  TILEPRO_OPC_BNZ_SN,
  TILEPRO_OPC_BNZT,
  TILEPRO_OPC_BNZT_SN,
  TILEPRO_OPC_BYTEX,
  TILEPRO_OPC_BYTEX_SN,
  TILEPRO_OPC_BZ,
  TILEPRO_OPC_BZ_SN,
  TILEPRO_OPC_BZT,
  TILEPRO_OPC_BZT_SN,
  TILEPRO_OPC_CLZ,
  TILEPRO_OPC_CLZ_SN,
  TILEPRO_OPC_CRC32_32,
  TILEPRO_OPC_CRC32_32_SN,
  TILEPRO_OPC_CRC32_8,
  TILEPRO_OPC_CRC32_8_SN,
  TILEPRO_OPC_CTZ,
  TILEPRO_OPC_CTZ_SN,
  TILEPRO_OPC_DRAIN,
  TILEPRO_OPC_DTLBPR,
  TILEPRO_OPC_DWORD_ALIGN,
  TILEPRO_OPC_DWORD_ALIGN_SN,
  TILEPRO_OPC_FINV,
  TILEPRO_OPC_FLUSH,
  TILEPRO_OPC_FNOP,
  TILEPRO_OPC_ICOH,
  TILEPRO_OPC_ILL,
  TILEPRO_OPC_INTHB,
  TILEPRO_OPC_INTHB_SN,
  TILEPRO_OPC_INTHH,
  TILEPRO_OPC_INTHH_SN,
  TILEPRO_OPC_INTLB,
  TILEPRO_OPC_INTLB_SN,
  TILEPRO_OPC_INTLH,
  TILEPRO_OPC_INTLH_SN,
  TILEPRO_OPC_INV,
  TILEPRO_OPC_IRET,
  TILEPRO_OPC_JALB,
  TILEPRO_OPC_JALF,
  TILEPRO_OPC_JALR,
  TILEPRO_OPC_JALRP,
  TILEPRO_OPC_JB,
  TILEPRO_OPC_JF,
  TILEPRO_OPC_JR,
  TILEPRO_OPC_JRP,
  TILEPRO_OPC_LB,
  TILEPRO_OPC_LB_SN,
  TILEPRO_OPC_LB_U,
  TILEPRO_OPC_LB_U_SN,
  TILEPRO_OPC_LBADD,
  TILEPRO_OPC_LBADD_SN,
  TILEPRO_OPC_LBADD_U,
  TILEPRO_OPC_LBADD_U_SN,
  TILEPRO_OPC_LH,
  TILEPRO_OPC_LH_SN,
  TILEPRO_OPC_LH_U,
  TILEPRO_OPC_LH_U_SN,
  TILEPRO_OPC_LHADD,
  TILEPRO_OPC_LHADD_SN,
  TILEPRO_OPC_LHADD_U,
  TILEPRO_OPC_LHADD_U_SN,
  TILEPRO_OPC_LNK,
  TILEPRO_OPC_LNK_SN,
  TILEPRO_OPC_LW,
  TILEPRO_OPC_LW_SN,
  TILEPRO_OPC_LW_NA,
  TILEPRO_OPC_LW_NA_SN,
  TILEPRO_OPC_LWADD,
  TILEPRO_OPC_LWADD_SN,
  TILEPRO_OPC_LWADD_NA,
  TILEPRO_OPC_LWADD_NA_SN,
  TILEPRO_OPC_MAXB_U,
  TILEPRO_OPC_MAXB_U_SN,
  TILEPRO_OPC_MAXH,
  TILEPRO_OPC_MAXH_SN,
  TILEPRO_OPC_MAXIB_U,
  TILEPRO_OPC_MAXIB_U_SN,
  TILEPRO_OPC_MAXIH,
  TILEPRO_OPC_MAXIH_SN,
  TILEPRO_OPC_MF,
  TILEPRO_OPC_MFSPR,
  TILEPRO_OPC_MINB_U,
  TILEPRO_OPC_MINB_U_SN,
  TILEPRO_OPC_MINH,
  TILEPRO_OPC_MINH_SN,
  TILEPRO_OPC_MINIB_U,
  TILEPRO_OPC_MINIB_U_SN,
  TILEPRO_OPC_MINIH,
  TILEPRO_OPC_MINIH_SN,
  TILEPRO_OPC_MM,
  TILEPRO_OPC_MNZ,
  TILEPRO_OPC_MNZ_SN,
  TILEPRO_OPC_MNZB,
  TILEPRO_OPC_MNZB_SN,
  TILEPRO_OPC_MNZH,
  TILEPRO_OPC_MNZH_SN,
  TILEPRO_OPC_MTSPR,
  TILEPRO_OPC_MULHH_SS,
  TILEPRO_OPC_MULHH_SS_SN,
  TILEPRO_OPC_MULHH_SU,
  TILEPRO_OPC_MULHH_SU_SN,
  TILEPRO_OPC_MULHH_UU,
  TILEPRO_OPC_MULHH_UU_SN,
  TILEPRO_OPC_MULHHA_SS,
  TILEPRO_OPC_MULHHA_SS_SN,
  TILEPRO_OPC_MULHHA_SU,
  TILEPRO_OPC_MULHHA_SU_SN,
  TILEPRO_OPC_MULHHA_UU,
  TILEPRO_OPC_MULHHA_UU_SN,
  TILEPRO_OPC_MULHHSA_UU,
  TILEPRO_OPC_MULHHSA_UU_SN,
  TILEPRO_OPC_MULHL_SS,
  TILEPRO_OPC_MULHL_SS_SN,
  TILEPRO_OPC_MULHL_SU,
  TILEPRO_OPC_MULHL_SU_SN,
  TILEPRO_OPC_MULHL_US,
  TILEPRO_OPC_MULHL_US_SN,
  TILEPRO_OPC_MULHL_UU,
  TILEPRO_OPC_MULHL_UU_SN,
  TILEPRO_OPC_MULHLA_SS,
  TILEPRO_OPC_MULHLA_SS_SN,
  TILEPRO_OPC_MULHLA_SU,
  TILEPRO_OPC_MULHLA_SU_SN,
  TILEPRO_OPC_MULHLA_US,
  TILEPRO_OPC_MULHLA_US_SN,
  TILEPRO_OPC_MULHLA_UU,
  TILEPRO_OPC_MULHLA_UU_SN,
  TILEPRO_OPC_MULHLSA_UU,
  TILEPRO_OPC_MULHLSA_UU_SN,
  TILEPRO_OPC_MULLL_SS,
  TILEPRO_OPC_MULLL_SS_SN,
  TILEPRO_OPC_MULLL_SU,
  TILEPRO_OPC_MULLL_SU_SN,
  TILEPRO_OPC_MULLL_UU,
  TILEPRO_OPC_MULLL_UU_SN,
  TILEPRO_OPC_MULLLA_SS,
  TILEPRO_OPC_MULLLA_SS_SN,
  TILEPRO_OPC_MULLLA_SU,
  TILEPRO_OPC_MULLLA_SU_SN,
  TILEPRO_OPC_MULLLA_UU,
  TILEPRO_OPC_MULLLA_UU_SN,
  TILEPRO_OPC_MULLLSA_UU,
  TILEPRO_OPC_MULLLSA_UU_SN,
  TILEPRO_OPC_MVNZ,
  TILEPRO_OPC_MVNZ_SN,
  TILEPRO_OPC_MVZ,
  TILEPRO_OPC_MVZ_SN,
  TILEPRO_OPC_MZ,
  TILEPRO_OPC_MZ_SN,
  TILEPRO_OPC_MZB,
  TILEPRO_OPC_MZB_SN,
  TILEPRO_OPC_MZH,
  TILEPRO_OPC_MZH_SN,
  TILEPRO_OPC_NAP,
  TILEPRO_OPC_NOP,
  TILEPRO_OPC_NOR,
  TILEPRO_OPC_NOR_SN,
  TILEPRO_OPC_OR,
  TILEPRO_OPC_OR_SN,
  TILEPRO_OPC_ORI,
  TILEPRO_OPC_ORI_SN,
  TILEPRO_OPC_PACKBS_U,
  TILEPRO_OPC_PACKBS_U_SN,
  TILEPRO_OPC_PACKHB,
  TILEPRO_OPC_PACKHB_SN,
  TILEPRO_OPC_PACKHS,
  TILEPRO_OPC_PACKHS_SN,
  TILEPRO_OPC_PACKLB,
  TILEPRO_OPC_PACKLB_SN,
  TILEPRO_OPC_PCNT,
  TILEPRO_OPC_PCNT_SN,
  TILEPRO_OPC_RL,
  TILEPRO_OPC_RL_SN,
  TILEPRO_OPC_RLI,
  TILEPRO_OPC_RLI_SN,
  TILEPRO_OPC_S1A,
  TILEPRO_OPC_S1A_SN,
  TILEPRO_OPC_S2A,
  TILEPRO_OPC_S2A_SN,
  TILEPRO_OPC_S3A,
  TILEPRO_OPC_S3A_SN,
  TILEPRO_OPC_SADAB_U,
  TILEPRO_OPC_SADAB_U_SN,
  TILEPRO_OPC_SADAH,
  TILEPRO_OPC_SADAH_SN,
  TILEPRO_OPC_SADAH_U,
  TILEPRO_OPC_SADAH_U_SN,
  TILEPRO_OPC_SADB_U,
  TILEPRO_OPC_SADB_U_SN,
  TILEPRO_OPC_SADH,
  TILEPRO_OPC_SADH_SN,
  TILEPRO_OPC_SADH_U,
  TILEPRO_OPC_SADH_U_SN,
  TILEPRO_OPC_SB,
  TILEPRO_OPC_SBADD,
  TILEPRO_OPC_SEQ,
  TILEPRO_OPC_SEQ_SN,
  TILEPRO_OPC_SEQB,
  TILEPRO_OPC_SEQB_SN,
  TILEPRO_OPC_SEQH,
  TILEPRO_OPC_SEQH_SN,
  TILEPRO_OPC_SEQI,
  TILEPRO_OPC_SEQI_SN,
  TILEPRO_OPC_SEQIB,
  TILEPRO_OPC_SEQIB_SN,
  TILEPRO_OPC_SEQIH,
  TILEPRO_OPC_SEQIH_SN,
  TILEPRO_OPC_SH,
  TILEPRO_OPC_SHADD,
  TILEPRO_OPC_SHL,
  TILEPRO_OPC_SHL_SN,
  TILEPRO_OPC_SHLB,
  TILEPRO_OPC_SHLB_SN,
  TILEPRO_OPC_SHLH,
  TILEPRO_OPC_SHLH_SN,
  TILEPRO_OPC_SHLI,
  TILEPRO_OPC_SHLI_SN,
  TILEPRO_OPC_SHLIB,
  TILEPRO_OPC_SHLIB_SN,
  TILEPRO_OPC_SHLIH,
  TILEPRO_OPC_SHLIH_SN,
  TILEPRO_OPC_SHR,
  TILEPRO_OPC_SHR_SN,
  TILEPRO_OPC_SHRB,
  TILEPRO_OPC_SHRB_SN,
  TILEPRO_OPC_SHRH,
  TILEPRO_OPC_SHRH_SN,
  TILEPRO_OPC_SHRI,
  TILEPRO_OPC_SHRI_SN,
  TILEPRO_OPC_SHRIB,
  TILEPRO_OPC_SHRIB_SN,
  TILEPRO_OPC_SHRIH,
  TILEPRO_OPC_SHRIH_SN,
  TILEPRO_OPC_SLT,
  TILEPRO_OPC_SLT_SN,
  TILEPRO_OPC_SLT_U,
  TILEPRO_OPC_SLT_U_SN,
  TILEPRO_OPC_SLTB,
  TILEPRO_OPC_SLTB_SN,
  TILEPRO_OPC_SLTB_U,
  TILEPRO_OPC_SLTB_U_SN,
  TILEPRO_OPC_SLTE,
  TILEPRO_OPC_SLTE_SN,
  TILEPRO_OPC_SLTE_U,
  TILEPRO_OPC_SLTE_U_SN,
  TILEPRO_OPC_SLTEB,
  TILEPRO_OPC_SLTEB_SN,
  TILEPRO_OPC_SLTEB_U,
  TILEPRO_OPC_SLTEB_U_SN,
  TILEPRO_OPC_SLTEH,
  TILEPRO_OPC_SLTEH_SN,
  TILEPRO_OPC_SLTEH_U,
  TILEPRO_OPC_SLTEH_U_SN,
  TILEPRO_OPC_SLTH,
  TILEPRO_OPC_SLTH_SN,
  TILEPRO_OPC_SLTH_U,
  TILEPRO_OPC_SLTH_U_SN,
  TILEPRO_OPC_SLTI,
  TILEPRO_OPC_SLTI_SN,
  TILEPRO_OPC_SLTI_U,
  TILEPRO_OPC_SLTI_U_SN,
  TILEPRO_OPC_SLTIB,
  TILEPRO_OPC_SLTIB_SN,
  TILEPRO_OPC_SLTIB_U,
  TILEPRO_OPC_SLTIB_U_SN,
  TILEPRO_OPC_SLTIH,
  TILEPRO_OPC_SLTIH_SN,
  TILEPRO_OPC_SLTIH_U,
  TILEPRO_OPC_SLTIH_U_SN,
  TILEPRO_OPC_SNE,
  TILEPRO_OPC_SNE_SN,
  TILEPRO_OPC_SNEB,
  TILEPRO_OPC_SNEB_SN,
  TILEPRO_OPC_SNEH,
  TILEPRO_OPC_SNEH_SN,
  TILEPRO_OPC_SRA,
  TILEPRO_OPC_SRA_SN,
  TILEPRO_OPC_SRAB,
  TILEPRO_OPC_SRAB_SN,
  TILEPRO_OPC_SRAH,
  TILEPRO_OPC_SRAH_SN,
  TILEPRO_OPC_SRAI,
  TILEPRO_OPC_SRAI_SN,
  TILEPRO_OPC_SRAIB,
  TILEPRO_OPC_SRAIB_SN,
  TILEPRO_OPC_SRAIH,
  TILEPRO_OPC_SRAIH_SN,
  TILEPRO_OPC_SUB,
  TILEPRO_OPC_SUB_SN,
  TILEPRO_OPC_SUBB,
  TILEPRO_OPC_SUBB_SN,
  TILEPRO_OPC_SUBBS_U,
  TILEPRO_OPC_SUBBS_U_SN,
  TILEPRO_OPC_SUBH,
  TILEPRO_OPC_SUBH_SN,
  TILEPRO_OPC_SUBHS,
  TILEPRO_OPC_SUBHS_SN,
  TILEPRO_OPC_SUBS,
  TILEPRO_OPC_SUBS_SN,
  TILEPRO_OPC_SW,
  TILEPRO_OPC_SWADD,
  TILEPRO_OPC_SWINT0,
  TILEPRO_OPC_SWINT1,
  TILEPRO_OPC_SWINT2,
  TILEPRO_OPC_SWINT3,
  TILEPRO_OPC_TBLIDXB0,
  TILEPRO_OPC_TBLIDXB0_SN,
  TILEPRO_OPC_TBLIDXB1,
  TILEPRO_OPC_TBLIDXB1_SN,
  TILEPRO_OPC_TBLIDXB2,
  TILEPRO_OPC_TBLIDXB2_SN,
  TILEPRO_OPC_TBLIDXB3,
  TILEPRO_OPC_TBLIDXB3_SN,
  TILEPRO_OPC_TNS,
  TILEPRO_OPC_TNS_SN,
  TILEPRO_OPC_WH64,
  TILEPRO_OPC_XOR,
  TILEPRO_OPC_XOR_SN,
  TILEPRO_OPC_XORI,
  TILEPRO_OPC_XORI_SN,
  TILEPRO_OPC_NONE
} tilepro_mnemonic;



typedef enum
{
  TILEPRO_PIPELINE_X0,
  TILEPRO_PIPELINE_X1,
  TILEPRO_PIPELINE_Y0,
  TILEPRO_PIPELINE_Y1,
  TILEPRO_PIPELINE_Y2,
} tilepro_pipeline;

#define tilepro_is_x_pipeline(p) ((int)(p) <= (int)TILEPRO_PIPELINE_X1)

typedef enum
{
  TILEPRO_OP_TYPE_REGISTER,
  TILEPRO_OP_TYPE_IMMEDIATE,
  TILEPRO_OP_TYPE_ADDRESS,
  TILEPRO_OP_TYPE_SPR
} tilepro_operand_type;

struct tilepro_operand
{
  /* Is this operand a register, immediate or address? */
  tilepro_operand_type type;

  /* The default relocation type for this operand. */
  signed int default_reloc : 16;

  /* How many bits is this value? (used for range checking) */
  unsigned int num_bits : 5;

  /* Is the value signed? (used for range checking) */
  unsigned int is_signed : 1;

  /* Is this operand a source register? */
  unsigned int is_src_reg : 1;

  /* Is this operand written? (i.e. is it a destination register) */
  unsigned int is_dest_reg : 1;

  /* Is this operand PC-relative? */
  unsigned int is_pc_relative : 1;

  /* By how many bits do we right shift the value before inserting? */
  unsigned int rightshift : 2;

  /* Return the bits for this operand to be ORed into an existing bundle. */
  tilepro_bundle_bits (*insert) (int op);

  /* Extract this operand and return it. */
  unsigned int (*extract) (tilepro_bundle_bits bundle);
};


extern const struct tilepro_operand tilepro_operands[];

/* One finite-state machine per pipe for rapid instruction decoding. */
extern const unsigned short * const
tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS];


struct tilepro_opcode
{
  /* The opcode mnemonic, e.g. "add" */
  const char *name;

  /* The enum value for this mnemonic. */
  tilepro_mnemonic mnemonic;

  /* A bit mask of which of the five pipes this instruction
     is compatible with:
     X0  0x01
     X1  0x02
     Y0  0x04
     Y1  0x08
     Y2  0x10 */
  unsigned char pipes;

  /* How many operands are there? */
  unsigned char num_operands;

  /* Which register does this write implicitly, or TREG_ZERO if none? */
  unsigned char implicitly_written_register;

  /* Can this be bundled with other instructions (almost always true). */
  unsigned char can_bundle;

  /* The description of the operands. Each of these is an
   * index into the tilepro_operands[] table. */
  unsigned char operands[TILEPRO_NUM_PIPELINE_ENCODINGS][TILEPRO_MAX_OPERANDS];

};

extern const struct tilepro_opcode tilepro_opcodes[];


/* Used for non-textual disassembly into structs. */
struct tilepro_decoded_instruction
{
  const struct tilepro_opcode *opcode;
  const struct tilepro_operand *operands[TILEPRO_MAX_OPERANDS];
  int operand_values[TILEPRO_MAX_OPERANDS];
};


/* Disassemble a bundle into a struct for machine processing. */
extern int parse_insn_tilepro(tilepro_bundle_bits bits,
                              unsigned int pc,
                              struct tilepro_decoded_instruction
                              decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE]);


/* Given a set of bundle bits and a specific pipe, returns which
 * instruction the bundle contains in that pipe.
 */
extern const struct tilepro_opcode *
find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe);


#endif /* opcode_tilepro_h */
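[Editorial note: a hedged sketch of how the decode interface above is meant to be driven, assuming, as its in-tree callers appear to, that parse_insn_tilepro() returns the number of instructions it decoded from the bundle; illustrative only, not part of the mirrored tree:]

/* Walk the instructions packed into one TILEPro bundle and print
 * each mnemonic (kernel context assumed for printk). */
static void dump_bundle(tilepro_bundle_bits bits, unsigned int pc)
{
	struct tilepro_decoded_instruction
		decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE];
	int i, n;

	n = parse_insn_tilepro(bits, pc, decoded);
	for (i = 0; i < n; i++)
		printk("insn %d: %s\n", i, decoded[i].opcode->name);
}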
483
arch/tile/include/asm/tile-desc_64.h
Normal file

@@ -0,0 +1,483 @@
/* TILE-Gx opcode information.
 *
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 *
 *
 *
 *
 */

#ifndef opcode_tilegx_h
#define opcode_tilegx_h

#include <arch/opcode.h>


enum
{
  TILEGX_MAX_OPERANDS = 4 /* bfexts */
};

typedef enum
{
  TILEGX_OPC_BPT,
  TILEGX_OPC_INFO,
  TILEGX_OPC_INFOL,
  TILEGX_OPC_MOVE,
  TILEGX_OPC_MOVEI,
  TILEGX_OPC_MOVELI,
  TILEGX_OPC_PREFETCH,
  TILEGX_OPC_PREFETCH_ADD_L1,
  TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
  TILEGX_OPC_PREFETCH_ADD_L2,
  TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
  TILEGX_OPC_PREFETCH_ADD_L3,
  TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
  TILEGX_OPC_PREFETCH_L1,
  TILEGX_OPC_PREFETCH_L1_FAULT,
  TILEGX_OPC_PREFETCH_L2,
  TILEGX_OPC_PREFETCH_L2_FAULT,
  TILEGX_OPC_PREFETCH_L3,
  TILEGX_OPC_PREFETCH_L3_FAULT,
  TILEGX_OPC_RAISE,
  TILEGX_OPC_ADD,
  TILEGX_OPC_ADDI,
  TILEGX_OPC_ADDLI,
  TILEGX_OPC_ADDX,
  TILEGX_OPC_ADDXI,
  TILEGX_OPC_ADDXLI,
  TILEGX_OPC_ADDXSC,
  TILEGX_OPC_AND,
  TILEGX_OPC_ANDI,
  TILEGX_OPC_BEQZ,
  TILEGX_OPC_BEQZT,
  TILEGX_OPC_BFEXTS,
  TILEGX_OPC_BFEXTU,
  TILEGX_OPC_BFINS,
  TILEGX_OPC_BGEZ,
  TILEGX_OPC_BGEZT,
  TILEGX_OPC_BGTZ,
  TILEGX_OPC_BGTZT,
  TILEGX_OPC_BLBC,
  TILEGX_OPC_BLBCT,
  TILEGX_OPC_BLBS,
  TILEGX_OPC_BLBST,
  TILEGX_OPC_BLEZ,
  TILEGX_OPC_BLEZT,
  TILEGX_OPC_BLTZ,
  TILEGX_OPC_BLTZT,
  TILEGX_OPC_BNEZ,
  TILEGX_OPC_BNEZT,
  TILEGX_OPC_CLZ,
  TILEGX_OPC_CMOVEQZ,
  TILEGX_OPC_CMOVNEZ,
  TILEGX_OPC_CMPEQ,
  TILEGX_OPC_CMPEQI,
  TILEGX_OPC_CMPEXCH,
  TILEGX_OPC_CMPEXCH4,
  TILEGX_OPC_CMPLES,
  TILEGX_OPC_CMPLEU,
  TILEGX_OPC_CMPLTS,
  TILEGX_OPC_CMPLTSI,
  TILEGX_OPC_CMPLTU,
  TILEGX_OPC_CMPLTUI,
  TILEGX_OPC_CMPNE,
  TILEGX_OPC_CMUL,
  TILEGX_OPC_CMULA,
  TILEGX_OPC_CMULAF,
  TILEGX_OPC_CMULF,
  TILEGX_OPC_CMULFR,
  TILEGX_OPC_CMULH,
  TILEGX_OPC_CMULHR,
  TILEGX_OPC_CRC32_32,
  TILEGX_OPC_CRC32_8,
  TILEGX_OPC_CTZ,
  TILEGX_OPC_DBLALIGN,
  TILEGX_OPC_DBLALIGN2,
  TILEGX_OPC_DBLALIGN4,
  TILEGX_OPC_DBLALIGN6,
  TILEGX_OPC_DRAIN,
  TILEGX_OPC_DTLBPR,
  TILEGX_OPC_EXCH,
  TILEGX_OPC_EXCH4,
  TILEGX_OPC_FDOUBLE_ADD_FLAGS,
  TILEGX_OPC_FDOUBLE_ADDSUB,
  TILEGX_OPC_FDOUBLE_MUL_FLAGS,
  TILEGX_OPC_FDOUBLE_PACK1,
  TILEGX_OPC_FDOUBLE_PACK2,
  TILEGX_OPC_FDOUBLE_SUB_FLAGS,
  TILEGX_OPC_FDOUBLE_UNPACK_MAX,
  TILEGX_OPC_FDOUBLE_UNPACK_MIN,
  TILEGX_OPC_FETCHADD,
  TILEGX_OPC_FETCHADD4,
  TILEGX_OPC_FETCHADDGEZ,
  TILEGX_OPC_FETCHADDGEZ4,
  TILEGX_OPC_FETCHAND,
  TILEGX_OPC_FETCHAND4,
  TILEGX_OPC_FETCHOR,
  TILEGX_OPC_FETCHOR4,
  TILEGX_OPC_FINV,
  TILEGX_OPC_FLUSH,
  TILEGX_OPC_FLUSHWB,
  TILEGX_OPC_FNOP,
  TILEGX_OPC_FSINGLE_ADD1,
  TILEGX_OPC_FSINGLE_ADDSUB2,
  TILEGX_OPC_FSINGLE_MUL1,
  TILEGX_OPC_FSINGLE_MUL2,
  TILEGX_OPC_FSINGLE_PACK1,
  TILEGX_OPC_FSINGLE_PACK2,
  TILEGX_OPC_FSINGLE_SUB1,
  TILEGX_OPC_ICOH,
  TILEGX_OPC_ILL,
  TILEGX_OPC_INV,
  TILEGX_OPC_IRET,
  TILEGX_OPC_J,
  TILEGX_OPC_JAL,
  TILEGX_OPC_JALR,
  TILEGX_OPC_JALRP,
  TILEGX_OPC_JR,
  TILEGX_OPC_JRP,
  TILEGX_OPC_LD,
  TILEGX_OPC_LD1S,
  TILEGX_OPC_LD1S_ADD,
  TILEGX_OPC_LD1U,
  TILEGX_OPC_LD1U_ADD,
  TILEGX_OPC_LD2S,
  TILEGX_OPC_LD2S_ADD,
  TILEGX_OPC_LD2U,
  TILEGX_OPC_LD2U_ADD,
  TILEGX_OPC_LD4S,
  TILEGX_OPC_LD4S_ADD,
  TILEGX_OPC_LD4U,
  TILEGX_OPC_LD4U_ADD,
  TILEGX_OPC_LD_ADD,
  TILEGX_OPC_LDNA,
  TILEGX_OPC_LDNA_ADD,
  TILEGX_OPC_LDNT,
  TILEGX_OPC_LDNT1S,
  TILEGX_OPC_LDNT1S_ADD,
  TILEGX_OPC_LDNT1U,
  TILEGX_OPC_LDNT1U_ADD,
  TILEGX_OPC_LDNT2S,
  TILEGX_OPC_LDNT2S_ADD,
  TILEGX_OPC_LDNT2U,
  TILEGX_OPC_LDNT2U_ADD,
  TILEGX_OPC_LDNT4S,
  TILEGX_OPC_LDNT4S_ADD,
  TILEGX_OPC_LDNT4U,
  TILEGX_OPC_LDNT4U_ADD,
  TILEGX_OPC_LDNT_ADD,
  TILEGX_OPC_LNK,
  TILEGX_OPC_MF,
  TILEGX_OPC_MFSPR,
  TILEGX_OPC_MM,
  TILEGX_OPC_MNZ,
  TILEGX_OPC_MTSPR,
  TILEGX_OPC_MUL_HS_HS,
  TILEGX_OPC_MUL_HS_HU,
  TILEGX_OPC_MUL_HS_LS,
  TILEGX_OPC_MUL_HS_LU,
  TILEGX_OPC_MUL_HU_HU,
  TILEGX_OPC_MUL_HU_LS,
  TILEGX_OPC_MUL_HU_LU,
  TILEGX_OPC_MUL_LS_LS,
  TILEGX_OPC_MUL_LS_LU,
  TILEGX_OPC_MUL_LU_LU,
  TILEGX_OPC_MULA_HS_HS,
  TILEGX_OPC_MULA_HS_HU,
  TILEGX_OPC_MULA_HS_LS,
  TILEGX_OPC_MULA_HS_LU,
  TILEGX_OPC_MULA_HU_HU,
  TILEGX_OPC_MULA_HU_LS,
  TILEGX_OPC_MULA_HU_LU,
  TILEGX_OPC_MULA_LS_LS,
  TILEGX_OPC_MULA_LS_LU,
  TILEGX_OPC_MULA_LU_LU,
  TILEGX_OPC_MULAX,
  TILEGX_OPC_MULX,
  TILEGX_OPC_MZ,
  TILEGX_OPC_NAP,
  TILEGX_OPC_NOP,
  TILEGX_OPC_NOR,
  TILEGX_OPC_OR,
  TILEGX_OPC_ORI,
  TILEGX_OPC_PCNT,
  TILEGX_OPC_REVBITS,
  TILEGX_OPC_REVBYTES,
  TILEGX_OPC_ROTL,
  TILEGX_OPC_ROTLI,
  TILEGX_OPC_SHL,
  TILEGX_OPC_SHL16INSLI,
  TILEGX_OPC_SHL1ADD,
  TILEGX_OPC_SHL1ADDX,
  TILEGX_OPC_SHL2ADD,
  TILEGX_OPC_SHL2ADDX,
  TILEGX_OPC_SHL3ADD,
  TILEGX_OPC_SHL3ADDX,
  TILEGX_OPC_SHLI,
  TILEGX_OPC_SHLX,
  TILEGX_OPC_SHLXI,
  TILEGX_OPC_SHRS,
  TILEGX_OPC_SHRSI,
  TILEGX_OPC_SHRU,
  TILEGX_OPC_SHRUI,
  TILEGX_OPC_SHRUX,
  TILEGX_OPC_SHRUXI,
  TILEGX_OPC_SHUFFLEBYTES,
  TILEGX_OPC_ST,
  TILEGX_OPC_ST1,
  TILEGX_OPC_ST1_ADD,
  TILEGX_OPC_ST2,
  TILEGX_OPC_ST2_ADD,
  TILEGX_OPC_ST4,
  TILEGX_OPC_ST4_ADD,
  TILEGX_OPC_ST_ADD,
  TILEGX_OPC_STNT,
  TILEGX_OPC_STNT1,
  TILEGX_OPC_STNT1_ADD,
  TILEGX_OPC_STNT2,
  TILEGX_OPC_STNT2_ADD,
  TILEGX_OPC_STNT4,
  TILEGX_OPC_STNT4_ADD,
  TILEGX_OPC_STNT_ADD,
  TILEGX_OPC_SUB,
  TILEGX_OPC_SUBX,
  TILEGX_OPC_SUBXSC,
  TILEGX_OPC_SWINT0,
  TILEGX_OPC_SWINT1,
  TILEGX_OPC_SWINT2,
  TILEGX_OPC_SWINT3,
  TILEGX_OPC_TBLIDXB0,
  TILEGX_OPC_TBLIDXB1,
  TILEGX_OPC_TBLIDXB2,
  TILEGX_OPC_TBLIDXB3,
  TILEGX_OPC_V1ADD,
  TILEGX_OPC_V1ADDI,
  TILEGX_OPC_V1ADDUC,
  TILEGX_OPC_V1ADIFFU,
  TILEGX_OPC_V1AVGU,
  TILEGX_OPC_V1CMPEQ,
  TILEGX_OPC_V1CMPEQI,
  TILEGX_OPC_V1CMPLES,
  TILEGX_OPC_V1CMPLEU,
  TILEGX_OPC_V1CMPLTS,
  TILEGX_OPC_V1CMPLTSI,
  TILEGX_OPC_V1CMPLTU,
  TILEGX_OPC_V1CMPLTUI,
  TILEGX_OPC_V1CMPNE,
  TILEGX_OPC_V1DDOTPU,
  TILEGX_OPC_V1DDOTPUA,
  TILEGX_OPC_V1DDOTPUS,
  TILEGX_OPC_V1DDOTPUSA,
  TILEGX_OPC_V1DOTP,
  TILEGX_OPC_V1DOTPA,
  TILEGX_OPC_V1DOTPU,
  TILEGX_OPC_V1DOTPUA,
  TILEGX_OPC_V1DOTPUS,
  TILEGX_OPC_V1DOTPUSA,
  TILEGX_OPC_V1INT_H,
  TILEGX_OPC_V1INT_L,
  TILEGX_OPC_V1MAXU,
  TILEGX_OPC_V1MAXUI,
  TILEGX_OPC_V1MINU,
  TILEGX_OPC_V1MINUI,
  TILEGX_OPC_V1MNZ,
  TILEGX_OPC_V1MULTU,
  TILEGX_OPC_V1MULU,
  TILEGX_OPC_V1MULUS,
  TILEGX_OPC_V1MZ,
  TILEGX_OPC_V1SADAU,
  TILEGX_OPC_V1SADU,
  TILEGX_OPC_V1SHL,
  TILEGX_OPC_V1SHLI,
  TILEGX_OPC_V1SHRS,
  TILEGX_OPC_V1SHRSI,
  TILEGX_OPC_V1SHRU,
  TILEGX_OPC_V1SHRUI,
  TILEGX_OPC_V1SUB,
  TILEGX_OPC_V1SUBUC,
  TILEGX_OPC_V2ADD,
  TILEGX_OPC_V2ADDI,
  TILEGX_OPC_V2ADDSC,
  TILEGX_OPC_V2ADIFFS,
  TILEGX_OPC_V2AVGS,
  TILEGX_OPC_V2CMPEQ,
  TILEGX_OPC_V2CMPEQI,
  TILEGX_OPC_V2CMPLES,
  TILEGX_OPC_V2CMPLEU,
  TILEGX_OPC_V2CMPLTS,
  TILEGX_OPC_V2CMPLTSI,
  TILEGX_OPC_V2CMPLTU,
  TILEGX_OPC_V2CMPLTUI,
  TILEGX_OPC_V2CMPNE,
  TILEGX_OPC_V2DOTP,
  TILEGX_OPC_V2DOTPA,
  TILEGX_OPC_V2INT_H,
  TILEGX_OPC_V2INT_L,
  TILEGX_OPC_V2MAXS,
  TILEGX_OPC_V2MAXSI,
  TILEGX_OPC_V2MINS,
  TILEGX_OPC_V2MINSI,
  TILEGX_OPC_V2MNZ,
  TILEGX_OPC_V2MULFSC,
  TILEGX_OPC_V2MULS,
  TILEGX_OPC_V2MULTS,
  TILEGX_OPC_V2MZ,
  TILEGX_OPC_V2PACKH,
  TILEGX_OPC_V2PACKL,
  TILEGX_OPC_V2PACKUC,
  TILEGX_OPC_V2SADAS,
  TILEGX_OPC_V2SADAU,
  TILEGX_OPC_V2SADS,
  TILEGX_OPC_V2SADU,
  TILEGX_OPC_V2SHL,
  TILEGX_OPC_V2SHLI,
  TILEGX_OPC_V2SHLSC,
  TILEGX_OPC_V2SHRS,
  TILEGX_OPC_V2SHRSI,
  TILEGX_OPC_V2SHRU,
  TILEGX_OPC_V2SHRUI,
  TILEGX_OPC_V2SUB,
  TILEGX_OPC_V2SUBSC,
  TILEGX_OPC_V4ADD,
  TILEGX_OPC_V4ADDSC,
  TILEGX_OPC_V4INT_H,
  TILEGX_OPC_V4INT_L,
  TILEGX_OPC_V4PACKSC,
  TILEGX_OPC_V4SHL,
  TILEGX_OPC_V4SHLSC,
  TILEGX_OPC_V4SHRS,
  TILEGX_OPC_V4SHRU,
  TILEGX_OPC_V4SUB,
  TILEGX_OPC_V4SUBSC,
  TILEGX_OPC_WH64,
  TILEGX_OPC_XOR,
  TILEGX_OPC_XORI,
  TILEGX_OPC_NONE
} tilegx_mnemonic;



typedef enum
{
  TILEGX_PIPELINE_X0,
  TILEGX_PIPELINE_X1,
  TILEGX_PIPELINE_Y0,
  TILEGX_PIPELINE_Y1,
  TILEGX_PIPELINE_Y2,
} tilegx_pipeline;

#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1)

typedef enum
{
  TILEGX_OP_TYPE_REGISTER,
  TILEGX_OP_TYPE_IMMEDIATE,
  TILEGX_OP_TYPE_ADDRESS,
  TILEGX_OP_TYPE_SPR
} tilegx_operand_type;

struct tilegx_operand
{
  /* Is this operand a register, immediate or address? */
  tilegx_operand_type type;

  /* The default relocation type for this operand. */
  signed int default_reloc : 16;

  /* How many bits is this value? (used for range checking) */
  unsigned int num_bits : 5;

  /* Is the value signed? (used for range checking) */
  unsigned int is_signed : 1;

  /* Is this operand a source register? */
  unsigned int is_src_reg : 1;

  /* Is this operand written? (i.e. is it a destination register) */
  unsigned int is_dest_reg : 1;

  /* Is this operand PC-relative? */
  unsigned int is_pc_relative : 1;

  /* By how many bits do we right shift the value before inserting? */
  unsigned int rightshift : 2;

  /* Return the bits for this operand to be ORed into an existing bundle. */
  tilegx_bundle_bits (*insert) (int op);

  /* Extract this operand and return it. */
  unsigned int (*extract) (tilegx_bundle_bits bundle);
};


extern const struct tilegx_operand tilegx_operands[];

/* One finite-state machine per pipe for rapid instruction decoding. */
extern const unsigned short * const
tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];


struct tilegx_opcode
{
  /* The opcode mnemonic, e.g. "add" */
  const char *name;

  /* The enum value for this mnemonic. */
  tilegx_mnemonic mnemonic;

  /* A bit mask of which of the five pipes this instruction
     is compatible with:
     X0  0x01
     X1  0x02
     Y0  0x04
     Y1  0x08
     Y2  0x10 */
  unsigned char pipes;

  /* How many operands are there? */
  unsigned char num_operands;

  /* Which register does this write implicitly, or TREG_ZERO if none? */
  unsigned char implicitly_written_register;

  /* Can this be bundled with other instructions (almost always true). */
  unsigned char can_bundle;

  /* The description of the operands. Each of these is an
   * index into the tilegx_operands[] table. */
  unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];

};

extern const struct tilegx_opcode tilegx_opcodes[];

/* Used for non-textual disassembly into structs. */
struct tilegx_decoded_instruction
{
  const struct tilegx_opcode *opcode;
  const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
  long long operand_values[TILEGX_MAX_OPERANDS];
};


/* Disassemble a bundle into a struct for machine processing. */
extern int parse_insn_tilegx(tilegx_bundle_bits bits,
                             unsigned long long pc,
                             struct tilegx_decoded_instruction
                             decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]);


#endif /* opcode_tilegx_h */
52
arch/tile/include/asm/timex.h
Normal file

@@ -0,0 +1,52 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TIMEX_H
#define _ASM_TILE_TIMEX_H

/*
 * This rate should be a multiple of the possible HZ values (100, 250, 1000)
 * and a fraction of the possible hardware timer frequencies. Our timer
 * frequency is highly tunable but also quite precise, so for the primary use
 * of this value (setting ACT_HZ from HZ) we just pick a value that causes
 * ACT_HZ to be set to HZ. We make the value somewhat large just to be
 * more robust in case someone tries out a new value of HZ.
 */
#define CLOCK_TICK_RATE 1000000

typedef unsigned long long cycles_t;

#if CHIP_HAS_SPLIT_CYCLE()
cycles_t get_cycles(void);
#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
#else
static inline cycles_t get_cycles(void)
{
	return __insn_mfspr(SPR_CYCLE);
}
#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
#endif

cycles_t get_clock_rate(void);

/* Convert nanoseconds to core clock cycles. */
cycles_t ns2cycles(unsigned long nsecs);

/* Called at cpu initialization to set some low-level constants. */
void setup_clock(void);

/* Called at cpu initialization to start the tile-timer clock device. */
void setup_tile_timer(void);

#endif /* _ASM_TILE_TIMEX_H */
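[Editorial note: a minimal sketch of timing with the interface above, assuming kernel context and that get_clock_rate() returns cycles per second as its name suggests; illustrative only, not part of the mirrored tree:]

/* Time a region in core-clock cycles via get_cycles(), then convert
 * the delta to nanoseconds: ns = cycles * 1e9 / Hz. */
static unsigned long long time_region_ns(void (*fn)(void))
{
	cycles_t start, delta;

	start = get_cycles();
	fn();
	delta = get_cycles() - start;
	return (unsigned long long)delta * 1000000000ULL / get_clock_rate();
}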
25
arch/tile/include/asm/tlb.h
Normal file

@@ -0,0 +1,25 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TLB_H
#define _ASM_TILE_TLB_H

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#endif /* _ASM_TILE_TLB_H */
123
arch/tile/include/asm/tlbflush.h
Normal file

@@ -0,0 +1,123 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TLBFLUSH_H
#define _ASM_TILE_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/*
 * Rather than associating each mm with its own ASID, we just use
 * ASIDs to allow us to lazily flush the TLB when we switch mms.
 * This way we only have to do an actual TLB flush on mm switch
 * every time we wrap ASIDs, not every single time we switch.
 *
 * FIXME: We might improve performance by keeping ASIDs around
 * properly, though since the hypervisor direct-maps VAs to TSB
 * entries, we're likely to have lost at least the executable page
 * mappings by the time we switch back to the original mm.
 */
DECLARE_PER_CPU(int, current_asid);

/* The hypervisor tells us what ASIDs are available to us. */
extern int min_asid, max_asid;

/* Pass as vma pointer for non-executable mapping, if no vma available. */
#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)

/* Flush a single user page on this cpu. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long page_size)
{
	int rc = hv_flush_page(addr, page_size);
	if (rc < 0)
		panic("hv_flush_page(%#lx,%#lx) failed: %d",
		      addr, page_size, rc);
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}

/* Flush range of user pages on this cpu. */
static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
					 unsigned long addr,
					 unsigned long page_size,
					 unsigned long len)
{
	int rc = hv_flush_pages(addr, page_size, len);
	if (rc < 0)
		panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d",
		      addr, page_size, len, rc);
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}

/* Flush all user pages on this cpu. */
static inline void local_flush_tlb(void)
{
	int rc = hv_flush_all(1);   /* preserve global mappings */
	if (rc < 0)
		panic("hv_flush_all(1) failed: %d", rc);
	__flush_icache();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static inline void local_flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
		local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size);
		local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size);
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * Here (as in vm_area_struct), "end" means the first byte after
 * our end address.
 */

extern void flush_tlb_all(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_page_mm(struct vm_area_struct *,
			      struct mm_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *,
			    unsigned long start, unsigned long end);

#define flush_tlb() flush_tlb_current_task()

#endif /* _ASM_TILE_TLBFLUSH_H */
61
arch/tile/include/asm/topology.h
Normal file

@@ -0,0 +1,61 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TOPOLOGY_H
#define _ASM_TILE_TOPOLOGY_H

#ifdef CONFIG_NUMA

#include <linux/cpumask.h>

/* Mappings between logical cpu number and node number. */
extern struct cpumask node_2_cpu_mask[];
extern char cpu_2_node[];

/* Returns the number of the node containing CPU 'cpu'. */
static inline int cpu_to_node(int cpu)
{
	return cpu_2_node[cpu];
}

/*
 * Returns the number of the node containing Node 'node'.
 * This architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return &node_2_cpu_mask[node];
}

/* For now, use numa node -1 for global allocation. */
#define pcibus_to_node(bus) ((void)(bus), -1)

/* By definition, we create nodes based on online memory. */
#define node_has_online_mem(nid) 1

#endif /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) ((void)(cpu), 0)
#define topology_core_id(cpu) (cpu)
#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
#endif

#endif /* _ASM_TILE_TOPOLOGY_H */
85
arch/tile/include/asm/traps.h
Normal file

@@ -0,0 +1,85 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H

#ifndef __ASSEMBLY__
#include <arch/chip.h>

/* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num,
		   unsigned long address, unsigned long write);
#if CHIP_HAS_TILE_DMA()
void do_async_page_fault(struct pt_regs *);
#endif

#ifndef __tilegx__
/*
 * We return this structure in registers to avoid having to write
 * additional save/restore code in the intvec.S caller.
 */
struct intvec_state {
	void *handler;
	unsigned long vecnum;
	unsigned long fault_num;
	unsigned long info;
	unsigned long retval;
};
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info);
#endif

/* kernel/traps.c */
void do_trap(struct pt_regs *, int fault_num, unsigned long reason);
void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);

/* kernel/time.c */
void do_timer_interrupt(struct pt_regs *, int fault_num);

/* kernel/messaging.c */
void hv_message_intr(struct pt_regs *, int intnum);

/* kernel/irq.c */
void tile_dev_intr(struct pt_regs *, int intnum);

#ifdef CONFIG_HARDWALL
/* kernel/hardwall.c */
void do_hardwall_trap(struct pt_regs *, int fault_num);
#endif

/* kernel/ptrace.c */
void do_breakpoint(struct pt_regs *, int fault_num);


#ifdef __tilegx__
/* kernel/single_step.c */
void gx_singlestep_handle(struct pt_regs *, int fault_num);

/* kernel/intvec_64.S */
void fill_ra_stack(void);

/* Handle unalign data fixup. */
extern void do_unaligned(struct pt_regs *regs, int vecnum);
#endif

#endif /* __ASSEMBLY__ */

#ifdef __tilegx__
/* 128 byte JIT per unalign fixup. */
#define UNALIGN_JIT_SHIFT 7
#endif

#endif /* _ASM_TILE_TRAPS_H */
602
arch/tile/include/asm/uaccess.h
Normal file
602
arch/tile/include/asm/uaccess.h
Normal file
|
|
@ -0,0 +1,602 @@
|
|||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
					 unsigned long size)
{
	return (addr >= MEM_USER_INTRPT &&
		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
	__chk_user_ptr(addr); \
	likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})
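
/*
 * Usage sketch (illustrative, not part of the original header): a
 * typical caller validates the whole user range once with access_ok()
 * and may then use the cheaper unchecked accessors declared below,
 * e.g.:
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;		// pointer not in the user range
 *	ret = __get_user(val, uptr);	// can still fault -> -EFAULT
 */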

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * Support macros for __get_user().
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type.  This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 *
 * Note that __get_user() and __put_user() assume proper alignment.
 */
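
/*
 * Illustrative aside (not part of the original header): the "(x)-(x)"
 * trick works because of C's usual arithmetic conversions.  For an
 * integer x the difference has a (promoted) integer type, while for a
 * pointer x it is ptrdiff_t, so the cast is always through an integer
 * type:
 *
 *	int *p;
 *	__typeof(p - p) d;	// d is ptrdiff_t, not int *
 *	long v;
 *	__typeof(v - v) w;	// w is long
 */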

#ifdef __LP64__
#define _ASM_PTR	".quad"
#define _ASM_ALIGN	".align 8"
#else
#define _ASM_PTR	".long"
#define _ASM_ALIGN	".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %1, 0; movei %0, %3 }\n"		\
		     "j 9f\n"						\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret), "=r" (x)				\
		     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)					\
	({								\
		unsigned int __a, __b;					\
		asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"	\
			     "2: { lw %2, %2; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %1, 0; movei %2, 0 }\n"	\
			     "{ movei %0, %4; j 9f }\n"			\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=r" (ret), "=r" (__a), "=&r" (__b)	\
			     : "r" (ptr), "i" (-EFAULT));		\
		(x) = (__typeof(x))(__typeof((x)-(x)))			\
			(((u64)__hi32(__a, __b) << 32) |		\
			 __lo32(__a, __b));				\
	})
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)						\
	({								\
		int __ret;						\
		__chk_user_ptr(ptr);					\
		switch (sizeof(*(ptr))) {				\
		case 1: __get_user_1(x, ptr, __ret); break;		\
		case 2: __get_user_2(x, ptr, __ret); break;		\
		case 4: __get_user_4(x, ptr, __ret); break;		\
		case 8: __get_user_8(x, ptr, __ret); break;		\
		default: __ret = __get_user_bad(); break;		\
		}							\
		__ret;							\
	})

/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %0, %3; j 9f }\n"			\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret)					\
		     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)					\
	({								\
		u64 __x = (__typeof((x)-(x)))(x);			\
		int __lo = (int) __x, __hi = (int) (__x >> 32);		\
		asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"	\
			     "2: { sw %0, %3; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %0, %4; j 9f }\n"		\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=&r" (ret)				\
			     : "r" (ptr), "r" (__lo32(__lo, __hi)),	\
			       "r" (__hi32(__lo, __hi)), "i" (-EFAULT));\
	})
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	({								\
		int __ret;						\
		__chk_user_ptr(ptr);					\
		switch (sizeof(*(ptr))) {				\
		case 1: __put_user_1(x, ptr, __ret); break;		\
		case 2: __put_user_2(x, ptr, __ret); break;		\
		case 4: __put_user_4(x, ptr, __ret); break;		\
		case 8: __put_user_8(x, ptr, __ret); break;		\
		default: __ret = __put_user_bad(); break;		\
		}							\
		__ret;							\
	})

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
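
/*
 * Usage sketch (illustrative, not part of the original header): the
 * checked variants above are what ordinary syscall-level code calls
 * directly.  The helper below is hypothetical; it bumps a counter that
 * lives in user memory, propagating -EFAULT in either direction.
 */
static inline int example_bump_user_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* validates the address itself */
		return -EFAULT;
	return put_user(val + 1, uptr);
}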

/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
	void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
	void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
	void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34, so this
 * option is not really compatible with -Werror, which is more useful in
 * general.
 */
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
					const void __user *from,
					unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif
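
/*
 * Usage sketch (illustrative, not part of the original header): the
 * bulk copies return the number of *uncopied* bytes rather than an
 * errno, so callers conventionally convert any nonzero remainder to
 * -EFAULT.  The struct and helper below are hypothetical.
 */
struct example_req { unsigned long arg; };

static inline int example_fetch_req(struct example_req *kreq,
				    const struct example_req __user *ureq)
{
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;		/* partial or failed copy */
	return 0;
}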

#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
	void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = __copy_in_user(to, from, n);
	return n;
}
#endif


/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
	might_fault();
	return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	might_fault();
	return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}
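
/*
 * Usage sketch (illustrative, not part of the original header):
 * fetching a bounded string such as a name.  Per the return convention
 * above, a result equal to the buffer size means the string was
 * truncated and is not NUL-terminated.  The helper is hypothetical.
 */
static inline long example_fetch_name(char *buf, long buflen,
				      const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len == buflen)
		return -ENAMETOOLONG;	/* truncated: reject it */
	return len;			/* length, or -EFAULT */
}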

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
	void __user *mem, unsigned long len)
{
	might_fault();
	return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __clear_user(mem, len);
	return len;
}
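
/*
 * Usage sketch (illustrative, not part of the original header): when
 * only part of a user buffer is filled in, the tail is commonly zeroed
 * with clear_user() so stale data never leaks to userspace.  The
 * helper is hypothetical.
 */
static inline int example_pad_user_buf(void __user *buf,
				       unsigned long filled,
				       unsigned long total)
{
	if (filled < total &&
	    clear_user((char __user *)buf + filled, total - filled))
		return -EFAULT;
	return 0;
}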

/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = flush_user_asm(mem, len);
	mb_incoherent();
	return retval;
}

static inline unsigned long __must_check flush_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __flush_user(mem, len);
	return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = finv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check finv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __finv_user(mem, len);
	return len;
}

#endif /* _ASM_TILE_UACCESS_H */
43
arch/tile/include/asm/unaligned.h
Normal file
@@ -0,0 +1,43 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_UNALIGNED_H
#define _ASM_TILE_UNALIGNED_H

/*
 * We could implement faster get_unaligned_[be/le]64 using the ldna
 * instruction on tilegx; however, we need to either copy all of the
 * other generic functions to here (which is pretty ugly) or else
 * modify both the generic code and other arch code to allow arch
 * specific unaligned data access functions.  Given these functions
 * are not often called, we'll stick with the generic version.
 */
#include <asm-generic/unaligned.h>

/*
 * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
 * intervention occurs and SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address to provide with the SIGBUS.  If 1, the kernel does
 * a fixup.
 */
extern int unaligned_fixup;
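
/*
 * Usage sketch (illustrative, not part of the original header): code
 * that knows a field may be misaligned should use the generic
 * accessors pulled in above, so none of the fixup machinery described
 * here is ever triggered.  The helper name is hypothetical.
 */
static inline u32 example_read_be32(const void *wire_ptr)
{
	/* Safe regardless of the alignment of wire_ptr. */
	return get_unaligned_be32(wire_ptr);
}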

/* Is the kernel printing a message on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed. */
extern unsigned int unaligned_fixup_count;

#endif /* _ASM_TILE_UNALIGNED_H */
20
arch/tile/include/asm/unistd.h
Normal file
@@ -0,0 +1,20 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
/* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_SYS_LLSEEK
#endif
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
21
arch/tile/include/asm/user.h
Normal file
@@ -0,0 +1,21 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 */

#ifndef _ASM_TILE_USER_H
#define _ASM_TILE_USER_H

/* This header is for a.out file formats, which TILE does not support. */

#endif /* _ASM_TILE_USER_H */
55
arch/tile/include/asm/vdso.h
Normal file
@@ -0,0 +1,55 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef __TILE_VDSO_H__
#define __TILE_VDSO_H__

#include <linux/seqlock.h>
#include <linux/types.h>

/*
 * Note about the vdso_data structure:
 *
 * NEVER USE ITS FIELDS IN USERSPACE CODE DIRECTLY.  The layout of the
 * structure is supposed to be known only to the functions in the vdso
 * itself and may change without notice.
 */

struct vdso_data {
	seqcount_t tz_seq;	/* Timezone seqlock */
	seqcount_t tb_seq;	/* Timebase seqlock */
	__u64 cycle_last;	/* TOD clock for xtime */
	__u64 mask;		/* Cycle mask */
	__u32 mult;		/* Cycle to nanosecond multiplier */
	__u32 shift;		/* Cycle to nanosecond divisor (power of two) */
	__u64 wall_time_sec;
	__u64 wall_time_snsec;
	__u64 monotonic_time_sec;
	__u64 monotonic_time_snsec;
	__u64 wall_time_coarse_sec;
	__u64 wall_time_coarse_nsec;
	__u64 monotonic_time_coarse_sec;
	__u64 monotonic_time_coarse_nsec;
	__u32 tz_minuteswest;	/* Minutes west of Greenwich */
	__u32 tz_dsttime;	/* Type of dst correction */
};
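
/*
 * Illustrative sketch (not part of the original header): the kernel
 * side publishes updates under the relevant seqcount so the vdso
 * reader can detect and retry a torn read.  A minimal timezone writer,
 * assuming the caller already serializes timekeeping updates:
 */
static inline void example_update_vdso_tz(struct vdso_data *vd,
					  u32 minuteswest, u32 dsttime)
{
	write_seqcount_begin(&vd->tz_seq);	/* readers will retry */
	vd->tz_minuteswest = minuteswest;
	vd->tz_dsttime = dsttime;
	write_seqcount_end(&vd->tz_seq);	/* publish consistent data */
}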

extern struct vdso_data *vdso_data;

/* __vdso_rt_sigreturn is defined with the addresses in the vdso page. */
extern void __vdso_rt_sigreturn(void);

extern int setup_vdso_pages(void);

#endif /* __TILE_VDSO_H__ */
39
arch/tile/include/asm/vga.h
Normal file
@@ -0,0 +1,39 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Access to VGA videoram.
 */

#ifndef _ASM_TILE_VGA_H
#define _ASM_TILE_VGA_H

#include <asm/io.h>

#define VT_BUF_HAVE_RW

static inline void scr_writew(u16 val, volatile u16 *addr)
{
	__raw_writew(val, (volatile u16 __iomem *) addr);
}

static inline u16 scr_readw(volatile const u16 *addr)
{
	return __raw_readw((volatile const u16 __iomem *) addr);
}

#define vga_readb(a) readb((u8 __iomem *)(a))
#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))

#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s))

#endif