Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

include/asm-generic/4level-fixup.h

@@ -0,0 +1,37 @@
#ifndef _4LEVEL_FIXUP_H
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
#define __PAGETABLE_PUD_FOLDED
#define PUD_SIZE PGDIR_SIZE
#define PUD_MASK PGDIR_MASK
#define PTRS_PER_PUD 1
#define pud_t pgd_t
#define pmd_alloc(mm, pud, address) \
((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
NULL: pmd_offset(pud, address))
#define pud_alloc(mm, pgd, address) (pgd)
#define pud_offset(pgd, start) (pgd)
#define pud_none(pud) 0
#define pud_bad(pud) 0
#define pud_present(pud) 1
#define pud_ERROR(pud) do { } while (0)
#define pud_clear(pud) pgd_clear(pud)
#define pud_val(pud) pgd_val(pud)
#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
#define pud_page(pud) pgd_page(pud)
#define pud_page_vaddr(pud) pgd_page_vaddr(pud)
#undef pud_free_tlb
#define pud_free_tlb(tlb, x, addr) do { } while (0)
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x, addr) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
#endif

include/asm-generic/Kbuild.asm

@@ -0,0 +1 @@
include include/uapi/asm-generic/Kbuild.asm

include/asm-generic/atomic-long.h

@@ -0,0 +1,258 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
/*
* Copyright (C) 2005 Silicon Graphics, Inc.
* Christoph Lameter
*
* Allows providing arch-independent atomic definitions without the need to
* edit all arch specific atomic.h files.
*/
#include <asm/types.h>
/*
* Support for atomic_long_t
*
* Casts for parameters are avoided for existing atomic functions in order to
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the
* macros of a platform may have.
*/
#if BITS_PER_LONG == 64
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_sub(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_sub_and_test(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_dec_and_test(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_inc_and_test(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return atomic64_add_negative(i, v);
}
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_return(i, v);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_sub_return(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_inc_return(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_dec_return(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic64_xchg((atomic64_t *)(v), (new)))
#else /* BITS_PER_LONG == 64 */
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
atomic_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_sub(i, v);
}
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_sub_and_test(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_dec_and_test(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_inc_and_test(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return atomic_add_negative(i, v);
}
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_return(i, v);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_sub_return(i, v);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_inc_return(v);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_dec_return(v);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic_xchg((atomic_t *)(v), (new)))
#endif /* BITS_PER_LONG == 64 */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
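
As a side note, everything in this header is a width dispatch: atomic_long_t is just an alias for whichever fixed-width atomic type matches the platform's long. A minimal userspace sketch of the same idea using C11 atomics (illustrative only; the demo_* names are not kernel API):

#include <stdatomic.h>
#include <stdio.h>

#if __SIZEOF_LONG__ == 8
typedef atomic_llong demo_atomic_long_t;  /* long is 64-bit: back it with a 64-bit atomic */
#else
typedef atomic_int demo_atomic_long_t;    /* long is 32-bit: back it with a 32-bit atomic */
#endif

int main(void)
{
	demo_atomic_long_t v = 0;
	atomic_fetch_add(&v, 5);                       /* like atomic_long_add(5, &v) */
	atomic_fetch_sub(&v, 2);                       /* like atomic_long_sub(2, &v) */
	printf("%lld\n", (long long)atomic_load(&v));  /* prints 3 */
	return 0;
}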

include/asm-generic/atomic.h

@@ -0,0 +1,187 @@
/*
* Generic C implementation of atomic counter operations. Usable on
* UP systems only. Do not include in machine independent code.
*
* Originally implemented for MN10300.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
/*
* atomic_$op() - $op integer to atomic variable
* @i: integer value to $op
* @v: pointer to the atomic variable
*
* Atomically $ops @i to @v. Does not imply a memory barrier; use
* smp_mb__{before,after}_atomic() where ordering is required.
*/
/*
* atomic_$op_return() - $op integer to atomic variable and returns the result
* @i: integer value to $op
* @v: pointer to the atomic variable
*
* Atomically $ops @i to @v. Does imply a full memory barrier.
*/
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#else
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
raw_local_irq_save(flags); \
v->counter = v->counter c_op i; \
raw_local_irq_restore(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
\
raw_local_irq_save(flags); \
ret = (v->counter = v->counter c_op i); \
raw_local_irq_restore(flags); \
\
return ret; \
}
#endif /* CONFIG_SMP */
#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
#endif
#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
#endif
#ifndef atomic_clear_mask
ATOMIC_OP(and, &)
#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif
#ifndef atomic_set_mask
#define CONFIG_ARCH_HAS_ATOMIC_OR
ATOMIC_OP(or, |)
#define atomic_set_mask(i, v) atomic_or((i), (v))
#endif
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#ifndef atomic_read
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#endif
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
#include <linux/irqflags.h>
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
}
static inline void atomic_sub(int i, atomic_t *v)
{
atomic_sub_return(i, v);
}
static inline void atomic_inc(atomic_t *v)
{
atomic_add_return(1, v);
}
static inline void atomic_dec(atomic_t *v)
{
atomic_sub_return(1, v);
}
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
c = old;
return c;
}
#endif /* __ASM_GENERIC_ATOMIC_H */
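
The cmpxchg retry loop in __atomic_add_unless() is the pattern this whole file builds on. A standalone C11 sketch of that loop (illustrative; demo_add_unless is not the kernel function):

#include <stdatomic.h>
#include <stdio.h>

/* Add @a to *v unless *v == @u; return the value seen before the add. */
static int demo_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);
	/* A failed CAS writes the current value back into c, so the loop
	 * re-checks the @u guard and retries with the fresh value. */
	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;
	return c;
}

int main(void)
{
	atomic_int v = 1;
	printf("%d\n", demo_add_unless(&v, 10, 0));   /* prints 1; v becomes 11 */
	printf("%d\n", demo_add_unless(&v, 10, 11));  /* prints 11; guard hit, v unchanged */
	return 0;
}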

include/asm-generic/atomic64.h

@@ -0,0 +1,54 @@
/*
* Generic implementation of 64-bit atomics using spinlocks,
* useful on processors that don't have 64-bit atomic instructions.
*
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
typedef struct {
long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i);
#define ATOMIC64_OP(op) \
extern void atomic64_##op(long long a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
extern long long atomic64_##op##_return(long long a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v);
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
#endif /* _ASM_GENERIC_ATOMIC64_H */

include/asm-generic/audit_change_attr.h

@@ -0,0 +1,32 @@
#ifdef __NR_chmod
__NR_chmod,
#endif
__NR_fchmod,
#ifdef __NR_chown
__NR_chown,
__NR_lchown,
#endif
#ifdef __NR_fchown
__NR_fchown,
#endif
__NR_setxattr,
__NR_lsetxattr,
__NR_fsetxattr,
__NR_removexattr,
__NR_lremovexattr,
__NR_fremovexattr,
#ifdef __NR_fchownat
__NR_fchownat,
__NR_fchmodat,
#endif
#ifdef __NR_chown32
__NR_chown32,
__NR_fchown32,
__NR_lchown32,
#endif
#ifdef __NR_link
__NR_link,
#endif
#ifdef __NR_linkat
__NR_linkat,
#endif

include/asm-generic/audit_dir_write.h

@@ -0,0 +1,32 @@
#ifdef __NR_rename
__NR_rename,
#endif
#ifdef __NR_mkdir
__NR_mkdir,
#endif
#ifdef __NR_rmdir
__NR_rmdir,
#endif
#ifdef __NR_creat
__NR_creat,
#endif
#ifdef __NR_link
__NR_link,
#endif
#ifdef __NR_unlink
__NR_unlink,
#endif
#ifdef __NR_symlink
__NR_symlink,
#endif
#ifdef __NR_mknod
__NR_mknod,
#endif
#ifdef __NR_mkdirat
__NR_mkdirat,
__NR_mknodat,
__NR_unlinkat,
__NR_renameat,
__NR_linkat,
__NR_symlinkat,
#endif

include/asm-generic/audit_read.h

@@ -0,0 +1,13 @@
#ifdef __NR_readlink
__NR_readlink,
#endif
__NR_quotactl,
__NR_listxattr,
__NR_llistxattr,
__NR_flistxattr,
__NR_getxattr,
__NR_lgetxattr,
__NR_fgetxattr,
#ifdef __NR_readlinkat
__NR_readlinkat,
#endif

include/asm-generic/audit_signal.h

@@ -0,0 +1,3 @@
__NR_kill,
__NR_tgkill,
__NR_tkill,

include/asm-generic/audit_write.h

@@ -0,0 +1,21 @@
#include <asm-generic/audit_dir_write.h>
__NR_acct,
#ifdef __NR_swapon
__NR_swapon,
#endif
__NR_quotactl,
#ifdef __NR_truncate
__NR_truncate,
#endif
#ifdef __NR_truncate64
__NR_truncate64,
#endif
#ifdef __NR_ftruncate
__NR_ftruncate,
#endif
#ifdef __NR_ftruncate64
__NR_ftruncate64,
#endif
#ifdef __NR_bind
__NR_bind, /* bind can affect fs object only in one way... */
#endif

include/asm-generic/barrier.h

@@ -0,0 +1,89 @@
/*
* Generic barrier definitions, originally based on MN10300 definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#ifndef nop
#define nop() asm volatile ("nop")
#endif
/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
* Fall back to compiler barriers if nothing better is provided.
*/
#ifndef mb
#define mb() barrier()
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() mb()
#endif
#ifndef read_barrier_depends
#define read_barrier_depends() do { } while (0)
#endif
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#endif
#ifndef set_mb
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#endif
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() smp_mb()
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() smp_mb()
#endif
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
})
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
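
smp_store_release()/smp_load_acquire() form the classic publish/consume pairing. A userspace sketch of the same pairing with C11 atomics (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static int payload;       /* plain data, written before publication */
static atomic_int ready;

static void producer(void)
{
	payload = 42;                           /* 1: prepare the data */
	atomic_store_explicit(&ready, 1,
		memory_order_release);          /* 2: publish, like smp_store_release() */
}

static int consumer(void)
{
	/* Pairs with the release store: seeing ready == 1 guarantees
	 * the payload write is visible, like smp_load_acquire(). */
	if (atomic_load_explicit(&ready, memory_order_acquire))
		return payload;
	return -1;  /* not published yet */
}

int main(void)
{
	producer();
	printf("%d\n", consumer());  /* prints 42 */
	return 0;
}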

include/asm-generic/bitops.h

@@ -0,0 +1,37 @@
#ifndef __ASM_GENERIC_BITOPS_H
#define __ASM_GENERIC_BITOPS_H
/*
* For the benefit of those who are trying to port Linux to another
* architecture, here are some C-language equivalents. You should
* recode these in the native assembly language, if at all possible.
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __ASM_GENERIC_BITOPS_H */

include/asm-generic/bitops/__ffs.h

@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS___FFS_H_
#define _ASM_GENERIC_BITOPS___FFS_H_
#include <asm/types.h>
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __ffs(unsigned long word)
{
int num = 0;
#if BITS_PER_LONG == 64
if ((word & 0xffffffff) == 0) {
num += 32;
word >>= 32;
}
#endif
if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
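
The binary narrowing above halves the search window at each step. A standalone sanity check with the 64-bit case written out explicitly (illustrative; demo_ffs64 is not kernel API):

#include <stdio.h>

static unsigned demo_ffs64(unsigned long long w)  /* undefined for w == 0, as above */
{
	unsigned num = 0;
	if ((w & 0xffffffffULL) == 0) { num += 32; w >>= 32; }
	if ((w & 0xffffULL) == 0)     { num += 16; w >>= 16; }
	if ((w & 0xffULL) == 0)       { num += 8;  w >>= 8;  }
	if ((w & 0xfULL) == 0)        { num += 4;  w >>= 4;  }
	if ((w & 0x3ULL) == 0)        { num += 2;  w >>= 2;  }
	if ((w & 0x1ULL) == 0)        { num += 1; }
	return num;
}

int main(void)
{
	printf("%u %u\n", demo_ffs64(0x80ULL), demo_ffs64(1ULL << 63));
	/* prints: 7 63 */
	return 0;
}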

include/asm-generic/bitops/__fls.h

@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS___FLS_H_
#define _ASM_GENERIC_BITOPS___FLS_H_
#include <asm/types.h>
/**
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __fls(unsigned long word)
{
int num = BITS_PER_LONG - 1;
#if BITS_PER_LONG == 64
if (!(word & (~0ul << 32))) {
num -= 32;
word <<= 32;
}
#endif
if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (BITS_PER_LONG-1))))
num -= 1;
return num;
}
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */

include/asm-generic/bitops/arch_hweight.h

@@ -0,0 +1,25 @@
#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
#include <asm/types.h>
static inline unsigned int __arch_hweight32(unsigned int w)
{
return __sw_hweight32(w);
}
static inline unsigned int __arch_hweight16(unsigned int w)
{
return __sw_hweight16(w);
}
static inline unsigned int __arch_hweight8(unsigned int w)
{
return __sw_hweight8(w);
}
static inline unsigned long __arch_hweight64(__u64 w)
{
return __sw_hweight64(w);
}
#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */

include/asm-generic/bitops/atomic.h

@@ -0,0 +1,189 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
#include <asm/types.h>
#include <linux/irqflags.h>
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h> /* we use L1_CACHE_BYTES */
/* Use an array of spinlocks for our atomic_ts.
* Hash function to index into a different SPINLOCK.
* Since "a" is usually an address, use one spinlock per cacheline.
*/
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/*
* NMI events can occur at any time, including when interrupts have been
* disabled by *_irqsave(). So you can get NMI events occurring while a
* *_bit function is holding a spin lock. If the NMI handler also wants
* to do bit manipulation (and they do) then you can get a deadlock
* between the original caller of *_bit() and the NMI handler.
*
* by Keith Owens
*/
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
* Note: there are no guarantees that this function will not be reordered
* on non-x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
*p |= mask;
_atomic_spin_unlock_irqrestore(p, flags);
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
*p &= ~mask;
_atomic_spin_unlock_irqrestore(p, flags);
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered. It may be
* reordered on architectures other than x86.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
*p ^= mask;
_atomic_spin_unlock_irqrestore(p, flags);
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It may be reordered on architectures other than x86.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
old = *p;
*p = old | mask;
_atomic_spin_unlock_irqrestore(p, flags);
return (old & mask) != 0;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It may be reordered on architectures other than x86.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
old = *p;
*p = old & ~mask;
_atomic_spin_unlock_irqrestore(p, flags);
return (old & mask) != 0;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
unsigned long flags;
_atomic_spin_lock_irqsave(p, flags);
old = *p;
*p = old ^ mask;
_atomic_spin_unlock_irqrestore(p, flags);
return (old & mask) != 0;
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */

include/asm-generic/bitops/builtin-__ffs.h

@@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __ffs(unsigned long word)
{
return __builtin_ctzl(word);
}
#endif

include/asm-generic/bitops/builtin-__fls.h

@@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
/**
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __fls(unsigned long word)
{
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}
#endif

include/asm-generic/bitops/builtin-ffs.h

@@ -0,0 +1,17 @@
#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
/**
* ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from ffz() (man ffs).
*/
static __always_inline int ffs(int x)
{
return __builtin_ffs(x);
}
#endif

include/asm-generic/bitops/builtin-fls.h

@@ -0,0 +1,16 @@
#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
/**
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __always_inline int fls(int x)
{
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
#endif

include/asm-generic/bitops/const_hweight.h

@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
/*
* Compile time versions of __arch_hweightN()
*/
#define __const_hweight8(w) \
((unsigned int) \
((!!((w) & (1ULL << 0))) + \
(!!((w) & (1ULL << 1))) + \
(!!((w) & (1ULL << 2))) + \
(!!((w) & (1ULL << 3))) + \
(!!((w) & (1ULL << 4))) + \
(!!((w) & (1ULL << 5))) + \
(!!((w) & (1ULL << 6))) + \
(!!((w) & (1ULL << 7)))))
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
/*
* Generic interface.
*/
#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w))
#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
/*
* Interface for known constant arguments
*/
#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
/*
* Type invariant interface to the compile time constant hweight functions.
*/
#define HWEIGHT(w) HWEIGHT64((u64)w)
#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
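
Each __const_hweightN doubles the width by summing two half-width counts. A standalone check of the first two rungs of that ladder (illustrative; the DEMO_* macros are local stand-ins):

#include <stdio.h>

#define DEMO_HW8(w)  ((unsigned)((!!((w) & 0x01)) + (!!((w) & 0x02)) + \
			(!!((w) & 0x04)) + (!!((w) & 0x08)) + \
			(!!((w) & 0x10)) + (!!((w) & 0x20)) + \
			(!!((w) & 0x40)) + (!!((w) & 0x80))))
#define DEMO_HW16(w) (DEMO_HW8(w) + DEMO_HW8((w) >> 8))

int main(void)
{
	printf("%u %u\n", DEMO_HW8(0xA5), DEMO_HW16(0xA5A5));
	/* 0xA5 = 10100101 -> 4 bits; 0xA5A5 -> 8 bits */
	return 0;
}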

include/asm-generic/bitops/count_zeros.h

@@ -0,0 +1,57 @@
/* Count leading and trailing zeros functions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_
#define _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_
#include <asm/bitops.h>
/**
* count_leading_zeros - Count the number of zeros from the MSB back
* @x: The value
*
* Count the number of leading zeros from the MSB going towards the LSB in @x.
*
* If the MSB of @x is set, the result is 0.
* If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
* If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
*/
static inline int count_leading_zeros(unsigned long x)
{
if (sizeof(x) == 4)
return BITS_PER_LONG - fls(x);
else
return BITS_PER_LONG - fls64(x);
}
#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG
/**
* count_trailing_zeros - Count the number of zeros from the LSB forwards
* @x: The value
*
* Count the number of trailing zeros from the LSB going towards the MSB in @x.
*
* If the LSB of @x is set, the result is 0.
* If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
* If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
*/
static inline int count_trailing_zeros(unsigned long x)
{
#define COUNT_TRAILING_ZEROS_0 (-1)
if (sizeof(x) == 4)
return ffs(x) - 1; /* ffs() is 1-based; ffs(0) - 1 == COUNT_TRAILING_ZEROS_0 */
else
return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
}
#endif /* _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_ */

include/asm-generic/bitops/ext2-atomic-setbit.h

@@ -0,0 +1,11 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
/*
* Atomic bitops based version of ext2 atomic bitops
*/
#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr)
#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */

include/asm-generic/bitops/ext2-atomic.h

@@ -0,0 +1,26 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
/*
* Spinlock based version of ext2 atomic bitops
*/
#define ext2_set_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = __test_and_set_bit_le(nr, addr); \
spin_unlock(lock); \
ret; \
})
#define ext2_clear_bit_atomic(lock, nr, addr) \
({ \
int ret; \
spin_lock(lock); \
ret = __test_and_clear_bit_le(nr, addr); \
spin_unlock(lock); \
ret; \
})
#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */

include/asm-generic/bitops/ffs.h

@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FFS_H_
#define _ASM_GENERIC_BITOPS_FFS_H_
/**
* ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from ffz() (man ffs).
*/
static inline int ffs(int x)
{
int r = 1;
if (!x)
return 0;
if (!(x & 0xffff)) {
x >>= 16;
r += 16;
}
if (!(x & 0xff)) {
x >>= 8;
r += 8;
}
if (!(x & 0xf)) {
x >>= 4;
r += 4;
}
if (!(x & 3)) {
x >>= 2;
r += 2;
}
if (!(x & 1)) {
x >>= 1;
r += 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */

include/asm-generic/bitops/ffz.h

@@ -0,0 +1,12 @@
#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
#define _ASM_GENERIC_BITOPS_FFZ_H_
/*
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(x) __ffs(~(x))
#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */

include/asm-generic/bitops/find.h

@@ -0,0 +1,62 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
#endif
#ifndef find_next_zero_bit
/**
* find_next_zero_bit - find the next cleared bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
*
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);
#endif
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
extern unsigned long find_first_bit(const unsigned long *addr,
unsigned long size);
/**
* find_first_zero_bit - find the first cleared bit in a memory region
* @addr: The address to start the search at
* @size: The maximum number of bits to search
*
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
extern unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */

include/asm-generic/bitops/fls.h

@@ -0,0 +1,41 @@
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
* fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __always_inline int fls(int x)
{
int r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
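
fls() mirrors __ffs() but shifts left, so r tracks the highest bit position still possible. A standalone check of the three cases documented above (illustrative; demo_fls is not kernel API):

#include <stdio.h>

static int demo_fls(unsigned x)
{
	int r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { r -= 1; }
	return r;
}

int main(void)
{
	printf("%d %d %d\n", demo_fls(0), demo_fls(1), demo_fls(0x80000000u));
	/* prints: 0 1 32, matching the comment above */
	return 0;
}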

include/asm-generic/bitops/fls64.h

@@ -0,0 +1,36 @@
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_
#include <asm/types.h>
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
if (x == 0)
return 0;
return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */

include/asm-generic/bitops/hweight.h

@@ -0,0 +1,7 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
#include <asm-generic/bitops/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */

include/asm-generic/bitops/le.h

@@ -0,0 +1,97 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
#include <asm/types.h>
#include <asm/byteorder.h>
#if defined(__LITTLE_ENDIAN)
#define BITOP_LE_SWIZZLE 0
static inline unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
static inline unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
static inline unsigned long find_first_zero_bit_le(const void *addr,
unsigned long size)
{
return find_first_zero_bit(addr, size);
}
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
#ifndef find_next_zero_bit_le
extern unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset);
#endif
#ifndef find_next_bit_le
extern unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset);
#endif
#ifndef find_first_zero_bit_le
#define find_first_zero_bit_le(addr, size) \
find_next_zero_bit_le((addr), (size), 0)
#endif
#else
#error "Please fix <asm/byteorder.h>"
#endif
static inline int test_bit_le(int nr, const void *addr)
{
return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline void set_bit_le(int nr, void *addr)
{
set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline void clear_bit_le(int nr, void *addr)
{
clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline void __set_bit_le(int nr, void *addr)
{
__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline void __clear_bit_le(int nr, void *addr)
{
__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline int test_and_set_bit_le(int nr, void *addr)
{
return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline int test_and_clear_bit_le(int nr, void *addr)
{
return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline int __test_and_set_bit_le(int nr, void *addr)
{
return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline int __test_and_clear_bit_le(int nr, void *addr)
{
return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */

include/asm-generic/bitops/lock.h

@@ -0,0 +1,45 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
#define clear_bit_unlock(nr, addr) \
do { \
smp_mb__before_atomic(); \
clear_bit(nr, addr); \
} while (0)
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is like clear_bit_unlock, however it is not atomic.
* It does provide release barrier semantics so it can be used to unlock
* a bit lock, however it would only be used if no other CPU can modify
* any bits in the memory until the lock is released (a good example is
* if the bit lock itself protects access to the other bits in the word).
*/
#define __clear_bit_unlock(nr, addr) \
do { \
smp_mb(); \
__clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */

include/asm-generic/bitops/non-atomic.h

@@ -0,0 +1,108 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#include <asm/types.h>
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
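
BIT_MASK()/BIT_WORD() split a bit index into a word offset plus an in-word mask, and the read-modify-write here is deliberately unlocked. A standalone sketch with local stand-ins for those macros (they normally come from linux/bitops.h; illustrative):

#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * (unsigned)sizeof(unsigned long))
#define DEMO_BIT_MASK(nr)  (1UL << ((nr) % DEMO_BITS_PER_LONG))
#define DEMO_BIT_WORD(nr)  ((nr) / DEMO_BITS_PER_LONG)

static int demo_test_and_set_bit(unsigned nr, unsigned long *addr)
{
	unsigned long mask = DEMO_BIT_MASK(nr);
	unsigned long *p = addr + DEMO_BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

int main(void)
{
	unsigned long map[4] = { 0 };                    /* bitmap spanning several words */
	printf("%d\n", demo_test_and_set_bit(70, map));  /* 0: bit 70 was clear */
	printf("%d\n", demo_test_and_set_bit(70, map));  /* 1: bit 70 is now set */
	return 0;
}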

include/asm-generic/bitops/sched.h

@@ -0,0 +1,31 @@
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
#define _ASM_GENERIC_BITOPS_SCHED_H_
#include <linux/compiler.h> /* unlikely() */
#include <asm/types.h>
/*
* Every architecture must define this function. It's the fastest
* way of searching a 100-bit bitmap. It's guaranteed that at least
* one of the 100 bits is cleared.
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
if (b[0])
return __ffs(b[0]);
return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
if (b[0])
return __ffs(b[0]);
if (b[1])
return __ffs(b[1]) + 32;
if (b[2])
return __ffs(b[2]) + 64;
return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
}
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */

include/asm-generic/bitsperlong.h

@@ -0,0 +1,25 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
#include <uapi/asm-generic/bitsperlong.h>
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif /* CONFIG_64BIT */
/*
* FIXME: The check currently breaks x86-64 build, so it's
* temporarily disabled. Please fix x86-64 and reenable
*/
#if 0 && BITS_PER_LONG != __BITS_PER_LONG
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG 64
#endif
#endif /* __ASM_GENERIC_BITS_PER_LONG */

include/asm-generic/bug.h

@@ -0,0 +1,211 @@
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
#ifdef CONFIG_GENERIC_BUG
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
#else
signed int bug_addr_disp;
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
const char *file;
#else
signed int file_disp;
#endif
unsigned short line;
#endif
unsigned short flags;
};
#endif /* CONFIG_GENERIC_BUG */
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one
* example might be detecting data structure corruption in the middle
* of an operation that can't be backed out of. If the (sub)system
* can somehow continue operating, perhaps with reduced functionality,
* it's probably not BUG-worthy.
*
* If you're tempted to BUG(), think again: is completely giving up
* really the *only* solution? There are usually better options, where
* users don't need to reboot ASAP and can mostly shut down cleanly.
*/
#ifndef HAVE_ARCH_BUG
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
panic("BUG!"); \
} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant issues that need prompt attention if they should ever
* appear at runtime. Use the versions with printk format strings
* to provide better diagnostics.
*/
#ifndef __WARN_TAINT
extern __printf(3, 4)
void warn_slowpath_fmt(const char *file, const int line,
const char *fmt, ...);
extern __printf(4, 5)
void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
const char *fmt, ...);
extern void warn_slowpath_null(const char *file, const int line);
#define WANT_WARN_ON_SLOWPATH
#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
#define __WARN_printf_taint(taint, arg...) \
warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
#define __WARN() __WARN_TAINT(TAINT_WARN)
#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#define __WARN_printf_taint(taint, arg...) \
do { printk(arg); __WARN_TAINT(taint); } while (0)
#endif
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN(); \
unlikely(__ret_warn_on); \
})
#endif
#ifndef WARN
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf(format); \
unlikely(__ret_warn_on); \
})
#endif
#define WARN_TAINT(condition, taint, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf_taint(taint, format); \
unlikely(__ret_warn_on); \
})
#define WARN_ON_ONCE(condition) ({ \
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
__warned = true; \
unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
__warned = true; \
unlikely(__ret_warn_once); \
})
#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_TAINT(!__warned, taint, format)) \
__warned = true; \
unlikely(__ret_warn_once); \
})
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
#define BUG() do {} while (1)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (condition) ; } while (0)
#endif
#ifndef HAVE_ARCH_WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
unlikely(__ret_warn_on); \
})
#endif
#ifndef WARN
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
no_printk(format); \
unlikely(__ret_warn_on); \
})
#endif
#define WARN_ON_ONCE(condition) WARN_ON(condition)
#define WARN_ONCE(condition, format...) WARN(condition, format)
#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
#endif
/*
* WARN_ON_SMP() is for cases where the warning is either
* meaningless for !SMP or may even cause failures.
* This is usually used for cases where we have
* WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
* returns 0 for uniprocessor settings.
* It can also be used with values that are only defined
* on SMP:
*
* struct foo {
* [...]
* #ifdef CONFIG_SMP
* int bar;
* #endif
* };
*
* void func(struct foo *zoot)
* {
* WARN_ON_SMP(!zoot->bar);
* }
*
* For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(),
* and should be a nop and return false for uniprocessor.
*
* if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set
* and x is true.
*/
#ifdef CONFIG_SMP
# define WARN_ON_SMP(x) WARN_ON(x)
#else
/*
* Use of ({0;}) because WARN_ON_SMP(x) may be used either as
* a standalone statement or as a condition in an if ()
* statement.
* A simple "0" would cause gcc to give a "statement has no effect"
* warning.
*/
# define WARN_ON_SMP(x) ({0;})
#endif
#endif /* __ASSEMBLY__ */
#endif
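
WARN_ON_ONCE() latches a static flag inside a statement expression so the splat fires at most once while the condition stays usable as a boolean. A userspace imitation relying on the same GCC/Clang statement-expression extension (illustrative, not the kernel macro):

#include <stdio.h>
#include <stdbool.h>

#define DEMO_WARN_ON_ONCE(cond) ({				\
	static bool __warned;					\
	bool __ret = !!(cond);					\
	if (__ret && !__warned) {				\
		__warned = true;				\
		fprintf(stderr, "warn at %s:%d\n",		\
			__FILE__, __LINE__);			\
	}							\
	__ret;							\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (DEMO_WARN_ON_ONCE(i > 0))
			;	/* condition still usable, but only one warning prints */
	return 0;
}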

include/asm-generic/bugs.h

@@ -0,0 +1,10 @@
#ifndef __ASM_GENERIC_BUGS_H
#define __ASM_GENERIC_BUGS_H
/*
* This file is included by 'init/main.c' to check for
* architecture-dependent bugs.
*/
static inline void check_bugs(void) { }
#endif /* __ASM_GENERIC_BUGS_H */

include/asm-generic/cache.h

@@ -0,0 +1,12 @@
#ifndef __ASM_GENERIC_CACHE_H
#define __ASM_GENERIC_CACHE_H
/*
* 32 bytes appears to be the most common cache line size,
* so make that the default here. Architectures with larger
* cache lines need to provide their own cache.h.
*/
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif /* __ASM_GENERIC_CACHE_H */

include/asm-generic/cacheflush.h

@@ -0,0 +1,34 @@
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/*
* The cache doesn't need to be flushed when TLB entries change if
* the cache is mapped to physical memory rather than virtual memory.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* __ASM_CACHEFLUSH_H */

include/asm-generic/checksum.h

@@ -0,0 +1,87 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
/*
* the same as csum_partial_copy, but copies from user space.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err);
#ifndef csum_partial_copy_nocheck
#define csum_partial_copy_nocheck(src, dst, len, sum) \
csum_partial_copy((src), (dst), (len), (sum))
#endif
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
#endif
#ifndef csum_fold
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return (__force __sum16)~sum;
}
#endif
#ifndef csum_tcpudp_nofold
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
extern __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum);
#endif
#ifndef csum_tcpudp_magic
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
#endif
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
extern __sum16 ip_compute_csum(const void *buff, int len);
#endif /* __ASM_GENERIC_CHECKSUM_H */
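
csum_fold() folds the carries of a 32-bit one's-complement accumulator into the low 16 bits twice, then complements. A standalone arithmetic check (illustrative; demo_csum_fold is not the kernel function):

#include <stdio.h>
#include <stdint.h>

static uint16_t demo_csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);  /* fold the high half in once */
	sum = (sum & 0xffff) + (sum >> 16);  /* fold the carry the first fold may produce */
	return (uint16_t)~sum;
}

int main(void)
{
	/* 0x0001ffff -> 0xffff + 0x0001 = 0x10000 -> 0x0000 + 0x1 = 0x1 -> ~ = 0xfffe */
	printf("0x%04x\n", (unsigned)demo_csum_fold(0x0001ffffu));
	return 0;
}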

include/asm-generic/clkdev.h

@@ -0,0 +1,30 @@
/*
* include/asm-generic/clkdev.h
*
* Based on the ARM clkdev.h:
* Copyright (C) 2008 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __ASM_CLKDEV_H
#define __ASM_CLKDEV_H
#include <linux/slab.h>
#ifndef CONFIG_COMMON_CLK
struct clk;
static inline int __clk_get(struct clk *clk) { return 1; }
static inline void __clk_put(struct clk *clk) { }
#endif
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
return kzalloc(size, GFP_KERNEL);
}
#endif

include/asm-generic/cmpxchg-local.h

@@ -0,0 +1,67 @@
#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
#define __ASM_GENERIC_CMPXCHG_LOCAL_H
#include <linux/types.h>
#include <linux/irqflags.h>
extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
__noreturn;
/*
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting operand sizes of 1, 2, 4 and 8 bytes.
*/
static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
/*
* Sanity checking, compile-time.
*/
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
raw_local_irq_save(flags);
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
*(u8 *)ptr = (u8)new;
break;
case 2: prev = *(u16 *)ptr;
if (prev == old)
*(u16 *)ptr = (u16)new;
break;
case 4: prev = *(u32 *)ptr;
if (prev == old)
*(u32 *)ptr = (u32)new;
break;
case 8: prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = (u64)new;
break;
default:
wrong_size_cmpxchg(ptr);
}
raw_local_irq_restore(flags);
return prev;
}
/*
* Generic version of __cmpxchg64_local. Takes a u64 parameter.
*/
static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
unsigned long flags;
raw_local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
raw_local_irq_restore(flags);
return prev;
}
#endif

include/asm-generic/cmpxchg.h

@@ -0,0 +1,108 @@
/*
* Generic UP xchg and cmpxchg using interrupt disablement. Does not
* support SMP.
*/
#ifndef __ASM_GENERIC_CMPXCHG_H
#define __ASM_GENERIC_CMPXCHG_H
#ifdef CONFIG_SMP
#error "Cannot use generic cmpxchg on SMP"
#endif
#include <linux/types.h>
#include <linux/irqflags.h>
#ifndef xchg
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
extern void __xchg_called_with_bad_pointer(void);
static inline
unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
switch (size) {
case 1:
#ifdef __xchg_u8
return __xchg_u8(x, ptr);
#else
local_irq_save(flags);
ret = *(volatile u8 *)ptr;
*(volatile u8 *)ptr = x;
local_irq_restore(flags);
return ret;
#endif /* __xchg_u8 */
case 2:
#ifdef __xchg_u16
return __xchg_u16(x, ptr);
#else
local_irq_save(flags);
ret = *(volatile u16 *)ptr;
*(volatile u16 *)ptr = x;
local_irq_restore(flags);
return ret;
#endif /* __xchg_u16 */
case 4:
#ifdef __xchg_u32
return __xchg_u32(x, ptr);
#else
local_irq_save(flags);
ret = *(volatile u32 *)ptr;
*(volatile u32 *)ptr = x;
local_irq_restore(flags);
return ret;
#endif /* __xchg_u32 */
#ifdef CONFIG_64BIT
case 8:
#ifdef __xchg_u64
return __xchg_u64(x, ptr);
#else
local_irq_save(flags);
ret = *(volatile u64 *)ptr;
*(volatile u64 *)ptr = x;
local_irq_restore(flags);
return ret;
#endif /* __xchg_u64 */
#endif /* CONFIG_64BIT */
default:
__xchg_called_with_bad_pointer();
return x;
}
}
#define xchg(ptr, x) \
((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#endif /* xchg */
/*
* Atomic compare and exchange.
*
* Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
* a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#include <asm-generic/cmpxchg-local.h>
#ifndef cmpxchg_local
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#endif
#ifndef cmpxchg64_local
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* __ASM_GENERIC_CMPXCHG_H */
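A minimal usage sketch of the resulting primitives (the message-slot names are hypothetical): on the UP configurations this header targets, xchg() gives an atomic publish/consume pair with nothing more than IRQ disabling underneath.

/* Illustrative only: single-slot handoff built on the generic xchg(). */
static void *pending_msg;

static void post_msg(void *msg)
{
	void *old = xchg(&pending_msg, msg);	/* atomically publish */

	if (old)
		pr_warn("dropped an unconsumed message\n");
}

static void *take_msg(void)
{
	return xchg(&pending_msg, NULL);	/* atomically consume */
}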

View file

@ -0,0 +1,15 @@
#ifndef _ASM_GENERIC_CPUTIME_H
#define _ASM_GENERIC_CPUTIME_H
#include <linux/time.h>
#include <linux/jiffies.h>
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
# include <asm-generic/cputime_jiffies.h>
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# include <asm-generic/cputime_nsecs.h>
#endif
#endif

View file

@ -0,0 +1,76 @@
#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
#define _ASM_GENERIC_CPUTIME_JIFFIES_H
typedef unsigned long __nocast cputime_t;
#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
typedef u64 __nocast cputime64_t;
#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
/*
* Convert nanoseconds <-> cputime
*/
#define cputime_to_nsecs(__ct) \
jiffies_to_nsecs(cputime_to_jiffies(__ct))
#define nsecs_to_cputime64(__nsec) \
jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
#define nsecs_to_cputime(__nsec) \
jiffies_to_cputime(nsecs_to_jiffies(__nsec))
/*
* Convert cputime to microseconds and back.
*/
#define cputime_to_usecs(__ct) \
jiffies_to_usecs(cputime_to_jiffies(__ct))
#define usecs_to_cputime(__usec) \
jiffies_to_cputime(usecs_to_jiffies(__usec))
#define usecs_to_cputime64(__usec) \
jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
/*
* Convert cputime to seconds and back.
*/
#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
/*
* Convert cputime to timespec and back.
*/
#define timespec_to_cputime(__val) \
jiffies_to_cputime(timespec_to_jiffies(__val))
#define cputime_to_timespec(__ct,__val) \
jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to timeval and back.
*/
#define timeval_to_cputime(__val) \
jiffies_to_cputime(timeval_to_jiffies(__val))
#define cputime_to_timeval(__ct,__val) \
jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to clock and back.
*/
#define cputime_to_clock_t(__ct) \
jiffies_to_clock_t(cputime_to_jiffies(__ct))
#define clock_t_to_cputime(__x) \
jiffies_to_cputime(clock_t_to_jiffies(__x))
/*
* Convert cputime64 to clock.
*/
#define cputime64_to_clock_t(__ct) \
jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
#endif
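A quick worked example of the jiffies-backed arithmetic, assuming HZ == 100 so one tick is 10 ms (resulting values shown in the comments):

static void cputime_jiffies_example(void)
{
	cputime_t ct = jiffies_to_cputime(250);		/* 250 ticks = 2.5 s */
	unsigned long us = cputime_to_usecs(ct);	/* 2500000 us */
	unsigned long s = cputime_to_secs(ct);		/* 250 / 100 = 2 s */
}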

View file

@ -0,0 +1,119 @@
/*
* Definitions for measuring cputime in nsecs resolution.
*
* Based on <arch/ia64/include/asm/cputime.h>
*
* Copyright (C) 2007 FUJITSU LIMITED
* Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
#define _ASM_GENERIC_CPUTIME_NSECS_H
#include <linux/math64.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
#define cputime_div_rem(__ct, divisor, remainder) \
div_u64_rem((__force u64)__ct, divisor, remainder);
/*
* Convert cputime <-> jiffies (HZ)
*/
#define cputime_to_jiffies(__ct) \
cputime_div(__ct, NSEC_PER_SEC / HZ)
#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__jif) \
(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
#define cputime64_to_jiffies64(__ct) \
cputime_div(__ct, NSEC_PER_SEC / HZ)
#define jiffies64_to_cputime64(__jif) \
(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
/*
* Convert cputime <-> nanoseconds
*/
#define cputime_to_nsecs(__ct) \
(__force u64)(__ct)
#define nsecs_to_cputime(__nsecs) \
(__force cputime_t)(__nsecs)
/*
* Convert cputime <-> microseconds
*/
#define cputime_to_usecs(__ct) \
cputime_div(__ct, NSEC_PER_USEC)
#define usecs_to_cputime(__usecs) \
(__force cputime_t)((__usecs) * NSEC_PER_USEC)
#define usecs_to_cputime64(__usecs) \
(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
*/
#define cputime_to_secs(__ct) \
cputime_div(__ct, NSEC_PER_SEC)
#define secs_to_cputime(__secs) \
(__force cputime_t)((__secs) * NSEC_PER_SEC)
/*
* Convert cputime <-> timespec (nsec)
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
{
u32 rem;
val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
val->tv_nsec = rem;
}
/*
* Convert cputime <-> timeval (msec)
*/
static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
{
u32 rem;
val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
val->tv_usec = rem / NSEC_PER_USEC;
}
/*
* Convert cputime <-> clock (USER_HZ)
*/
#define cputime_to_clock_t(__ct) \
cputime_div(__ct, (NSEC_PER_SEC / USER_HZ))
#define clock_t_to_cputime(__x) \
(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
/*
* Convert cputime64 to clock.
*/
#define cputime64_to_clock_t(__ct) \
cputime_to_clock_t((__force cputime_t)__ct)
#endif
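With the nsec-resolution variant the timespec conversions above are exact; a small sketch of the round trip:

static void cputime_nsecs_example(void)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
	cputime_t ct = timespec_to_cputime(&ts);	/* 1500000000 ns */
	struct timespec back;

	cputime_to_timespec(ct, &back);	/* back == { 1, 500000000 } again */
}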

View file

@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
#endif /* __ASM_GENERIC_CURRENT_H */

View file

@ -0,0 +1,44 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/*
* The weird n/20000 thing suppresses a "comparison is always false due to
* limited range of data type" warning with non-const 8-bit arguments.
*/
/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) \
({ \
if (__builtin_constant_p(n)) { \
if ((n) / 20000 >= 1) \
__bad_udelay(); \
else \
__const_udelay((n) * 0x10c7ul); \
} else { \
__udelay(n); \
} \
})
/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n) \
({ \
if (__builtin_constant_p(n)) { \
if ((n) / 20000 >= 1) \
__bad_ndelay(); \
else \
__const_udelay((n) * 5ul); \
} else { \
__ndelay(n); \
} \
})
#endif /* __ASM_GENERIC_DELAY_H */
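To make the constant folding concrete: 0x10c7 is ceil(2^32 / 10^6), so __const_udelay(n * 0x10c7) treats the delay as a 32.32 fixed-point fraction of a second. A sketch of the three expansion paths ('nr_us' is a hypothetical runtime value):

static void delay_example(unsigned long nr_us)
{
	udelay(50);	/* constant < 20000: __const_udelay(50 * 0x10c7ul) */
	udelay(nr_us);	/* not compile-time constant: runtime __udelay() */
	/* udelay(30000) would expand to __bad_udelay() and break the link */
}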

View file

@ -0,0 +1,15 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_GENERIC_DEVICE_H
#define _ASM_GENERIC_DEVICE_H
struct dev_archdata {
};
struct pdev_archdata {
};
#endif /* _ASM_GENERIC_DEVICE_H */

View file

@ -0,0 +1,58 @@
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
* Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
*
* The semantics of do_div() are:
*
* uint32_t do_div(uint64_t *n, uint32_t base)
* {
* uint32_t remainder = *n % base;
* *n = *n / base;
* return remainder;
* }
*
* NOTE: macro parameter n is evaluated multiple times,
* beware of side effects!
*/
#include <linux/types.h>
#include <linux/compiler.h>
#if BITS_PER_LONG == 64
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
__rem = ((uint64_t)(n)) % __base; \
(n) = ((uint64_t)(n)) / __base; \
__rem; \
})
#elif BITS_PER_LONG == 32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
/* The unnecessary pointer compare is there
* to check for type safety (n must be 64bit)
*/
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
#else /* BITS_PER_LONG == ?? */
# error do_div() does not yet support the C64
#endif /* BITS_PER_LONG */
#endif /* _ASM_GENERIC_DIV64_H */
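A short sketch of the calling convention, since it trips people up: do_div() takes the dividend as an lvalue, divides it in place, and returns the remainder.

static u32 div_example(void)
{
	u64 bytes = 1000000007ULL;
	u32 rem = do_div(bytes, 1000);	/* bytes == 1000000, rem == 7 */

	return rem;
}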

View file

@ -0,0 +1,32 @@
#ifndef DMA_COHERENT_H
#define DMA_COHERENT_H
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
* These three functions are for the DMA allocator only.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
/*
* Standard interface
*/
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif
#endif

View file

@ -0,0 +1,9 @@
#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
#define _ASM_GENERIC_DMA_CONTIGUOUS_H
#include <linux/types.h>
static inline void
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
#endif

View file

@ -0,0 +1,95 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
/* Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
struct scatterlist;
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag);
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle);
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
/* attrs is not supported and ignored */
return dma_alloc_coherent(dev, size, dma_handle, flag);
}
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
/* attrs is not supported and ignored */
dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction);
extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction);
extern int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
extern void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction);
extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction);
extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction);
extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction);
extern void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction);
extern void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction);
#define dma_sync_single_for_device dma_sync_single_for_cpu
#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
#define dma_sync_sg_for_device dma_sync_sg_for_cpu
extern int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int
dma_supported(struct device *dev, u64 mask);
extern int
dma_set_mask(struct device *dev, u64 mask);
extern int
dma_get_cache_alignment(void);
extern void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#endif /* _ASM_GENERIC_DMA_MAPPING_H */

View file

@ -0,0 +1,237 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_COMMON_H
#define _ASM_GENERIC_DMA_MAPPING_COMMON_H
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(ptr, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, attrs);
debug_dma_map_page(dev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, addr, true);
return addr;
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir, true);
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
struct scatterlist *s;
for_each_sg(sg, s, nents, i)
kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
debug_dma_map_sg(dev, sg, nents, ents, dir);
return ents;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, page, offset, size, dir, NULL);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, NULL);
debug_dma_unmap_page(dev, addr, size, dir, false);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t addr,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr + offset, size, dir);
debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t addr,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr + offset, size, dir);
debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
#define dma_map_sg_attr(d, s, n, r, a) dma_map_sg_attrs(d, s, n, r, a)
#define dma_unmap_sg_attr(d, s, n, r, a) dma_unmap_sg_attrs(d, s, n, r, a)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
void *dma_common_contiguous_remap(struct page *page, size_t size,
unsigned long vm_flags,
pgprot_t prot, const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size,
unsigned long vm_flags, pgprot_t prot,
const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
* dma_mmap_attrs - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
* @handle: device-view address returned from dma_alloc_attrs
* @size: size of memory originally requested in dma_alloc_attrs
* @attrs: attributes of mapping properties requested in dma_alloc_attrs
*
* Map a coherent DMA buffer previously allocated by dma_alloc_attrs
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
attrs);
return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
#endif
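A minimal sketch of the streaming pattern these wrappers exist for ('dev', 'buf' and 'len' stand in for a real driver's state; dma_mapping_error() is assumed to come from the architecture):

static int dma_example(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hand 'handle' to the device and run the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}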

include/asm-generic/dma.h
View file

@ -0,0 +1,15 @@
#ifndef __ASM_GENERIC_DMA_H
#define __ASM_GENERIC_DMA_H
/*
* This file traditionally describes the i8237 PC style DMA controller.
* Most architectures don't have these any more and can get the minimal
* implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
*
* Some code relies on seeing MAX_DMA_ADDRESS though.
*/
#define MAX_DMA_ADDRESS PAGE_OFFSET
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif /* __ASM_GENERIC_DMA_H */

View file

@ -0,0 +1,42 @@
#ifndef _ASM_EARLY_IOREMAP_H_
#define _ASM_EARLY_IOREMAP_H_
#include <linux/types.h>
/*
* early_ioremap() and early_iounmap() are for temporary early boot-time
* mappings, before the real ioremap() is functional.
*/
extern void __iomem *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
/*
* Weak function called by early_ioremap_reset(). It does nothing, but
* architectures may provide their own version to do any needed cleanups.
*/
extern void early_ioremap_shutdown(void);
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);
/* Generic initialization called by architecture code */
extern void early_ioremap_setup(void);
/*
* Called as last step in paging_init() so library can act
* accordingly for subsequent map/unmap requests.
*/
extern void early_ioremap_reset(void);
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
static inline void early_ioremap_reset(void) { }
#endif
#endif /* _ASM_EARLY_IOREMAP_H_ */

View file

@ -0,0 +1,9 @@
#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
#define _ASM_GENERIC_EMERGENCY_RESTART_H
static inline void machine_emergency_restart(void)
{
machine_restart(NULL);
}
#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */

View file

@ -0,0 +1,19 @@
/* Generic process execution definitions, based on MN10300 definitions.
*
* It should be possible to use these on really simple architectures,
* but they serve more as a starting point for new ports.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_EXEC_H
#define __ASM_GENERIC_EXEC_H
#define arch_align_stack(x) (x)
#endif /* __ASM_GENERIC_EXEC_H */

include/asm-generic/fb.h
View file

@ -0,0 +1,12 @@
#ifndef __ASM_GENERIC_FB_H_
#define __ASM_GENERIC_FB_H_
#include <linux/fb.h>
#define fb_pgprotect(...) do {} while (0)
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* __ASM_GENERIC_FB_H_ */

View file

@ -0,0 +1,100 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
* x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
* Break out common bits to asm-generic by Mark Salter, November 2013
*/
#ifndef __ASM_GENERIC_FIXMAP_H
#define __ASM_GENERIC_FIXMAP_H
#include <linux/bug.h>
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
#ifndef __ASSEMBLY__
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
/*
* Provide some reasonable defaults for page flags.
* Not all architectures use all of these different types and some
* architectures use different names.
*/
#ifndef FIXMAP_PAGE_NORMAL
#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
#endif
#ifndef FIXMAP_PAGE_NOCACHE
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
#endif
#ifndef FIXMAP_PAGE_IO
#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
#endif
#ifndef FIXMAP_PAGE_CLEAR
#define FIXMAP_PAGE_CLEAR __pgprot(0)
#endif
#ifndef set_fixmap
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
#endif
#ifndef clear_fixmap
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
#endif
/* Return a pointer with offset calculated */
#define __set_fixmap_offset(idx, phys, flags) \
({ \
unsigned long addr; \
__set_fixmap(idx, phys, flags); \
addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
addr; \
})
#define set_fixmap_offset(idx, phys) \
__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
#define set_fixmap_offset_nocache(idx, phys) \
__set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
/*
* Some fixmaps are for IO
*/
#define set_fixmap_io(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
#define set_fixmap_offset_io(idx, phys) \
__set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_FIXMAP_H */
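A sketch of how an architecture consumes this (FIX_MYDEV is a hypothetical entry the arch would add to its enum fixed_addresses; the physical address is made up):

static void __iomem *map_mydev_regs(void)
{
	/* maps the page at 0xfe001000 and keeps the 0x234 offset */
	unsigned long vaddr = set_fixmap_offset_io(FIX_MYDEV, 0xfe001234UL);

	return (void __iomem *)vaddr;
}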

View file

@ -0,0 +1,16 @@
/*
* linux/include/asm-generic/ftrace.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_GENERIC_FTRACE_H__
#define __ASM_GENERIC_FTRACE_H__
/*
* Not all architectures need their own ftrace.h, the most
* common definitions are already in linux/ftrace.h.
*/
#endif /* __ASM_GENERIC_FTRACE_H__ */

View file

@ -0,0 +1,57 @@
#ifndef _ASM_GENERIC_FUTEX_H
#define _ASM_GENERIC_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
case FUTEX_OP_ADD:
case FUTEX_OP_OR:
case FUTEX_OP_ANDN:
case FUTEX_OP_XOR:
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
return -ENOSYS;
}
#endif

View file

@ -0,0 +1,61 @@
#ifndef __ASM_GENERIC_GETORDER_H
#define __ASM_GENERIC_GETORDER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/log2.h>
/*
* Runtime evaluation of get_order()
*/
static inline __attribute_const__
int __get_order(unsigned long size)
{
int order;
size--;
size >>= PAGE_SHIFT;
#if BITS_PER_LONG == 32
order = fls(size);
#else
order = fls64(size);
#endif
return order;
}
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
*
* Determine the allocation order of a particular sized block of memory. This
* is on a logarithmic scale, where:
*
* 0 -> 2^0 * PAGE_SIZE and below
* 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
* 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
* 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
* 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
* ...
*
* The order returned is used to find the smallest allocation granule required
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
*
* This function may be used to initialise variables with compile time
* evaluations of constants.
*/
#define get_order(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
(((n) < (1UL << PAGE_SHIFT)) ? 0 : \
ilog2((n) - 1) - PAGE_SHIFT + 1) \
) : \
__get_order(n) \
)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_GETORDER_H */
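Worked values, assuming PAGE_SIZE == 4096:

static void order_example(void)
{
	int o0 = get_order(4096);	/* 0: fits in a single page */
	int o1 = get_order(4097);	/* 1: needs 2^1 pages */
	int o2 = get_order(16384);	/* 2: exactly 2^2 pages */
}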

include/asm-generic/gpio.h
View file

@ -0,0 +1,220 @@
#ifndef _ASM_GENERIC_GPIO_H
#define _ASM_GENERIC_GPIO_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#ifdef CONFIG_GPIOLIB
#include <linux/compiler.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
/* Platforms may implement their GPIO interface with library code,
* at a small performance cost for non-inlined operations and some
* extra memory (for code and for per-GPIO table entries).
*
* While the GPIO programming interface defines valid GPIO numbers
* to be in the range 0..MAX_INT, this library restricts them to the
* smaller range 0..ARCH_NR_GPIOS-1.
*
* ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
* builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
* actually an estimate of a board-specific value.
*/
#ifndef ARCH_NR_GPIOS
#define ARCH_NR_GPIOS 512
#endif
/*
* "valid" GPIO numbers are nonnegative and may be passed to
* setup routines like gpio_request(). Only some valid numbers
* can successfully be requested and used.
*
* Invalid GPIO numbers are useful for indicating no-such-GPIO in
* platform data and other tables.
*/
static inline bool gpio_is_valid(int number)
{
return number >= 0 && number < ARCH_NR_GPIOS;
}
struct device;
struct gpio;
struct seq_file;
struct module;
struct device_node;
struct gpio_desc;
/* caller holds gpio_lock *OR* gpio is marked as requested */
static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpiod_to_chip(gpio_to_desc(gpio));
}
/* Always use the library code for GPIO management calls,
* or when sleeping may be involved.
*/
extern int gpio_request(unsigned gpio, const char *label);
extern void gpio_free(unsigned gpio);
static inline int gpio_direction_input(unsigned gpio)
{
return gpiod_direction_input(gpio_to_desc(gpio));
}
static inline int gpio_direction_output(unsigned gpio, int value)
{
return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
}
static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
{
return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
}
static inline int gpio_get_value_cansleep(unsigned gpio)
{
return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
}
static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
}
/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
* the GPIO is constant and refers to some always-present controller,
* giving direct access to chip registers and tight bitbanging loops.
*/
static inline int __gpio_get_value(unsigned gpio)
{
return gpiod_get_raw_value(gpio_to_desc(gpio));
}
static inline void __gpio_set_value(unsigned gpio, int value)
{
return gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
static inline int __gpio_cansleep(unsigned gpio)
{
return gpiod_cansleep(gpio_to_desc(gpio));
}
static inline int __gpio_to_irq(unsigned gpio)
{
return gpiod_to_irq(gpio_to_desc(gpio));
}
extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
extern int gpio_request_array(const struct gpio *array, size_t num);
extern void gpio_free_array(const struct gpio *array, size_t num);
/*
* A sysfs interface can be exported by individual drivers if they want,
* but more typically is configured entirely from userspace.
*/
static inline int gpio_export(unsigned gpio, bool direction_may_change)
{
return gpiod_export(gpio_to_desc(gpio), direction_may_change);
}
static inline int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
{
return gpiod_export_link(dev, name, gpio_to_desc(gpio));
}
static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
{
return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
}
static inline void gpio_unexport(unsigned gpio)
{
gpiod_unexport(gpio_to_desc(gpio));
}
#ifdef CONFIG_PINCTRL
/**
* struct gpio_pin_range - pin range controlled by a gpio chip
* @head: list for maintaining set of pin ranges, used internally
* @pctldev: pinctrl device which handles corresponding pins
* @range: actual range of pins controlled by a gpio controller
*/
struct gpio_pin_range {
struct list_head node;
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range range;
};
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins);
int gpiochip_add_pingroup_range(struct gpio_chip *chip,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
#else
static inline int
gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
static inline int
gpiochip_add_pingroup_range(struct gpio_chip *chip,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
return 0;
}
static inline void
gpiochip_remove_pin_ranges(struct gpio_chip *chip)
{
}
#endif /* CONFIG_PINCTRL */
#else /* !CONFIG_GPIOLIB */
static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
return number >= 0;
}
/* platforms that don't directly support access to GPIOs through I2C, SPI,
* or other blocking infrastructure can use these wrappers.
*/
static inline int gpio_cansleep(unsigned gpio)
{
return 0;
}
static inline int gpio_get_value_cansleep(unsigned gpio)
{
might_sleep();
return __gpio_get_value(gpio);
}
static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
might_sleep();
__gpio_set_value(gpio, value);
}
#endif /* !CONFIG_GPIOLIB */
#endif /* _ASM_GENERIC_GPIO_H */
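A usage sketch of the request/configure/release flow (LED_GPIO is a hypothetical board-specific number):

static int led_blip(void)
{
	int err = gpio_request_one(LED_GPIO, GPIOF_OUT_INIT_LOW, "status-led");

	if (err)
		return err;
	gpio_set_value_cansleep(LED_GPIO, 1);	/* drive the line high */
	gpio_free(LED_GPIO);
	return 0;
}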

View file

@ -0,0 +1,21 @@
#ifndef __ASM_GENERIC_HARDIRQ_H
#define __ASM_GENERIC_HARDIRQ_H
#include <linux/cache.h>
#include <linux/threads.h>
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#include <linux/irq.h>
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif
#endif /* __ASM_GENERIC_HARDIRQ_H */

View file

@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_HASH_H
#define __ASM_GENERIC_HASH_H
struct fast_hash_ops;
static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
{
}
#endif /* __ASM_GENERIC_HASH_H */

View file

@ -0,0 +1,40 @@
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
return mk_pte(page, pgprot);
}
static inline unsigned long huge_pte_write(pte_t pte)
{
return pte_write(pte);
}
static inline unsigned long huge_pte_dirty(pte_t pte)
{
return pte_dirty(pte);
}
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
return pte_mkwrite(pte);
}
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
return pte_mkdirty(pte);
}
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
return pte_modify(pte, newprot);
}
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_clear(mm, addr, ptep);
}
#endif /* _ASM_GENERIC_HUGETLB_H */

View file

@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_HW_IRQ_H
#define __ASM_GENERIC_HW_IRQ_H
/*
* hw_irq.h has internal declarations for the low-level interrupt
* controller, like the original i8259A.
* In general, this is not needed for new architectures.
*/
#endif /* __ASM_GENERIC_HW_IRQ_H */

View file

@ -0,0 +1,38 @@
/* Generic I/O and MEMIO string operations. */
#define __ide_insw insw
#define __ide_insl insl
#define __ide_outsw outsw
#define __ide_outsl outsl
static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u16 *)addr = readw(port);
addr += 2;
}
}
static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
*(u32 *)addr = readl(port);
addr += 4;
}
}
static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
while (count--) {
writew(*(u16 *)addr, port);
addr += 2;
}
}
static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
{
while (count--) {
writel(*(u32 *)addr, port);
addr += 4;
}
}

View file

@ -0,0 +1,49 @@
/*
* asm-generic/int-ll64.h
*
* Integer declarations for architectures which use "long long"
* for 64-bit types.
*/
#ifndef _ASM_GENERIC_INT_LL64_H
#define _ASM_GENERIC_INT_LL64_H
#include <uapi/asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
#define S8_C(x) x
#define U8_C(x) x ## U
#define S16_C(x) x
#define U16_C(x) x ## U
#define S32_C(x) x
#define U32_C(x) x ## U
#define S64_C(x) x ## LL
#define U64_C(x) x ## ULL
#else /* __ASSEMBLY__ */
#define S8_C(x) x
#define U8_C(x) x
#define S16_C(x) x
#define U16_C(x) x
#define S32_C(x) x
#define U32_C(x) x
#define S64_C(x) x
#define U64_C(x) x
#endif /* __ASSEMBLY__ */
#endif /* _ASM_GENERIC_INT_LL64_H */

View file

@ -0,0 +1,32 @@
#ifndef _ASM_IO_64_NONATOMIC_HI_LO_H_
#define _ASM_IO_64_NONATOMIC_HI_LO_H_
#include <linux/io.h>
#include <asm-generic/int-ll64.h>
static inline __u64 hi_lo_readq(const volatile void __iomem *addr)
{
const volatile u32 __iomem *p = addr;
u32 low, high;
high = readl(p + 1);
low = readl(p);
return low + ((u64)high << 32);
}
static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
{
writel(val >> 32, addr + 4);
writel(val, addr);
}
#ifndef readq
#define readq hi_lo_readq
#endif
#ifndef writeq
#define writeq hi_lo_writeq
#endif
#endif /* _ASM_IO_64_NONATOMIC_HI_LO_H_ */

View file

@ -0,0 +1,32 @@
#ifndef _ASM_IO_64_NONATOMIC_LO_HI_H_
#define _ASM_IO_64_NONATOMIC_LO_HI_H_
#include <linux/io.h>
#include <asm-generic/int-ll64.h>
static inline __u64 lo_hi_readq(const volatile void __iomem *addr)
{
const volatile u32 __iomem *p = addr;
u32 low, high;
low = readl(p);
high = readl(p + 1);
return low + ((u64)high << 32);
}
static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#ifndef readq
#define readq lo_hi_readq
#endif
#ifndef writeq
#define writeq lo_hi_writeq
#endif
#endif /* _ASM_IO_64_NONATOMIC_LO_HI_H_ */
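The two variants differ only in which half is touched first, and the hardware dictates the right one. For instance, a counter that latches its high half when the low half is read wants the lo-hi order ('regs' and CYCLE_COUNT are hypothetical device specifics):

static u64 read_cycles(void __iomem *regs)
{
	return lo_hi_readq(regs + CYCLE_COUNT);	/* low word, then high */
}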

include/asm-generic/io.h
View file

@ -0,0 +1,380 @@
/* Generic I/O port emulation, based on MN10300 code
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/types.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif
#include <asm-generic/pci_iomap.h>
#ifndef mmiowb
#define mmiowb() do {} while (0)
#endif
/*****************************************************************************/
/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the simple architectures, we just read/write the
* memory location directly.
*/
#ifndef __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
return *(const volatile u8 __force *) addr;
}
#endif
#ifndef __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
return *(const volatile u16 __force *) addr;
}
#endif
#ifndef __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
return *(const volatile u32 __force *) addr;
}
#endif
#define readb __raw_readb
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
return __le16_to_cpu(__raw_readw(addr));
}
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
return __le32_to_cpu(__raw_readl(addr));
}
#ifndef __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
*(volatile u8 __force *) addr = b;
}
#endif
#ifndef __raw_writew
static inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
*(volatile u16 __force *) addr = b;
}
#endif
#ifndef __raw_writel
static inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
*(volatile u32 __force *) addr = b;
}
#endif
#define writeb __raw_writeb
#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
#ifdef CONFIG_64BIT
#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
return *(const volatile u64 __force *) addr;
}
#endif
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
return __le64_to_cpu(__raw_readq(addr));
}
#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
#endif
#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
#endif /* CONFIG_64BIT */
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *) 0)
#endif
/*****************************************************************************/
/*
* traditional input/output functions
*/
static inline u8 inb(unsigned long addr)
{
return readb(addr + PCI_IOBASE);
}
static inline u16 inw(unsigned long addr)
{
return readw(addr + PCI_IOBASE);
}
static inline u32 inl(unsigned long addr)
{
return readl(addr + PCI_IOBASE);
}
static inline void outb(u8 b, unsigned long addr)
{
writeb(b, addr + PCI_IOBASE);
}
static inline void outw(u16 b, unsigned long addr)
{
writew(b, addr + PCI_IOBASE);
}
static inline void outl(u32 b, unsigned long addr)
{
writel(b, addr + PCI_IOBASE);
}
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
#ifndef insb
static inline void insb(unsigned long addr, void *buffer, int count)
{
if (count) {
u8 *buf = buffer;
do {
u8 x = __raw_readb(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
}
#endif
#ifndef insw
static inline void insw(unsigned long addr, void *buffer, int count)
{
if (count) {
u16 *buf = buffer;
do {
u16 x = __raw_readw(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
}
#endif
#ifndef insl
static inline void insl(unsigned long addr, void *buffer, int count)
{
if (count) {
u32 *buf = buffer;
do {
u32 x = __raw_readl(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
}
#endif
#ifndef outsb
static inline void outsb(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u8 *buf = buffer;
do {
__raw_writeb(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
#endif
#ifndef outsw
static inline void outsw(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u16 *buf = buffer;
do {
__raw_writew(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
#endif
#ifndef outsl
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
if (count) {
const u32 *buf = buffer;
do {
__raw_writel(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
#endif
#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr))
#define ioread32(addr) readl(addr)
#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr))
#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr)
#define iowrite32(v, addr) writel((v), (addr))
#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr)
#define ioread8_rep(p, dst, count) \
insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
insl((unsigned long) (p), (dst), (count))
#define iowrite8_rep(p, src, count) \
outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif
#ifdef __KERNEL__
#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *) (x))
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
#ifndef pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
}
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
/*
* Change "struct page" to physical address.
*
* This implementation is for the no-MMU case only... if you have an MMU
* you'll need to provide your own definitions.
*/
#ifndef CONFIG_MMU
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return (void __iomem *)(unsigned long)offset;
}
#define __ioremap(offset, size, flags) ioremap(offset, size)
#ifndef ioremap_nocache
#define ioremap_nocache ioremap
#endif
#ifndef ioremap_wc
#define ioremap_wc ioremap_nocache
#endif
static inline void iounmap(void __iomem *addr)
{
}
#endif /* CONFIG_MMU */
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
static inline void ioport_unmap(void __iomem *p)
{
}
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr(p) p
#endif
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr(p) __va(p)
#endif
#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(volatile void *address)
{
return ((unsigned long) address);
}
static inline void *bus_to_virt(unsigned long address)
{
return (void *) address;
}
#endif
#endif
#ifndef memset_io
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
#endif
#ifndef memcpy_fromio
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
#endif
#ifndef memcpy_toio
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_IO_H */
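A short usage sketch of the MMIO accessors above (MYDEV_PHYS and the 0x04 control register are hypothetical):

static int poke_mydev(void)
{
	void __iomem *base = ioremap(MYDEV_PHYS, SZ_4K);
	u32 ctrl;

	if (!base)
		return -ENOMEM;
	ctrl = readl(base + 0x04);		/* little-endian read */
	writel(ctrl | 0x1, base + 0x04);	/* set the enable bit */
	iounmap(base);
	return 0;
}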

View file

@ -0,0 +1,17 @@
#ifndef _ASM_GENERIC_IOCTL_H
#define _ASM_GENERIC_IOCTL_H
#include <uapi/asm-generic/ioctl.h>
#ifdef __CHECKER__
#define _IOC_TYPECHECK(t) (sizeof(t))
#else
/* provoke compile error for invalid uses of size argument */
extern unsigned int __invalid_size_argument_for_IOC;
#define _IOC_TYPECHECK(t) \
((sizeof(t) == sizeof(t[1]) && \
sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
sizeof(t) : __invalid_size_argument_for_IOC)
#endif
#endif /* _ASM_GENERIC_IOCTL_H */
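The type check fires through the _IOR()/_IOW() encoders; a sketch (struct mydev_info is hypothetical):

struct mydev_info {
	u32 version;
	u32 flags;
};

#define MYDEV_GET_INFO	_IOR('M', 0x01, struct mydev_info)	/* links fine */
/*
 * A payload of 1 << _IOC_SIZEBITS bytes or more would instead reference
 * __invalid_size_argument_for_IOC and fail at link time.
 */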

View file

@ -0,0 +1,81 @@
#ifndef __GENERIC_IO_H
#define __GENERIC_IO_H
#include <linux/linkage.h>
#include <asm/byteorder.h>
/*
* These are the "generic" interfaces for doing new-style
* memory-mapped or PIO accesses. Architectures may do
* their own arch-optimized versions, these just act as
* wrappers around the old-style IO register access functions:
* read[bwl]/write[bwl]/in[bwl]/out[bwl]
*
* Don't include this directly, include it from <asm/io.h>.
*/
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
* access or a MMIO access, these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines just encode the PIO/MMIO as part of the
* cookie, and coldly assume that the MMIO IO mappings are not
* in the low address range. Architectures for which this is not
* true can't use this generic implementation.
*/
extern unsigned int ioread8(void __iomem *);
extern unsigned int ioread16(void __iomem *);
extern unsigned int ioread16be(void __iomem *);
extern unsigned int ioread32(void __iomem *);
extern unsigned int ioread32be(void __iomem *);
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
extern void iowrite32(u32, void __iomem *);
extern void iowrite32be(u32, void __iomem *);
/*
* "string" versions of the above. Note that they
* use native byte ordering for the accesses (on
* the assumption that IO and memory agree on a
* byte order, and CPU byteorder is irrelevant).
*
* They do _not_ update the port address. If you
* want MMIO that copies stuff laid out in MMIO
* memory across multiple ports, use "memcpy_toio()"
* and friends.
*/
extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#endif
#ifndef ARCH_HAS_IOREMAP_WC
#define ioremap_wc ioremap_nocache
#endif
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#elif defined(CONFIG_GENERIC_IOMAP)
struct pci_dev;
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{ }
#endif
#include <asm-generic/pci_iomap.h>
#endif

include/asm-generic/irq.h
View file

@ -0,0 +1,18 @@
#ifndef __ASM_GENERIC_IRQ_H
#define __ASM_GENERIC_IRQ_H
/*
* NR_IRQS is the upper bound of how many interrupts can be handled
* in the platform. It is used to size the static irq_map array,
* so don't make it too big.
*/
#ifndef NR_IRQS
#define NR_IRQS 64
#endif
static inline int irq_canonicalize(int irq)
{
return irq;
}
#endif /* __ASM_GENERIC_IRQ_H */

View file

@ -0,0 +1,37 @@
/* Fallback per-CPU frame pointer holder
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC_IRQ_REGS_H
#define _ASM_GENERIC_IRQ_REGS_H
#include <linux/percpu.h>
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack
*/
DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
return __this_cpu_read(__irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;
old_regs = __this_cpu_read(__irq_regs);
__this_cpu_write(__irq_regs, new_regs);
return old_regs;
}
#endif /* _ASM_GENERIC_IRQ_REGS_H */

View file

@ -0,0 +1,10 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
static inline bool arch_irq_work_has_interrupt(void)
{
return false;
}
#endif /* __ASM_IRQ_WORK_H */

View file

@ -0,0 +1,66 @@
#ifndef __ASM_GENERIC_IRQFLAGS_H
#define __ASM_GENERIC_IRQFLAGS_H
/*
* All architectures should implement at least the first two functions,
* usually inline assembly will be the best way.
*/
#ifndef ARCH_IRQ_DISABLED
#define ARCH_IRQ_DISABLED 0
#define ARCH_IRQ_ENABLED 1
#endif
/* read interrupt enabled status */
#ifndef arch_local_save_flags
unsigned long arch_local_save_flags(void);
#endif
/* set interrupt enabled status */
#ifndef arch_local_irq_restore
void arch_local_irq_restore(unsigned long flags);
#endif
/* get status and disable interrupts */
#ifndef arch_local_irq_save
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
flags = arch_local_save_flags();
arch_local_irq_restore(ARCH_IRQ_DISABLED);
return flags;
}
#endif
/* test flags */
#ifndef arch_irqs_disabled_flags
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return flags == ARCH_IRQ_DISABLED;
}
#endif
/* unconditionally enable interrupts */
#ifndef arch_local_irq_enable
static inline void arch_local_irq_enable(void)
{
arch_local_irq_restore(ARCH_IRQ_ENABLED);
}
#endif
/* unconditionally disable interrupts */
#ifndef arch_local_irq_disable
static inline void arch_local_irq_disable(void)
{
arch_local_irq_restore(ARCH_IRQ_DISABLED);
}
#endif
/* test hardware interrupt enable bit */
#ifndef arch_irqs_disabled
static inline int arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
#endif
#endif /* __ASM_GENERIC_IRQFLAGS_H */
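A sketch of what a minimal port provides: just the two primitives, with the marker defines telling this header to skip its own fallbacks (the CSR helpers are hypothetical):

static inline unsigned long arch_local_save_flags(void)
{
	return my_read_irq_enable_csr();	/* hypothetical CSR read */
}
#define arch_local_save_flags arch_local_save_flags

static inline void arch_local_irq_restore(unsigned long flags)
{
	my_write_irq_enable_csr(flags);		/* hypothetical CSR write */
}
#define arch_local_irq_restore arch_local_irq_restore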

View file

@ -0,0 +1,9 @@
#ifndef _ASM_GENERIC_KDEBUG_H
#define _ASM_GENERIC_KDEBUG_H
enum die_val {
DIE_UNUSED,
DIE_OOPS = 1,
};
#endif /* _ASM_GENERIC_KDEBUG_H */

View file

@ -0,0 +1,10 @@
#ifndef _ASM_GENERIC_KMAP_TYPES_H
#define _ASM_GENERIC_KMAP_TYPES_H
#ifdef __WITH_KM_FENCE
# define KM_TYPE_NR 41
#else
# define KM_TYPE_NR 20
#endif
#endif

View file

@ -0,0 +1,26 @@
#ifndef _ASM_GENERIC_KVM_PARA_H
#define _ASM_GENERIC_KVM_PARA_H
#include <uapi/asm-generic/kvm_para.h>
/*
* This function is used by architectures that support kvm to avoid issuing
* false soft lockup messages.
*/
static inline bool kvm_check_and_clear_guest_paused(void)
{
return false;
}
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
static inline bool kvm_para_available(void)
{
return false;
}
#endif

View file

@ -0,0 +1,7 @@
#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
#define __ASM_GENERIC_LIBATA_PORTMAP_H
#define ATA_PRIMARY_IRQ(dev) 14
#define ATA_SECONDARY_IRQ(dev) 15
#endif

View file

@ -0,0 +1,8 @@
#ifndef __ASM_GENERIC_LINKAGE_H
#define __ASM_GENERIC_LINKAGE_H
/*
* linux/linkage.h provides reasonable defaults.
* An architecture can override them by providing its own version.
*/
#endif /* __ASM_GENERIC_LINKAGE_H */

View file

@ -0,0 +1,55 @@
#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <asm/types.h>
/*
* A signed long type for operations which are atomic for a single CPU.
* Usually used in combination with per-cpu variables.
*
* This is the default implementation, which uses atomic_long_t. Which is
* rather pointless. The whole point behind local_t is that some processors
* can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
* running on this CPU. local_t allows exploitation of such capabilities.
*/
/* Implement in terms of atomics. */
/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
atomic_long_t a;
} local_t;
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l,i) atomic_long_set((&(l)->a),(i))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
#define local_add(i,l) atomic_long_add((i),(&(l)->a))
#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
/* Non-atomic variants: for use when preemption is disabled and the
 * value won't be touched from interrupt context. Some archs can
 * optimize this case well. */
#define __local_inc(l) local_set((l), local_read(l) + 1)
#define __local_dec(l) local_set((l), local_read(l) - 1)
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
#endif /* _ASM_GENERIC_LOCAL_H */
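
In practice local_t is paired with per-CPU data: the owning CPU updates it without locks and stays safe against its own IRQs, while other CPUs may only read. A minimal sketch with illustrative names:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void count_hit(void)		/* may run in IRQ context */
{
	local_inc(this_cpu_ptr(&hits));
}

static long read_hits(int cpu)		/* cross-CPU sum is approximate */
{
	return local_read(per_cpu_ptr(&hits, cpu));
}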


@ -0,0 +1,96 @@
#ifndef _ASM_GENERIC_LOCAL64_H
#define _ASM_GENERIC_LOCAL64_H
#include <linux/percpu.h>
#include <asm/types.h>
/*
* A signed long type for operations which are atomic for a single CPU.
* Usually used in combination with per-cpu variables.
*
* This is the default implementation, which uses atomic64_t and is
* therefore rather pointless: the whole point of local64_t is that some
* processors can perform atomic adds and subtracts in a manner which is
* atomic wrt IRQs running on this CPU, and local64_t lets an architecture
* exploit that capability.
*/
/* Implement in terms of atomics. */
#if BITS_PER_LONG == 64
#include <asm/local.h>
typedef struct {
local_t a;
} local64_t;
#define LOCAL64_INIT(i) { LOCAL_INIT(i) }
#define local64_read(l) local_read(&(l)->a)
#define local64_set(l,i) local_set((&(l)->a),(i))
#define local64_inc(l) local_inc(&(l)->a)
#define local64_dec(l) local_dec(&(l)->a)
#define local64_add(i,l) local_add((i),(&(l)->a))
#define local64_sub(i,l) local_sub((i),(&(l)->a))
#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l) local_inc_return(&(l)->a)
#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
/* Non-atomic variants: for use when preemption is disabled and the
 * value won't be touched from interrupt context. Some archs can
 * optimize this case well. */
#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
#else /* BITS_PER_LONG != 64 */
#include <linux/atomic.h>
/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct {
atomic64_t a;
} local64_t;
#define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local64_read(l) atomic64_read(&(l)->a)
#define local64_set(l,i) atomic64_set((&(l)->a),(i))
#define local64_inc(l) atomic64_inc(&(l)->a)
#define local64_dec(l) atomic64_dec(&(l)->a)
#define local64_add(i,l) atomic64_add((i),(&(l)->a))
#define local64_sub(i,l) atomic64_sub((i),(&(l)->a))
#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
#define local64_inc_return(l) atomic64_inc_return(&(l)->a)
#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
/* Non-atomic variants: for use when preemption is disabled and the
 * value won't be touched from interrupt context. Some archs can
 * optimize this case well. */
#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
#endif /* BITS_PER_LONG != 64 */
#endif /* _ASM_GENERIC_LOCAL64_H */
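
Callers look identical on both sides of the #if; only the cost changes (on 32-bit the atomic64 fallback may even take a lock internally). A short sketch of the intended use, same pattern as local_t, with illustrative names:

#include <linux/percpu.h>
#include <asm/local64.h>

static DEFINE_PER_CPU(local64_t, tx_bytes) = LOCAL64_INIT(0);

static void account_tx(unsigned long len)	/* IRQ-safe on the local CPU */
{
	local64_add(len, this_cpu_ptr(&tx_bytes));
}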


@ -0,0 +1,13 @@
#ifndef __ASM_MCS_SPINLOCK_H
#define __ASM_MCS_SPINLOCK_H
/*
* Architectures can define their own:
*
* arch_mcs_spin_lock_contended(l)
* arch_mcs_spin_unlock_contended(l)
*
* See kernel/locking/mcs_spinlock.c.
*/
#endif /* __ASM_MCS_SPINLOCK_H */
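
If an architecture defines nothing here, the locking core falls back to spinning on the MCS node's flag until the previous owner hands the lock over. Roughly, as a sketch (using the acquire/release helpers of later kernels; an arch with a lighter wait primitive can do better):

/* Default contended behaviour, approximately: spin until the node's
 * locked flag is set by the previous lock holder. */
#define arch_mcs_spin_lock_contended(l)			\
do {							\
	while (!(smp_load_acquire(l)))			\
		cpu_relax();				\
} while (0)

#define arch_mcs_spin_unlock_contended(l)		\
	smp_store_release((l), 1)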


@ -0,0 +1,77 @@
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H
#ifndef __ASSEMBLY__
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
#elif defined(CONFIG_DISCONTIGMEM)
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
#endif
#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid) \
((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif
#endif /* CONFIG_DISCONTIGMEM */
/*
 * Three memory models are supported: FLATMEM, DISCONTIGMEM, and
 * SPARSEMEM (with or without a virtually contiguous mem_map).
 */
#if defined(CONFIG_FLATMEM)
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
unsigned long __nid = arch_pfn_to_nid(__pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
#define __page_to_pfn(pg) \
({ const struct page *__pg = (pg); \
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
(unsigned long)(__pg - __pgdat->node_mem_map) + \
__pgdat->node_start_pfn; \
})
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn) (vmemmap + (pfn))
#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: a section's mem_map is encoded to reflect its start_pfn:
 * section[i].section_mem_map == mem_map's address - start_pfn,
 * so adding a pfn to the decoded pointer yields its struct page.
 */
#define __page_to_pfn(pg) \
({ const struct page *__pg = (pg); \
int __sec = page_to_section(__pg); \
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
#endif /* __ASSEMBLY__ */
#endif
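
Whichever model is configured, the two macros remain exact inverses of each other. A hedged sketch of the FLATMEM case, assuming ARCH_PFN_OFFSET == 0:

#include <linux/mm.h>

/* Under FLATMEM the conversion is plain array arithmetic over
 * mem_map, so a pfn survives a round trip unchanged. */
static void __maybe_unused pfn_round_trip(void)
{
	struct page *pg = pfn_to_page(5);	/* == &mem_map[5] */
	unsigned long pfn = page_to_pfn(pg);	/* back to 5 */

	WARN_ON(pfn != 5);
}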


@ -0,0 +1,18 @@
/*
* Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
* be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
* need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* _ASM_GENERIC_MM_HOOKS_H */

include/asm-generic/mmu.h

@ -0,0 +1,19 @@
#ifndef __ASM_GENERIC_MMU_H
#define __ASM_GENERIC_MMU_H
/*
* This is the mmu.h header for nommu implementations.
* Architectures with an MMU need something more complex.
*/
#ifndef __ASSEMBLY__
typedef struct {
unsigned long end_brk;
#ifdef CONFIG_BINFMT_ELF_FDPIC
unsigned long exec_fdpic_loadmap;
unsigned long interp_fdpic_loadmap;
#endif
} mm_context_t;
#endif
#endif /* __ASM_GENERIC_MMU_H */


@ -0,0 +1,45 @@
#ifndef __ASM_GENERIC_MMU_CONTEXT_H
#define __ASM_GENERIC_MMU_CONTEXT_H
/*
* Generic hooks for NOMMU architectures, which do not need to do
* anything special here.
*/
#include <asm-generic/mm_hooks.h>
struct task_struct;
struct mm_struct;
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
}
static inline void deactivate_mm(struct task_struct *task,
struct mm_struct *mm)
{
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
}
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
}
#endif /* __ASM_GENERIC_MMU_CONTEXT_H */


@ -0,0 +1,48 @@
#ifndef __ASM_GENERIC_MODULE_H
#define __ASM_GENERIC_MODULE_H
/*
* Many architectures just need a simple module
* loader without arch-specific data.
*/
#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC
struct mod_arch_specific
{
};
#endif
#ifdef CONFIG_64BIT
#define Elf_Shdr Elf64_Shdr
#define Elf_Phdr Elf64_Phdr
#define Elf_Sym Elf64_Sym
#define Elf_Dyn Elf64_Dyn
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Addr Elf64_Addr
#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel Elf64_Rel
#endif
#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela Elf64_Rela
#endif
#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
#define ELF_R_SYM(X) ELF64_R_SYM(X)
#else /* CONFIG_64BIT */
#define Elf_Shdr Elf32_Shdr
#define Elf_Phdr Elf32_Phdr
#define Elf_Sym Elf32_Sym
#define Elf_Dyn Elf32_Dyn
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Addr Elf32_Addr
#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel Elf32_Rel
#endif
#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela Elf32_Rela
#endif
#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
#define ELF_R_SYM(X) ELF32_R_SYM(X)
#endif
#endif /* __ASM_GENERIC_MODULE_H */
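
Because of these aliases, the generic module loader can walk ELF structures once for both word sizes. The sketch below is modelled on the loader's find_sec() helper (simplified; the signature is assumed from older kernels):

#include <linux/string.h>
#include <asm/module.h>

/* Find a section by name; works unmodified on 32- and 64-bit kernels
 * because Elf_Ehdr/Elf_Shdr resolve to the right width. */
static unsigned int find_sec(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			     const char *secstrings, const char *name)
{
	unsigned int i;

	for (i = 1; i < hdr->e_shnum; i++)	/* skip the NULL section */
		if (strcmp(secstrings + sechdrs[i].sh_name, name) == 0)
			return i;
	return 0;
}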


@ -0,0 +1,88 @@
/*
* include/asm-generic/mutex-dec.h
*
* Generic implementation of the mutex fastpath, based on atomic
* decrement/increment.
*/
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
*
* Change the count from 1 to a value lower than 1. This function returns 0
* if the fastpath succeeds, or -1 otherwise.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(atomic_dec_return(count) < 0))
return -1;
return 0;
}
/**
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 0
*
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
* In the failure case, this function is allowed to either set the value to
* 1, or to set it to a value lower than 1.
*
* If the implementation sets it to a value lower than 1, then the
* __mutex_slowpath_needs_to_unlock() macro must return 1; otherwise
* it must return 0.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 1
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: fallback function
*
* Change the count from 1 to a value lower than 1, and return 0 (failure)
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
* Additionally, if the value was < 0 originally, this function must not
* set it to 0 on failure.
*
* If the architecture has no effective trylock variant, it should call the
* <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
return 0;
}
#endif
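
The contract these helpers implement: the count starts at 1 (unlocked); the fastpath is the 1 -> 0 transition, and anything that drops below 0 is diverted to the fail function, which enters the slowpath. A hedged sketch of how the mutex core of this era wires it up (simplified from kernel/locking/mutex.c; the real function also records the owner):

void mutex_lock(struct mutex *lock)
{
	might_sleep();
	/* 1 -> 0 is the uncontended case; anything else queues us. */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}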


@ -0,0 +1,19 @@
/*
* include/asm-generic/mutex-null.h
*
* Generic implementation of the mutex fastpath, based on NOP :-)
*
* This is used by the mutex-debugging infrastructure, but it can also
* be used by architectures that (for whatever reason) want to use the
* spinlock based slowpath.
*/
#ifndef _ASM_GENERIC_MUTEX_NULL_H
#define _ASM_GENERIC_MUTEX_NULL_H
#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_lock_retval(count) (-1)
#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
#define __mutex_slowpath_needs_to_unlock() 1
#endif


@ -0,0 +1,116 @@
/*
* include/asm-generic/mutex-xchg.h
*
* Generic implementation of the mutex fastpath, based on xchg().
*
* NOTE: An xchg based implementation might be less optimal than an atomic
* decrement/increment based implementation. If your architecture
* has a reasonable atomic dec/inc then you should probably use
* asm-generic/mutex-dec.h instead, or you could open-code an
* optimized version in asm/mutex.h.
*/
#ifndef _ASM_GENERIC_MUTEX_XCHG_H
#define _ASM_GENERIC_MUTEX_XCHG_H
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if it
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
/*
* We failed to acquire the lock, so mark it contended
* to ensure that any waiting tasks are woken up by the
* unlock slow path.
*/
if (likely(atomic_xchg(count, -1) != 1))
fail_fn(count);
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
*
* Change the count from 1 to a value lower than 1. This function returns 0
* if the fastpath succeeds, or -1 otherwise.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(atomic_xchg(count, 0) != 1))
if (likely(atomic_xchg(count, -1) != 1))
return -1;
return 0;
}
/**
* __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 0
*
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
* In the failure case, this function is allowed to either set the value
* to 1, or to set it to a value lower than 1.
* If the implementation sets it to a value lower than 1, the
* __mutex_slowpath_needs_to_unlock() macro must return 1; otherwise
* it must return 0.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 1) != 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 0
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: spinlock based trylock implementation
*
* Change the count from 1 to a value lower than 1, and return 0 (failure)
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
* Additionally, if the value was < 0 originally, this function must not
* set it to 0 on failure.
*
* If the architecture has no effective trylock variant, it should call the
* <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
int prev = atomic_xchg(count, 0);
if (unlikely(prev < 0)) {
/*
* The lock was marked contended so we must restore that
* state. If while doing so we get back a prev value of 1
* then we just own it.
*
* [ In the rare case of the mutex going to 1, to 0, to -1
* and then back to 0 in this few-instructions window,
* this has the potential to trigger the slowpath for the
* owner's unlock path needlessly, but that's not a problem
* in practice. ]
*/
prev = atomic_xchg(count, prev);
if (prev < 0)
prev = 0;
}
return prev;
}
#endif
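
The whole file is a three-state machine on the count: 1 means unlocked, 0 locked with no waiters recorded, and -1 locked and possibly contended. A trace of the subtle trylock path above, under contention:

/* count == -1: locked, waiters may be queued.
 *
 *   prev = atomic_xchg(count, 0);    -> prev == -1, count now 0
 *      (the contended marking was briefly destroyed)
 *   prev = atomic_xchg(count, prev); -> restores count = -1
 *   prev < 0, so prev = 0;           -> trylock reports failure
 *
 * If the owner unlocked in the window between the two xchg()s, the
 * second xchg() returns 1: the trylock then succeeds and leaves the
 * count at -1, which only costs a needless slowpath wakeup at the
 * next unlock (as the comment in the function notes). */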


@ -0,0 +1,9 @@
#ifndef __ASM_GENERIC_MUTEX_H
#define __ASM_GENERIC_MUTEX_H
/*
* Pull in the generic implementation for the mutex fastpath,
* which is a reasonable default on many architectures.
*/
#include <asm-generic/mutex-dec.h>
#endif /* __ASM_GENERIC_MUTEX_H */

include/asm-generic/page.h

@ -0,0 +1,103 @@
#ifndef __ASM_GENERIC_PAGE_H
#define __ASM_GENERIC_PAGE_H
/*
* Generic page.h implementation, for NOMMU architectures.
* It provides dummy definitions for memory management.
*/
#ifdef CONFIG_MMU
#error need to provide a real asm/page.h
#endif
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#ifdef __ASSEMBLY__
#define PAGE_SIZE (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <asm/setup.h>
#ifndef __ASSEMBLY__
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/*
* These are used to enforce C type-checking.
*/
typedef struct {
unsigned long pte;
} pte_t;
typedef struct {
unsigned long pmd[16];
} pmd_t;
typedef struct {
unsigned long pgd;
} pgd_t;
typedef struct {
unsigned long pgprot;
} pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd[0])
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
extern unsigned long memory_start;
extern unsigned long memory_end;
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
#else
#define PAGE_OFFSET (0)
#endif
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif
#ifndef __ASSEMBLY__
#define __va(x) ((void *)((unsigned long) (x)))
#define __pa(x) ((unsigned long) (x))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
#ifndef page_to_phys
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
#endif /* __ASSEMBLY__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#endif /* __ASM_GENERIC_PAGE_H */
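
A worked example of the arithmetic, assuming the identity mapping above and PAGE_OFFSET == 0:

/* PAGE_SHIFT == 12, so PAGE_SIZE == 0x1000 and PAGE_MASK == ~0xfffUL.
 * With __pa()/__va() as identities:
 *
 *   virt_to_pfn((void *)0x12345) == 0x12345 >> 12        == 0x12
 *   pfn_to_virt(0x12)            == (void *)(0x12 << 12) == (void *)0x12000
 *   0x12345 & PAGE_MASK          == 0x12000  (the page base)
 *
 * pfn_valid() then just range-checks the pfn against max_mapnr. */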


@ -0,0 +1,10 @@
#ifndef __ASM_GENERIC_PARAM_H
#define __ASM_GENERIC_PARAM_H
#include <uapi/asm-generic/param.h>
# undef HZ
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
# define USER_HZ 100 /* some user interfaces are */
# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
#endif /* __ASM_GENERIC_PARAM_H */
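
The split matters because userspace ABIs (times(2), utmp, /proc) report time in USER_HZ ticks while the kernel counts jiffies at CONFIG_HZ, so every boundary crossing rescales. A sketch of the conversion, under a simplifying assumption:

#include <linux/param.h>	/* HZ, USER_HZ */

/* Naive jiffies -> USER_HZ ticks conversion, assuming HZ is a
 * multiple of USER_HZ (true for the common 100/250/1000 configs).
 * The kernel's real jiffies_to_clock_t() also handles the other
 * cases and 64-bit overflow. */
static inline unsigned long naive_jiffies_to_clock_t(unsigned long j)
{
	return j / (HZ / USER_HZ);
}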


@ -0,0 +1,23 @@
#ifndef __ASM_GENERIC_PARPORT_H
#define __ASM_GENERIC_PARPORT_H
/*
* An ISA bus may have i8255 parallel ports at well-known
* locations in the I/O space, which are scanned by
* parport_pc_find_isa_ports.
*
* Without ISA support, the driver will only attach
* to devices on the PCI bus.
*/
static int parport_pc_find_isa_ports(int autoirq, int autodma);
static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
#ifdef CONFIG_ISA
return parport_pc_find_isa_ports(autoirq, autodma);
#else
return 0;
#endif
}
#endif /* __ASM_GENERIC_PARPORT_H */


@ -0,0 +1,74 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_GENERIC_PCI_BRIDGE_H
#define _ASM_GENERIC_PCI_BRIDGE_H
#ifdef __KERNEL__
enum {
/* Force re-assigning all resources (ignore firmware
* setup completely)
*/
PCI_REASSIGN_ALL_RSRC = 0x00000001,
/* Re-assign all bus numbers */
PCI_REASSIGN_ALL_BUS = 0x00000002,
/* Do not try to assign, just use existing setup */
PCI_PROBE_ONLY = 0x00000004,
/* Don't bother with ISA alignment unless the bridge has
* ISA forwarding enabled
*/
PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
/* Enable domain numbers in /proc */
PCI_ENABLE_PROC_DOMAINS = 0x00000010,
/* ... except for domain 0 */
PCI_COMPAT_DOMAIN_0 = 0x00000020,
/* PCIe downstream ports are bridges that normally lead to only a
* device 0, but if this is set, we scan all possible devices, not
* just device 0.
*/
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040,
};
#ifdef CONFIG_PCI
extern unsigned int pci_flags;
static inline void pci_set_flags(int flags)
{
pci_flags = flags;
}
static inline void pci_add_flags(int flags)
{
pci_flags |= flags;
}
static inline void pci_clear_flags(int flags)
{
pci_flags &= ~flags;
}
static inline int pci_has_flag(int flag)
{
return pci_flags & flag;
}
#else
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag)
{
return 0;
}
#endif /* CONFIG_PCI */
#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_PCI_BRIDGE_H */
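
These flags are set once by platform code during PCI setup and tested later by the generic PCI core. A hedged usage sketch; the platform hook name is hypothetical:

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical platform hook: distrust the firmware's setup and
 * renumber buses / reassign resources from scratch. */
static void __init my_plat_pci_setup(void)
{
	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
}

Generic code later tests the same state, for example skipping resource assignment entirely when pci_has_flag(PCI_PROBE_ONLY) is set.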

Some files were not shown because too many files have changed in this diff.