Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

arch/s390/include/asm/Kbuild

@@ -0,0 +1,9 @@
generic-y += clkdev.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += trace_clock.h

arch/s390/include/asm/airq.h

@@ -0,0 +1,103 @@
/*
* Copyright IBM Corp. 2002, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef _ASM_S390_AIRQ_H
#define _ASM_S390_AIRQ_H
#include <linux/bit_spinlock.h>
struct airq_struct {
struct hlist_node list; /* Handler queueing. */
void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
u8 lsi_mask; /* Local-Summary-Indicator mask */
u8 isc; /* Interrupt-subclass */
u8 flags;
};
#define AIRQ_PTR_ALLOCATED 0x01
int register_adapter_interrupt(struct airq_struct *airq);
void unregister_adapter_interrupt(struct airq_struct *airq);
/* Adapter interrupt bit vector */
struct airq_iv {
unsigned long *vector; /* Adapter interrupt bit vector */
unsigned long *avail; /* Allocation bit mask for the bit vector */
unsigned long *bitlock; /* Lock bit mask for the bit vector */
unsigned long *ptr; /* Pointer associated with each bit */
unsigned int *data; /* 32 bit value associated with each bit */
unsigned long bits; /* Number of bits in the vector */
unsigned long end; /* Number of highest allocated bit + 1 */
spinlock_t lock; /* Lock to protect alloc & free */
};
#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
#define AIRQ_IV_DATA 8 /* Allocate the data array */
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
void airq_iv_release(struct airq_iv *iv);
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end);
static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
{
return airq_iv_alloc(iv, 1);
}
static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
{
airq_iv_free(iv, bit, 1);
}
static inline unsigned long airq_iv_end(struct airq_iv *iv)
{
return iv->end;
}
static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit)
{
const unsigned long be_to_le = BITS_PER_LONG - 1;
bit_spin_lock(bit ^ be_to_le, iv->bitlock);
}
static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit)
{
const unsigned long be_to_le = BITS_PER_LONG - 1;
bit_spin_unlock(bit ^ be_to_le, iv->bitlock);
}
static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit,
unsigned int data)
{
iv->data[bit] = data;
}
static inline unsigned int airq_iv_get_data(struct airq_iv *iv,
unsigned long bit)
{
return iv->data[bit];
}
static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit,
unsigned long ptr)
{
iv->ptr[bit] = ptr;
}
static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv,
unsigned long bit)
{
return iv->ptr[bit];
}
#endif /* _ASM_S390_AIRQ_H */

arch/s390/include/asm/appldata.h

@@ -0,0 +1,88 @@
/*
* Copyright IBM Corp. 2006
*
* Author(s): Melissa Howland <melissah@us.ibm.com>
*/
#ifndef _ASM_S390_APPLDATA_H
#define _ASM_S390_APPLDATA_H
#include <asm/io.h>
#ifndef CONFIG_64BIT
#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
#define APPLDATA_GEN_EVENT_REC 0x02
#define APPLDATA_START_CONFIG_REC 0x03
/*
* Parameter list for DIAGNOSE X'DC'
*/
struct appldata_parameter_list {
u16 diag; /* The DIAGNOSE code X'00DC' */
u8 function; /* The function code for the DIAGNOSE */
u8 parlist_length; /* Length of the parameter list */
u32 product_id_addr; /* Address of the 16-byte product ID */
u16 reserved;
u16 buffer_length; /* Length of the application data buffer */
u32 buffer_addr; /* Address of the application data buffer */
} __attribute__ ((packed));
#else /* CONFIG_64BIT */
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81
#define APPLDATA_GEN_EVENT_REC 0x82
#define APPLDATA_START_CONFIG_REC 0x83
/*
* Parameter list for DIAGNOSE X'DC'
*/
struct appldata_parameter_list {
u16 diag;
u8 function;
u8 parlist_length;
u32 unused01;
u16 reserved;
u16 buffer_length;
u32 unused02;
u64 product_id_addr;
u64 buffer_addr;
} __attribute__ ((packed));
#endif /* CONFIG_64BIT */
struct appldata_product_id {
char prod_nr[7]; /* product number */
u16 prod_fn; /* product function */
u8 record_nr; /* record number */
u16 version_nr; /* version */
u16 release_nr; /* release */
u16 mod_lvl; /* modification level */
} __attribute__ ((packed));
static inline int appldata_asm(struct appldata_product_id *id,
unsigned short fn, void *buffer,
unsigned short length)
{
struct appldata_parameter_list parm_list;
int ry;
if (!MACHINE_IS_VM)
return -EOPNOTSUPP;
parm_list.diag = 0xdc;
parm_list.function = fn;
parm_list.parlist_length = sizeof(parm_list);
parm_list.buffer_length = length;
parm_list.product_id_addr = (unsigned long) id;
parm_list.buffer_addr = virt_to_phys(buffer);
asm volatile(
" diag %1,%0,0xdc"
: "=d" (ry)
: "d" (&parm_list), "m" (parm_list), "m" (*id)
: "cc");
return ry;
}
#endif /* _ASM_S390_APPLDATA_H */
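
Because DIAGNOSE X'DC' consumes the parameter list as a fixed storage layout, the struct must be packed exactly; the 64-bit variant keeps the 32-bit field positions and moves the widened addresses to the end. A userspace offset check of that 64-bit layout (offsets inferred from the field order above; a sketch, not an ABI statement):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* mirror of the CONFIG_64BIT appldata_parameter_list above */
struct appldata_parameter_list {
	uint16_t diag;
	uint8_t  function;
	uint8_t  parlist_length;
	uint32_t unused01;
	uint16_t reserved;
	uint16_t buffer_length;
	uint32_t unused02;
	uint64_t product_id_addr;
	uint64_t buffer_addr;
} __attribute__((packed));

int main(void)
{
	/* packed: 2+1+1+4+2+2+4+8+8 = 32 bytes, no padding anywhere */
	assert(sizeof(struct appldata_parameter_list) == 32);
	assert(offsetof(struct appldata_parameter_list, buffer_length) == 10);
	assert(offsetof(struct appldata_parameter_list, product_id_addr) == 16);
	printf("parameter list layout: %zu bytes\n",
	       sizeof(struct appldata_parameter_list));
	return 0;
}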

arch/s390/include/asm/asm-offsets.h

@@ -0,0 +1 @@
#include <generated/asm-offsets.h>

arch/s390/include/asm/atomic.h

@@ -0,0 +1,415 @@
/*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
* Arnd Bergmann <arndb@de.ibm.com>,
*
* Atomic operations that C can't guarantee us.
* Useful for resource counting etc.
* s390 uses 'Compare And Swap' for atomicity in SMP environment.
*
*/
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
#define __ATOMIC_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa"
#define __ATOMIC_BARRIER "bcr 14,0\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
int old_val; \
\
typecheck(atomic_t *, ptr); \
asm volatile( \
__barrier \
op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar"
#define __ATOMIC_BARRIER "\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
int old_val, new_val; \
\
typecheck(atomic_t *, ptr); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
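
On machines without the z196 interlocked-access facility, the loop above emulates the atomic update with LOAD plus COMPARE AND SWAP: recompute old OP val and retry CS until no other CPU has changed the counter in between. A minimal userspace sketch of the same retry pattern in C11 (illustrative only, not part of this header):

#include <stdatomic.h>
#include <stdio.h>

/* Same shape as the pre-z196 __ATOMIC_LOOP: load the counter, compute
 * the new value, retry the compare-and-swap until it succeeds.
 * Returns the old value, as the macro does. */
static int cs_loop_add(atomic_int *counter, int val)
{
	int old = atomic_load(counter);

	while (!atomic_compare_exchange_weak(counter, &old, old + val))
		; /* 'old' was reloaded by the failed exchange; retry */
	return old;
}

int main(void)
{
	atomic_int v = 40;

	printf("old=%d new=%d\n", cs_loop_add(&v, 2), atomic_load(&v));
	return 0;
}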
static inline int atomic_read(const atomic_t *v)
{
int c;
asm volatile(
" l %0,%1\n"
: "=d" (c) : "Q" (v->counter));
return c;
}
static inline void atomic_set(atomic_t *v, int i)
{
asm volatile(
" st %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
{
return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
asm volatile(
"asi %0,%1\n"
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
return;
}
#endif
__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v) atomic_add(1, _v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
asm volatile(
" cs %0,%2,%1"
: "+d" (old), "+Q" (v->counter)
: "d" (new)
: "cc", "memory");
return old;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == u))
break;
old = atomic_cmpxchg(v, c, c + a);
if (likely(old == c))
break;
c = old;
}
return c;
}
#undef __ATOMIC_LOOP
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_64BIT
#define __ATOMIC64_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag"
#define __ATOMIC64_BARRIER "bcr 14,0\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
long long old_val; \
\
typecheck(atomic64_t *, ptr); \
asm volatile( \
__barrier \
op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr"
#define __ATOMIC64_BARRIER "\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
long long old_val, new_val; \
\
typecheck(atomic64_t *, ptr); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline long long atomic64_read(const atomic64_t *v)
{
long long c;
asm volatile(
" lg %0,%1\n"
: "=d" (c) : "Q" (v->counter));
return c;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
asm volatile(
"agsi %0,%1\n"
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
return;
}
#endif
__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
{
asm volatile(
" csg %0,%2,%1"
: "+d" (old), "+Q" (v->counter)
: "d" (new)
: "cc", "memory");
return old;
}
#undef __ATOMIC64_LOOP
#else /* CONFIG_64BIT */
typedef struct {
long long counter;
} atomic64_t;
static inline long long atomic64_read(const atomic64_t *v)
{
register_pair rp;
asm volatile(
" lm %0,%N0,%1"
: "=&d" (rp) : "Q" (v->counter) );
return rp.pair;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
register_pair rp = {.pair = i};
asm volatile(
" stm %1,%N1,%0"
: "=Q" (v->counter) : "d" (rp) );
}
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
register_pair rp_new = {.pair = new};
register_pair rp_old;
asm volatile(
" lm %0,%N0,%1\n"
"0: cds %0,%2,%1\n"
" jl 0b\n"
: "=&d" (rp_old), "+Q" (v->counter)
: "d" (rp_new)
: "cc");
return rp_old.pair;
}
static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
{
register_pair rp_old = {.pair = old};
register_pair rp_new = {.pair = new};
asm volatile(
" cds %0,%2,%1"
: "+&d" (rp_old), "+Q" (v->counter)
: "d" (rp_new)
: "cc");
return rp_old.pair;
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old + i;
} while (atomic64_cmpxchg(v, old, new) != old);
return new;
}
static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old | mask;
} while (atomic64_cmpxchg(v, old, new) != old);
}
static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old & mask;
} while (atomic64_cmpxchg(v, old, new) != old);
}
static inline void atomic64_add(long long i, atomic64_t *v)
{
atomic64_add_return(i, v);
}
#endif /* CONFIG_64BIT */
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == u))
break;
old = atomic64_cmpxchg(v, c, c + i);
if (likely(old == c))
break;
c = old;
}
return c != u;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long c, old, dec;
c = atomic64_read(v);
for (;;) {
dec = c - 1;
if (unlikely(dec < 0))
break;
old = atomic64_cmpxchg((v), c, dec);
if (likely(old == c))
break;
c = old;
}
return dec;
}
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif /* __ARCH_S390_ATOMIC__ */

arch/s390/include/asm/barrier.h

@@ -0,0 +1,53 @@
/*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while (0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
})
#endif /* __ASM_BARRIER_H */
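
smp_store_release()/smp_load_acquire() pair a compiler barrier with a plain access, which suffices here because the hardware is strongly ordered. A rough userspace analogue of the publish/consume pattern they enable, written with C11 explicit ordering (an illustration, not this header's implementation):

#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* written before publication */
static atomic_int published = 0;	/* guards 'payload' */

static void producer(void)
{
	payload = 42;
	/* release: all earlier stores are visible once 'published' is */
	atomic_store_explicit(&published, 1, memory_order_release);
}

static void consumer(void)
{
	/* acquire: if we observe the flag, we also observe 'payload' */
	if (atomic_load_explicit(&published, memory_order_acquire))
		printf("payload=%d\n", payload);
}

int main(void)
{
	producer();
	consumer();
	return 0;
}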

arch/s390/include/asm/bitops.h

@@ -0,0 +1,482 @@
/*
* Copyright IBM Corp. 1999,2013
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*
* The description below was taken in large parts from the powerpc
* bitops header file:
* Within a word, bits are numbered LSB first. Lots of places make
* this assumption by directly testing bits with (val & (1<<nr)).
* This can cause confusion for large (> 1 word) bitmaps on a
* big-endian system because, unlike little endian, the number of each
* bit depends on the word size.
*
* The bitop functions are defined to work on unsigned longs, so for an
* s390x system the bits end up numbered:
* |63..............0|127............64|191...........128|255...........192|
* and on s390:
* |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
*
* There are a few little-endian macros used mostly for filesystem
* bitmaps, these work on similar bit arrays layouts, but
* byte-oriented:
* |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
*
* The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
* number field needs to be reversed compared to the big-endian bit
* fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
*
* We also have special functions which work with an MSB0 encoding:
* on an s390x system the bits are numbered:
* |0..............63|64............127|128...........191|192...........255|
* and on s390:
* |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
*
* The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
* number field needs to be reversed compared to the LSB0 encoded bit
* fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
*
*/
#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#define __BITOPS_NO_BARRIER "\n"
#ifndef CONFIG_64BIT
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
#define __BITOPS_BARRIER "\n"
#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old, __new; \
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \
: "cc", "memory"); \
__old; \
})
#else /* CONFIG_64BIT */
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __BITOPS_OR "laog"
#define __BITOPS_AND "lang"
#define __BITOPS_XOR "laxg"
#define __BITOPS_BARRIER "bcr 14,0\n"
#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old; \
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
__barrier \
__op_string " %0,%2,%1\n" \
__barrier \
: "=d" (__old), "+Q" (*(__addr)) \
: "d" (__val) \
: "cc", "memory"); \
__old; \
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
#define __BITOPS_BARRIER "\n"
#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old, __new; \
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \
: "cc", "memory"); \
__old; \
})
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
__bitops_word(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
return (unsigned long *)addr;
}
static inline unsigned char *
__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
{
return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}
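
__bitops_word() masks off the low bits of nr to find the containing unsigned long, while __bitops_byte() XORs with BITS_PER_LONG - 8 to turn an LSB0 bit number into the offset of the correct byte within a big-endian word. A self-checking userspace sketch of that arithmetic (assumes the s390x case, BITS_PER_LONG == 64):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
	for (unsigned long nr = 0; nr < 256; nr++) {
		uint64_t words[4] = { 0 };
		unsigned char bytes[32];

		/* set bit 'nr' the LSB0 way: bit (nr % 64) of word nr/64 */
		words[nr / 64] |= (uint64_t)1 << (nr & (BITS_PER_LONG - 1));

		/* serialize each word big-endian, as s390 stores it */
		for (int w = 0; w < 4; w++)
			for (int b = 0; b < 8; b++)
				bytes[8 * w + b] = words[w] >> (8 * (7 - b));

		/* __bitops_byte(): byte offset is (nr ^ 56) >> 3 */
		unsigned long off = (nr ^ (BITS_PER_LONG - 8)) >> 3;
		assert(bytes[off] == 1 << (nr & 7));
	}
	printf("byte-offset formula verified for bits 0..255\n");
	return 0;
}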
static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
if (__builtin_constant_p(nr)) {
unsigned char *caddr = __bitops_byte(nr, ptr);
asm volatile(
"oi %0,%b1\n"
: "+Q" (*caddr)
: "i" (1 << (nr & 7))
: "cc", "memory");
return;
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
if (__builtin_constant_p(nr)) {
unsigned char *caddr = __bitops_byte(nr, ptr);
asm volatile(
"ni %0,%b1\n"
: "+Q" (*caddr)
: "i" (~(1 << (nr & 7)))
: "cc", "memory");
return;
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
if (__builtin_constant_p(nr)) {
unsigned char *caddr = __bitops_byte(nr, ptr);
asm volatile(
"xi %0,%b1\n"
: "+Q" (*caddr)
: "i" (1 << (nr & 7))
: "cc", "memory");
return;
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
}
static inline int
test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
return (old & mask) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
return (old & ~mask) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
return (old & mask) != 0;
}
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
*addr |= 1 << (nr & 7);
}
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
*addr &= ~(1 << (nr & 7));
}
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
*addr ^= 1 << (nr & 7);
}
static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
ch = *addr;
*addr |= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
ch = *addr;
*addr &= ~(1 << (nr & 7));
return (ch >> (nr & 7)) & 1;
}
static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
ch = *addr;
*addr ^= 1 << (nr & 7);
return (ch >> (nr & 7)) & 1;
}
static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
const volatile unsigned char *addr;
addr = ((const volatile unsigned char *)ptr);
addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
return (*addr >> (nr & 7)) & 1;
}
/*
* Functions which use MSB0 bit numbering.
* On an s390x system the bits are numbered:
* |0..............63|64............127|128...........191|192...........255|
* and on s390:
* |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
*/
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
unsigned long offset);
static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
static inline int test_bit_inv(unsigned long nr,
const volatile unsigned long *ptr)
{
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
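
The _inv helpers implement the MSB0 numbering documented above purely by XOR: inverting the low six bits of the bit number mirrors a bit's position within its word while preserving the word index, so MSB0 bit 0 becomes LSB0 bit 63. A quick userspace check of the mapping (64-bit case assumed):

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
	/* MSB0 bit 0 is the most significant bit of the first word */
	assert((0UL ^ (BITS_PER_LONG - 1)) == 63);

	/* MSB0 bit 64 is the most significant bit of the second word:
	 * the word index (nr / 64) is preserved, only the position
	 * inside the word is mirrored */
	assert((64UL ^ (BITS_PER_LONG - 1)) == 127);

	/* round trip: inverting twice yields the original number */
	for (unsigned long nr = 0; nr < 256; nr++)
		assert(((nr ^ 63UL) ^ 63UL) == nr);

	printf("MSB0 <-> LSB0 mapping verified\n");
	return 0;
}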
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
/**
* __flogr - find leftmost one
* @word: The word to search
*
* Returns the bit number of the most significant bit set,
* where the most significant bit has bit number 0.
* If no bit is set this function returns 64.
*/
static inline unsigned char __flogr(unsigned long word)
{
if (__builtin_constant_p(word)) {
unsigned long bit = 0;
if (!word)
return 64;
if (!(word & 0xffffffff00000000UL)) {
word <<= 32;
bit += 32;
}
if (!(word & 0xffff000000000000UL)) {
word <<= 16;
bit += 16;
}
if (!(word & 0xff00000000000000UL)) {
word <<= 8;
bit += 8;
}
if (!(word & 0xf000000000000000UL)) {
word <<= 4;
bit += 4;
}
if (!(word & 0xc000000000000000UL)) {
word <<= 2;
bit += 2;
}
if (!(word & 0x8000000000000000UL)) {
word <<= 1;
bit += 1;
}
return bit;
} else {
register unsigned long bit asm("4") = word;
register unsigned long out asm("5");
asm volatile(
" flogr %[bit],%[bit]\n"
: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
return bit;
}
}
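
For non-constant input __flogr() uses the FLOGR instruction; the constant-folded branch above computes the same result by binary search. Where FLOGR is unavailable, the same contract can be expressed with a compiler builtin; a hedged userspace equivalent, used here only to illustrate the semantics (returns 64 for zero, bit 0 = MSB):

#include <assert.h>
#include <stdio.h>

/* Same contract as __flogr(): number of leading zero bits, i.e. the
 * MSB0 bit number of the leftmost one, or 64 if no bit is set. */
static unsigned int flogr_equiv(unsigned long long word)
{
	return word ? (unsigned int)__builtin_clzll(word) : 64;
}

int main(void)
{
	assert(flogr_equiv(0) == 64);
	assert(flogr_equiv(1ULL << 63) == 0);	/* leftmost bit */
	assert(flogr_equiv(1) == 63);		/* rightmost bit */

	/* __ffs() below is derived as flogr(-x & x) ^ 63: isolate the
	 * lowest set bit, then convert MSB0 -> LSB0 numbering. */
	unsigned long long x = 0xb0;		/* lowest set bit: 4 */
	assert((flogr_equiv(-x & x) ^ 63) == 4);

	printf("flogr contract checks passed\n");
	return 0;
}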
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}
/**
* ffs - find first bit set
* @word: the word to search
*
* This is defined the same way as the libc and
* compiler builtin ffs routines (man ffs).
*/
static inline int ffs(int word)
{
unsigned long mask = 2 * BITS_PER_LONG - 1;
unsigned int val = (unsigned int)word;
return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}
/**
* __fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
return __flogr(word) ^ (BITS_PER_LONG - 1);
}
/**
* fls64 - find last set bit in a 64-bit word
* @word: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
static inline int fls64(unsigned long word)
{
unsigned long mask = 2 * BITS_PER_LONG - 1;
return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}
/**
* fls - find last (most-significant) bit set
* @word: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline int fls(int word)
{
return fls64((unsigned int)word);
}
#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* _S390_BITOPS_H */

arch/s390/include/asm/bug.h

@@ -0,0 +1,71 @@
#ifndef _ASM_S390_BUG_H
#define _ASM_S390_BUG_H
#include <linux/kernel.h>
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define __EMIT_BUG(x) do { \
asm volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section .rodata.str,\"aMS\",@progbits,1\n" \
"2: .asciz \""__FILE__"\"\n" \
".previous\n" \
".section __bug_table,\"a\"\n" \
"3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \
" .org 3b+%2\n" \
".previous\n" \
: : "i" (__LINE__), \
"i" (x), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#else /* CONFIG_DEBUG_BUGVERBOSE */
#define __EMIT_BUG(x) do { \
asm volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section __bug_table,\"a\"\n" \
"2: .long 1b-2b\n" \
" .short %0\n" \
" .org 2b+%1\n" \
".previous\n" \
: : "i" (x), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define BUG() do { \
__EMIT_BUG(0); \
unreachable(); \
} while (0)
#define __WARN_TAINT(taint) do { \
__EMIT_BUG(BUGFLAG_TAINT(taint)); \
} while (0)
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
if (__ret_warn_on) \
__WARN(); \
} else { \
if (unlikely(__ret_warn_on)) \
__WARN(); \
} \
unlikely(__ret_warn_on); \
})
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
#endif /* _ASM_S390_BUG_H */

arch/s390/include/asm/bugs.h

@@ -0,0 +1,20 @@
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/bugs.h"
* Copyright (C) 1994 Linus Torvalds
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
static inline void check_bugs(void)
{
/* s390 has no bugs ... */
}

arch/s390/include/asm/cache.h

@@ -0,0 +1,18 @@
/*
* S390 version
* Copyright IBM Corp. 1999
*
* Derived from "include/asm-i386/cache.h"
* Copyright (C) 1992, Linus Torvalds
*/
#ifndef __ARCH_S390_CACHE_H
#define __ARCH_S390_CACHE_H
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
#define NET_SKB_PAD 32
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif

arch/s390/include/asm/cacheflush.h

@@ -0,0 +1,16 @@
#ifndef _S390_CACHEFLUSH_H
#define _S390_CACHEFLUSH_H
/* Caches aren't brain-dead on the s390. */
#include <asm-generic/cacheflush.h>
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
#endif /* _S390_CACHEFLUSH_H */

arch/s390/include/asm/ccwdev.h

@@ -0,0 +1,233 @@
/*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Arnd Bergmann <arndb@de.ibm.com>
*
* Interface for CCW device drivers
*/
#ifndef _S390_CCWDEV_H_
#define _S390_CCWDEV_H_
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <asm/fcx.h>
#include <asm/irq.h>
#include <asm/schid.h>
/* structs from asm/cio.h */
struct irb;
struct ccw1;
struct ccw_dev_id;
/* simplified initializers for struct ccw_device:
* CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one
* entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */
#define CCW_DEVICE(cu, cum) \
.cu_type=(cu), .cu_model=(cum), \
.match_flags=(CCW_DEVICE_ID_MATCH_CU_TYPE \
| (cum ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0))
#define CCW_DEVICE_DEVTYPE(cu, cum, dev, devm) \
.cu_type=(cu), .cu_model=(cum), .dev_type=(dev), .dev_model=(devm),\
.match_flags=CCW_DEVICE_ID_MATCH_CU_TYPE \
| ((cum) ? CCW_DEVICE_ID_MATCH_CU_MODEL : 0) \
| CCW_DEVICE_ID_MATCH_DEVICE_TYPE \
| ((devm) ? CCW_DEVICE_ID_MATCH_DEVICE_MODEL : 0)
/* scan through an array of device ids and return the first
* entry that matches the device.
*
* the array must end with an entry containing zero match_flags
*/
static inline const struct ccw_device_id *
ccw_device_id_match(const struct ccw_device_id *array,
const struct ccw_device_id *match)
{
const struct ccw_device_id *id = array;
for (id = array; id->match_flags; id++) {
if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_TYPE)
&& (id->cu_type != match->cu_type))
continue;
if ((id->match_flags & CCW_DEVICE_ID_MATCH_CU_MODEL)
&& (id->cu_model != match->cu_model))
continue;
if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_TYPE)
&& (id->dev_type != match->dev_type))
continue;
if ((id->match_flags & CCW_DEVICE_ID_MATCH_DEVICE_MODEL)
&& (id->dev_model != match->dev_model))
continue;
return id;
}
return NULL;
}
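
ccw_device_id_match() implements the usual device-table walk: a flag bit only constrains matching when set in match_flags, so a terse entry can wildcard fields. A standalone mock that reproduces the semantics with made-up flag values (the real constants live in linux/mod_devicetable.h; everything below is illustrative):

#include <stdio.h>

/* illustrative stand-ins, not the real mod_devicetable.h values */
#define MATCH_CU_TYPE	0x01
#define MATCH_CU_MODEL	0x02

struct id {
	unsigned int match_flags;
	unsigned int cu_type, cu_model;
};

static const struct id *id_match(const struct id *array,
				 const struct id *dev)
{
	for (const struct id *id = array; id->match_flags; id++) {
		if ((id->match_flags & MATCH_CU_TYPE) &&
		    id->cu_type != dev->cu_type)
			continue;
		if ((id->match_flags & MATCH_CU_MODEL) &&
		    id->cu_model != dev->cu_model)
			continue;
		return id;	/* all constrained fields matched */
	}
	return NULL;		/* zero match_flags terminates the table */
}

int main(void)
{
	const struct id table[] = {
		{ MATCH_CU_TYPE | MATCH_CU_MODEL, 0x3990, 0xe9 },
		{ MATCH_CU_TYPE, 0x3990, 0 },	/* any 3990 model */
		{ 0, 0, 0 },
	};
	const struct id dev = { 0, 0x3990, 0xc2 };
	const struct id *m = id_match(table, &dev);

	/* model differs, so the wildcard second entry (flags 0x1) wins */
	printf("matched entry with flags 0x%x\n", m ? m->match_flags : 0);
	return 0;
}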
/**
* struct ccw_device - channel attached device
* @ccwlock: pointer to device lock
* @id: id of this device
* @drv: ccw driver for this device
* @dev: embedded device structure
* @online: online status of device
* @handler: interrupt handler
*
* @handler is a member of the device rather than the driver since a driver
* can have different interrupt handlers for different ccw devices
* (multi-subchannel drivers).
*/
struct ccw_device {
spinlock_t *ccwlock;
/* private: */
struct ccw_device_private *private; /* cio private information */
/* public: */
struct ccw_device_id id;
struct ccw_driver *drv;
struct device dev;
int online;
void (*handler) (struct ccw_device *, unsigned long, struct irb *);
};
/*
* Possible events used by the path_event notifier.
*/
#define PE_NONE 0x0
#define PE_PATH_GONE 0x1 /* A path is no longer available. */
#define PE_PATH_AVAILABLE 0x2 /* A path has become available and
was successfully verified. */
#define PE_PATHGROUP_ESTABLISHED 0x4 /* A pathgroup was reset and had
to be established again. */
/*
* Possible CIO actions triggered by the unit check handler.
*/
enum uc_todo {
UC_TODO_RETRY,
UC_TODO_RETRY_ON_NEW_PATH,
UC_TODO_STOP
};
/**
* struct ccw_driver - device driver for channel attached devices
* @ids: ids supported by this driver
* @probe: function called on probe
* @remove: function called on remove
* @set_online: called when setting device online
* @set_offline: called when setting device offline
* @notify: notify driver of device state changes
* @path_event: notify driver of channel path events
* @shutdown: called at device shutdown
* @prepare: prepare for pm state transition
* @complete: undo work done in @prepare
* @freeze: callback for freezing during hibernation snapshotting
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @uc_handler: callback for unit check handler
* @driver: embedded device driver structure
* @int_class: interruption class to use for accounting interrupts
*/
struct ccw_driver {
struct ccw_device_id *ids;
int (*probe) (struct ccw_device *);
void (*remove) (struct ccw_device *);
int (*set_online) (struct ccw_device *);
int (*set_offline) (struct ccw_device *);
int (*notify) (struct ccw_device *, int);
void (*path_event) (struct ccw_device *, int *);
void (*shutdown) (struct ccw_device *);
int (*prepare) (struct ccw_device *);
void (*complete) (struct ccw_device *);
int (*freeze)(struct ccw_device *);
int (*thaw) (struct ccw_device *);
int (*restore)(struct ccw_device *);
enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
struct device_driver driver;
enum interruption_class int_class;
};
extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
const char *bus_id);
/* devices drivers call these during module load and unload.
* When a driver is registered, its probe method is called
* when new devices for its type pop up */
extern int ccw_driver_register (struct ccw_driver *driver);
extern void ccw_driver_unregister (struct ccw_driver *driver);
struct ccw1;
extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
extern int ccw_device_set_options(struct ccw_device *, unsigned long);
extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
int ccw_device_is_pathgroup(struct ccw_device *cdev);
int ccw_device_is_multipath(struct ccw_device *cdev);
/* Allow for i/o completion notification after primary interrupt status. */
#define CCWDEV_EARLY_NOTIFICATION 0x0001
/* Report all interrupt conditions. */
#define CCWDEV_REPORT_ALL 0x0002
/* Try to perform path grouping. */
#define CCWDEV_DO_PATHGROUP 0x0004
/* Allow forced onlining of boxed devices. */
#define CCWDEV_ALLOW_FORCE 0x0008
/* Try to use multipath mode. */
#define CCWDEV_DO_MULTIPATH 0x0010
extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long);
extern int ccw_device_start_timeout(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long, int);
extern int ccw_device_start_key(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, __u8, unsigned long);
extern int ccw_device_start_timeout_key(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, __u8,
unsigned long, int);
extern int ccw_device_resume(struct ccw_device *);
extern int ccw_device_halt(struct ccw_device *, unsigned long);
extern int ccw_device_clear(struct ccw_device *, unsigned long);
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key);
int ccw_device_tm_start_timeout_key(struct ccw_device *, struct tcw *,
unsigned long, u8, u8, int);
int ccw_device_tm_start(struct ccw_device *, struct tcw *,
unsigned long, u8);
int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
unsigned long, u8, int);
int ccw_device_tm_intrg(struct ccw_device *cdev);
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
extern int ccw_device_set_online(struct ccw_device *cdev);
extern int ccw_device_set_offline(struct ccw_device *cdev);
extern struct ciw *ccw_device_get_ciw(struct ccw_device *, __u32 cmd);
extern __u8 ccw_device_get_path_mask(struct ccw_device *);
extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define get_ccwdev_lock(x) (x)->ccwlock
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
extern void ccw_device_destroy_console(struct ccw_device *);
extern int ccw_device_enable_console(struct ccw_device *);
extern void ccw_device_wait_idle(struct ccw_device *);
extern int ccw_device_force_console(struct ccw_device *);
int ccw_device_siosl(struct ccw_device *);
extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *, int);
#endif /* _S390_CCWDEV_H_ */

arch/s390/include/asm/ccwgroup.h

@@ -0,0 +1,73 @@
#ifndef S390_CCWGROUP_H
#define S390_CCWGROUP_H
struct ccw_device;
struct ccw_driver;
/**
* struct ccwgroup_device - ccw group device
* @state: online/offline state
* @count: number of attached slave devices
* @dev: embedded device structure
* @cdev: variable number of slave devices, allocated as needed
* @ungroup_work: work to be done when a ccwgroup notifier has action
* type %BUS_NOTIFY_UNBIND_DRIVER
*/
struct ccwgroup_device {
enum {
CCWGROUP_OFFLINE,
CCWGROUP_ONLINE,
} state;
/* private: */
atomic_t onoff;
struct mutex reg_mutex;
/* public: */
unsigned int count;
struct device dev;
struct work_struct ungroup_work;
struct ccw_device *cdev[0];
};
/**
* struct ccwgroup_driver - driver for ccw group devices
* @setup: function called during device creation to setup the device
* @remove: function called on remove
* @set_online: function called when device is set online
* @set_offline: function called when device is set offline
* @shutdown: function called when device is shut down
* @prepare: prepare for pm state transition
* @complete: undo work done in @prepare
* @freeze: callback for freezing during hibernation snapshotting
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @driver: embedded driver structure
*/
struct ccwgroup_driver {
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
void (*shutdown)(struct ccwgroup_device *);
int (*prepare) (struct ccwgroup_device *);
void (*complete) (struct ccwgroup_device *);
int (*freeze)(struct ccwgroup_device *);
int (*thaw) (struct ccwgroup_device *);
int (*restore)(struct ccwgroup_device *);
struct device_driver driver;
};
extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
#define to_ccwgroupdev(x) container_of((x), struct ccwgroup_device, dev)
#define to_ccwgroupdrv(x) container_of((x), struct ccwgroup_driver, driver)
#endif

arch/s390/include/asm/checksum.h

@@ -0,0 +1,140 @@
/*
* S390 fast network checksum routines
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Ulrich Hild (first version)
* Martin Schwidefsky (heavily optimized CKSM version)
* D.J. Barrow (third attempt)
*/
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
static inline __wsum
csum_partial(const void *buff, int len, __wsum sum)
{
register unsigned long reg2 asm("2") = (unsigned long) buff;
register unsigned long reg3 asm("3") = (unsigned long) len;
asm volatile(
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
return sum;
}
/*
* the same as csum_partial_copy, but copies from user space.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*
* Copy from userspace and compute checksum.
*/
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum,
int *err_ptr)
{
if (unlikely(copy_from_user(dst, src, len)))
*err_ptr = -EFAULT;
return csum_partial(dst, len, sum);
}
static inline __wsum
csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst,src,len);
return csum_partial(dst, len, sum);
}
/*
* Fold a partial checksum without adding pseudo headers
*/
static inline __sum16 csum_fold(__wsum sum)
{
u32 csum = (__force u32) sum;
csum += (csum >> 16) + (csum << 16);
csum >>= 16;
return (__force __sum16) ~csum;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return csum_fold(csum_partial(iph, ihl*4, 0));
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 32-bit checksum
*/
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len, unsigned short proto,
__wsum sum)
{
__u32 csum = (__force __u32)sum;
csum += (__force __u32)saddr;
if (csum < (__force __u32)saddr)
csum++;
csum += (__force __u32)daddr;
if (csum < (__force __u32)daddr)
csum++;
csum += len + proto;
if (csum < len + proto)
csum++;
return (__force __wsum)csum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len, unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* _S390_CHECKSUM_H */
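
The folding trick in csum_fold() works because the Internet checksum is an end-around-carry (ones' complement) sum: adding the swapped halves accumulates both 16-bit halves plus any carry into the upper half, which the final shift extracts. A portable, self-checking sketch (plain C standing in for the CKSM-based csum_partial):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for csum_partial(): 32-bit ones' complement sum
 * of big-endian 16-bit words (even length assumed, as in the header). */
static uint32_t csum_partial32(const uint8_t *buf, size_t len)
{
	uint64_t sum = 0;

	for (size_t i = 0; i < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	while (sum > 0xffffffffULL)		/* fold 64 -> 32 bits */
		sum = (sum & 0xffffffff) + (sum >> 32);
	return (uint32_t)sum;
}

/* Same algorithm as csum_fold() above: fold 32 -> 16 and complement */
static uint16_t csum_fold16(uint32_t csum)
{
	csum += (csum >> 16) + (csum << 16);
	csum >>= 16;
	return (uint16_t)~csum;
}

int main(void)
{
	uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00,		/* checksum field zeroed */
		0xac, 0x10, 0x0a, 0x63, 0xac, 0x10, 0x0a, 0x0c,
	};
	uint16_t csum = csum_fold16(csum_partial32(iph, sizeof(iph)));

	/* store it; re-checksumming the full header must now fold to 0 */
	iph[10] = csum >> 8;
	iph[11] = csum & 0xff;
	assert(csum_fold16(csum_partial32(iph, sizeof(iph))) == 0);
	printf("ip header checksum: 0x%04x\n", csum);
	return 0;
}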

arch/s390/include/asm/chpid.h

@@ -0,0 +1,50 @@
/*
* Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef _ASM_S390_CHPID_H
#define _ASM_S390_CHPID_H
#include <uapi/asm/chpid.h>
#include <asm/cio.h>
struct channel_path_desc {
u8 flags;
u8 lsn;
u8 desc;
u8 chpid;
u8 swla;
u8 zeroes;
u8 chla;
u8 chpp;
} __packed;
static inline void chp_id_init(struct chp_id *chpid)
{
memset(chpid, 0, sizeof(struct chp_id));
}
static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
{
return (a->id == b->id) && (a->cssid == b->cssid);
}
static inline void chp_id_next(struct chp_id *chpid)
{
if (chpid->id < __MAX_CHPID)
chpid->id++;
else {
chpid->id = 0;
chpid->cssid++;
}
}
static inline int chp_id_is_valid(struct chp_id *chpid)
{
return (chpid->cssid <= __MAX_CSSID);
}
#define chp_id_for_each(c) \
for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))
#endif /* _ASM_S390_CHPID_H */
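
chp_id_for_each() enumerates every possible channel-path ID by letting chp_id_next() carry from id into cssid until chp_id_is_valid() fails. A userspace mock of the iteration (limits are illustrative; __MAX_CHPID comes from the uapi header, __MAX_CSSID from asm/cio.h below):

#include <stdio.h>
#include <string.h>

#define __MAX_CHPID 255		/* illustrative; from uapi/asm/chpid.h */
#define __MAX_CSSID 0		/* from asm/cio.h below */

struct chp_id { unsigned char cssid, id; };

static void chp_id_init(struct chp_id *c) { memset(c, 0, sizeof(*c)); }

static void chp_id_next(struct chp_id *c)
{
	if (c->id < __MAX_CHPID) {
		c->id++;
	} else {		/* wrap id, carry into the next css */
		c->id = 0;
		c->cssid++;
	}
}

static int chp_id_is_valid(struct chp_id *c)
{
	return c->cssid <= __MAX_CSSID;
}

#define chp_id_for_each(c) \
	for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))

int main(void)
{
	struct chp_id c;
	unsigned int n = 0;

	chp_id_for_each(&c)
		n++;
	printf("%u channel-path ids visited\n", n);	/* 256 */
	return 0;
}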

arch/s390/include/asm/cio.h

@@ -0,0 +1,315 @@
/*
* Common interface for I/O on S/390
*/
#ifndef _ASM_S390_CIO_H_
#define _ASM_S390_CIO_H_
#include <linux/spinlock.h>
#include <asm/types.h>
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
#define __MAX_SUBCHANNEL 65535
#define __MAX_SSID 3
#include <asm/scsw.h>
/**
* struct ccw1 - channel command word
* @cmd_code: command code
* @flags: flags, like IDA addressing, etc.
* @count: byte count
* @cda: data address
*
* The ccw is the basic structure to build channel programs that perform
* operations with the device or the control unit. Only Format-1 channel
* command words are supported.
*/
struct ccw1 {
__u8 cmd_code;
__u8 flags;
__u16 count;
__u32 cda;
} __attribute__ ((packed,aligned(8)));
#define CCW_FLAG_DC 0x80
#define CCW_FLAG_CC 0x40
#define CCW_FLAG_SLI 0x20
#define CCW_FLAG_SKIP 0x10
#define CCW_FLAG_PCI 0x08
#define CCW_FLAG_IDA 0x04
#define CCW_FLAG_SUSPEND 0x02
#define CCW_CMD_READ_IPL 0x02
#define CCW_CMD_NOOP 0x03
#define CCW_CMD_BASIC_SENSE 0x04
#define CCW_CMD_TIC 0x08
#define CCW_CMD_STLCK 0x14
#define CCW_CMD_SENSE_PGID 0x34
#define CCW_CMD_SUSPEND_RECONN 0x5B
#define CCW_CMD_RDC 0x64
#define CCW_CMD_RELEASE 0x94
#define CCW_CMD_SET_PGID 0xAF
#define CCW_CMD_SENSE_ID 0xE4
#define CCW_CMD_DCTL 0xF3
#define SENSE_MAX_COUNT 0x20
/**
* struct erw - extended report word
* @res0: reserved
* @auth: authorization check
* @pvrf: path-verification-required flag
* @cpt: channel-path timeout
* @fsavf: failing storage address validity flag
* @cons: concurrent sense
* @scavf: secondary ccw address validity flag
* @fsaf: failing storage address format
* @scnt: sense count, if @cons == %1
* @res16: reserved
*/
struct erw {
__u32 res0 : 3;
__u32 auth : 1;
__u32 pvrf : 1;
__u32 cpt : 1;
__u32 fsavf : 1;
__u32 cons : 1;
__u32 scavf : 1;
__u32 fsaf : 1;
__u32 scnt : 6;
__u32 res16 : 16;
} __attribute__ ((packed));
/**
* struct erw_eadm - EADM Subchannel extended report word
* @b: aob error
* @r: arsb error
*/
struct erw_eadm {
__u32 : 16;
__u32 b : 1;
__u32 r : 1;
__u32 : 14;
} __packed;
/**
* struct sublog - subchannel logout area
* @res0: reserved
* @esf: extended status flags
* @lpum: last path used mask
* @arep: ancillary report
* @fvf: field-validity flags
* @sacc: storage access code
* @termc: termination code
* @devsc: device-status check
* @serr: secondary error
* @ioerr: i/o-error alert
* @seqc: sequence code
*/
struct sublog {
__u32 res0 : 1;
__u32 esf : 7;
__u32 lpum : 8;
__u32 arep : 1;
__u32 fvf : 5;
__u32 sacc : 2;
__u32 termc : 2;
__u32 devsc : 1;
__u32 serr : 1;
__u32 ioerr : 1;
__u32 seqc : 3;
} __attribute__ ((packed));
/**
* struct esw0 - Format 0 Extended Status Word (ESW)
* @sublog: subchannel logout
* @erw: extended report word
* @faddr: failing storage address
* @saddr: secondary ccw address
*/
struct esw0 {
struct sublog sublog;
struct erw erw;
__u32 faddr[2];
__u32 saddr;
} __attribute__ ((packed));
/**
* struct esw1 - Format 1 Extended Status Word (ESW)
* @zero0: reserved zeros
* @lpum: last path used mask
* @zero16: reserved zeros
* @erw: extended report word
* @zeros: three fullwords of zeros
*/
struct esw1 {
__u8 zero0;
__u8 lpum;
__u16 zero16;
struct erw erw;
__u32 zeros[3];
} __attribute__ ((packed));
/**
* struct esw2 - Format 2 Extended Status Word (ESW)
* @zero0: reserved zeros
* @lpum: last path used mask
* @dcti: device-connect-time interval
* @erw: extended report word
* @zeros: three fullwords of zeros
*/
struct esw2 {
__u8 zero0;
__u8 lpum;
__u16 dcti;
struct erw erw;
__u32 zeros[3];
} __attribute__ ((packed));
/**
* struct esw3 - Format 3 Extended Status Word (ESW)
* @zero0: reserved zeros
* @lpum: last path used mask
* @res: reserved
* @erw: extended report word
* @zeros: three fullwords of zeros
*/
struct esw3 {
__u8 zero0;
__u8 lpum;
__u16 res;
struct erw erw;
__u32 zeros[3];
} __attribute__ ((packed));
/**
* struct esw_eadm - EADM Subchannel Extended Status Word (ESW)
* @sublog: subchannel logout
* @erw: extended report word
*/
struct esw_eadm {
__u32 sublog;
struct erw_eadm erw;
__u32 : 32;
__u32 : 32;
__u32 : 32;
} __packed;
/**
* struct irb - interruption response block
* @scsw: subchannel status word
* @esw: extended status word
* @ecw: extended control word
*
* The irb that is handed to the device driver when an interrupt occurs. For
* solicited interrupts, the common I/O layer already performs checks whether
* a field is valid; a field not being valid is always passed as %0.
* If a unit check occurred, @ecw may contain sense data; this is retrieved
* by the common I/O layer itself if the device doesn't support concurrent
* sense (so that the device driver never needs to perform basic sense itself).
* For unsolicited interrupts, the irb is passed as-is (except for sense data,
* if applicable).
*/
struct irb {
union scsw scsw;
union {
struct esw0 esw0;
struct esw1 esw1;
struct esw2 esw2;
struct esw3 esw3;
struct esw_eadm eadm;
} esw;
__u8 ecw[32];
} __attribute__ ((packed,aligned(4)));
/**
* struct ciw - command information word (CIW) layout
* @et: entry type
* @reserved: reserved bits
* @ct: command type
* @cmd: command code
* @count: command count
*/
struct ciw {
__u32 et : 2;
__u32 reserved : 2;
__u32 ct : 4;
__u32 cmd : 8;
__u32 count : 16;
} __attribute__ ((packed));
#define CIW_TYPE_RCD 0x0 /* read configuration data */
#define CIW_TYPE_SII 0x1 /* set interface identifier */
#define CIW_TYPE_RNI 0x2 /* read node identifier */
/*
* Flags used as input parameters for do_IO()
*/
#define DOIO_ALLOW_SUSPEND 0x0001 /* allow for channel prog. suspend */
#define DOIO_DENY_PREFETCH 0x0002 /* don't allow for CCW prefetch */
#define DOIO_SUPPRESS_INTER 0x0004 /* suppress intermediate inter. */
/* ... for suspended CCWs */
/* Device or subchannel gone. */
#define CIO_GONE 0x0001
/* No path to device. */
#define CIO_NO_PATH 0x0002
/* Device has appeared. */
#define CIO_OPER 0x0004
/* Sick revalidation of device. */
#define CIO_REVALIDATE 0x0008
/* Device did not respond in time. */
#define CIO_BOXED 0x0010
/**
* struct ccw_dev_id - unique identifier for ccw devices
* @ssid: subchannel set id
* @devno: device number
*
* This structure is not directly based on any hardware structure. The
* hardware identifies a device by its device number and its subchannel,
* which is in turn identified by its id. In order to get a unique identifier
* for ccw devices across subchannel sets, struct ccw_dev_id has been
* introduced.
*/
struct ccw_dev_id {
u8 ssid;
u16 devno;
};
/**
* ccw_device_id_is_equal() - compare two ccw_dev_ids
* @dev_id1: a ccw_dev_id
* @dev_id2: another ccw_dev_id
* Returns:
* %1 if the two structures are equal field-by-field,
* %0 if not.
* Context:
* any
*/
static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
struct ccw_dev_id *dev_id2)
{
if ((dev_id1->ssid == dev_id2->ssid) &&
(dev_id1->devno == dev_id2->devno))
return 1;
return 0;
}
void channel_subsystem_reinit(void);
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
struct cio_iplinfo {
u16 devno;
int is_qdio;
};
extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
/* Function from drivers/s390/cio/chsc.c */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
int chsc_sstpi(void *page, void *result, size_t size);
#endif

arch/s390/include/asm/clp.h

@@ -0,0 +1,28 @@
#ifndef _ASM_S390_CLP_H
#define _ASM_S390_CLP_H
/* CLP common request & response block size */
#define CLP_BLK_SIZE PAGE_SIZE
struct clp_req_hdr {
u16 len;
u16 cmd;
} __packed;
struct clp_rsp_hdr {
u16 len;
u16 rsp;
} __packed;
/* CLP Response Codes */
#define CLP_RC_OK 0x0010 /* Command request successful */
#define CLP_RC_CMD 0x0020 /* Command code not recognized */
#define CLP_RC_PERM 0x0030 /* Command not authorized */
#define CLP_RC_FMT 0x0040 /* Invalid command request format */
#define CLP_RC_LEN 0x0050 /* Invalid command request length */
#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
#define CLP_RC_NODATA 0x0080 /* No data available */
#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
#endif

arch/s390/include/asm/cmb.h

@@ -0,0 +1,12 @@
#ifndef S390_CMB_H
#define S390_CMB_H
#include <uapi/asm/cmb.h>
struct ccw_device;
extern int enable_cmf(struct ccw_device *cdev);
extern int disable_cmf(struct ccw_device *cdev);
extern u64 cmf_read(struct ccw_device *cdev, int index);
extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data);
#endif /* S390_CMB_H */

arch/s390/include/asm/cmpxchg.h

@@ -0,0 +1,304 @@
/*
* Copyright IBM Corp. 1999, 2011
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*/
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <linux/bug.h>
extern void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
{
unsigned long addr, old;
int shift;
switch (size) {
case 1:
addr = (unsigned long) ptr;
shift = (3 ^ (addr & 3)) << 3;
addr ^= addr & 3;
asm volatile(
" l %0,%4\n"
"0: lr 0,%0\n"
" nr 0,%3\n"
" or 0,%2\n"
" cs %0,0,%4\n"
" jl 0b\n"
: "=&d" (old), "=Q" (*(int *) addr)
: "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
"Q" (*(int *) addr) : "memory", "cc", "0");
return old >> shift;
case 2:
addr = (unsigned long) ptr;
shift = (2 ^ (addr & 2)) << 3;
addr ^= addr & 2;
asm volatile(
" l %0,%4\n"
"0: lr 0,%0\n"
" nr 0,%3\n"
" or 0,%2\n"
" cs %0,0,%4\n"
" jl 0b\n"
: "=&d" (old), "=Q" (*(int *) addr)
: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
"Q" (*(int *) addr) : "memory", "cc", "0");
return old >> shift;
case 4:
asm volatile(
" l %0,%3\n"
"0: cs %0,%2,%3\n"
" jl 0b\n"
: "=&d" (old), "=Q" (*(int *) ptr)
: "d" (x), "Q" (*(int *) ptr)
: "memory", "cc");
return old;
#ifdef CONFIG_64BIT
case 8:
asm volatile(
" lg %0,%3\n"
"0: csg %0,%2,%3\n"
" jl 0b\n"
: "=&d" (old), "=m" (*(long *) ptr)
: "d" (x), "Q" (*(long *) ptr)
: "memory", "cc");
return old;
#endif /* CONFIG_64BIT */
}
__xchg_called_with_bad_pointer();
return x;
}
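
Since CS only operates on aligned 4-byte words, the 1- and 2-byte cases above align the address down, derive the byte's shift from the low address bits (big-endian, so byte 0 is the most significant), and retry CS with the target byte spliced in. A C11 sketch of the same splice-and-retry idea, operating on the word value directly (illustration only; the big-endian shift matches s390 addressing):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Exchange byte 'idx' (0 = most significant, as on big-endian s390)
 * of a 32-bit word for 'new', returning the byte's old value. */
static unsigned int xchg_byte(atomic_uint *word, unsigned int idx,
			      unsigned int new)
{
	int shift = (3 ^ (idx & 3)) << 3;	/* bit shift, from the right */
	uint32_t mask = 0xffu << shift;
	uint32_t old = atomic_load(word);

	/* splice the new byte into the word and retry until CS wins */
	while (!atomic_compare_exchange_weak(word, &old,
					     (old & ~mask) | (new << shift)))
		;
	return (old >> shift) & 0xff;
}

int main(void)
{
	atomic_uint w = 0x11223344;

	/* replace byte 1 (0x22) with 0xab */
	printf("old byte: 0x%02x\n", xchg_byte(&w, 1, 0xab));
	printf("word now: 0x%08x\n", atomic_load(&w));	/* 0x11ab3344 */
	return 0;
}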
#define xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
__ret; \
})
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __HAVE_ARCH_CMPXCHG
extern void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long addr, prev, tmp;
int shift;
switch (size) {
case 1:
addr = (unsigned long) ptr;
shift = (3 ^ (addr & 3)) << 3;
addr ^= addr & 3;
asm volatile(
" l %0,%2\n"
"0: nr %0,%5\n"
" lr %1,%0\n"
" or %0,%3\n"
" or %1,%4\n"
" cs %0,%1,%2\n"
" jnl 1f\n"
" xr %1,%0\n"
" nr %1,%5\n"
" jnz 0b\n"
"1:"
: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
: "d" ((old & 0xff) << shift),
"d" ((new & 0xff) << shift),
"d" (~(0xff << shift))
: "memory", "cc");
return prev >> shift;
case 2:
addr = (unsigned long) ptr;
shift = (2 ^ (addr & 2)) << 3;
addr ^= addr & 2;
asm volatile(
" l %0,%2\n"
"0: nr %0,%5\n"
" lr %1,%0\n"
" or %0,%3\n"
" or %1,%4\n"
" cs %0,%1,%2\n"
" jnl 1f\n"
" xr %1,%0\n"
" nr %1,%5\n"
" jnz 0b\n"
"1:"
: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
: "d" ((old & 0xffff) << shift),
"d" ((new & 0xffff) << shift),
"d" (~(0xffff << shift))
: "memory", "cc");
return prev >> shift;
case 4:
asm volatile(
" cs %0,%3,%1\n"
: "=&d" (prev), "=Q" (*(int *) ptr)
: "0" (old), "d" (new), "Q" (*(int *) ptr)
: "memory", "cc");
return prev;
#ifdef CONFIG_64BIT
case 8:
asm volatile(
" csg %0,%3,%1\n"
: "=&d" (prev), "=Q" (*(long *) ptr)
: "0" (old), "d" (new), "Q" (*(long *) ptr)
: "memory", "cc");
return prev;
#endif /* CONFIG_64BIT */
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#ifdef CONFIG_64BIT
#define cmpxchg64(ptr, o, n) \
({ \
cmpxchg((ptr), (o), (n)); \
})
#else /* CONFIG_64BIT */
static inline unsigned long long __cmpxchg64(void *ptr,
unsigned long long old,
unsigned long long new)
{
register_pair rp_old = {.pair = old};
register_pair rp_new = {.pair = new};
unsigned long long *ullptr = ptr;
asm volatile(
" cds %0,%2,%1"
: "+d" (rp_old), "+Q" (*ullptr)
: "d" (rp_new)
: "memory", "cc");
return rp_old.pair;
}
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#endif /* CONFIG_64BIT */
#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
register __typeof__(*(p2)) __old2 asm("3") = (o2); \
register __typeof__(*(p1)) __new1 asm("4") = (n1); \
register __typeof__(*(p2)) __new2 asm("5") = (n2); \
int cc; \
asm volatile( \
insn " %[old],%[new],%[ptr]\n" \
" ipm %[cc]\n" \
" srl %[cc],28" \
: [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
: [new] "d" (__new1), "d" (__new2), \
[ptr] "Q" (*(p1)), "Q" (*(p2)) \
: "memory", "cc"); \
!cc; \
})
#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
extern void __cmpxchg_double_called_with_bad_pointer(void);
#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
int __ret; \
switch (sizeof(*(p1))) { \
case 4: \
__ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
break; \
case 8: \
__ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
break; \
default: \
__cmpxchg_double_called_with_bad_pointer(); \
} \
__ret; \
})
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
int __ret; \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
if (sizeof(long) == 4) \
__ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
else \
__ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
__ret; \
})
#define system_has_cmpxchg_double() 1
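/*
 * Illustrative sketch (annotation): cmpxchg_double() operates on two
 * adjacent longs, as the VM_BUG_ON above enforces. A typical shape is an
 * aligned {pointer, counter} pair updated in one shot; success is
 * reported as 1, failure as 0 (the negated condition code).
 */
static inline int __example_pair_update(unsigned long *pair,
					unsigned long old0, unsigned long old1,
					unsigned long new0, unsigned long new1)
{
	/* pair[0] and pair[1] must be adjacent and suitably aligned */
	return cmpxchg_double(&pair[0], &pair[1], old0, old1, new0, new1);
}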
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(void *ptr,
unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 1:
case 2:
case 4:
#ifdef CONFIG_64BIT
case 8:
#endif
return __cmpxchg(ptr, old, new, size);
default:
return __cmpxchg_local_generic(ptr, old, new, size);
}
return old;
}
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))); \
__ret; \
})
#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
#endif /* __ASM_CMPXCHG_H */

View file

@@ -0,0 +1,359 @@
#ifndef _ASM_S390X_COMPAT_H
#define _ASM_S390X_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
#define __SC_DELOUSE(t,v) ({ \
BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
(t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
})
#define PSW32_MASK_PER 0x40000000UL
#define PSW32_MASK_DAT 0x04000000UL
#define PSW32_MASK_IO 0x02000000UL
#define PSW32_MASK_EXT 0x01000000UL
#define PSW32_MASK_KEY 0x00F00000UL
#define PSW32_MASK_BASE 0x00080000UL /* Always one */
#define PSW32_MASK_MCHECK 0x00040000UL
#define PSW32_MASK_WAIT 0x00020000UL
#define PSW32_MASK_PSTATE 0x00010000UL
#define PSW32_MASK_ASC 0x0000C000UL
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
#define PSW32_MASK_RI 0x00000080UL
#define PSW32_MASK_USER 0x0000FF00UL
#define PSW32_ADDR_AMODE 0x80000000UL
#define PSW32_ADDR_INSN 0x7FFFFFFFUL
#define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20)
#define PSW32_ASC_PRIMARY 0x00000000UL
#define PSW32_ASC_ACCREG 0x00004000UL
#define PSW32_ASC_SECONDARY 0x00008000UL
#define PSW32_ASC_HOME 0x0000C000UL
#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
PSW32_ASC_PRIMARY)
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_key_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
typedef struct {
u32 mask;
u32 addr;
} __aligned(8) psw_compat_t;
typedef struct {
psw_compat_t psw;
u32 gprs[NUM_GPRS];
u32 acrs[NUM_ACRS];
u32 orig_gpr2;
} s390_compat_regs;
typedef struct {
u32 gprs_high[NUM_GPRS];
} s390_compat_regs_high;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};
struct compat_statfs {
u32 f_type;
u32 f_bsize;
u32 f_blocks;
u32 f_bfree;
u32 f_bavail;
u32 f_files;
u32 f_ffree;
compat_fsid_t f_fsid;
u32 f_namelen;
u32 f_frsize;
u32 f_flags;
u32 f_spare[4];
};
struct compat_statfs64 {
u32 f_type;
u32 f_bsize;
u64 f_blocks;
u64 f_bfree;
u64 f_bavail;
u64 f_files;
u64 f_ffree;
compat_fsid_t f_fsid;
u32 f_namelen;
u32 f_frsize;
u32 f_flags;
u32 f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
} compat_sigval_t;
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[128/sizeof(int) - 3];
/* kill() */
struct {
pid_t _pid; /* sender's pid */
uid_t _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
uid_t _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status;/* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
__u32 _addr; /* faulting insn/memory ref. - pointer */
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} compat_siginfo_t;
/*
* How these fields are to be accessed.
*/
#define si_pid _sifields._kill._pid
#define si_uid _sifields._kill._uid
#define si_status _sifields._sigchld._status
#define si_utime _sifields._sigchld._utime
#define si_stime _sifields._sigchld._stime
#define si_value _sifields._rt._sigval
#define si_int _sifields._rt._sigval.sival_int
#define si_ptr _sifields._rt._sigval.sival_ptr
#define si_addr _sifields._sigfault._addr
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#define si_tid _sifields._timer._tid
#define si_overrun _sifields._timer._overrun
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)(uptr & 0x7fffffffUL);
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
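/*
 * Illustrative round trip (annotation): for addresses below 2 GB the
 * conversion is lossless, since compat_ptr() only masks bit 31.
 */
static inline int __example_compat_roundtrip(compat_uptr_t uptr)
{
	void __user *p = compat_ptr(uptr);

	return ptr_to_compat(p) == (uptr & 0x7fffffffUL);
}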
#ifdef CONFIG_COMPAT
static inline int is_compat_task(void)
{
return is_32bit_task();
}
static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
stack = KSTK_ESP(current);
if (is_compat_task())
stack &= 0x7fffffffUL;
return (void __user *) (stack - len);
}
#endif
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
compat_mode_t mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
unsigned int __unused1;
unsigned int __unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __pad1;
compat_time_t sem_ctime;
compat_ulong_t __pad2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused1;
compat_ulong_t __unused2;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __pad1;
compat_time_t msg_rtime;
compat_ulong_t __pad2;
compat_time_t msg_ctime;
compat_ulong_t __pad3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused1;
compat_ulong_t __unused2;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __pad1;
compat_time_t shm_dtime;
compat_ulong_t __pad2;
compat_time_t shm_ctime;
compat_ulong_t __pad3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused1;
compat_ulong_t __unused2;
};
#endif /* _ASM_S390X_COMPAT_H */

View file

@@ -0,0 +1,32 @@
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Christian Borntraeger (cborntra@de.ibm.com),
*/
#ifndef _ASM_S390_CPCMD_H
#define _ASM_S390_CPCMD_H
/*
* the lowlevel function for cpcmd
* the caller of __cpcmd has to ensure that the response buffer is below 2 GB
*/
extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code);
/*
* cpcmd is the in-kernel interface for issuing CP commands
*
* cmd: null-terminated command string, max 240 characters
* response: response buffer for VM's textual response
* rlen: size of the response buffer, cpcmd will not exceed this size
 * but will cap the output if it is too large. Everything that
* did not fit into the buffer will be silently dropped
* response_code: return pointer for VM's error code
* return value: the size of the response. The caller can check if the buffer
* was large enough by comparing the return value and rlen
* NOTE: If the response buffer is not below 2 GB, cpcmd can sleep
*/
extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code);
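/*
 * Illustrative sketch (not part of the original header): issue a CP
 * command and detect a capped response. The command string and error
 * codes are examples only; "buf" is assumed to live below 2 GB.
 */
static inline int __example_cpcmd(char *buf, int buflen)
{
	int response_code, len;

	len = cpcmd("QUERY USERS", buf, buflen, &response_code);
	if (response_code != 0)
		return -EIO;	/* CP rejected the command */
	if (len > buflen)
		return -ENOSPC;	/* response was capped at rlen bytes */
	return len;
}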
#endif /* _ASM_S390_CPCMD_H */

View file

@@ -0,0 +1,26 @@
/*
* Copyright IBM Corp. 2000, 2009
* Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Christian Ehrhardt <ehrhardt@de.ibm.com>,
*/
#ifndef _ASM_S390_CPU_H
#define _ASM_S390_CPU_H
#define MAX_CPU_ADDRESS 255
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct cpuid
{
unsigned int version : 8;
unsigned int ident : 24;
unsigned int machine : 16;
unsigned int unused : 16;
} __attribute__ ((packed, aligned(8)));
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_CPU_H */

View file

@@ -0,0 +1,283 @@
/*
* CPU-measurement facilities
*
* Copyright IBM Corp. 2012
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H
#include <linux/errno.h>
#include <asm/facility.h>
#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */
#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */
#define CPU_MF_INT_SF_SACA (1 << 23) /* sampler auth. change alert */
#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */
#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */
#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */
#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */
#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program
buffer full */
#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
CPU_MF_INT_SF_LSDA)
#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
/* CPU measurement facility support */
static inline int cpum_cf_avail(void)
{
return MACHINE_HAS_LPP && test_facility(67);
}
static inline int cpum_sf_avail(void)
{
return MACHINE_HAS_LPP && test_facility(68);
}
struct cpumf_ctr_info {
u16 cfvn;
u16 auth_ctl;
u16 enable_ctl;
u16 act_ctl;
u16 max_cpu;
u16 csvn;
u16 max_cg;
u16 reserved1;
u32 reserved2[12];
} __packed;
/* QUERY SAMPLING INFORMATION block */
struct hws_qsi_info_block { /* Bit(s) */
unsigned int b0_13:14; /* 0-13: zeros */
unsigned int as:1; /* 14: basic-sampling authorization */
unsigned int ad:1; /* 15: diag-sampling authorization */
unsigned int b16_21:6; /* 16-21: zeros */
unsigned int es:1; /* 22: basic-sampling enable control */
unsigned int ed:1; /* 23: diag-sampling enable control */
unsigned int b24_29:6; /* 24-29: zeros */
unsigned int cs:1; /* 30: basic-sampling activation control */
unsigned int cd:1; /* 31: diag-sampling activation control */
unsigned int bsdes:16; /* 4-5: size of basic sampling entry */
unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */
unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
unsigned long tear; /* 24-31: TEAR contents */
unsigned long dear; /* 32-39: DEAR contents */
unsigned int rsvrd0; /* 40-43: reserved */
unsigned int cpu_speed; /* 44-47: CPU speed */
unsigned long long rsvrd1; /* 48-55: reserved */
unsigned long long rsvrd2; /* 56-63: reserved */
} __packed;
/* SET SAMPLING CONTROLS request block */
struct hws_lsctl_request_block {
unsigned int s:1; /* 0: maximum buffer indicator */
unsigned int h:1; /* 1: part. level reserved for VM use*/
unsigned long long b2_53:52;/* 2-53: zeros */
unsigned int es:1; /* 54: basic-sampling enable control */
unsigned int ed:1; /* 55: diag-sampling enable control */
	unsigned int b56_61:6;	    /* 56-61: zeros */
unsigned int cs:1; /* 62: basic-sampling activation control */
unsigned int cd:1; /* 63: diag-sampling activation control */
unsigned long interval; /* 8-15: sampling interval */
unsigned long tear; /* 16-23: TEAR contents */
unsigned long dear; /* 24-31: DEAR contents */
/* 32-63: */
unsigned long rsvrd1; /* reserved */
unsigned long rsvrd2; /* reserved */
unsigned long rsvrd3; /* reserved */
unsigned long rsvrd4; /* reserved */
} __packed;
struct hws_basic_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int R:4; /* 16-19 reserved */
unsigned int U:4; /* 20-23 Number of unique instruct. */
unsigned int z:2; /* zeros */
unsigned int T:1; /* 26 PSW DAT mode */
unsigned int W:1; /* 27 PSW wait state */
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
unsigned int:16;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
unsigned long long hpp; /* Host Program Parameter */
} __packed;
struct hws_diag_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
unsigned int R:14; /* 16-19 and 20-30 reserved */
unsigned int I:1; /* 31 entry valid or invalid */
u8 data[]; /* Machine-dependent sample data */
} __packed;
struct hws_combined_entry {
struct hws_basic_entry basic; /* Basic-sampling data entry */
struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
} __packed;
struct hws_trailer_entry {
union {
struct {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned int t:1; /* 2 - Timestamp format */
unsigned long long:61; /* 3 - 63: Reserved */
};
unsigned long long flags; /* 0 - 63: All indicators */
};
unsigned long long overflow; /* 64 - sample Overflow count */
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
unsigned long long progusage1; /* 48 - reserved for programming use */
unsigned long long progusage2; /* */
} __packed;
/* Query counter information */
static inline int qctri(struct cpumf_ctr_info *info)
{
int rc = -EINVAL;
asm volatile (
"0: .insn s,0xb28e0000,%1\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(1b, 2b)
: "+d" (rc), "=Q" (*info));
return rc;
}
/* Load CPU-counter-set controls */
static inline int lcctl(u64 ctl)
{
int cc;
asm volatile (
" .insn s,0xb2840000,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc) : "m" (ctl) : "cc");
return cc;
}
/* Extract CPU counter */
static inline int ecctr(u64 ctr, u64 *val)
{
register u64 content asm("4") = 0;
int cc;
asm volatile (
" .insn rre,0xb2e40000,%0,%2\n"
" ipm %1\n"
" srl %1,28\n"
: "=d" (content), "=d" (cc) : "d" (ctr) : "cc");
if (!cc)
*val = content;
return cc;
}
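/*
 * Illustrative sketch (annotation): extract one counter, assuming the
 * counter set has been authorized and activated via lcctl() beforehand.
 * The counter number 0 is purely illustrative.
 */
static inline int __example_read_counter(u64 *val)
{
	return ecctr(0, val) ? -EINVAL : 0;
}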
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
int cc;
cc = 1;
asm volatile(
"0: .insn s,0xb2860000,0(%1)\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (cc), "+a" (info)
: "m" (*info)
: "cc", "memory");
return cc ? -EINVAL : 0;
}
/* Load sampling controls */
static inline int lsctl(struct hws_lsctl_request_block *req)
{
int cc;
cc = 1;
asm volatile(
"0: .insn s,0xb2870000,0(%1)\n"
"1: ipm %0\n"
" srl %0,28\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (cc), "+a" (req)
: "m" (*req)
: "cc", "memory");
return cc ? -EINVAL : 0;
}
/* Sampling control helper functions */
#include <linux/time.h>
static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
unsigned long freq)
{
return (USEC_PER_SEC / freq) * qsi->cpu_speed;
}
static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
unsigned long rate)
{
return USEC_PER_SEC * qsi->cpu_speed / rate;
}
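/*
 * Worked example (annotation): with an illustrative qsi->cpu_speed of
 * 1000 and a requested frequency of 100 Hz, freq_to_sample_rate() yields
 * (1000000 / 100) * 1000 = 10000000, and sample_rate_to_freq() maps that
 * interval back to 1000000 * 1000 / 10000000 = 100 Hz.
 */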
#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
/* Return TOD timestamp contained in a trailer entry */
static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
{
/* TOD in STCKE format */
if (te->t)
return *((unsigned long long *) &te->timestamp[1]);
/* TOD in STCK format */
return *((unsigned long long *) &te->timestamp[0]);
}
/* Return pointer to trailer entry of a sample data block */
static inline unsigned long *trailer_entry_ptr(unsigned long v)
{
void *ret;
ret = (void *) v;
ret += PAGE_SIZE;
ret -= sizeof(struct hws_trailer_entry);
return (unsigned long *) ret;
}
/* Return whether the entry in the sample data block table (sdbt)
* is a link to the next sdbt */
static inline int is_link_entry(unsigned long *s)
{
return *s & 0x1ul ? 1 : 0;
}
/* Return pointer to the linked sdbt */
static inline unsigned long *get_next_sdbt(unsigned long *s)
{
return (unsigned long *) (*s & ~0x1ul);
}
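/*
 * Illustrative sketch (annotation): advance through a sample data block
 * table, chasing the link entry that chains one sdbt to the next.
 */
static inline unsigned long *__example_advance_sdbt(unsigned long *sdbt)
{
	if (is_link_entry(sdbt))
		return get_next_sdbt(sdbt);	/* jump to the linked table */
	return sdbt + 1;			/* next entry in this table */
}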
#endif /* _ASM_S390_CPU_MF_H */

View file

@@ -0,0 +1,172 @@
/*
* Copyright IBM Corp. 2004
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _S390_CPUTIME_H
#define _S390_CPUTIME_H
#include <linux/types.h>
#include <asm/div64.h>
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
typedef unsigned long long __nocast cputime_t;
typedef unsigned long long __nocast cputime64_t;
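/*
 * Worked example (annotation): one cputime tick is 2**-12 microseconds,
 * so 1 microsecond = 4096 ticks and 1 second = 1000000 * 4096 =
 * 4096000000 ticks -- hence the constant 4096000000ULL used by the
 * conversion helpers below.
 */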
#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = n >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
return rp.subreg.odd;
#else /* CONFIG_64BIT */
return n / base;
#endif /* CONFIG_64BIT */
}
#define cputime_one_jiffy jiffies_to_cputime(1)
/*
* Convert cputime to jiffies and back.
*/
static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
{
return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
}
static inline cputime_t jiffies_to_cputime(const unsigned int jif)
{
return (__force cputime_t)(jif * (4096000000ULL / HZ));
}
static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
{
unsigned long long jif = (__force unsigned long long) cputime;
do_div(jif, 4096000000ULL / HZ);
return jif;
}
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
return (__force cputime64_t)(jif * (4096000000ULL / HZ));
}
/*
* Convert cputime to microseconds and back.
*/
static inline unsigned int cputime_to_usecs(const cputime_t cputime)
{
return (__force unsigned long long) cputime >> 12;
}
static inline cputime_t usecs_to_cputime(const unsigned int m)
{
return (__force cputime_t)(m * 4096ULL);
}
#define usecs_to_cputime64(m) usecs_to_cputime(m)
/*
 * Convert cputime to seconds and back.
*/
static inline unsigned int cputime_to_secs(const cputime_t cputime)
{
return __div((__force unsigned long long) cputime, 2048000000) >> 1;
}
static inline cputime_t secs_to_cputime(const unsigned int s)
{
return (__force cputime_t)(s * 4096000000ULL);
}
/*
* Convert cputime to timespec and back.
*/
static inline cputime_t timespec_to_cputime(const struct timespec *value)
{
unsigned long long ret = value->tv_sec * 4096000000ULL;
return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
}
static inline void cputime_to_timespec(const cputime_t cputime,
struct timespec *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd;
#else
value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
value->tv_sec = __cputime / 4096000000ULL;
#endif
}
/*
* Convert cputime to timeval and back.
* Since cputime and timeval have the same resolution (microseconds)
* this is easy.
*/
static inline cputime_t timeval_to_cputime(const struct timeval *value)
{
unsigned long long ret = value->tv_sec * 4096000000ULL;
return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
}
static inline void cputime_to_timeval(const cputime_t cputime,
struct timeval *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd;
#else
value->tv_usec = (__cputime % 4096000000ULL) / 4096;
value->tv_sec = __cputime / 4096000000ULL;
#endif
}
/*
* Convert cputime to clock and back.
*/
static inline clock_t cputime_to_clock_t(cputime_t cputime)
{
unsigned long long clock = (__force unsigned long long) cputime;
do_div(clock, 4096000000ULL / USER_HZ);
return clock;
}
static inline cputime_t clock_t_to_cputime(unsigned long x)
{
return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
}
/*
* Convert cputime64 to clock.
*/
static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
{
unsigned long long clock = (__force unsigned long long) cputime;
do_div(clock, 4096000000ULL / USER_HZ);
return clock;
}
cputime64_t arch_cpu_idle_time(int cpu);
#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
#endif /* _S390_CPUTIME_H */

View file

@@ -0,0 +1,69 @@
/*
* Data definitions for channel report processing
* Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef _ASM_S390_CRW_H
#define _ASM_S390_CRW_H
#include <linux/types.h>
/*
* Channel Report Word
*/
struct crw {
__u32 res1 : 1; /* reserved zero */
__u32 slct : 1; /* solicited */
__u32 oflw : 1; /* overflow */
__u32 chn : 1; /* chained */
__u32 rsc : 4; /* reporting source code */
__u32 anc : 1; /* ancillary report */
__u32 res2 : 1; /* reserved zero */
__u32 erc : 6; /* error-recovery code */
__u32 rsid : 16; /* reporting-source ID */
} __attribute__ ((packed));
typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
extern int crw_register_handler(int rsc, crw_handler_t handler);
extern void crw_unregister_handler(int rsc);
extern void crw_handle_channel_report(void);
void crw_wait_for_channel_report(void);
#define NR_RSCS 16
#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
#define CRW_RSC_SCH 0x3 /* subchannel */
#define CRW_RSC_CPATH 0x4 /* channel path */
#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */
#define CRW_RSC_CSS 0xB /* channel subsystem */
#define CRW_ERC_EVENT 0x00 /* event information pending */
#define CRW_ERC_AVAIL 0x01 /* available */
#define CRW_ERC_INIT 0x02 /* initialized */
#define CRW_ERC_TERROR 0x03 /* temporary error */
#define CRW_ERC_IPARM 0x04 /* installed parm initialized */
#define CRW_ERC_TERM 0x05 /* terminal */
#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */
#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */
#define CRW_ERC_PMOD 0x08 /* installed parameters modified */
static inline int stcrw(struct crw *pcrw)
{
int ccode;
asm volatile(
" stcrw 0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode), "=m" (*pcrw)
: "a" (pcrw)
: "cc" );
return ccode;
}
#endif /* _ASM_S390_CRW_H */

View file

@@ -0,0 +1,38 @@
#ifndef _ASM_CSS_CHARS_H
#define _ASM_CSS_CHARS_H
#include <linux/types.h>
struct css_general_char {
u64 : 12;
u32 dynio : 1; /* bit 12 */
u32 : 4;
u32 eadm : 1; /* bit 17 */
u32 : 23;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 fcs : 1; /* bit 46 */
u32 : 1;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
u32 : 1;
u32 qebsm : 1; /* bit 58 */
u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 12;
u32 eadm_rf : 1; /* bit 80 */
u32 : 1;
u32 cib : 1; /* bit 82 */
u32 : 5;
u32 fcx : 1; /* bit 88 */
u32 : 19;
u32 alt_ssi : 1; /* bit 108 */
u32:1;
u32 narf:1; /* bit 110 */
} __packed;
extern struct css_general_char css_general_characteristics;
#endif

View file

@@ -0,0 +1,82 @@
/*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
#include <linux/bug.h>
#ifdef CONFIG_64BIT
# define __CTL_LOAD "lctlg"
# define __CTL_STORE "stctg"
#else
# define __CTL_LOAD "lctl"
# define __CTL_STORE "stctl"
#endif
#define __ctl_load(array, low, high) { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
__CTL_LOAD " %1,%2,%0\n" \
: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
}
#define __ctl_store(array, low, high) { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
__CTL_STORE " %1,%2,%0\n" \
: "=Q" (*(addrtype *)(&array)) \
: "i" (low), "i" (high)); \
}
static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
__ctl_store(reg, cr, cr);
reg |= 1UL << bit;
__ctl_load(reg, cr, cr);
}
static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
__ctl_store(reg, cr, cr);
reg &= ~(1UL << bit);
__ctl_load(reg, cr, cr);
}
void smp_ctl_set_bit(int cr, int bit);
void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
#ifdef CONFIG_64BIT
unsigned long : 32;
#endif
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
unsigned long : 23;
};
};
#ifdef CONFIG_SMP
# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif
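/*
 * Illustrative sketch (not part of the original header): fetch control
 * register 0 and decode it through union ctlreg0.
 */
static inline int __example_lap_enabled(void)
{
	union ctlreg0 cr0;

	__ctl_store(cr0.val, 0, 0);	/* store CR0 into cr0.val */
	return cr0.lap;			/* low-address protection enabled? */
}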
#endif /* __ASM_CTL_REG_H */

View file

@@ -0,0 +1,18 @@
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/current.h"
*/
#ifndef _S390_CURRENT_H
#define _S390_CURRENT_H
#include <asm/lowcore.h>
struct task_struct;
#define current ((struct task_struct *const)S390_lowcore.current_task)
#endif /* !(_S390_CURRENT_H) */

View file

@@ -0,0 +1,237 @@
/*
* S/390 debug facility
*
* Copyright IBM Corp. 1999, 2000
*/
#ifndef DEBUG_H
#define DEBUG_H
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <uapi/asm/debug.h>
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
/* the entry information */
typedef struct __debug_entry debug_entry_t;
struct debug_view;
typedef struct debug_info {
struct debug_info* next;
struct debug_info* prev;
atomic_t ref_count;
spinlock_t lock;
int level;
int nr_areas;
int pages_per_area;
int buf_size;
int entry_size;
debug_entry_t*** areas;
int active_area;
int *active_pages;
int *active_entries;
struct dentry* debugfs_root_entry;
struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
struct debug_view* views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
umode_t mode;
} debug_info_t;
typedef int (debug_header_proc_t) (debug_info_t* id,
struct debug_view* view,
int area,
debug_entry_t* entry,
char* out_buf);
typedef int (debug_format_proc_t) (debug_info_t* id,
struct debug_view* view, char* out_buf,
const char* in_buf);
typedef int (debug_prolog_proc_t) (debug_info_t* id,
struct debug_view* view,
char* out_buf);
typedef int (debug_input_proc_t) (debug_info_t* id,
struct debug_view* view,
struct file* file,
const char __user *user_buf,
size_t in_buf_size, loff_t* offset);
int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
int area, debug_entry_t* entry, char* out_buf);
struct debug_view {
char name[DEBUG_MAX_NAME_LEN];
debug_prolog_proc_t* prolog_proc;
debug_header_proc_t* header_proc;
debug_format_proc_t* format_proc;
debug_input_proc_t* input_proc;
void* private_data;
};
extern struct debug_view debug_hex_ascii_view;
extern struct debug_view debug_raw_view;
extern struct debug_view debug_sprintf_view;
/* do NOT use the _common functions */
debug_entry_t* debug_event_common(debug_info_t* id, int level,
const void* data, int length);
debug_entry_t* debug_exception_common(debug_info_t* id, int level,
const void* data, int length);
/* Debug Feature API: */
debug_info_t *debug_register(const char *name, int pages, int nr_areas,
int buf_size);
debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
int buf_size, umode_t mode, uid_t uid,
gid_t gid);
void debug_unregister(debug_info_t* id);
void debug_set_level(debug_info_t* id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
static inline bool debug_level_enabled(debug_info_t* id, int level)
{
return level <= id->level;
}
static inline debug_entry_t*
debug_event(debug_info_t* id, int level, void* data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_event_common(id,level,data,length);
}
static inline debug_entry_t*
debug_int_event(debug_info_t* id, int level, unsigned int tag)
{
unsigned int t=tag;
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_event_common(id,level,&t,sizeof(unsigned int));
}
static inline debug_entry_t *
debug_long_event (debug_info_t* id, int level, unsigned long tag)
{
unsigned long t=tag;
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_event_common(id,level,&t,sizeof(unsigned long));
}
static inline debug_entry_t*
debug_text_event(debug_info_t* id, int level, const char* txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_event_common(id,level,txt,strlen(txt));
}
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
debug_sprintf_event(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
static inline debug_entry_t*
debug_exception(debug_info_t* id, int level, void* data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_exception_common(id,level,data,length);
}
static inline debug_entry_t*
debug_int_exception(debug_info_t* id, int level, unsigned int tag)
{
unsigned int t=tag;
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_exception_common(id,level,&t,sizeof(unsigned int));
}
static inline debug_entry_t *
debug_long_exception (debug_info_t* id, int level, unsigned long tag)
{
unsigned long t=tag;
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_exception_common(id,level,&t,sizeof(unsigned long));
}
static inline debug_entry_t*
debug_text_exception(debug_info_t* id, int level, const char* txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
return debug_exception_common(id,level,txt,strlen(txt));
}
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
int debug_register_view(debug_info_t* id, struct debug_view* view);
int debug_unregister_view(debug_info_t* id, struct debug_view* view);
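/*
 * Illustrative sketch of the API above: register a debug feature (one
 * page per area, four areas, four bytes of data per entry), attach a
 * standard view, log an event and tear everything down again. Names and
 * values are examples only.
 */
static inline void __example_debug_usage(void)
{
	debug_info_t *id;

	id = debug_register("example", 1, 4, 4);
	if (!id)
		return;
	debug_register_view(id, &debug_hex_ascii_view);
	debug_int_event(id, 3, 4711);	/* level 3, integer tag */
	debug_unregister(id);
}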
/*
define the debug levels:
- 0 No debugging output to console or syslog
- 1 Log internal errors to syslog, ignore check conditions
- 2 Log internal errors and check conditions to syslog
- 3 Log internal errors to console, log check conditions to syslog
- 4 Log internal errors and check conditions to console
- 5 panic on internal errors, log check conditions to console
- 6 panic on both, internal errors and check conditions
*/
#ifndef DEBUG_LEVEL
#define DEBUG_LEVEL 4
#endif
#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
#if DEBUG_LEVEL > 0
#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
#else
#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#endif /* DEBUG_LEVEL */
#endif /* DEBUG_H */

View file

@@ -0,0 +1,24 @@
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/delay.h"
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/s390/lib/delay.c
*/
#ifndef _S390_DELAY_H
#define _S390_DELAY_H
void __ndelay(unsigned long long nsecs);
void __udelay(unsigned long long usecs);
void udelay_simple(unsigned long long usecs);
void __delay(unsigned long loops);
#define ndelay(n) __ndelay((unsigned long long) (n))
#define udelay(n) __udelay((unsigned long long) (n))
#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
#endif /* defined(_S390_DELAY_H) */

View file

@@ -0,0 +1,7 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>

View file

@@ -0,0 +1,52 @@
/*
* s390 diagnose functions
*
* Copyright IBM Corp. 2007
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
#ifndef _ASM_S390_DIAG_H
#define _ASM_S390_DIAG_H
/*
* Diagnose 10: Release page range
*/
static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
{
unsigned long start_addr, end_addr;
start_addr = start_pfn << PAGE_SHIFT;
end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
asm volatile(
"0: diag %0,%1,0x10\n"
"1:\n"
EX_TABLE(0b, 1b)
EX_TABLE(1b, 1b)
: : "a" (start_addr), "a" (end_addr));
}
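/*
 * Illustrative call (annotation): releasing a single guest page would be
 *	diag10_range(page_to_pfn(page), 1);
 * where "page" is a struct page the guest no longer needs.
 */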
/*
* Diagnose 14: Input spool file manipulation
*/
extern int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode);
/*
* Diagnose 210: Get information about a virtual device
*/
struct diag210 {
u16 vrdcdvno; /* device number (input) */
u16 vrdclen; /* data block length (input) */
u8 vrdcvcla; /* virtual device class (output) */
u8 vrdcvtyp; /* virtual device type (output) */
u8 vrdcvsta; /* virtual device status (output) */
u8 vrdcvfla; /* virtual device flags (output) */
u8 vrdcrccl; /* real device class (output) */
u8 vrdccrty; /* real device type (output) */
u8 vrdccrmd; /* real device model (output) */
u8 vrdccrft; /* real device feature (output) */
} __attribute__((packed, aligned(4)));
extern int diag210(struct diag210 *addr);
#endif /* _ASM_S390_DIAG_H */

View file

@@ -0,0 +1,53 @@
/*
* Disassemble s390 instructions.
*
* Copyright IBM Corp. 2007
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#ifndef __ASM_S390_DIS_H__
#define __ASM_S390_DIS_H__
/* Type of operand */
#define OPERAND_GPR 0x1 /* Operand printed as %rx */
#define OPERAND_FPR 0x2 /* Operand printed as %fx */
#define OPERAND_AR 0x4 /* Operand printed as %ax */
#define OPERAND_CR 0x8 /* Operand printed as %cx */
#define OPERAND_VR 0x10 /* Operand printed as %vx */
#define OPERAND_DISP 0x20 /* Operand printed as displacement */
#define OPERAND_BASE 0x40 /* Operand printed as base register */
#define OPERAND_INDEX 0x80 /* Operand printed as index register */
#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
struct s390_operand {
int bits; /* The number of bits in the operand. */
int shift; /* The number of bits to shift. */
int flags; /* One bit syntax flags. */
};
struct s390_insn {
const char name[5];
unsigned char opfrag;
unsigned char format;
};
static inline int insn_length(unsigned char code)
{
return ((((int) code + 64) >> 7) + 1) << 1;
}
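/*
 * Worked examples (annotation): the two high bits of the first opcode
 * byte encode the instruction length.
 *	code 0x07 (BR):   (((0x07 + 64) >> 7) + 1) << 1 = 2 bytes
 *	code 0x58 (L):    (((0x58 + 64) >> 7) + 1) << 1 = 4 bytes
 *	code 0xc0 (LARL): (((0xc0 + 64) >> 7) + 1) << 1 = 6 bytes
 */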
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
struct s390_insn *find_insn(unsigned char *code);
static inline int is_known_insn(unsigned char *code)
{
return !!find_insn(code);
}
#endif /* __ASM_S390_DIS_H__ */

View file

@@ -0,0 +1 @@
#include <asm-generic/div64.h>

View file

@@ -0,0 +1,90 @@
#ifndef _ASM_S390_DMA_MAPPING_H
#define _ASM_S390_DMA_MAPPING_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
extern struct dma_map_ops s390_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &s390_dma_ops;
}
extern int dma_set_mask(struct device *dev, u64 mask);
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#include <asm-generic/dma-mapping-common.h>
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (dma_ops->dma_supported == NULL)
return 1;
return dma_ops->dma_supported(dev, mask);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return 0;
return addr + size - 1 <= *dev->dma_mask;
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, dma_addr);
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
return dma_addr == DMA_ERROR_CODE;
}
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
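/*
 * Illustrative sketch (not part of the original header): allocate and
 * release a coherent DMA buffer with the helpers above. "dev" is assumed
 * to be a device backed by s390_dma_ops.
 */
static inline int __example_dma_roundtrip(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... the device works with "handle", the CPU with "buf" ... */
	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}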
#endif /* _ASM_S390_DMA_MAPPING_H */

View file

@@ -0,0 +1,19 @@
#ifndef _ASM_S390_DMA_H
#define _ASM_S390_DMA_H
#include <asm/io.h>
/*
* MAX_DMA_ADDRESS is ambiguous because on s390 its completely unrelated
* to DMA. It _is_ used for the s390 memory zone split at 2GB caused
* by the 31 bit heritage.
*/
#define MAX_DMA_ADDRESS 0x80000000
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_S390_DMA_H */

View file

@@ -0,0 +1,117 @@
#ifndef _ASM_S390_EADM_H
#define _ASM_S390_EADM_H
#include <linux/types.h>
#include <linux/device.h>
struct arqb {
u64 data;
u16 fmt:4;
u16:12;
u16 cmd_code;
u16:16;
u16 msb_count;
u32 reserved[12];
} __packed;
#define ARQB_CMD_MOVE 1
struct arsb {
u16 fmt:4;
u32:28;
u8 ef;
u8:8;
u8 ecbi;
u8:8;
u8 fvf;
u16:16;
u8 eqc;
u32:32;
u64 fail_msb;
u64 fail_aidaw;
u64 fail_ms;
u64 fail_scm;
u32 reserved[4];
} __packed;
#define EQC_WR_PROHIBIT 22
struct msb {
u8 fmt:4;
u8 oc:4;
u8 flags;
u16:12;
u16 bs:4;
u32 blk_count;
u64 data_addr;
u64 scm_addr;
u64:64;
} __packed;
struct aidaw {
u8 flags;
u32 :24;
u32 :32;
u64 data_addr;
} __packed;
#define MSB_OC_CLEAR 0
#define MSB_OC_READ 1
#define MSB_OC_WRITE 2
#define MSB_OC_RELEASE 3
#define MSB_FLAG_BNM 0x80
#define MSB_FLAG_IDA 0x40
#define MSB_BS_4K 0
#define MSB_BS_1M 1
#define AOB_NR_MSB 124
struct aob {
struct arqb request;
struct arsb response;
struct msb msb[AOB_NR_MSB];
} __packed __aligned(PAGE_SIZE);
struct aob_rq_header {
struct scm_device *scmdev;
char data[0];
};
struct scm_device {
u64 address;
u64 size;
unsigned int nr_max_block;
struct device dev;
struct {
unsigned int persistence:4;
unsigned int oper_state:4;
unsigned int data_state:4;
unsigned int rank:4;
unsigned int release:1;
unsigned int res_id:8;
} __packed attrs;
};
#define OP_STATE_GOOD 1
#define OP_STATE_TEMP_ERR 2
#define OP_STATE_PERM_ERR 3
enum scm_event {SCM_CHANGE, SCM_AVAIL};
struct scm_driver {
struct device_driver drv;
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
void (*notify) (struct scm_device *scmdev, enum scm_event event);
void (*handler) (struct scm_device *scmdev, void *data, int error);
};
int scm_driver_register(struct scm_driver *scmdrv);
void scm_driver_unregister(struct scm_driver *scmdrv);
int eadm_start_aob(struct aob *aob);
void scm_irq_handler(struct aob *aob, int error);
#endif /* _ASM_S390_EADM_H */

View file

@@ -0,0 +1,48 @@
/*
* EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _EBCDIC_H
#define _EBCDIC_H
#ifndef _S390_TYPES_H
#include <asm/types.h>
#endif
extern __u8 _ascebc_500[256]; /* ASCII -> EBCDIC 500 conversion table */
extern __u8 _ebcasc_500[256]; /* EBCDIC 500 -> ASCII conversion table */
extern __u8 _ascebc[256]; /* ASCII -> EBCDIC conversion table */
extern __u8 _ebcasc[256]; /* EBCDIC -> ASCII conversion table */
extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
static inline void
codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
{
if (nr-- <= 0)
return;
asm volatile(
" bras 1,1f\n"
" tr 0(1,%0),0(%2)\n"
"0: tr 0(256,%0),0(%2)\n"
" la %0,256(%0)\n"
"1: ahi %1,-256\n"
" jnm 0b\n"
" ex %1,0(1)"
: "+&a" (addr), "+&a" (nr)
: "a" (codepage) : "cc", "memory", "1");
}
#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
#define EBCASC(addr,nr) codepage_convert(_ebcasc, addr, nr)
#define ASCEBC_500(addr,nr) codepage_convert(_ascebc_500, addr, nr)
#define EBCASC_500(addr,nr) codepage_convert(_ebcasc_500, addr, nr)
#define EBC_TOLOWER(addr,nr) codepage_convert(_ebc_tolower, addr, nr)
#define EBC_TOUPPER(addr,nr) codepage_convert(_ebc_toupper, addr, nr)
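/*
 * Illustrative sketch (annotation): both directions convert in place, so
 * a round trip restores the original buffer contents.
 */
static inline void __example_ebcdic_roundtrip(__u8 *buf, unsigned long len)
{
	ASCEBC(buf, len);	/* ASCII -> EBCDIC, in place */
	EBCASC(buf, len);	/* EBCDIC -> ASCII, in place */
}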
#endif

arch/s390/include/asm/elf.h Normal file
View file

@@ -0,0 +1,231 @@
/*
* S390 version
*
* Derived from "include/asm-i386/elf.h"
*/
#ifndef __ASMS390_ELF_H
#define __ASMS390_ELF_H
/* s390 relocations defined by the ABIs */
#define R_390_NONE 0 /* No reloc. */
#define R_390_8 1 /* Direct 8 bit. */
#define R_390_12 2 /* Direct 12 bit. */
#define R_390_16 3 /* Direct 16 bit. */
#define R_390_32 4 /* Direct 32 bit. */
#define R_390_PC32 5 /* PC relative 32 bit. */
#define R_390_GOT12 6 /* 12 bit GOT offset. */
#define R_390_GOT32 7 /* 32 bit GOT offset. */
#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
#define R_390_COPY 9 /* Copy symbol at runtime. */
#define R_390_GLOB_DAT 10 /* Create GOT entry. */
#define R_390_JMP_SLOT 11 /* Create PLT entry. */
#define R_390_RELATIVE 12 /* Adjust by program base. */
#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
#define R_390_GOT16 15 /* 16 bit GOT offset. */
#define R_390_PC16 16 /* PC relative 16 bit. */
#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
#define R_390_64 22 /* Direct 64 bit. */
#define R_390_PC64 23 /* PC relative 64 bit. */
#define R_390_GOT64 24 /* 64 bit GOT offset. */
#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL 38 /* Tag for function call in general
dynamic TLS code. */
#define R_390_TLS_LDCALL 39 /* Tag for function call in local
dynamic TLS code. */
#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
thread local data. */
#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
thread local data. */
#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
block offset. */
#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
block offset. */
#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
block offset. */
#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
thread local data in LD code. */
#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
thread local data in LD code. */
#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
negated static TLS block offset. */
#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
negated static TLS block offset. */
#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
negated static TLS block offset. */
#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
static TLS block. */
#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
static TLS block. */
#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
block. */
#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
block. */
#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
block. */
#define R_390_20 57 /* Direct 20 bit. */
#define R_390_GOT20 58 /* 20 bit GOT offset. */
#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */
#define R_390_TLS_GOTIE20 60 /* 20 bit GOT offset for static TLS
block offset. */
/* Keep this the last entry. */
#define R_390_NUM 61
/* Bits present in AT_HWCAP. */
#define HWCAP_S390_ESAN3 1
#define HWCAP_S390_ZARCH 2
#define HWCAP_S390_STFLE 4
#define HWCAP_S390_MSA 8
#define HWCAP_S390_LDISP 16
#define HWCAP_S390_EIMM 32
#define HWCAP_S390_DFP 64
#define HWCAP_S390_HPAGE 128
#define HWCAP_S390_ETF3EH 256
#define HWCAP_S390_HIGH_GPRS 512
#define HWCAP_S390_TE 1024
#define HWCAP_S390_VXRS 2048
/*
* These are used to set parameters in the core dumps.
*/
#ifndef CONFIG_64BIT
#define ELF_CLASS ELFCLASS32
#else /* CONFIG_64BIT */
#define ELF_CLASS ELFCLASS64
#endif /* CONFIG_64BIT */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/syscall.h>
#include <asm/user.h>
typedef s390_fp_regs elf_fpregset_t;
typedef s390_regs elf_gregset_t;
typedef s390_fp_regs compat_elf_fpregset_t;
typedef s390_compat_regs compat_elf_gregset_t;
#include <linux/sched.h> /* for task_struct */
#include <asm/mmu_context.h>
#include <asm/vdso.h>
extern unsigned int vdso_enabled;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_elf_check_arch(x) \
(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_start_thread start_thread31
/* For SVR4/S390 the function pointer to be registered with `atexit` is
passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->gprs[14] = 0; \
} while (0)
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
extern unsigned long randomize_et_dyn(unsigned long base);
#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
extern unsigned long elf_hwcap;
#define ELF_HWCAP (elf_hwcap)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM_SIZE 8
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifndef CONFIG_COMPAT
#define SET_PERSONALITY(ex) \
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
current_thread_info()->sys_call_table = \
(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
current_thread_info()->sys_call_table = \
(unsigned long) &sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
current_thread_info()->sys_call_table = \
(unsigned long) &sys_call_table; \
} \
} while (0)
#endif /* CONFIG_COMPAT */
#define STACK_RND_MASK 0x7ffUL
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso_base); \
} while (0)
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
#endif

View file

@@ -0,0 +1,6 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */

arch/s390/include/asm/etr.h Normal file
View file

@@ -0,0 +1,256 @@
/*
* Copyright IBM Corp. 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#ifndef __S390_ETR_H
#define __S390_ETR_H
/* ETR attachment control register */
struct etr_eacr {
unsigned int e0 : 1; /* port 0 stepping control */
unsigned int e1 : 1; /* port 1 stepping control */
unsigned int _pad0 : 5; /* must be 00100 */
unsigned int dp : 1; /* data port control */
unsigned int p0 : 1; /* port 0 change recognition control */
unsigned int p1 : 1; /* port 1 change recognition control */
unsigned int _pad1 : 3; /* must be 000 */
unsigned int ea : 1; /* ETR alert control */
unsigned int es : 1; /* ETR sync check control */
unsigned int sl : 1; /* switch to local control */
} __attribute__ ((packed));
/* Port state returned by steai */
enum etr_psc {
etr_psc_operational = 0,
etr_psc_semi_operational = 1,
etr_psc_protocol_error = 4,
etr_psc_no_symbols = 8,
etr_psc_no_signal = 12,
etr_psc_pps_mode = 13
};
/* Logical port state returned by stetr */
enum etr_lpsc {
etr_lpsc_operational_step = 0,
etr_lpsc_operational_alt = 1,
etr_lpsc_semi_operational = 2,
etr_lpsc_protocol_error = 4,
etr_lpsc_no_symbol_sync = 8,
etr_lpsc_no_signal = 12,
etr_lpsc_pps_mode = 13
};
/* ETR status words */
struct etr_esw {
struct etr_eacr eacr; /* attachment control register */
unsigned int y : 1; /* stepping mode */
unsigned int _pad0 : 5; /* must be 00000 */
unsigned int p : 1; /* stepping port number */
unsigned int q : 1; /* data port number */
unsigned int psc0 : 4; /* port 0 state code */
unsigned int psc1 : 4; /* port 1 state code */
} __attribute__ ((packed));
/* Second level data register status word */
struct etr_slsw {
unsigned int vv1 : 1; /* copy of validity bit data frame 1 */
unsigned int vv2 : 1; /* copy of validity bit data frame 2 */
unsigned int vv3 : 1; /* copy of validity bit data frame 3 */
unsigned int vv4 : 1; /* copy of validity bit data frame 4 */
unsigned int _pad0 : 19; /* must by all zeroes */
unsigned int n : 1; /* EAF port number */
unsigned int v1 : 1; /* validity bit ETR data frame 1 */
unsigned int v2 : 1; /* validity bit ETR data frame 2 */
unsigned int v3 : 1; /* validity bit ETR data frame 3 */
unsigned int v4 : 1; /* validity bit ETR data frame 4 */
unsigned int _pad1 : 4; /* must be 0000 */
} __attribute__ ((packed));
/* ETR data frames */
struct etr_edf1 {
unsigned int u : 1; /* untuned bit */
unsigned int _pad0 : 1; /* must be 0 */
unsigned int r : 1; /* service request bit */
unsigned int _pad1 : 4; /* must be 0000 */
unsigned int a : 1; /* time adjustment bit */
unsigned int net_id : 8; /* ETR network id */
unsigned int etr_id : 8; /* id of ETR which sends data frames */
unsigned int etr_pn : 8; /* port number of ETR output port */
} __attribute__ ((packed));
struct etr_edf2 {
unsigned int etv : 32; /* Upper 32 bits of TOD. */
} __attribute__ ((packed));
struct etr_edf3 {
unsigned int rc : 8; /* failure reason code */
unsigned int _pad0 : 3; /* must be 000 */
unsigned int c : 1; /* ETR coupled bit */
unsigned int tc : 4; /* ETR type code */
unsigned int blto : 8; /* biased local time offset */
/* (blto - 128) * 15 = minutes */
unsigned int buo : 8; /* biased utc offset */
/* (buo - 128) = leap seconds */
} __attribute__ ((packed));
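/*
 * Example: a blto value of 132 encodes (132 - 128) * 15 = +60 minutes
 * of local time offset; a buo value of 153 encodes 153 - 128 = 25
 * leap seconds.
 */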
struct etr_edf4 {
unsigned int ed : 8; /* ETS device dependent data */
unsigned int _pad0 : 1; /* must be 0 */
unsigned int buc : 5; /* biased ut1 correction */
/* (buc - 16) * 0.1 seconds */
unsigned int em : 6; /* ETS error magnitude */
unsigned int dc : 6; /* ETS drift code */
unsigned int sc : 6; /* ETS steering code */
} __attribute__ ((packed));
/*
* ETR attachment information block, two formats
* format 1 has 4 reserved words with a size of 64 bytes
* format 2 has 16 reserved words with a size of 96 bytes
*/
struct etr_aib {
struct etr_esw esw;
struct etr_slsw slsw;
unsigned long long tsp;
struct etr_edf1 edf1;
struct etr_edf2 edf2;
struct etr_edf3 edf3;
struct etr_edf4 edf4;
unsigned int reserved[16];
} __attribute__ ((packed,aligned(8)));
/* ETR interruption parameter */
struct etr_irq_parm {
unsigned int _pad0 : 8;
unsigned int pc0 : 1; /* port 0 state change */
unsigned int pc1 : 1; /* port 1 state change */
unsigned int _pad1 : 3;
unsigned int eai : 1; /* ETR alert indication */
unsigned int _pad2 : 18;
} __attribute__ ((packed));
/* Query TOD offset result */
struct etr_ptff_qto {
unsigned long long physical_clock;
unsigned long long tod_offset;
unsigned long long logical_tod_offset;
unsigned long long tod_epoch_difference;
} __attribute__ ((packed));
/* Inline assembly helper functions */
static inline int etr_setr(struct etr_eacr *ctrl)
{
int rc = -EOPNOTSUPP;
asm volatile(
" .insn s,0xb2160000,%1\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) : "Q" (*ctrl));
return rc;
}
/* Stores a format 1 aib with 64 bytes */
static inline int etr_stetr(struct etr_aib *aib)
{
int rc = -EOPNOTSUPP;
asm volatile(
" .insn s,0xb2170000,%1\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) : "Q" (*aib));
return rc;
}
/* Stores a format 2 aib with 96 bytes for specified port */
static inline int etr_steai(struct etr_aib *aib, unsigned int func)
{
register unsigned int reg0 asm("0") = func;
int rc = -EOPNOTSUPP;
asm volatile(
" .insn s,0xb2b30000,%1\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) : "Q" (*aib), "d" (reg0));
return rc;
}
/* Function codes for the steai instruction. */
#define ETR_STEAI_STEPPING_PORT 0x10
#define ETR_STEAI_ALTERNATE_PORT 0x11
#define ETR_STEAI_PORT_0 0x12
#define ETR_STEAI_PORT_1 0x13
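/*
 * Usage sketch (illustrative, not part of the original header): store
 * the format 2 aib for the current stepping port and check for success.
 *
 *	struct etr_aib aib;
 *
 *	if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
 *		... aib.esw, aib.edf1, ... are valid ...
 */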
static inline int etr_ptff(void *ptff_block, unsigned int func)
{
register unsigned int reg0 asm("0") = func;
register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
int rc = -EOPNOTSUPP;
asm volatile(
" .word 0x0104\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "=m" (ptff_block)
: "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc");
return rc;
}
/* Function codes for the ptff instruction. */
#define ETR_PTFF_QAF 0x00 /* query available functions */
#define ETR_PTFF_QTO 0x01 /* query tod offset */
#define ETR_PTFF_QSI 0x02 /* query steering information */
#define ETR_PTFF_ATO 0x40 /* adjust tod offset */
#define ETR_PTFF_STO 0x41 /* set tod offset */
#define ETR_PTFF_SFS 0x42 /* set fine steering rate */
#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
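/*
 * Usage sketch (illustrative): query the TOD offset with the QTO
 * function code; on success the ptff block has been filled in.
 *
 *	struct etr_ptff_qto qto;
 *
 *	if (etr_ptff(&qto, ETR_PTFF_QTO) == 0)
 *		... qto.tod_offset holds the current TOD offset ...
 */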
/* Functions needed by the machine check handler */
void etr_switch_to_local(void);
void etr_sync_check(void);
/* STP interruption parameter */
struct stp_irq_parm {
unsigned int _pad0 : 14;
unsigned int tsc : 1; /* Timing status change */
unsigned int lac : 1; /* Link availability change */
unsigned int tcpc : 1; /* Time control parameter change */
unsigned int _pad2 : 15;
} __attribute__ ((packed));
#define STP_OP_SYNC 1
#define STP_OP_CTRL 3
struct stp_sstpi {
unsigned int rsvd0;
unsigned int rsvd1 : 8;
unsigned int stratum : 8;
unsigned int vbits : 16;
unsigned int leaps : 16;
unsigned int tmd : 4;
unsigned int ctn : 4;
unsigned int rsvd2 : 3;
unsigned int c : 1;
unsigned int tst : 4;
unsigned int tzo : 16;
unsigned int dsto : 16;
unsigned int ctrl : 16;
unsigned int rsvd3 : 16;
unsigned int tto;
unsigned int rsvd4;
unsigned int ctnid[3];
unsigned int rsvd5;
unsigned int todoff[4];
unsigned int rsvd6[48];
} __attribute__ ((packed));
/* Functions needed by the machine check handler */
void stp_sync_check(void);
void stp_island_check(void);
#endif /* __S390_ETR_H */

View file

@ -0,0 +1,12 @@
/*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_EXEC_H
#define __ASM_EXEC_H
extern unsigned long arch_align_stack(unsigned long sp);
#endif /* __ASM_EXEC_H */

View file

@ -0,0 +1,31 @@
/*
* definitions for external memory segment support
* Copyright IBM Corp. 2003
*/
#ifndef _ASM_S390X_DCSS_H
#define _ASM_S390X_DCSS_H
#ifndef __ASSEMBLY__
/* possible values for segment type as returned by segment_info */
#define SEG_TYPE_SW 0
#define SEG_TYPE_EW 1
#define SEG_TYPE_SR 2
#define SEG_TYPE_ER 3
#define SEG_TYPE_SN 4
#define SEG_TYPE_EN 5
#define SEG_TYPE_SC 6
#define SEG_TYPE_EWEN 7
#define SEGMENT_SHARED 0
#define SEGMENT_EXCLUSIVE 1
int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
void segment_unload(char *name);
void segment_save(char *name);
int segment_type (char* name);
int segment_modify_shared (char *name, int do_nonshared);
void segment_warning(int rc, char *seg_name);
#endif
#endif

View file

@ -0,0 +1,67 @@
/*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_FACILITY_H
#define __ASM_FACILITY_H
#include <linux/string.h>
#include <linux/preempt.h>
#include <asm/lowcore.h>
#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
static inline int __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
if (nr >= MAX_FACILITY_BIT)
return 0;
ptr = (unsigned char *) facilities + (nr >> 3);
return (*ptr & (0x80 >> (nr & 7))) != 0;
}
/*
* The test_facility function uses the bit ordering where the MSB is bit 0.
* That makes it easier to query facility bits with the bit number as
* documented in the Principles of Operation.
*/
static inline int test_facility(unsigned long nr)
{
return __test_facility(nr, &S390_lowcore.stfle_fac_list);
}
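/*
 * Usage sketch (illustrative; the facility number is an arbitrary
 * example): facility bits are numbered as in the Principles of
 * Operation, with bit 0 being the leftmost bit of the list.
 *
 *	if (test_facility(76))
 *		... the machine provides the facility ...
 */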
/**
* stfle - Store facility list extended
* @stfle_fac_list: array where facility list can be stored
* @size: size of passed in array in double words
*/
static inline void stfle(u64 *stfle_fac_list, int size)
{
unsigned long nr;
preempt_disable();
asm volatile(
" .insn s,0xb2b10000,0(0)\n" /* stfl */
"0:\n"
EX_TABLE(0b, 0b)
: "+m" (S390_lowcore.stfl_fac_list));
nr = 4; /* bytes stored by stfl */
memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
if (S390_lowcore.stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
register unsigned long reg0 asm("0") = size - 1;
asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
: "+d" (reg0)
: "a" (stfle_fac_list)
: "memory", "cc");
nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
}
memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
preempt_enable();
}
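/*
 * Usage sketch (illustrative): store up to four doublewords of
 * facility bits into a local buffer.
 *
 *	u64 fac_list[4];
 *
 *	stfle(fac_list, 4);
 */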
#endif /* __ASM_FACILITY_H */

View file

@ -0,0 +1,12 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#define fb_pgprotect(...) do {} while (0)
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */

311
arch/s390/include/asm/fcx.h Normal file
View file

@ -0,0 +1,311 @@
/*
* Functions for assembling fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef _ASM_S390_FCX_H
#define _ASM_S390_FCX_H
#include <linux/types.h>
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
/**
* struct tcw - Transport Control Word (TCW)
* @format: TCW format
* @flags: TCW flags
* @tccbl: Transport-Command-Control-Block Length
* @r: Read Operations
* @w: Write Operations
* @output: Output-Data Address
* @input: Input-Data Address
* @tsb: Transport-Status-Block Address
* @tccb: Transport-Command-Control-Block Address
* @output_count: Output Count
* @input_count: Input Count
* @intrg: Interrogate TCW Address
*/
struct tcw {
u32 format:2;
u32 :6;
u32 flags:24;
u32 :8;
u32 tccbl:6;
u32 r:1;
u32 w:1;
u32 :16;
u64 output;
u64 input;
u64 tsb;
u64 tccb;
u32 output_count;
u32 input_count;
u32 :32;
u32 :32;
u32 :32;
u32 intrg;
} __attribute__ ((packed, aligned(64)));
#define TIDAW_FLAGS_LAST (1 << (7 - 0))
#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
/**
* struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
* @flags: TIDAW flags. Can be an arithmetic OR of the following constants:
* %TIDAW_FLAGS_LAST, %TIDAW_FLAGS_SKIP, %TIDAW_FLAGS_DATA_INT,
* %TIDAW_FLAGS_TTIC, %TIDAW_FLAGS_INSERT_CBC
* @count: Count
* @addr: Address
*/
struct tidaw {
u32 flags:8;
u32 :24;
u32 count;
u64 addr;
} __attribute__ ((packed, aligned(16)));
/**
* struct tsa_iostat - I/O-Status Transport-Status Area (IO-Stat TSA)
* @dev_time: Device Time
* @def_time: Defer Time
* @queue_time: Queue Time
* @dev_busy_time: Device-Busy Time
* @dev_act_time: Device-Active-Only Time
* @sense: Sense Data (if present)
*/
struct tsa_iostat {
u32 dev_time;
u32 def_time;
u32 queue_time;
u32 dev_busy_time;
u32 dev_act_time;
u8 sense[32];
} __attribute__ ((packed));
/**
* struct tsa_ddpc - Device-Detected-Program-Check Transport-Status Area (DDPC TSA)
* @rc: Reason Code
* @rcq: Reason Code Qualifier
* @sense: Sense Data (if present)
*/
struct tsa_ddpc {
u32 :24;
u32 rc:8;
u8 rcq[16];
u8 sense[32];
} __attribute__ ((packed));
#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
/**
* struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
* @format: Format
* @flags: Flags. Can be an arithmetic OR of the following constants:
* %TSA_INTRG_FLAGS_CU_STATE_VALID, %TSA_INTRG_FLAGS_DEV_STATE_VALID,
* %TSA_INTRG_FLAGS_OP_STATE_VALID
* @cu_state: Control-Unit State
* @dev_state: Device State
* @op_state: Operation State
* @sd_info: State-Dependent Information
* @dl_id: Device-Level Identifier
* @dd_data: Device-Dependent Data
*/
struct tsa_intrg {
u32 format:8;
u32 flags:8;
u32 cu_state:8;
u32 dev_state:8;
u32 op_state:8;
u32 :24;
u8 sd_info[12];
u32 dl_id;
u8 dd_data[28];
} __attribute__ ((packed));
#define TSB_FORMAT_NONE 0
#define TSB_FORMAT_IOSTAT 1
#define TSB_FORMAT_DDPC 2
#define TSB_FORMAT_INTRG 3
#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
#define TSB_FLAGS_FORMAT(x) ((x) & 7)
#define TSB_FORMAT(t) ((t)->flags & 7)
/**
* struct tsb - Transport-Status Block (TSB)
* @length: Length
* @flags: Flags. Can be an arithmetic OR of the following constants:
* %TSB_FLAGS_DCW_OFFSET_VALID, %TSB_FLAGS_COUNT_VALID, %TSB_FLAGS_CACHE_MISS,
* %TSB_FLAGS_TIME_VALID
* @dcw_offset: DCW Offset
* @count: Count
* @tsa: Transport-Status-Area
*/
struct tsb {
u32 length:8;
u32 flags:8;
u32 dcw_offset:16;
u32 count;
u32 :32;
union {
struct tsa_iostat iostat;
struct tsa_ddpc ddpc;
struct tsa_intrg intrg;
} __attribute__ ((packed)) tsa;
} __attribute__ ((packed, aligned(8)));
#define DCW_INTRG_FORMAT_DEFAULT 0
#define DCW_INTRG_RC_UNSPECIFIED 0
#define DCW_INTRG_RC_TIMEOUT 1
#define DCW_INTRG_RCQ_UNSPECIFIED 0
#define DCW_INTRG_RCQ_PRIMARY 1
#define DCW_INTRG_RCQ_SECONDARY 2
#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
/**
* struct dcw_intrg_data - Interrogate DCW data
* @format: Format. Should be %DCW_INTRG_FORMAT_DEFAULT
* @rc: Reason Code. Can be one of %DCW_INTRG_RC_UNSPECIFIED,
* %DCW_INTRG_RC_TIMEOUT
* @rcq: Reason Code Qualifier: Can be one of %DCW_INTRG_RCQ_UNSPECIFIED,
* %DCW_INTRG_RCQ_PRIMARY, %DCW_INTRG_RCQ_SECONDARY
* @lpm: Logical-Path Mask
* @pam: Path-Available Mask
* @pim: Path-Installed Mask
* @timeout: Timeout
* @flags: Flags. Can be an arithmetic OR of %DCW_INTRG_FLAGS_MPM,
* %DCW_INTRG_FLAGS_PPR, %DCW_INTRG_FLAGS_CRIT
* @time: Time
* @prog_id: Program Identifier
* @prog_data: Program-Dependent Data
*/
struct dcw_intrg_data {
u32 format:8;
u32 rc:8;
u32 rcq:8;
u32 lpm:8;
u32 pam:8;
u32 pim:8;
u32 timeout:16;
u32 flags:8;
u32 :24;
u32 :32;
u64 time;
u64 prog_id;
u8 prog_data[0];
} __attribute__ ((packed));
#define DCW_FLAGS_CC (1 << (7 - 1))
#define DCW_CMD_WRITE 0x01
#define DCW_CMD_READ 0x02
#define DCW_CMD_CONTROL 0x03
#define DCW_CMD_SENSE 0x04
#define DCW_CMD_SENSE_ID 0xe4
#define DCW_CMD_INTRG 0x40
/**
* struct dcw - Device-Command Word (DCW)
* @cmd: Command Code. Can be one of %DCW_CMD_WRITE, %DCW_CMD_READ,
* %DCW_CMD_CONTROL, %DCW_CMD_SENSE, %DCW_CMD_SENSE_ID, %DCW_CMD_INTRG
* @flags: Flags. Can be an arithmetic OR of %DCW_FLAGS_CC
* @cd_count: Control-Data Count
* @count: Count
* @cd: Control Data
*/
struct dcw {
u32 cmd:8;
u32 flags:8;
u32 :8;
u32 cd_count:8;
u32 count;
u8 cd[0];
} __attribute__ ((packed));
#define TCCB_FORMAT_DEFAULT 0x7f
#define TCCB_MAX_DCW 30
#define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \
TCCB_MAX_DCW * sizeof(struct dcw) + \
sizeof(struct tccb_tcat))
#define TCCB_SAC_DEFAULT 0x1ffe
#define TCCB_SAC_INTRG 0x1fff
/**
* struct tccb_tcah - Transport-Command-Area Header (TCAH)
* @format: Format. Should be %TCCB_FORMAT_DEFAULT
* @tcal: Transport-Command-Area Length
* @sac: Service-Action Code. Can be one of %TCCB_SAC_DEFAULT, %TCCB_SAC_INTRG
* @prio: Priority
*/
struct tccb_tcah {
u32 format:8;
u32 :24;
u32 :24;
u32 tcal:8;
u32 sac:16;
u32 :8;
u32 prio:8;
u32 :32;
} __attribute__ ((packed));
/**
* struct tccb_tcat - Transport-Command-Area Trailer (TCAT)
* @count: Transport Count
*/
struct tccb_tcat {
u32 :32;
u32 count;
} __attribute__ ((packed));
/**
* struct tccb - (partial) Transport-Command-Control Block (TCCB)
* @tcah: TCAH
* @tca: Transport-Command Area
*/
struct tccb {
struct tccb_tcah tcah;
u8 tca[0];
} __attribute__ ((packed, aligned(8)));
struct tcw *tcw_get_intrg(struct tcw *tcw);
void *tcw_get_data(struct tcw *tcw);
struct tccb *tcw_get_tccb(struct tcw *tcw);
struct tsb *tcw_get_tsb(struct tcw *tcw);
void tcw_init(struct tcw *tcw, int r, int w);
void tcw_finalize(struct tcw *tcw, int num_tidaws);
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw);
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal);
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb);
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb);
void tccb_init(struct tccb *tccb, size_t tccb_size, u32 sac);
void tsb_init(struct tsb *tsb);
struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
void *cd, u8 cd_count, u32 count);
struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
void *addr, u32 count);
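/*
 * Usage sketch (illustrative ordering only; buffer allocation and
 * error handling omitted): build a read TCW with a single DCW using
 * the helpers declared above.
 *
 *	tcw_init(tcw, 1, 0);
 *	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
 *	tcw_set_tccb(tcw, tccb);
 *	tccb_add_dcw(tccb, TCCB_MAX_SIZE, DCW_CMD_READ, 0, NULL, 0, count);
 *	tcw_set_data(tcw, data, 0);
 *	tsb_init(tsb);
 *	tcw_set_tsb(tcw, tsb);
 *	tcw_finalize(tcw, 0);
 */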
#endif /* _ASM_S390_FCX_H */

View file

@ -0,0 +1,25 @@
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
#ifndef __ASSEMBLY__
extern void _mcount(void);
extern char ftrace_graph_caller_end;
struct dyn_arch_ftrace { };
#define MCOUNT_ADDR ((long)_mcount)
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
#endif /* __ASSEMBLY__ */
#define MCOUNT_INSN_SIZE 18
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif /* _ASM_S390_FTRACE_H */

View file

@ -0,0 +1,96 @@
#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H
#include <linux/uaccess.h>
#include <linux/futex.h>
#include <asm/mmu_context.h>
#include <asm/errno.h>
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
asm volatile( \
" sacf 256\n" \
"0: l %1,0(%6)\n" \
"1:"insn \
"2: cs %1,%2,0(%6)\n" \
"3: jl 1b\n" \
" lhi %0,0\n" \
"4: sacf 768\n" \
EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, newval, ret;
load_kernel_asce();
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("lr %2,%1\nar %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("lr %2,%1\nor %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
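/*
 * Layout of encoded_op, as decoded above: bit 31 is the
 * FUTEX_OP_OPARG_SHIFT flag, bits 30-28 the operation, bits 27-24 the
 * comparison, bits 23-12 a signed 12-bit oparg and bits 11-0 a signed
 * 12-bit cmparg. The shift pairs ((encoded_op << 8) >> 20 and
 * (encoded_op << 20) >> 20) extract the two arguments with sign
 * extension.
 */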
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret;
load_kernel_asce();
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
"1: la %0,0\n"
"2: sacf 768\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory");
*uval = oldval;
return ret;
}
#endif /* _ASM_S390_FUTEX_H */

View file

@ -0,0 +1,26 @@
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
* Derived from "include/asm-i386/hardirq.h"
*/
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <asm/lowcore.h>
#define local_softirq_pending() (S390_lowcore.softirq_pending)
#define __ARCH_IRQ_STAT
#define __ARCH_HAS_DO_SOFTIRQ
#define __ARCH_IRQ_EXIT_IRQS_DISABLED
static inline void ack_bad_irq(unsigned int irq)
{
printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif /* __ASM_HARDIRQ_H */

View file

@ -0,0 +1,115 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
* Copyright IBM Corp. 2008
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H
#include <asm/page.h>
#include <asm/pgtable.h>
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range free_pgd_range
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
#define hugetlb_prefault_arch_hook(mm) do { } while (0)
#define arch_clear_hugepage_flags(page) do { } while (0)
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
int changed = !pte_same(huge_ptep_get(ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
return mk_pte(page, pgprot);
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline int huge_pte_write(pte_t pte)
{
return pte_write(pte);
}
static inline int huge_pte_dirty(pte_t pte)
{
return pte_dirty(pte);
}
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
return pte_mkwrite(pte);
}
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
return pte_mkdirty(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
return pte_modify(pte, newprot);
}
#endif /* _ASM_S390_HUGETLB_H */

View file

@ -0,0 +1,11 @@
#ifndef _HW_IRQ_H
#define _HW_IRQ_H
#include <linux/msi.h>
#include <linux/pci.h>
void __init init_airq_interrupts(void);
void __init init_cio_interrupts(void);
void __init init_ext_interrupts(void);
#endif

View file

@ -0,0 +1,248 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 2000
*
* History of changes
* 07/24/00 new file
* 05/04/02 code restructuring.
*/
#ifndef _S390_IDALS_H
#define _S390_IDALS_H
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <asm/uaccess.h>
#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k, 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k, 12 for 4k */
#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
/*
* Test if an address/length pair needs an idal list.
*/
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#ifdef CONFIG_64BIT
return ((__pa(vaddr) + length - 1) >> 31) != 0;
#else
return 0;
#endif
}
/*
* Return the number of idal words needed for an address/length pair.
*/
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
}
/*
* Create the list of idal words for an address/length pair.
*/
static inline unsigned long *idal_create_words(unsigned long *idaws,
void *vaddr, unsigned int length)
{
unsigned long paddr;
unsigned int cidaw;
paddr = __pa(vaddr);
cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
*idaws++ = paddr;
paddr &= -IDA_BLOCK_SIZE;
while (--cidaw > 0) {
paddr += IDA_BLOCK_SIZE;
*idaws++ = paddr;
}
return idaws;
}
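/*
 * Worked example (illustrative): with 4k IDA blocks, a 10000 byte
 * buffer that starts 100 bytes into a block needs
 * (100 + 10000 + 4095) >> 12 = 3 idal words. On 64 bit an idal list
 * is only needed at all if the buffer ends at or above 2G.
 */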
/*
* Sets the address of the data in CCW.
* If necessary it allocates an IDAL and sets the appropriate flags.
*/
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
#ifdef CONFIG_64BIT
unsigned int nridaws;
unsigned long *idal;
if (ccw->flags & CCW_FLAG_IDA)
return -EINVAL;
nridaws = idal_nr_words(vaddr, ccw->count);
if (nridaws > 0) {
idal = kmalloc(nridaws * sizeof(unsigned long),
GFP_ATOMIC | GFP_DMA );
if (idal == NULL)
return -ENOMEM;
idal_create_words(idal, vaddr, ccw->count);
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
#endif
ccw->cda = (__u32)(unsigned long) vaddr;
return 0;
}
/*
* Releases any allocated IDAL related to the CCW.
*/
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
#ifdef CONFIG_64BIT
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
}
#endif
ccw->cda = 0;
}
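/*
 * Usage sketch (illustrative): the two helpers above are used as a
 * pair around the lifetime of a channel program.
 *
 *	if (set_normalized_cda(ccw, buffer))
 *		... allocation failed or the CCW already uses an IDAL ...
 *	... start I/O and wait for completion ...
 *	clear_normalized_cda(ccw);
 */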
/*
* Idal buffer extension
*/
struct idal_buffer {
size_t size;
size_t page_order;
void *data[0];
};
/*
* Allocate an idal buffer
*/
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
struct idal_buffer *ib;
int nr_chunks, nr_ptrs, i;
nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
GFP_DMA | GFP_KERNEL);
if (ib == NULL)
return ERR_PTR(-ENOMEM);
ib->size = size;
ib->page_order = page_order;
for (i = 0; i < nr_ptrs; i++) {
if ((i & (nr_chunks - 1)) != 0) {
ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
continue;
}
ib->data[i] = (void *)
__get_free_pages(GFP_KERNEL, page_order);
if (ib->data[i] != NULL)
continue;
/* Not enough memory */
while (i >= nr_chunks) {
i -= nr_chunks;
free_pages((unsigned long) ib->data[i],
ib->page_order);
}
kfree(ib);
return ERR_PTR(-ENOMEM);
}
return ib;
}
/*
* Free an idal buffer.
*/
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
int nr_chunks, nr_ptrs, i;
nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
for (i = 0; i < nr_ptrs; i += nr_chunks)
free_pages((unsigned long) ib->data[i], ib->page_order);
kfree(ib);
}
/*
* Test if an idal list is really needed.
*/
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef CONFIG_64BIT
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
#else
return ib->size > (4096ul << ib->page_order);
#endif
}
/*
* Set channel data address to idal buffer.
*/
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
if (__idal_buffer_is_needed(ib)) {
/* setup idals */
ccw->cda = (u32)(addr_t) ib->data;
ccw->flags |= CCW_FLAG_IDA;
} else
/* we do not need idals - use direct addressing */
ccw->cda = (u32)(addr_t) ib->data[0];
ccw->count = ib->size;
}
/*
* Copy count bytes from an idal buffer to user memory
*/
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
size_t left;
int i;
BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
to = (void __user *) to + IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
return copy_to_user(to, ib->data[i], count);
}
/*
* Copy count bytes from user memory to an idal buffer
*/
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
size_t left;
int i;
BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
from = (void __user *) from + IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
return copy_from_user(ib->data[i], from, count);
}
#endif

View file

@ -0,0 +1,26 @@
/*
* Copyright IBM Corp. 2014
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _S390_IDLE_H
#define _S390_IDLE_H
#include <linux/types.h>
#include <linux/device.h>
struct s390_idle_data {
unsigned int sequence;
unsigned long long idle_count;
unsigned long long idle_time;
unsigned long long clock_idle_enter;
unsigned long long clock_idle_exit;
unsigned long long timer_idle_enter;
unsigned long long timer_idle_exit;
};
extern struct device_attribute dev_attr_idle_count;
extern struct device_attribute dev_attr_idle_time_us;
#endif /* _S390_IDLE_H */

View file

@ -0,0 +1,72 @@
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/io.h"
*/
#ifndef _S390_IO_H
#define _S390_IO_H
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/pci_io.h>
void *xlate_dev_mem_ptr(unsigned long phys);
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#define IO_SPACE_LIMIT 0
#ifdef CONFIG_PCI
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
}
static inline void iounmap(volatile void __iomem *addr)
{
}
/*
* s390 needs a private implementation of pci_iomap since ioremap with its
* offset parameter isn't sufficient. That's because BAR spaces are not
* disjunctive on s390 so we need the bar parameter of pci_iomap to find
* the corresponding device and create the mapping cookie.
*/
#define pci_iomap pci_iomap
#define pci_iounmap pci_iounmap
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
#define __raw_readb zpci_read_u8
#define __raw_readw zpci_read_u16
#define __raw_readl zpci_read_u32
#define __raw_readq zpci_read_u64
#define __raw_writeb zpci_write_u8
#define __raw_writew zpci_write_u16
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>
#endif

182
arch/s390/include/asm/ipl.h Normal file
View file

@ -0,0 +1,182 @@
/*
* s390 (re)ipl support
*
* Copyright IBM Corp. 2007
*/
#ifndef _ASM_S390_IPL_H
#define _ASM_S390_IPL_H
#include <asm/lowcore.h>
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
#define IPL_PARMBLOCK_ORIGIN 0x2000
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_fcp))
#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_ccw))
#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
#define IPL_MAX_SUPPORTED_VERSION (0)
#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
IPL_PARMBLOCK_ORIGIN)
#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
struct ipl_list_hdr {
u32 len;
u8 reserved1[3];
u8 version;
u32 blk0_len;
u8 pbt;
u8 flags;
u16 reserved2;
u8 loadparm[8];
} __attribute__((packed));
struct ipl_block_fcp {
u8 reserved1[305-1];
u8 opt;
u8 reserved2[3];
u16 reserved3;
u16 devno;
u8 reserved4[4];
u64 wwpn;
u64 lun;
u32 bootprog;
u8 reserved5[12];
u64 br_lba;
u32 scp_data_len;
u8 reserved6[260];
u8 scp_data[];
} __attribute__((packed));
#define DIAG308_VMPARM_SIZE 64
#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
offsetof(struct ipl_block_fcp, scp_data)))
struct ipl_block_ccw {
u8 reserved1[84];
u8 reserved2[2];
u16 devno;
u8 vm_flags;
u8 reserved3[3];
u32 vm_parm_len;
u8 nss_name[8];
u8 vm_parm[DIAG308_VMPARM_SIZE];
u8 reserved4[8];
} __attribute__((packed));
struct ipl_parameter_block {
struct ipl_list_hdr hdr;
union {
struct ipl_block_fcp fcp;
struct ipl_block_ccw ccw;
} ipl_info;
} __attribute__((packed,aligned(4096)));
/*
* IPL validity flags
*/
extern u32 ipl_flags;
extern u32 dump_prefix_page;
struct dump_save_areas {
struct save_area_ext **areas;
int count;
};
extern struct dump_save_areas dump_save_areas;
struct save_area_ext *dump_save_area_create(int cpu);
extern void do_reipl(void);
extern void do_halt(void);
extern void do_poff(void);
extern void ipl_save_parameters(void);
extern void ipl_update_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
enum {
IPL_DEVNO_VALID = 1,
IPL_PARMBLOCK_VALID = 2,
IPL_NSS_VALID = 4,
};
enum ipl_type {
IPL_TYPE_UNKNOWN = 1,
IPL_TYPE_CCW = 2,
IPL_TYPE_FCP = 4,
IPL_TYPE_FCP_DUMP = 8,
IPL_TYPE_NSS = 16,
};
struct ipl_info
{
enum ipl_type type;
union {
struct {
struct ccw_dev_id dev_id;
} ccw;
struct {
struct ccw_dev_id dev_id;
u64 wwpn;
u64 lun;
} fcp;
struct {
char name[NSS_NAME_SIZE + 1];
} nss;
} data;
};
extern struct ipl_info ipl_info;
extern void setup_ipl(void);
/*
* DIAG 308 support
*/
enum diag308_subcode {
DIAG308_REL_HSA = 2,
DIAG308_IPL = 3,
DIAG308_DUMP = 4,
DIAG308_SET = 5,
DIAG308_STORE = 6,
};
enum diag308_ipl_type {
DIAG308_IPL_TYPE_FCP = 0,
DIAG308_IPL_TYPE_CCW = 2,
};
enum diag308_opt {
DIAG308_IPL_OPT_IPL = 0x10,
DIAG308_IPL_OPT_DUMP = 0x20,
};
enum diag308_flags {
DIAG308_FLAGS_LP_VALID = 0x80,
};
enum diag308_vm_flags {
DIAG308_VM_FLAGS_NSS_VALID = 0x80,
DIAG308_VM_FLAGS_VP_VALID = 0x40,
};
enum diag308_rc {
DIAG308_RC_OK = 0x0001,
DIAG308_RC_NOCONFIG = 0x0102,
};
extern int diag308(unsigned long subcode, void *addr);
extern void diag308_reset(void);
extern void store_status(void);
extern void lgr_info_log(void);
#endif /* _ASM_S390_IPL_H */

110
arch/s390/include/asm/irq.h Normal file
View file

@ -0,0 +1,110 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
#define EXT_INTERRUPT 1
#define IO_INTERRUPT 2
#define THIN_INTERRUPT 3
#define NR_IRQS_BASE 4
#ifdef CONFIG_PCI_NR_MSI
# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
#else
# define NR_IRQS NR_IRQS_BASE
#endif
/* This number is used when no interrupt has been assigned */
#define NO_IRQ 0
/* External interruption codes */
#define EXT_IRQ_INTERRUPT_KEY 0x0040
#define EXT_IRQ_CLK_COMP 0x1004
#define EXT_IRQ_CPU_TIMER 0x1005
#define EXT_IRQ_WARNING_TRACK 0x1007
#define EXT_IRQ_MALFUNC_ALERT 0x1200
#define EXT_IRQ_EMERGENCY_SIG 0x1201
#define EXT_IRQ_EXTERNAL_CALL 0x1202
#define EXT_IRQ_TIMING_ALERT 0x1406
#define EXT_IRQ_MEASURE_ALERT 0x1407
#define EXT_IRQ_SERVICE_SIG 0x2401
#define EXT_IRQ_CP_SERVICE 0x2603
#define EXT_IRQ_IUCV 0x4000
#ifndef __ASSEMBLY__
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>
enum interruption_class {
IRQEXT_CLK,
IRQEXT_EXC,
IRQEXT_EMS,
IRQEXT_TMR,
IRQEXT_TLA,
IRQEXT_PFL,
IRQEXT_DSD,
IRQEXT_VRT,
IRQEXT_SCP,
IRQEXT_IUC,
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_CMR,
IRQEXT_FTP,
IRQIO_CIO,
IRQIO_QAI,
IRQIO_DAS,
IRQIO_C15,
IRQIO_C70,
IRQIO_TAP,
IRQIO_VMR,
IRQIO_LCS,
IRQIO_CLW,
IRQIO_CTC,
IRQIO_APB,
IRQIO_ADM,
IRQIO_CSC,
IRQIO_PCI,
IRQIO_MSI,
IRQIO_VIR,
IRQIO_VAI,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
};
struct irq_stat {
unsigned int irqs[NR_ARCH_IRQS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
static __always_inline void inc_irq_stat(enum interruption_class irq)
{
__this_cpu_inc(irq_stat.irqs[irq]);
}
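/*
 * Usage sketch (illustrative): an interrupt handler accounts itself
 * in the per-cpu statistics, e.g.
 *
 *	inc_irq_stat(IRQEXT_CLK);
 */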
struct ext_code {
unsigned short subcode;
unsigned short code;
};
typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
int register_external_irq(u16 code, ext_int_handler_t handler);
int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
};
void irq_subclass_register(enum irq_subclass subclass);
void irq_subclass_unregister(enum irq_subclass subclass);
#define irq_canonicalize(irq) (irq)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQ_H */

View file

@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>

View file

@ -0,0 +1,72 @@
/*
* Copyright IBM Corp. 2006, 2010
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H
#include <linux/types.h>
/* store then OR system mask. */
#define __arch_local_irq_stosm(__or) \
({ \
unsigned long __mask; \
asm volatile( \
" stosm %0,%1" \
: "=Q" (__mask) : "i" (__or) : "memory"); \
__mask; \
})
/* store then AND system mask. */
#define __arch_local_irq_stnsm(__and) \
({ \
unsigned long __mask; \
asm volatile( \
" stnsm %0,%1" \
: "=Q" (__mask) : "i" (__and) : "memory"); \
__mask; \
})
/* set system mask. */
static inline notrace void __arch_local_irq_ssm(unsigned long flags)
{
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
static inline notrace unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stosm(0x00);
}
static inline notrace unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
static inline notrace void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
static inline notrace void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__arch_local_irq_ssm(flags);
}
static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (3UL << (BITS_PER_LONG - 8)));
}
static inline notrace bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
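/*
 * Usage sketch (illustrative): the usual save/restore pairing built
 * from the primitives above.
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section with interrupts disabled ...
 *	arch_local_irq_restore(flags);
 */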
#endif /* __ASM_IRQFLAGS_H */

View file

@ -0,0 +1,28 @@
#ifndef _ASM_S390_ISC_H
#define _ASM_S390_ISC_H
#include <linux/types.h>
/*
* I/O interruption subclasses used by drivers.
* Please add all used iscs here so that it is possible to distribute
* isc usage between drivers.
* Reminder: 0 is highest priority, 7 lowest.
*/
#define MAX_ISC 7
/* Regular I/O interrupts. */
#define IO_SCH_ISC 3 /* regular I/O subchannels */
#define CONSOLE_ISC 1 /* console I/O subchannel */
#define EADM_SCH_ISC 4 /* EADM subchannels */
#define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
#define PCI_ISC 2 /* PCI I/O subchannels */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */
void isc_register(unsigned int isc);
void isc_unregister(unsigned int isc);
#endif /* _ASM_S390_ISC_H */

View file

@ -0,0 +1,30 @@
/*
* Functions for incremental construction of fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef _ASM_S390_ITCW_H
#define _ASM_S390_ITCW_H
#include <linux/types.h>
#include <asm/fcx.h>
#define ITCW_OP_READ 0
#define ITCW_OP_WRITE 1
struct itcw;
struct tcw *itcw_get_tcw(struct itcw *itcw);
size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws);
struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
int max_tidaws, int intrg_max_tidaws);
struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
u8 cd_count, u32 count);
struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr,
u32 count);
void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal);
void itcw_finalize(struct itcw *itcw);
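/*
 * Usage sketch (illustrative; error handling omitted): size, init and
 * fill an itcw, then finalize it before use.
 *
 *	size = itcw_calc_size(0, max_tidaws, 0);
 *	itcw = itcw_init(buffer, size, ITCW_OP_READ, 0, max_tidaws, 0);
 *	itcw_add_dcw(itcw, DCW_CMD_READ, 0, NULL, 0, count);
 *	itcw_add_tidaw(itcw, 0, addr, count);
 *	itcw_finalize(itcw);
 */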
#endif /* _ASM_S390_ITCW_H */

View file

@ -0,0 +1,37 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
#ifdef CONFIG_64BIT
#define ASM_PTR ".quad"
#define ASM_ALIGN ".balign 8"
#else
#define ASM_PTR ".long"
#define ASM_ALIGN ".balign 4"
#endif
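/*
 * "brcl 0,0" below is a six byte no-op (JUMP_LABEL_NOP_SIZE) that the
 * jump label code patches into an unconditional branch to the enabled
 * code path once the static key is switched on.
 */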
static __always_inline bool arch_static_branch(struct static_key *key)
{
asm_volatile_goto("0: brcl 0,0\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
".popsection\n"
: : "X" (key) : : label);
return false;
label:
return true;
}
typedef unsigned long jump_label_t;
struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
};
#endif

View file

@ -0,0 +1,27 @@
#ifndef _S390_KDEBUG_H
#define _S390_KDEBUG_H
/*
* Feb 2006 Ported to s390 <grundym@us.ibm.com>
*/
struct pt_regs;
enum die_val {
DIE_OOPS = 1,
DIE_BPT,
DIE_SSTEP,
DIE_PANIC,
DIE_NMI,
DIE_DIE,
DIE_NMIWATCHDOG,
DIE_KERNELDEBUG,
DIE_TRAP,
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
};
extern void die(struct pt_regs *, const char *);
#endif

View file

@ -0,0 +1,63 @@
/*
* Copyright IBM Corp. 2005
*
* Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
*
*/
#ifndef _S390_KEXEC_H
#define _S390_KEXEC_H
#include <asm/processor.h>
#include <asm/page.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
* i.e. the highest page that is mapped directly into kernel memory
* so that kmap is not required.
*/
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control pages */
/* Not more than 2GB */
#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
/* Maximum address we can use for the crash control pages */
#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
/* Allocate one page for the pdp and the second for the code */
#define KEXEC_CONTROL_PAGE_SIZE 4096
/* Alignment of crashkernel memory */
#define KEXEC_CRASH_MEM_ALIGN HPAGE_SIZE
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
/*
* Size for s390x ELF notes per CPU
*
* Seven notes plus zero note at the end: prstatus, fpregset, timer,
* tod_cmp, tod_reg, control regs, and prefix
*/
#define KEXEC_NOTE_BYTES \
(ALIGN(sizeof(struct elf_note), 4) * 8 + \
ALIGN(sizeof("CORE"), 4) * 7 + \
ALIGN(sizeof(struct elf_prstatus), 4) + \
ALIGN(sizeof(elf_fpregset_t), 4) + \
ALIGN(sizeof(u64), 4) + \
ALIGN(sizeof(u64), 4) + \
ALIGN(sizeof(u32), 4) + \
ALIGN(sizeof(u64) * 16, 4) + \
ALIGN(sizeof(u32), 4) \
)
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
#endif /*_S390_KEXEC_H */

View file

@ -0,0 +1,6 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
#include <asm-generic/kmap_types.h>
#endif

View file

@ -0,0 +1,93 @@
#ifndef _ASM_S390_KPROBES_H
#define _ASM_S390_KPROBES_H
/*
* Kernel Probes (KProbes)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corp. 2002, 2006
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
* Probes initial implementation ( includes suggestions from
* Rusty Russell).
* 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
* <ananth@in.ibm.com>
* 2005-Dec Used as a template for s390 by Mike Grundy
* <grundym@us.ibm.com>
*/
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
struct pt_regs;
struct kprobe;
typedef u16 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0x0002
/* Maximum instruction size is 3 (16-bit) halfwords: */
#define MAX_INSN_SIZE 0x0003
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
#define kretprobe_blacklist_size 0
#define KPROBE_SWAP_INST 0x10
#define FIXUP_PSW_NORMAL 0x08
#define FIXUP_BRANCH_NOT_TAKEN 0x04
#define FIXUP_RETURN_REGISTER 0x02
#define FIXUP_NOT_REQUIRED 0x01
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
kprobe_opcode_t *insn;
};
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_imask;
unsigned long kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
struct pt_regs jprobe_saved_regs;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
};
void arch_remove_kprobe(struct kprobe *p);
void kretprobe_trampoline(void);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
int probe_is_prohibited_opcode(u16 *insn);
int probe_get_fixup_type(u16 *insn);
int probe_is_insn_relative_long(u16 *insn);
#define flush_insn_slot(p) do { } while (0)
#endif /* _ASM_S390_KPROBES_H */

View file

@ -0,0 +1,481 @@
/*
* definition for kernel virtual machines on s390
*
* Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/isc.h>
#define KVM_MAX_VCPUS 64
#define KVM_USER_MEM_SLOTS 32
/*
* These seem to be used for allocating ->chip in the routing table,
* which we don't use. 4096 is an out-of-thin-air value. If we need
* to look at ->chip later on, we'll need to revisit this.
*/
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define SIGP_CTRL_C 0x00800000
struct sca_entry {
atomic_t ctrl;
__u32 reserved;
__u64 sda;
__u64 reserved2[2];
} __attribute__((packed));
union ipte_control {
unsigned long val;
struct {
unsigned long k : 1;
unsigned long kh : 31;
unsigned long kg : 32;
};
};
struct sca_block {
union ipte_control ipte_control;
__u64 reserved[5];
__u64 mcn;
__u64 reserved2;
struct sca_entry cpu[64];
} __attribute__((packed));
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT 0x04000000
#define CPUSTAT_IO_INT 0x02000000
#define CPUSTAT_EXT_INT 0x01000000
#define CPUSTAT_RUNNING 0x00800000
#define CPUSTAT_RETAINED 0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB 0x00010000
#define CPUSTAT_RRF 0x00008000
#define CPUSTAT_SLSV 0x00004000
#define CPUSTAT_SLSR 0x00002000
#define CPUSTAT_ZARCH 0x00000800
#define CPUSTAT_MCDS 0x00000100
#define CPUSTAT_SM 0x00000080
#define CPUSTAT_IBS 0x00000040
#define CPUSTAT_G 0x00000008
#define CPUSTAT_GED 0x00000004
#define CPUSTAT_J 0x00000002
#define CPUSTAT_P 0x00000001
struct kvm_s390_sie_block {
atomic_t cpuflags; /* 0x0000 */
__u32 : 1; /* 0x0004 */
__u32 prefix : 18;
__u32 : 13;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
__u8 reserved10[16]; /* 0x0010 */
#define PROG_BLOCK_SIE 0x00000001
atomic_t prog20; /* 0x0020 */
__u8 reserved24[4]; /* 0x0024 */
__u64 cputm; /* 0x0028 */
__u64 ckc; /* 0x0030 */
__u64 epoch; /* 0x0038 */
__u8 reserved40[4]; /* 0x0040 */
#define LCTL_CR0 0x8000
#define LCTL_CR6 0x0200
#define LCTL_CR9 0x0040
#define LCTL_CR10 0x0020
#define LCTL_CR11 0x0010
#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
#define ICTL_PINT 0x20000000
#define ICTL_LPSW 0x00400000
#define ICTL_STCTL 0x00040000
#define ICTL_ISKE 0x00004000
#define ICTL_SSKE 0x00002000
#define ICTL_RRBE 0x00001000
#define ICTL_TPROT 0x00000200
__u32 ictl; /* 0x0048 */
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
#define ICPT_OPEREXC 0x2C
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
__u8 icptcode; /* 0x0050 */
__u8 reserved51; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
__u8 reserved54[2]; /* 0x0054 */
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
__u8 reserved60; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
__u8 reserved63[1]; /* 0x0063 */
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
__u8 reserved70[32]; /* 0x0070 */
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
__u8 reservedb0[20]; /* 0x00b0 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
__u32 reservedc8; /* 0x00c8 */
__u16 pgmilc; /* 0x00cc */
__u16 iprcc; /* 0x00ce */
__u32 dxc; /* 0x00d0 */
__u16 mcn; /* 0x00d4 */
__u8 perc; /* 0x00d6 */
__u8 peratmid; /* 0x00d7 */
__u64 peraddr; /* 0x00d8 */
__u8 eai; /* 0x00e0 */
__u8 peraid; /* 0x00e1 */
__u8 oai; /* 0x00e2 */
__u8 armid; /* 0x00e3 */
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
__u8 reserved188[24]; /* 0x0188 */
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
__u8 reserved1c0[30]; /* 0x01c0 */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
__u8 reserved1f0[16]; /* 0x01f0 */
} __attribute__((packed));
struct kvm_s390_itdb {
__u8 data[256];
} __packed;
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
__u8 reserved700[2304]; /* 0x0700 */
} __packed;
struct kvm_vcpu_stat {
u32 exit_userspace;
u32 exit_null;
u32 exit_external_request;
u32 exit_external_interrupt;
u32 exit_stop_request;
u32 exit_validity;
u32 exit_instruction;
u32 halt_wakeup;
u32 instruction_lctl;
u32 instruction_lctlg;
u32 instruction_stctl;
u32 instruction_stctg;
u32 exit_program_interruption;
u32 exit_instr_and_program;
u32 deliver_external_call;
u32 deliver_emergency_signal;
u32 deliver_service_signal;
u32 deliver_virtio_interrupt;
u32 deliver_stop_signal;
u32 deliver_prefix_signal;
u32 deliver_restart_signal;
u32 deliver_program_int;
u32 deliver_io_int;
u32 exit_wait_state;
u32 instruction_pfmf;
u32 instruction_stidp;
u32 instruction_spx;
u32 instruction_stpx;
u32 instruction_stap;
u32 instruction_storage_key;
u32 instruction_ipte_interlock;
u32 instruction_stsch;
u32 instruction_chsc;
u32 instruction_stsi;
u32 instruction_stfl;
u32 instruction_tprot;
u32 instruction_essa;
u32 instruction_sigp_sense;
u32 instruction_sigp_sense_running;
u32 instruction_sigp_external_call;
u32 instruction_sigp_emergency;
u32 instruction_sigp_stop;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
u32 diagnose_10;
u32 diagnose_44;
u32 diagnose_9c;
};
#define PGM_OPERATION 0x01
#define PGM_PRIVILEGED_OP 0x02
#define PGM_EXECUTE 0x03
#define PGM_PROTECTION 0x04
#define PGM_ADDRESSING 0x05
#define PGM_SPECIFICATION 0x06
#define PGM_DATA 0x07
#define PGM_FIXED_POINT_OVERFLOW 0x08
#define PGM_FIXED_POINT_DIVIDE 0x09
#define PGM_DECIMAL_OVERFLOW 0x0a
#define PGM_DECIMAL_DIVIDE 0x0b
#define PGM_HFP_EXPONENT_OVERFLOW 0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d
#define PGM_HFP_SIGNIFICANCE 0x0e
#define PGM_HFP_DIVIDE 0x0f
#define PGM_SEGMENT_TRANSLATION 0x10
#define PGM_PAGE_TRANSLATION 0x11
#define PGM_TRANSLATION_SPEC 0x12
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
#define PGM_AFX_TRANSLATION 0x20
#define PGM_ASX_TRANSLATION 0x21
#define PGM_LX_TRANSLATION 0x22
#define PGM_EX_TRANSLATION 0x23
#define PGM_PRIMARY_AUTHORITY 0x24
#define PGM_SECONDARY_AUTHORITY 0x25
#define PGM_LFX_TRANSLATION 0x26
#define PGM_LSX_TRANSLATION 0x27
#define PGM_ALET_SPECIFICATION 0x28
#define PGM_ALEN_TRANSLATION 0x29
#define PGM_ALE_SEQUENCE 0x2a
#define PGM_ASTE_VALIDITY 0x2b
#define PGM_ASTE_SEQUENCE 0x2c
#define PGM_EXTENDED_AUTHORITY 0x2d
#define PGM_LSTE_SEQUENCE 0x2e
#define PGM_ASTE_INSTANCE 0x2f
#define PGM_STACK_FULL 0x30
#define PGM_STACK_EMPTY 0x31
#define PGM_STACK_SPECIFICATION 0x32
#define PGM_STACK_TYPE 0x33
#define PGM_STACK_OPERATION 0x34
#define PGM_ASCE_TYPE 0x38
#define PGM_REGION_FIRST_TRANS 0x39
#define PGM_REGION_SECOND_TRANS 0x3a
#define PGM_REGION_THIRD_TRANS 0x3b
#define PGM_MONITOR 0x40
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
union {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
struct kvm_s390_pgm_info pgm;
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
struct kvm_s390_mchk_info mchk;
};
};
/* for local_interrupt.action_flags */
#define ACTION_STORE_ON_STOP (1<<0)
#define ACTION_STOP_ON_STOP (1<<1)
struct kvm_s390_local_interrupt {
spinlock_t lock;
struct list_head list;
atomic_t active;
struct kvm_s390_float_interrupt *float_int;
wait_queue_head_t *wq;
atomic_t *cpuflags;
unsigned int action_bits;
};
struct kvm_s390_float_interrupt {
spinlock_t lock;
struct list_head list;
atomic_t active;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
unsigned long addr;
unsigned long phys_addr;
int len;
char *old_data;
};
struct kvm_hw_bp_info_arch {
unsigned long addr;
int len;
};
/*
* Only the upper 16 bits of kvm_guest_debug->control are arch specific.
* Further KVM_GUESTDBG flags which can be used from userspace can be found in
* arch/s390/include/uapi/asm/kvm.h
*/
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000
#define guestdbg_enabled(vcpu) \
(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
struct kvm_guestdbg_info_arch {
unsigned long cr0;
unsigned long cr9;
unsigned long cr10;
unsigned long cr11;
struct kvm_hw_bp_info_arch *hw_bp_info;
struct kvm_hw_wp_info_arch *hw_wp_info;
int nr_hw_bp;
int nr_hw_wp;
unsigned long last_bp;
};
struct kvm_vcpu_arch {
struct kvm_s390_sie_block *sie_block;
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
union {
struct cpuid cpu_id;
u64 stidp_data;
};
struct gmap *gmap;
struct kvm_guestdbg_info_arch guestdbg;
#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
unsigned long pfault_token;
unsigned long pfault_select;
unsigned long pfault_compare;
};
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
struct kvm_arch_memory_slot {
};
struct s390_map_info {
struct list_head list;
__u64 guest_addr;
__u64 addr;
struct page *page;
};
struct s390_io_adapter {
unsigned int id;
int isc;
bool maskable;
bool masked;
bool swap;
struct rw_semaphore maps_lock;
struct list_head maps;
atomic_t nr_maps;
};
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256
struct kvm_s390_crypto {
struct kvm_s390_crypto_cb *crycb;
__u32 crycbd;
};
struct kvm_s390_crypto_cb {
__u8 reserved00[128]; /* 0x0000 */
};
struct kvm_arch {
struct sca_block *sca;
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
struct kvm_device *flic;
struct gmap *gmap;
int css_support;
int use_irqchip;
int use_cmma;
int user_cpu_state_ctrl;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
spinlock_t start_stop_lock;
struct kvm_s390_crypto crypto;
};
#define KVM_HVA_ERR_BAD (-1UL)
#define KVM_HVA_ERR_RO_BAD (-2UL)
static inline bool kvm_is_error_hva(unsigned long addr)
{
return IS_ERR_VALUE(addr);
}
#define ASYNC_PF_PER_VCPU 64
struct kvm_arch_async_pf {
unsigned long pfault_token;
};
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
#endif

View file

@ -0,0 +1,157 @@
/*
* definition for paravirtual devices on s390
*
* Copyright IBM Corp. 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*/
/*
* Hypercalls for KVM on s390. The calling convention is similar to the
* s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
* as hypercall number and R7 as parameter 6. The return value is
* written to R2. We use the diagnose instruction as hypercall. To avoid
* conflicts with existing diagnoses for LPAR and z/VM, we do not use
* the instruction encoded number, but specify the number in R1 and
* use 0x500 as KVM hypercall
*
* Copyright IBM Corp. 2007,2008
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
#ifndef __S390_KVM_PARA_H
#define __S390_KVM_PARA_H
#include <uapi/asm/kvm_para.h>
static inline long kvm_hypercall0(unsigned long nr)
{
register unsigned long __nr asm("1") = nr;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr): "memory", "cc");
return __rc;
}
static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
return __rc;
}
static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
unsigned long p2)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
register unsigned long __p2 asm("3") = p2;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
: "memory", "cc");
return __rc;
}
static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
register unsigned long __p2 asm("3") = p2;
register unsigned long __p3 asm("4") = p3;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
"d" (__p3) : "memory", "cc");
return __rc;
}
static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
register unsigned long __p2 asm("3") = p2;
register unsigned long __p3 asm("4") = p3;
register unsigned long __p4 asm("5") = p4;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
"d" (__p3), "d" (__p4) : "memory", "cc");
return __rc;
}
static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
register unsigned long __p2 asm("3") = p2;
register unsigned long __p3 asm("4") = p3;
register unsigned long __p4 asm("5") = p4;
register unsigned long __p5 asm("6") = p5;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
"d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
return __rc;
}
static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5,
unsigned long p6)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
register unsigned long __p2 asm("3") = p2;
register unsigned long __p3 asm("4") = p3;
register unsigned long __p4 asm("5") = p4;
register unsigned long __p5 asm("6") = p5;
register unsigned long __p6 asm("7") = p6;
register long __rc asm("2");
asm volatile ("diag 2,4,0x500\n"
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
"d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
: "memory", "cc");
return __rc;
}
/* KVM on s390 always has paravirtualization enabled */
static inline int kvm_para_available(void)
{
return 1;
}
/* No feature bits are currently assigned for kvm on s390 */
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
static inline bool kvm_check_and_clear_guest_paused(void)
{
return false;
}
#endif /* __S390_KVM_PARA_H */
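The wrappers above encode the convention from the header comment: hypercall number in R1, parameters in R2-R7, diagnose 0x500, result in R2. A minimal guest-side sketch of how a caller might use them; KVM_HC_EXAMPLE and the token argument are illustrative assumptions, not part of this header:

/* Hypothetical use of the hypercall wrappers above; KVM_HC_EXAMPLE and
 * the notifier payload are illustrative assumptions, not a defined ABI. */
#define KVM_HC_EXAMPLE		42UL

static long notify_host(unsigned long token)
{
	/* R1 <- 42, R2 <- token, diag 0x500; the result comes back in R2 */
	return kvm_hypercall1(KVM_HC_EXAMPLE, token);
}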

View file

@ -0,0 +1,9 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#include <linux/stringify.h>
#define __ALIGN .align 4, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
#endif

View file

@ -0,0 +1 @@
#include <asm-generic/local.h>

View file

@ -0,0 +1 @@
#include <asm-generic/local64.h>

View file

@ -0,0 +1,376 @@
/*
* Copyright IBM Corp. 1999, 2012
* Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
*/
#ifndef _ASM_S390_LOWCORE_H
#define _ASM_S390_LOWCORE_H
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/types.h>
#ifdef CONFIG_32BIT
#define LC_ORDER 0
#define LC_PAGES 1
struct save_area {
u32 ext_save;
u64 timer;
u64 clk_cmp;
u8 pad1[24];
u8 psw[8];
u32 pref_reg;
u8 pad2[20];
u32 acc_regs[16];
u64 fp_regs[4];
u32 gp_regs[16];
u32 ctrl_regs[16];
} __packed;
struct save_area_ext {
struct save_area sa;
__vector128 vx_regs[32];
};
struct _lowcore {
psw_t restart_psw; /* 0x0000 */
psw_t restart_old_psw; /* 0x0008 */
__u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
psw_t external_old_psw; /* 0x0018 */
psw_t svc_old_psw; /* 0x0020 */
psw_t program_old_psw; /* 0x0028 */
psw_t mcck_old_psw; /* 0x0030 */
psw_t io_old_psw; /* 0x0038 */
__u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */
psw_t external_new_psw; /* 0x0058 */
psw_t svc_new_psw; /* 0x0060 */
psw_t program_new_psw; /* 0x0068 */
psw_t mcck_new_psw; /* 0x0070 */
psw_t io_new_psw; /* 0x0078 */
__u32 ext_params; /* 0x0080 */
__u16 ext_cpu_addr; /* 0x0084 */
__u16 ext_int_code; /* 0x0086 */
__u16 svc_ilc; /* 0x0088 */
__u16 svc_code; /* 0x008a */
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
__u32 trans_exc_code; /* 0x0090 */
__u16 mon_class_num; /* 0x0094 */
__u8 per_code; /* 0x0096 */
__u8 per_atmid; /* 0x0097 */
__u32 per_address; /* 0x0098 */
__u32 monitor_code; /* 0x009c */
__u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */
__u8 op_access_id; /* 0x00a2 */
__u8 ar_mode_id; /* 0x00a3 */
__u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
__u32 io_int_word; /* 0x00c0 */
__u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
__u32 stfl_fac_list; /* 0x00c8 */
__u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */
__u32 extended_save_area_addr; /* 0x00d4 */
__u32 cpu_timer_save_area[2]; /* 0x00d8 */
__u32 clock_comp_save_area[2]; /* 0x00e0 */
__u32 mcck_interruption_code[2]; /* 0x00e8 */
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */
__u32 failing_storage_address; /* 0x00f8 */
__u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
psw_t psw_save_area; /* 0x0100 */
__u32 prefixreg_save_area; /* 0x0108 */
__u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
/* CPU register save area: defined by architecture */
__u32 access_regs_save_area[16]; /* 0x0120 */
__u32 floating_pt_save_area[8]; /* 0x0160 */
__u32 gpregs_save_area[16]; /* 0x0180 */
__u32 cregs_save_area[16]; /* 0x01c0 */
/* Save areas. */
__u32 save_area_sync[8]; /* 0x0200 */
__u32 save_area_async[8]; /* 0x0220 */
__u32 save_area_restart[1]; /* 0x0240 */
/* CPU flags. */
__u32 cpu_flags; /* 0x0244 */
/* Return psws. */
psw_t return_psw; /* 0x0248 */
psw_t return_mcck_psw; /* 0x0250 */
/* CPU time accounting values */
__u64 sync_enter_timer; /* 0x0258 */
__u64 async_enter_timer; /* 0x0260 */
__u64 mcck_enter_timer; /* 0x0268 */
__u64 exit_timer; /* 0x0270 */
__u64 user_timer; /* 0x0278 */
__u64 system_timer; /* 0x0280 */
__u64 steal_timer; /* 0x0288 */
__u64 last_update_timer; /* 0x0290 */
__u64 last_update_clock; /* 0x0298 */
__u64 int_clock; /* 0x02a0 */
__u64 mcck_clock; /* 0x02a8 */
__u64 clock_comparator; /* 0x02b0 */
/* Current process. */
__u32 current_task; /* 0x02b8 */
__u32 thread_info; /* 0x02bc */
__u32 kernel_stack; /* 0x02c0 */
/* Interrupt, panic and restart stack. */
__u32 async_stack; /* 0x02c4 */
__u32 panic_stack; /* 0x02c8 */
__u32 restart_stack; /* 0x02cc */
/* Restart function and parameter. */
__u32 restart_fn; /* 0x02d0 */
__u32 restart_data; /* 0x02d4 */
__u32 restart_source; /* 0x02d8 */
/* Address space pointer. */
__u32 kernel_asce; /* 0x02dc */
__u32 user_asce; /* 0x02e0 */
__u32 current_pid; /* 0x02e4 */
/* SMP info area */
__u32 cpu_nr; /* 0x02e8 */
__u32 softirq_pending; /* 0x02ec */
__u32 percpu_offset; /* 0x02f0 */
__u32 machine_flags; /* 0x02f4 */
__u32 ftrace_func; /* 0x02f8 */
__u32 spinlock_lockval; /* 0x02fc */
__u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
/*
* 0xe00 contains the address of the IPL Parameter Information
* block. Dump tools need IPIB for IPL after dump.
* Note: do not change the position of any fields in 0x0e00-0x0f00
*/
__u32 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e04 */
__u32 vmcore_info; /* 0x0e08 */
__u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */
__u32 os_info; /* 0x0e18 */
__u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
} __packed;
#else /* CONFIG_32BIT */
#define LC_ORDER 1
#define LC_PAGES 2
struct save_area {
u64 fp_regs[16];
u64 gp_regs[16];
u8 psw[16];
u8 pad1[8];
u32 pref_reg;
u32 fp_ctrl_reg;
u8 pad2[4];
u32 tod_reg;
u64 timer;
u64 clk_cmp;
u8 pad3[8];
u32 acc_regs[16];
u64 ctrl_regs[16];
} __packed;
struct save_area_ext {
struct save_area sa;
__vector128 vx_regs[32];
};
struct _lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
__u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */
__u32 ext_params; /* 0x0080 */
__u16 ext_cpu_addr; /* 0x0084 */
__u16 ext_int_code; /* 0x0086 */
__u16 svc_ilc; /* 0x0088 */
__u16 svc_code; /* 0x008a */
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
__u32 data_exc_code; /* 0x0090 */
__u16 mon_class_num; /* 0x0094 */
__u8 per_code; /* 0x0096 */
__u8 per_atmid; /* 0x0097 */
__u64 per_address; /* 0x0098 */
__u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */
__u8 op_access_id; /* 0x00a2 */
__u8 ar_mode_id; /* 0x00a3 */
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
__u64 trans_exc_code; /* 0x00a8 */
__u64 monitor_code; /* 0x00b0 */
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
__u32 io_int_word; /* 0x00c0 */
__u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
__u32 stfl_fac_list; /* 0x00c8 */
__u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
__u32 mcck_interruption_code[2]; /* 0x00e8 */
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */
__u64 failing_storage_address; /* 0x00f8 */
__u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
__u64 breaking_event_addr; /* 0x0110 */
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
psw_t restart_old_psw; /* 0x0120 */
psw_t external_old_psw; /* 0x0130 */
psw_t svc_old_psw; /* 0x0140 */
psw_t program_old_psw; /* 0x0150 */
psw_t mcck_old_psw; /* 0x0160 */
psw_t io_old_psw; /* 0x0170 */
__u8 pad_0x0180[0x01a0-0x0180]; /* 0x0180 */
psw_t restart_psw; /* 0x01a0 */
psw_t external_new_psw; /* 0x01b0 */
psw_t svc_new_psw; /* 0x01c0 */
psw_t program_new_psw; /* 0x01d0 */
psw_t mcck_new_psw; /* 0x01e0 */
psw_t io_new_psw; /* 0x01f0 */
/* Save areas. */
__u64 save_area_sync[8]; /* 0x0200 */
__u64 save_area_async[8]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
/* CPU flags. */
__u64 cpu_flags; /* 0x0288 */
/* Return psws. */
psw_t return_psw; /* 0x0290 */
psw_t return_mcck_psw; /* 0x02a0 */
/* CPU accounting and timing values. */
__u64 sync_enter_timer; /* 0x02b0 */
__u64 async_enter_timer; /* 0x02b8 */
__u64 mcck_enter_timer; /* 0x02c0 */
__u64 exit_timer; /* 0x02c8 */
__u64 user_timer; /* 0x02d0 */
__u64 system_timer; /* 0x02d8 */
__u64 steal_timer; /* 0x02e0 */
__u64 last_update_timer; /* 0x02e8 */
__u64 last_update_clock; /* 0x02f0 */
__u64 int_clock; /* 0x02f8 */
__u64 mcck_clock; /* 0x0300 */
__u64 clock_comparator; /* 0x0308 */
/* Current process. */
__u64 current_task; /* 0x0310 */
__u64 thread_info; /* 0x0318 */
__u64 kernel_stack; /* 0x0320 */
/* Interrupt, panic and restart stack. */
__u64 async_stack; /* 0x0328 */
__u64 panic_stack; /* 0x0330 */
__u64 restart_stack; /* 0x0338 */
/* Restart function and parameter. */
__u64 restart_fn; /* 0x0340 */
__u64 restart_data; /* 0x0348 */
__u64 restart_source; /* 0x0350 */
/* Address space pointer. */
__u64 kernel_asce; /* 0x0358 */
__u64 user_asce; /* 0x0360 */
__u64 current_pid; /* 0x0368 */
/* SMP info area */
__u32 cpu_nr; /* 0x0370 */
__u32 softirq_pending; /* 0x0374 */
__u64 percpu_offset; /* 0x0378 */
__u64 vdso_per_cpu_data; /* 0x0380 */
__u64 machine_flags; /* 0x0388 */
__u64 ftrace_func; /* 0x0390 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
__u8 pad_0x03a4[0x0400-0x03a4];	/* 0x03a4 */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
__u8 pad_0x0440[0x0e00-0x0440];	/* 0x0440 */
/*
* 0xe00 contains the address of the IPL Parameter Information
* block. Dump tools need IPIB for IPL after dump.
* Note: do not change the position of any fields in 0x0e00-0x0f00
*/
__u64 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e08 */
__u64 vmcore_info; /* 0x0e0c */
__u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */
__u64 os_info; /* 0x0e18 */
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to vector register save area */
__u64 vector_save_area_addr; /* 0x11b0 */
/* 64 bit extparam used for pfault/diag 250: defined by architecture */
__u64 ext_params2; /* 0x11B8 */
__u8 pad_0x11c0[0x1200-0x11C0]; /* 0x11C0 */
/* CPU register save area: defined by architecture */
__u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */
psw_t psw_save_area; /* 0x1300 */
__u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */
__u8 pad_0x1320[0x1324-0x1320]; /* 0x1320 */
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
__u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
/* Software defined save area for vector registers */
__u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
#endif /* CONFIG_32BIT */
#define S390_lowcore (*((struct _lowcore *) 0))
extern struct _lowcore *lowcore_ptr[];
static inline void set_prefix(__u32 address)
{
asm volatile("spx %0" : : "m" (address) : "memory");
}
static inline __u32 store_prefix(void)
{
__u32 address;
asm volatile("stpx %0" : "=m" (address));
return address;
}
#endif /* _ASM_S390_LOWCORE_H */

View file

@ -0,0 +1,28 @@
/*
* IEEE floating point emulation.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#ifndef __MATHEMU__
#define __MATHEMU__
extern int math_emu_b3(__u8 *, struct pt_regs *);
extern int math_emu_ed(__u8 *, struct pt_regs *);
extern int math_emu_ldr(__u8 *);
extern int math_emu_ler(__u8 *);
extern int math_emu_std(__u8 *, struct pt_regs *);
extern int math_emu_ld(__u8 *, struct pt_regs *);
extern int math_emu_ste(__u8 *, struct pt_regs *);
extern int math_emu_le(__u8 *, struct pt_regs *);
extern int math_emu_lfpc(__u8 *, struct pt_regs *);
extern int math_emu_stfpc(__u8 *, struct pt_regs *);
extern int math_emu_srnm(__u8 *, struct pt_regs *);
#endif /* __MATHEMU__ */

View file

@ -0,0 +1,15 @@
/*
* S390 version
*
* Derived from "include/asm-i386/mman.h"
*/
#ifndef __S390_MMAN_H__
#define __S390_MMAN_H__
#include <uapi/asm/mman.h>
#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
#endif
#endif /* __S390_MMAN_H__ */

View file

@ -0,0 +1,42 @@
#ifndef __MMU_H
#define __MMU_H
#include <linux/cpumask.h>
#include <linux/errno.h>
typedef struct {
cpumask_t cpu_attach_mask;
atomic_t attach_count;
unsigned int flush_mm;
spinlock_t list_lock;
struct list_head pgtable_list;
struct list_head gmap_list;
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context has extended page tables. */
unsigned int has_pgste:1;
/* The mmu context uses storage keys. */
unsigned int use_skey:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
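/*
 * Test the protection of an address with the TPROT instruction. IPM
 * and SRL extract the condition code: 0 means fetch and store are
 * permitted, 1 fetch only, 2 no access, 3 translation not available;
 * if TPROT itself raises an exception, the fixup leaves rc = -EFAULT.
 */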
static inline int tprot(unsigned long addr)
{
int rc = -EFAULT;
asm volatile(
" tprot 0(%1),0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) : "a" (addr) : "cc");
return rc;
}
#endif

View file

@ -0,0 +1,123 @@
/*
* S390 version
*
* Derived from "include/asm-i386/mmu_context.h"
*/
#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
}
#define destroy_context(mm) do { } while (0)
static inline void set_user_asce(struct mm_struct *mm)
{
S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
}
static inline void clear_user_asce(void)
{
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
__ctl_load(S390_lowcore.user_asce, 1, 1);
__ctl_load(S390_lowcore.user_asce, 7, 7);
}
static inline void load_kernel_asce(void)
{
unsigned long asce;
__ctl_store(asce, 1, 1);
if (asce != S390_lowcore.kernel_asce)
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
set_cpu_flag(CIF_ASCE);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear old ASCE by loading the kernel ASCE. */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
atomic_inc(&next->context.attach_count);
atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
load_kernel_asce();
if (mm) {
preempt_disable();
while (atomic_read(&mm->context.attach_count) >> 16)
cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
if (mm->context.flush_mm)
__tlb_flush_mm(mm);
preempt_enable();
}
set_fs(current->thread.mm_segment);
}
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
set_user_asce(next);
}
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
if (oldmm->context.asce_limit < mm->context.asce_limit)
crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __S390_MMU_CONTEXT_H */

View file

@ -0,0 +1,34 @@
#ifndef _ASM_S390_MODULE_H
#define _ASM_S390_MODULE_H
#include <asm-generic/module.h>
/*
* This file contains the s390 architecture specific module code.
*/
struct mod_arch_syminfo
{
unsigned long got_offset;
unsigned long plt_offset;
int got_initialized;
int plt_initialized;
};
struct mod_arch_specific
{
/* Starting offset of got in the module core memory. */
unsigned long got_offset;
/* Starting offset of plt in the module core memory. */
unsigned long plt_offset;
/* Size of the got. */
unsigned long got_size;
/* Size of the plt. */
unsigned long plt_size;
/* Number of symbols in syminfo. */
int nsyms;
/* Additional symbol information (got and plt offsets). */
struct mod_arch_syminfo *syminfo;
};
#endif /* _ASM_S390_MODULE_H */

View file

@ -0,0 +1,9 @@
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#include <asm-generic/mutex-dec.h>

View file

@ -0,0 +1,66 @@
/*
* Machine check handler definitions
*
* Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef _ASM_S390_NMI_H
#define _ASM_S390_NMI_H
#include <linux/types.h>
struct mci {
__u32 sd : 1; /* 00 system damage */
__u32 pd : 1; /* 01 instruction-processing damage */
__u32 sr : 1; /* 02 system recovery */
__u32 : 1; /* 03 */
__u32 cd : 1; /* 04 timing-facility damage */
__u32 ed : 1; /* 05 external damage */
__u32 : 1; /* 06 */
__u32 dg : 1; /* 07 degradation */
__u32 w : 1; /* 08 warning pending */
__u32 cp : 1; /* 09 channel-report pending */
__u32 sp : 1; /* 10 service-processor damage */
__u32 ck : 1; /* 11 channel-subsystem damage */
__u32 : 2; /* 12-13 */
__u32 b : 1; /* 14 backed up */
__u32 : 1; /* 15 */
__u32 se : 1; /* 16 storage error uncorrected */
__u32 sc : 1; /* 17 storage error corrected */
__u32 ke : 1; /* 18 storage-key error uncorrected */
__u32 ds : 1; /* 19 storage degradation */
__u32 wp : 1; /* 20 psw mwp validity */
__u32 ms : 1; /* 21 psw mask and key validity */
__u32 pm : 1; /* 22 psw program mask and cc validity */
__u32 ia : 1; /* 23 psw instruction address validity */
__u32 fa : 1; /* 24 failing storage address validity */
__u32 vr : 1; /* 25 vector register validity */
__u32 ec : 1; /* 26 external damage code validity */
__u32 fp : 1; /* 27 floating point register validity */
__u32 gr : 1; /* 28 general register validity */
__u32 cr : 1; /* 29 control register validity */
__u32 : 1; /* 30 */
__u32 st : 1; /* 31 storage logical validity */
__u32 ie : 1; /* 32 indirect storage error */
__u32 ar : 1; /* 33 access register validity */
__u32 da : 1; /* 34 delayed access exception */
__u32 : 7; /* 35-41 */
__u32 pr : 1; /* 42 tod programmable register validity */
__u32 fc : 1; /* 43 fp control register validity */
__u32 ap : 1; /* 44 ancillary report */
__u32 : 1; /* 45 */
__u32 ct : 1; /* 46 cpu timer validity */
__u32 cc : 1; /* 47 clock comparator validity */
__u32 : 16;	/* 48-63 */
};
struct pt_regs;
extern void s390_handle_mcck(void);
extern void s390_do_machine_check(struct pt_regs *regs);
#endif /* _ASM_S390_NMI_H */

View file

@ -0,0 +1,49 @@
/*
* OS info memory interface
*
* Copyright IBM Corp. 2012
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#ifndef _ASM_S390_OS_INFO_H
#define _ASM_S390_OS_INFO_H
#define OS_INFO_VERSION_MAJOR 1
#define OS_INFO_VERSION_MINOR 1
#define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */
#define OS_INFO_VMCOREINFO 0
#define OS_INFO_REIPL_BLOCK 1
struct os_info_entry {
u64 addr;
u64 size;
u32 csum;
} __packed;
struct os_info {
u64 magic;
u32 csum;
u16 version_major;
u16 version_minor;
u64 crashkernel_addr;
u64 crashkernel_size;
struct os_info_entry entry[2];
u8 reserved[4024];
} __packed;
void os_info_init(void);
void os_info_entry_add(int nr, void *ptr, u64 len);
void os_info_crashkernel_add(unsigned long base, unsigned long size);
u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size);
int copy_from_oldmem(void *dest, void *src, size_t count);
#else
static inline void *os_info_old_entry(int nr, unsigned long *size)
{
return NULL;
}
#endif
#endif /* _ASM_S390_OS_INFO_H */

View file

@ -0,0 +1,165 @@
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com)
*/
#ifndef _S390_PAGE_H
#define _S390_PAGE_H
#include <linux/const.h>
#include <asm/types.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
#define HPAGE_SHIFT 20
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
#include <asm/setup.h>
#ifndef __ASSEMBLY__
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
#if PAGE_DEFAULT_KEY
__storage_key_init_range(start, end);
#endif
}
static inline void clear_page(void *page)
{
register unsigned long reg1 asm ("1") = 0;
register void *reg2 asm ("2") = page;
register unsigned long reg3 asm ("3") = 4096;
asm volatile(
" mvcl 2,0"
: "+d" (reg2), "+d" (reg3) : "d" (reg1)
: "memory", "cc");
}
/*
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
* bypass caches when copying a page. Especially when copying huge pages
* this keeps L1 and L2 data caches alive.
*/
static inline void copy_page(void *to, void *from)
{
register void *reg2 asm ("2") = to;
register unsigned long reg3 asm ("3") = 0x1000;
register void *reg4 asm ("4") = from;
register unsigned long reg5 asm ("5") = 0xb0001000;
asm volatile(
" mvcl 2,4"
: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
: : "memory", "cc");
}
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;
#define pgprot_val(x) ((x).pgprot)
#define pgste_val(x) ((x).pgste)
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define __pgste(x) ((pgste_t) { (x) } )
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pud(x) ((pud_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
static inline void page_set_storage_key(unsigned long addr,
unsigned char skey, int mapped)
{
if (!mapped)
asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
: : "d" (skey), "a" (addr));
else
asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
static inline unsigned char page_get_storage_key(unsigned long addr)
{
unsigned char skey;
asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
return skey;
}
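/*
 * Reset the referenced bit in the storage key of a page with RRBE and
 * return whether it was set: IPM copies the condition code into bits
 * 28-29 of the result, and the 0x20000000 bit is set exactly when the
 * reference bit was one.
 */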
static inline int page_reset_referenced(unsigned long addr)
{
unsigned int ipm;
asm volatile(
" rrbe 0,%1\n"
" ipm %0\n"
: "=d" (ipm) : "a" (addr) : "cc");
return !!(ipm & 0x20000000);
}
/* Bits in the storage key */
#define _PAGE_CHANGED 0x02 /* HW changed bit */
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_states(int make_stable);
static inline int devmem_is_allowed(unsigned long pfn)
{
return 0;
}
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
#endif /* !__ASSEMBLY__ */
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
#define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(unsigned long)(x)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#endif /* _S390_PAGE_H */

View file

@ -0,0 +1,192 @@
#ifndef __ASM_S390_PCI_H
#define __ASM_S390_PCI_H
/* must be set before including asm-generic/pci.h */
#define PCI_DMA_BUS_IS_PHYS (0)
/* must be set before including pci_clp.h */
#define PCI_BAR_COUNT 6
#include <linux/pci.h>
#include <asm-generic/pci.h>
#include <asm-generic/pci-dma-compat.h>
#include <asm/pci_clp.h>
#include <asm/pci_debug.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
#define pcibios_assign_all_busses() (0)
void __iomem *pci_iomap(struct pci_dev *, int, unsigned long);
void pci_iounmap(struct pci_dev *, void __iomem *);
int pci_domain_nr(struct pci_bus *);
int pci_proc_domain(struct pci_bus *);
#define ZPCI_BUS_NR 0 /* default bus number */
#define ZPCI_DEVFN 0 /* default device number */
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
#define ZPCI_FC_BLOCKED 0x20
#define ZPCI_FC_DMA_ENABLED 0x10
struct zpci_fmb {
u32 format : 8;
u32 dma_valid : 1;
u32 : 23;
u32 samples;
u64 last_update;
/* hardware counters */
u64 ld_ops;
u64 st_ops;
u64 stb_ops;
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
/* software counters */
atomic64_t allocated_pages;
atomic64_t mapped_pages;
atomic64_t unmapped_pages;
} __packed __aligned(16);
#define ZPCI_MSI_VEC_BITS 11
#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
ZPCI_FN_STATE_STANDBY,
ZPCI_FN_STATE_CONFIGURED,
ZPCI_FN_STATE_ONLINE,
NR_ZPCI_FN_STATES,
};
struct zpci_bar_struct {
struct resource *res; /* bus resource */
u32 val; /* bar start & 3 flag bits */
u16 map_idx; /* index into bar mapping array */
u8 size; /* order 2 exponent */
};
/* Private data per function */
struct zpci_dev {
struct pci_dev *pdev;
struct pci_bus *bus;
struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
enum zpci_state state;
u32 fid; /* function ID, used by sclp */
u32 fh; /* function handle, used by insn's */
u16 vfn; /* virtual function number */
u16 pchid; /* physical channel ID */
u8 pfgid; /* function group ID */
u8 pft; /* pci function type */
u16 domain;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
/* IRQ stuff */
u64 msi_addr; /* MSI address */
struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned int aisb; /* number of the summary bit */
/* DMA stuff */
unsigned long *dma_table;
spinlock_t dma_table_lock;
int tlb_refresh;
spinlock_t iommu_bitmap_lock;
unsigned long *iommu_bitmap;
unsigned long iommu_size;
unsigned long iommu_pages;
unsigned int next_bit;
char res_name[16];
struct zpci_bar_struct bars[PCI_BAR_COUNT];
u64 start_dma; /* Start of available DMA addresses */
u64 end_dma; /* End of available DMA addresses */
u64 dma_mask; /* DMA address space mask */
/* Function measurement block */
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
enum pci_bus_speed max_bus_speed;
struct dentry *debugfs_dev;
struct dentry *debugfs_perf;
};
static inline bool zdev_enabled(struct zpci_dev *zdev)
{
return (zdev->fh & (1UL << 31)) ? true : false;
}
extern const struct attribute_group *zpci_attr_groups[];
/* -----------------------------------------------------------------------------
Prototypes
----------------------------------------------------------------------------- */
/* Base stuff */
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
/* CLP */
int clp_scan_pci_devices(void);
int clp_rescan_pci_devices(void);
int clp_rescan_pci_devices_simple(void);
int clp_add_pci_device(u32, u32, int);
int clp_enable_fh(struct zpci_dev *, u8);
int clp_disable_fh(struct zpci_dev *);
#ifdef CONFIG_PCI
/* Error handling and recovery */
void zpci_event_error(void *);
void zpci_event_availability(void *);
void zpci_rescan(void);
bool zpci_is_enabled(void);
#else /* CONFIG_PCI */
static inline void zpci_event_error(void *e) {}
static inline void zpci_event_availability(void *e) {}
static inline void zpci_rescan(void) {}
#endif /* CONFIG_PCI */
#ifdef CONFIG_HOTPLUG_PCI_S390
int zpci_init_slot(struct zpci_dev *);
void zpci_exit_slot(struct zpci_dev *);
#else /* CONFIG_HOTPLUG_PCI_S390 */
static inline int zpci_init_slot(struct zpci_dev *zdev)
{
return 0;
}
static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
#endif /* CONFIG_HOTPLUG_PCI_S390 */
/* Helpers */
struct zpci_dev *get_zdev(struct pci_dev *);
struct zpci_dev *get_zdev_by_fid(u32);
/* DMA */
int zpci_dma_init(void);
void zpci_dma_exit(void);
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);
/* Debug */
int zpci_debug_init(void);
void zpci_debug_exit(void);
void zpci_debug_init_device(struct zpci_dev *);
void zpci_debug_exit_device(struct zpci_dev *);
void zpci_debug_info(struct zpci_dev *, struct seq_file *);
#endif

View file

@ -0,0 +1,186 @@
#ifndef _ASM_S390_PCI_CLP_H
#define _ASM_S390_PCI_CLP_H
#include <asm/clp.h>
/*
* Call Logical Processor - Command Codes
*/
#define CLP_LIST_PCI 0x0002
#define CLP_QUERY_PCI_FN 0x0003
#define CLP_QUERY_PCI_FNGRP 0x0004
#define CLP_SET_PCI_FN 0x0005
/* PCI function handle list entry */
struct clp_fh_list_entry {
u16 device_id;
u16 vendor_id;
u32 config_state : 1;
u32 : 31;
u32 fid; /* PCI function id */
u32 fh; /* PCI function handle */
} __packed;
#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
/* request or response block header length */
#define LIST_PCI_HDR_LEN 32
/* Number of function handles fitting in response block */
#define CLP_FH_LIST_NR_ENTRIES \
((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
/ sizeof(struct clp_fh_list_entry))
#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
/* List PCI functions request */
struct clp_req_list_pci {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u64 resume_token;
u64 reserved2;
} __packed;
/* List PCI functions response */
struct clp_rsp_list_pci {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u64 resume_token;
u32 reserved2;
u16 max_fn;
u8 reserved3;
u8 entry_size;
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
/* Query PCI function request */
struct clp_req_query_pci {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 fh; /* function handle */
u32 reserved2;
u64 reserved3;
} __packed;
/* Query PCI function response */
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 : 64;
u16 vfn; /* virtual fn number */
u16 : 7;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
u32 fid; /* pci function id */
u8 bar_size[PCI_BAR_COUNT];
u16 pchid;
u32 bar[PCI_BAR_COUNT];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 : 24;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
u32 reserved[11];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
} __packed;
/* Query PCI function group request */
struct clp_req_query_pci_grp {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 : 24;
u32 pfgid : 8; /* function group id */
u32 reserved2;
u64 reserved3;
} __packed;
/* Query PCI function group response */
struct clp_rsp_query_pci_grp {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u16 : 4;
u16 noi : 12; /* number of interrupts */
u8 version;
u8 : 6;
u8 frame : 1;
u8 refresh : 1; /* TLB refresh mode */
u16 reserved2;
u16 mui;
u64 reserved3;
u64 dasm; /* dma address space mask */
u64 msia; /* MSI address */
u64 reserved4;
u64 reserved5;
} __packed;
/* Set PCI function request */
struct clp_req_set_pci {
struct clp_req_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 fh; /* function handle */
u16 reserved2;
u8 oc; /* operation controls */
u8 ndas; /* number of dma spaces */
u64 reserved3;
} __packed;
/* Set PCI function response */
struct clp_rsp_set_pci {
struct clp_rsp_hdr hdr;
u32 fmt : 4; /* cmd request block format */
u32 : 28;
u64 reserved1;
u32 fh; /* function handle */
u32 reserved3;
u64 reserved4;
} __packed;
/* Combined request/response block structures used by clp insn */
struct clp_req_rsp_list_pci {
struct clp_req_list_pci request;
struct clp_rsp_list_pci response;
} __packed;
struct clp_req_rsp_set_pci {
struct clp_req_set_pci request;
struct clp_rsp_set_pci response;
} __packed;
struct clp_req_rsp_query_pci {
struct clp_req_query_pci request;
struct clp_rsp_query_pci response;
} __packed;
struct clp_req_rsp_query_pci_grp {
struct clp_req_query_pci_grp request;
struct clp_rsp_query_pci_grp response;
} __packed;
#endif

View file

@ -0,0 +1,28 @@
#ifndef _S390_ASM_PCI_DEBUG_H
#define _S390_ASM_PCI_DEBUG_H
#include <asm/debug.h>
extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
#define zpci_dbg(imp, fmt, args...) \
debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
#define zpci_err(text...) \
do { \
char debug_buffer[16]; \
snprintf(debug_buffer, 16, text); \
debug_text_event(pci_debug_err_id, 0, debug_buffer); \
} while (0)
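/*
 * Dump a buffer to the error debug feature in chunks: debug_event()
 * records at most buf_size bytes per entry, so longer buffers are
 * spread over consecutive entries.
 */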
static inline void zpci_err_hex(void *addr, int len)
{
while (len > 0) {
debug_event(pci_debug_err_id, 0, (void *) addr, len);
len -= pci_debug_err_id->buf_size;
addr += pci_debug_err_id->buf_size;
}
}
#endif

View file

@ -0,0 +1,196 @@
#ifndef _ASM_S390_PCI_DMA_H
#define _ASM_S390_PCI_DMA_H
/* I/O Translation Anchor (IOTA) */
enum zpci_ioat_dtype {
ZPCI_IOTA_STO = 0,
ZPCI_IOTA_RTTO = 1,
ZPCI_IOTA_RSTO = 2,
ZPCI_IOTA_RFTO = 3,
ZPCI_IOTA_PFAA = 4,
ZPCI_IOTA_IOPFAA = 5,
ZPCI_IOTA_IOPTO = 7
};
#define ZPCI_IOTA_IOT_ENABLED 0x800UL
#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
#define ZPCI_IOTA_FS_4K 0
#define ZPCI_IOTA_FS_1M 1
#define ZPCI_IOTA_FS_2G 2
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
/* I/O Region and segment tables */
#define ZPCI_INDEX_MASK 0x7ffUL
#define ZPCI_TABLE_TYPE_MASK 0xc
#define ZPCI_TABLE_TYPE_RFX 0xc
#define ZPCI_TABLE_TYPE_RSX 0x8
#define ZPCI_TABLE_TYPE_RTX 0x4
#define ZPCI_TABLE_TYPE_SX 0x0
#define ZPCI_TABLE_LEN_RFX 0x3
#define ZPCI_TABLE_LEN_RSX 0x3
#define ZPCI_TABLE_LEN_RTX 0x3
#define ZPCI_TABLE_OFFSET_MASK 0xc0
#define ZPCI_TABLE_SIZE 0x4000
#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
#define ZPCI_TABLE_BITS 11
#define ZPCI_PT_BITS 8
#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
#define ZPCI_RTE_FLAG_MASK 0x3fffUL
#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
#define ZPCI_STE_FLAG_MASK 0x7ffUL
#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
/* I/O Page tables */
#define ZPCI_PTE_VALID_MASK 0x400
#define ZPCI_PTE_INVALID 0x400
#define ZPCI_PTE_VALID 0x000
#define ZPCI_PT_SIZE 0x800
#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
#define ZPCI_PTE_FLAG_MASK 0xfffUL
#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
/* Shared bits */
#define ZPCI_TABLE_VALID 0x00
#define ZPCI_TABLE_INVALID 0x20
#define ZPCI_TABLE_PROTECTED 0x200
#define ZPCI_TABLE_UNPROTECTED 0x000
#define ZPCI_TABLE_VALID_MASK 0x20
#define ZPCI_TABLE_PROT_MASK 0x200
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}
static inline unsigned int calc_sx(dma_addr_t ptr)
{
return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}
static inline unsigned int calc_px(dma_addr_t ptr)
{
return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}
static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
{
*entry &= ZPCI_PTE_FLAG_MASK;
*entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
}
static inline void set_rt_sto(unsigned long *entry, void *sto)
{
*entry &= ZPCI_RTE_FLAG_MASK;
*entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
*entry |= ZPCI_TABLE_TYPE_RTX;
}
static inline void set_st_pto(unsigned long *entry, void *pto)
{
*entry &= ZPCI_STE_FLAG_MASK;
*entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
*entry |= ZPCI_TABLE_TYPE_SX;
}
static inline void validate_rt_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry &= ~ZPCI_TABLE_OFFSET_MASK;
*entry |= ZPCI_TABLE_VALID;
*entry |= ZPCI_TABLE_LEN_RTX;
}
static inline void validate_st_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry |= ZPCI_TABLE_VALID;
}
static inline void invalidate_table_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
*entry |= ZPCI_TABLE_INVALID;
}
static inline void invalidate_pt_entry(unsigned long *entry)
{
WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
*entry &= ~ZPCI_PTE_VALID_MASK;
*entry |= ZPCI_PTE_INVALID;
}
static inline void validate_pt_entry(unsigned long *entry)
{
WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
*entry &= ~ZPCI_PTE_VALID_MASK;
*entry |= ZPCI_PTE_VALID;
}
static inline void entry_set_protected(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_PROT_MASK;
*entry |= ZPCI_TABLE_PROTECTED;
}
static inline void entry_clr_protected(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_PROT_MASK;
*entry |= ZPCI_TABLE_UNPROTECTED;
}
static inline int reg_entry_isvalid(unsigned long entry)
{
return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}
static inline int pt_entry_isvalid(unsigned long entry)
{
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
static inline int entry_isprotected(unsigned long entry)
{
return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}
static inline unsigned long *get_rt_sto(unsigned long entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
: NULL;
}
static inline unsigned long *get_st_pto(unsigned long entry)
{
return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
: NULL;
}
/* Prototypes */
int zpci_dma_init_device(struct zpci_dev *);
void zpci_dma_exit_device(struct zpci_dev *);
#endif
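The calc_rtx/calc_sx/calc_px helpers above split a DMA address into region-table, segment-table and page-table indices. A rough sketch of a lookup, assuming an already-populated three-level table (hypothetical helper with no allocation or locking, unlike the real DMA code):

/* Hypothetical lookup of the I/O page-table entry for a DMA address;
 * rto is the region table origin. Missing levels yield NULL. */
static inline unsigned long *dma_pte_lookup(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;

	sto = get_rt_sto(rto[calc_rtx(dma_addr)]);	/* region -> segment */
	if (!sto)
		return NULL;
	pto = get_st_pto(sto[calc_sx(dma_addr)]);	/* segment -> page table */
	if (!pto)
		return NULL;
	return &pto[calc_px(dma_addr)];			/* page-table entry */
}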

View file

@ -0,0 +1,86 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
#define ZPCI_PCI_ST_BLOCKED 12
#define ZPCI_PCI_ST_INSUF_RES 16
#define ZPCI_PCI_ST_INVAL_AS 20
#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
/* Load/Store return codes */
#define ZPCI_PCI_LS_OK 0
#define ZPCI_PCI_LS_ERR 1
#define ZPCI_PCI_LS_BUSY 2
#define ZPCI_PCI_LS_INVAL_HANDLE 3
/* Load/Store address space identifiers */
#define ZPCI_PCIAS_MEMIO_0 0
#define ZPCI_PCIAS_MEMIO_1 1
#define ZPCI_PCIAS_MEMIO_2 2
#define ZPCI_PCIAS_MEMIO_3 3
#define ZPCI_PCIAS_MEMIO_4 4
#define ZPCI_PCIAS_MEMIO_5 5
#define ZPCI_PCIAS_CFGSPC 15
/* Modify PCI Function Controls */
#define ZPCI_MOD_FC_REG_INT 2
#define ZPCI_MOD_FC_DEREG_INT 3
#define ZPCI_MOD_FC_REG_IOAT 4
#define ZPCI_MOD_FC_DEREG_IOAT 5
#define ZPCI_MOD_FC_REREG_IOAT 6
#define ZPCI_MOD_FC_RESET_ERROR 7
#define ZPCI_MOD_FC_RESET_BLOCK 9
#define ZPCI_MOD_FC_SET_MEASURE 10
/* FIB function controls */
#define ZPCI_FIB_FC_ENABLED 0x80
#define ZPCI_FIB_FC_ERROR 0x40
#define ZPCI_FIB_FC_LS_BLOCKED 0x20
#define ZPCI_FIB_FC_DMAAS_REG 0x10
/* Function Information Block */
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
u32 : 32;
u8 fc; /* function controls */
u64 : 56;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
u32 : 1;
u32 isc : 3; /* Interrupt subclass */
u32 noi : 12; /* Number of interrupts */
u32 : 2;
u32 aibvo : 6; /* Adapter interrupt bit vector offset */
u32 sum : 1; /* Adapter int summary bit enabled */
u32 : 1;
u32 aisbo : 6; /* Adapter int summary bit offset */
u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
u64 fmb_addr; /* Function measurement block address and key */
u32 : 32;
u32 gd;
} __packed __aligned(8);
int zpci_mod_fc(u64 req, struct zpci_fib *fib);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif

View file

@ -0,0 +1,198 @@
#ifndef _ASM_S390_PCI_IO_H
#define _ASM_S390_PCI_IO_H
#ifdef CONFIG_PCI
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/pci_insn.h>
/* I/O Map */
#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
struct zpci_iomap_entry {
u32 fh;
u8 bar;
};
extern struct zpci_iomap_entry *zpci_iomap_start;
#define ZPCI_IDX(addr) \
(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
#define ZPCI_OFFSET(addr) \
((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
#define ZPCI_CREATE_REQ(handle, space, len) \
((u64) handle << 32 | space << 16 | len)
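/*
 * The two generator macros below expand into the typed MMIO accessors
 * zpci_read_u8..zpci_read_u64 and zpci_write_u8..zpci_write_u64: the
 * index encoded in the fake iomap address selects function handle and
 * BAR, from which a load/store request of the given length is built.
 */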
#define zpci_read(LENGTH, RETTYPE) \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
{ \
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data; \
int rc; \
\
rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
}
#define zpci_write(LENGTH, VALTYPE) \
static inline void zpci_write_##VALTYPE(VALTYPE val, \
const volatile void __iomem *addr) \
{ \
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
zpci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
zpci_read(4, u32)
zpci_read(2, u16)
zpci_read(1, u8)
zpci_write(8, u64)
zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
{
u64 val;
switch (len) {
case 1:
val = (u64) *((u8 *) data);
break;
case 2:
val = (u64) *((u16 *) data);
break;
case 4:
val = (u64) *((u32 *) data);
break;
case 8:
val = (u64) *((u64 *) data);
break;
default:
val = 0; /* let FW report error */
break;
}
return zpci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
int cc;
cc = zpci_load(&data, req, offset);
if (cc)
goto out;
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
break;
case 2:
*((u16 *) dst) = (u16) data;
break;
case 4:
*((u32 *) dst) = (u32) data;
break;
case 8:
*((u64 *) dst) = (u64) data;
break;
}
out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
return zpci_store_block(data, req, offset);
}
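/*
 * Determine the largest power-of-two chunk that may be transferred at
 * once: the size is bounded by the remaining length, by @max, and by
 * the common alignment of the source and destination addresses.
 */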
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
int count = len > max ? max : len, size = 1;
while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
dst = dst >> 1;
src = src >> 1;
size = size << 1;
}
return size;
}
static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
unsigned long n)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
u64 req, offset = ZPCI_OFFSET(src);
int size, rc = 0;
while (n > 0) {
size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
rc = zpci_read_single(req, dst, offset, size);
if (rc)
break;
offset += size;
dst += size;
n -= size;
}
return rc;
}
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
const void *src, unsigned long n)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
u64 req, offset = ZPCI_OFFSET(dst);
int size, rc = 0;
if (!src)
return -EINVAL;
while (n > 0) {
size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
if (size > 8) /* main path */
rc = zpci_write_block(req, src, offset);
else
rc = zpci_write_single(req, src, offset, size);
if (rc)
break;
offset += size;
src += size;
n -= size;
}
return rc;
}
static inline int zpci_memset_io(volatile void __iomem *dst,
unsigned char val, size_t count)
{
u8 *src = kmalloc(count, GFP_KERNEL);
int rc;
if (src == NULL)
return -ENOMEM;
memset(src, val, count);
rc = zpci_memcpy_toio(dst, src, count);
kfree(src);
return rc;
}
#endif /* CONFIG_PCI */
#endif /* _ASM_S390_PCI_IO_H */

View file

@ -0,0 +1,190 @@
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
* the cpu local data area is cached in the cpu's lowcore memory.
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
#ifdef CONFIG_64BIT
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
* generate external references.
*/
#if defined(CONFIG_SMP) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
/*
* We use a compare-and-swap loop since that uses less cpu cycles than
* disabling and enabling interrupts like the generic variant would do.
*/
#define arch_this_cpu_to_op_simple(pcp, val, op) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
old__ = prev__; \
new__ = old__ op (val); \
prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
preempt_enable(); \
new__; \
})
#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
{ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
preempt_disable(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
asm volatile( \
op2 " %[ptr__],%[val__]\n" \
: [ptr__] "+Q" (*ptr__) \
: [val__] "i" ((szcast)val__) \
: "cc"); \
} else { \
asm volatile( \
op1 " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
} \
preempt_enable(); \
}
#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
#define arch_this_cpu_add_return(pcp, val, op) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
preempt_disable(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
preempt_enable(); \
old__ + val__; \
})
#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
#define arch_this_cpu_to_op(pcp, val, op) \
{ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
preempt_disable(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
preempt_enable(); \
}
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
preempt_disable(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
preempt_enable(); \
ret__; \
})
#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define arch_this_cpu_xchg(pcp, nval) \
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
preempt_disable(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
preempt_enable(); \
ret__; \
})
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
typeof(pcp1) o1__ = (o1), n1__ = (n1); \
typeof(pcp2) o2__ = (o2), n2__ = (n2); \
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
preempt_disable(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
preempt_enable(); \
ret__; \
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
#endif /* CONFIG_64BIT */
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */

View file

@@ -0,0 +1,96 @@
/*
* Performance event support - s390 specific definitions.
*
* Copyright IBM Corp. 2009, 2013
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#ifndef _ASM_S390_PERF_EVENT_H
#define _ASM_S390_PERF_EVENT_H
#ifdef CONFIG_64BIT
#include <linux/perf_event.h>
#include <linux/device.h>
#include <asm/cpu_mf.h>
/* Per-CPU flags for PMU states */
#define PMU_F_RESERVED 0x1000
#define PMU_F_ENABLED 0x2000
#define PMU_F_IN_USE 0x4000
#define PMU_F_ERR_IBE 0x0100
#define PMU_F_ERR_LSDA 0x0200
#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
/* Perf definitions for PMU event attributes in sysfs */
extern __init const struct attribute_group **cpumf_cf_event_group(void);
extern ssize_t cpumf_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page);
#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name
#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr)
#define CPUMF_EVENT_ATTR(cat, name, id) \
PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show)
#define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name)
/* Perf callbacks */
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
/* Perf pt_regs extension for sample-data-entry indicators */
struct perf_sf_sde_regs {
unsigned char in_guest:1; /* guest sample */
unsigned long reserved:63; /* reserved */
};
/* Perf PMU definitions for the counter facility */
#define PERF_CPUM_CF_MAX_CTR 256
/* Perf PMU definitions for the sampling facility */
#define PERF_CPUM_SF_MAX_CTR 2
#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define REG_NONE 0
#define REG_OVERFLOW 1
#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
#define RAWSAMPLE_REG(hwc) ((hwc)->config)
#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
/* Structure for sampling data entries to be passed as perf raw sample data
* to user space. Note that raw sample data must be aligned and, thus, might
* be padded with zeros.
*/
struct sf_raw_sample {
#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE
#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE
u64 format;
u32 size; /* Size of sf_raw_sample */
u16 bsdes; /* Basic-sampling data entry size */
u16 dsdes; /* Diagnostic-sampling data entry size */
struct hws_basic_entry basic; /* Basic-sampling data entry */
struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
u8 padding[]; /* Padding to next multiple of 8 */
} __packed;
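/*
 * Illustrative sketch (an assumption, not from the original header):
 * how the total raw-sample size could be derived, padding to the next
 * multiple of 8 as described above. ALIGN() and offsetof() come from
 * linux/kernel.h and linux/stddef.h.
 */
#if 0
static inline u32 example_sf_raw_sample_size(struct sf_raw_sample *sfr)
{
	u32 size = offsetof(struct sf_raw_sample, diag);

	if (sfr->format & SF_RAW_SAMPLE_DIAG)
		size += sfr->dsdes;
	return ALIGN(size, 8); /* zero padding fills the remainder */
}
#endif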
/* Perf hardware reserve and release functions */
int perf_reserve_sampling(void);
void perf_release_sampling(void);
#endif /* CONFIG_64BIT */
#endif /* _ASM_S390_PERF_EVENT_H */

View file

@@ -0,0 +1,156 @@
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/pgalloc.h"
* Copyright (C) 1994 Linus Torvalds
*/
#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H
#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
bool init_skey);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
typedef struct { char _[n]; } addrtype;
*s = val;
n = (n / 256) - 1;
asm volatile(
#ifdef CONFIG_64BIT
" mvc 8(248,%0),0(%0)\n"
#else
" mvc 4(252,%0),0(%0)\n"
#endif
"0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n"
" brct %1,0b\n"
: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
: "m" (*(addrtype *) s));
}
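/*
 * For reference, a plain C equivalent of the mvc-based routine above
 * (illustrative only). The asm variant clears the first line and then
 * propagates it with overlapping mvc copies, 256 bytes at a time.
 */
#if 0
static inline void clear_table_slow(unsigned long *s, unsigned long val,
				    size_t n)
{
	size_t i;

	for (i = 0; i < n / sizeof(unsigned long); i++)
		s[i] = val;
}
#endif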
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
#ifndef CONFIG_64BIT
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
return _SEGMENT_ENTRY_EMPTY;
}
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x) do { } while (0)
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pgd, pud) BUG()
#define pud_populate(mm, pud, pmd) BUG()
#else /* CONFIG_64BIT */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
if (mm->context.asce_limit <= (1UL << 31))
return _SEGMENT_ENTRY_EMPTY;
if (mm->context.asce_limit <= (1UL << 42))
return _REGION3_ENTRY_EMPTY;
return _REGION2_ENTRY_EMPTY;
}
int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *table = crst_table_alloc(mm);
if (table)
crst_table_init(table, _REGION3_ENTRY_EMPTY);
return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *table = crst_table_alloc(mm);
if (!table)
return NULL;
crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
crst_table_free(mm, table);
return NULL;
}
return (pmd_t *) table;
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
pgtable_pmd_page_dtor(virt_to_page(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
#endif /* CONFIG_64BIT */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
return (pgd_t *) crst_table_alloc(mm);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)
{
pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
#define pmd_pgtable(pmd) \
(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
* page table entry allocation/free routines.
*/
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
extern void rcu_table_freelist_finish(void);
#endif /* _S390_PGALLOC_H */

File diff suppressed because it is too large

View file

@@ -0,0 +1,429 @@
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/processor.h"
* Copyright (C) 1994, Linus Torvalds
*/
#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
#define _CIF_ASCE (1<<CIF_ASCE)
#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
static inline void set_cpu_flag(int flag)
{
S390_lowcore.cpu_flags |= (1U << flag);
}
static inline void clear_cpu_flag(int flag)
{
S390_lowcore.cpu_flags &= ~(1U << flag);
}
static inline int test_cpu_flag(int flag)
{
return !!(S390_lowcore.cpu_flags & (1U << flag));
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
static inline void get_cpu_id(struct cpuid *ptr)
{
asm volatile("stidp %0" : "=Q" (*ptr));
}
extern void s390_adjust_jiffies(void);
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
*/
#ifndef CONFIG_64BIT
#define TASK_SIZE (1UL << 31)
#define TASK_MAX_SIZE (1UL << 31)
#define TASK_UNMAPPED_BASE (1UL << 30)
#else /* CONFIG_64BIT */
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_MAX_SIZE (1UL << 53)
#endif /* CONFIG_64BIT */
#ifndef CONFIG_64BIT
#define STACK_TOP (1UL << 31)
#define STACK_TOP_MAX (1UL << 31)
#else /* CONFIG_64BIT */
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX (1UL << 42)
#endif /* CONFIG_64BIT */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
typedef struct {
__u32 ar4;
} mm_segment_t;
/*
* Thread structure
*/
struct thread_struct {
s390_fp_regs fp_regs;
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
unsigned long per_flags; /* Flags to control debug behavior */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
int ri_signum;
#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
__vector128 *vxrs; /* Vector register save area */
#endif
};
/* Flag to disable transactions. */
#define PER_FLAG_NO_TE 1UL
/* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND 2UL
/* Flag to specify random transaction abort mode:
* - abort each transaction at a random instruction before TEND if set.
* - abort random transactions at a random instruction if cleared.
*/
#define PER_FLAG_TE_ABORT_RAND_TEND 4UL
typedef struct thread_struct thread_struct;
/*
* Stack layout of a C stack frame.
*/
#ifndef __PACK_STACK
struct stack_frame {
unsigned long back_chain;
unsigned long empty1[5];
unsigned long gprs[10];
unsigned int empty2[8];
};
#else
struct stack_frame {
unsigned long empty1[5];
unsigned int empty2[8];
unsigned long gprs[10];
unsigned long back_chain;
};
#endif
#define ARCH_MIN_TASKALIGN 8
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
}
/*
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
crst_table_downgrade(current->mm, 1UL << 31); \
execve_tail(); \
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct seq_file;
#ifdef CONFIG_64BIT
extern void show_cacheinfo(struct seq_file *m);
#else
static inline void show_cacheinfo(struct seq_file *m) { }
#endif
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/*
* Return saved PC of a blocked thread.
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
/* Has task runtime instrumentation enabled? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
static inline unsigned short stap(void)
{
unsigned short cpu_address;
asm volatile("stap %0" : "=m" (cpu_address));
return cpu_address;
}
/*
* Give up the time slice of the virtual CPU.
*/
static inline void cpu_relax(void)
{
if (MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,68");
barrier();
}
#define cpu_relax_lowlatency() barrier()
static inline void psw_set_key(unsigned int key)
{
asm volatile("spka 0(%0)" : : "d" (key));
}
/*
* Set PSW to specified value.
*/
static inline void __load_psw(psw_t psw)
{
#ifndef CONFIG_64BIT
asm volatile("lpsw %0" : : "Q" (psw) : "cc");
#else
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
#endif
}
/*
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
static inline void __load_psw_mask (unsigned long mask)
{
unsigned long addr;
psw_t psw;
psw.mask = mask;
#ifndef CONFIG_64BIT
asm volatile(
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
" st %0,%O1+4(%R1)\n"
" lpsw %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
#else /* CONFIG_64BIT */
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
#endif /* CONFIG_64BIT */
}
/*
* Rewind PSW instruction address by specified number of bytes.
*/
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
#ifndef CONFIG_64BIT
if (psw.addr & PSW_ADDR_AMODE)
/* 31 bit mode */
return (psw.addr - ilc) | PSW_ADDR_AMODE;
/* 24 bit mode */
return (psw.addr - ilc) & ((1UL << 24) - 1);
#else
unsigned long mask;
mask = (psw.mask & PSW_MASK_EA) ? -1UL :
(psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
(1UL << 24) - 1;
return (psw.addr - ilc) & mask;
#endif
}
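/*
 * Example (illustrative): after a program check on a 4-byte instruction
 * in 64-bit addressing mode, the PSW already points past the
 * instruction, so __rewind_psw(psw, 4) yields the address of the
 * faulting instruction itself.
 */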
/*
* Function to stop a processor until the next interrupt occurs
*/
void enabled_wait(void);
/*
* Function to drop a processor into disabled wait state
*/
static inline void __noreturn disabled_wait(unsigned long code)
{
unsigned long ctl_buf;
psw_t dw_psw;
dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
dw_psw.addr = code;
/*
* Store status and then load the disabled wait psw;
* the processor is dead afterwards.
*/
#ifndef CONFIG_64BIT
asm volatile(
" stctl 0,0,0(%2)\n"
" ni 0(%2),0xef\n" /* switch off protection */
" lctl 0,0,0(%2)\n"
" stpt 0xd8\n" /* store timer */
" stckc 0xe0\n" /* store clock comparator */
" stpx 0x108\n" /* store prefix register */
" stam 0,15,0x120\n" /* store access registers */
" std 0,0x160\n" /* store f0 */
" std 2,0x168\n" /* store f2 */
" std 4,0x170\n" /* store f4 */
" std 6,0x178\n" /* store f6 */
" stm 0,15,0x180\n" /* store general registers */
" stctl 0,15,0x1c0\n" /* store control registers */
" oi 0x1c0,0x10\n" /* fake protection bit */
" lpsw 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
#else /* CONFIG_64BIT */
asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
" lctlg 0,0,0(%2)\n"
" lghi 1,0x1000\n"
" stpt 0x328(1)\n" /* store timer */
" stckc 0x330(1)\n" /* store clock comparator */
" stpx 0x318(1)\n" /* store prefix register */
" stam 0,15,0x340(1)\n"/* store access registers */
" stfpc 0x31c(1)\n" /* store fpu control */
" std 0,0x200(1)\n" /* store f0 */
" std 1,0x208(1)\n" /* store f1 */
" std 2,0x210(1)\n" /* store f2 */
" std 3,0x218(1)\n" /* store f3 */
" std 4,0x220(1)\n" /* store f4 */
" std 5,0x228(1)\n" /* store f5 */
" std 6,0x230(1)\n" /* store f6 */
" std 7,0x238(1)\n" /* store f7 */
" std 8,0x240(1)\n" /* store f8 */
" std 9,0x248(1)\n" /* store f9 */
" std 10,0x250(1)\n" /* store f10 */
" std 11,0x258(1)\n" /* store f11 */
" std 12,0x260(1)\n" /* store f12 */
" std 13,0x268(1)\n" /* store f13 */
" std 14,0x270(1)\n" /* store f14 */
" std 15,0x278(1)\n" /* store f15 */
" stmg 0,15,0x280(1)\n"/* store general registers */
" stctg 0,15,0x380(1)\n"/* store control registers */
" oi 0x384(1),0x10\n"/* fake protection bit */
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
#endif /* CONFIG_64BIT */
while (1);
}
/*
* Used to set the psw mask, except for the first byte, which
* is left unchanged by this function.
*/
static inline void
__set_psw_mask(unsigned long mask)
{
__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}
#define local_mcck_enable() \
__set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
__set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
/*
* Basic Machine Check/Program Check Handler.
*/
extern void s390_base_mcck_handler(void);
extern void s390_base_pgm_handler(void);
extern void s390_base_ext_handler(void);
extern void (*s390_base_mcck_handler_fn)(void);
extern void (*s390_base_pgm_handler_fn)(void);
extern void (*s390_base_ext_handler_fn)(void);
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);
#define mem_assign_absolute(dest, val) { \
__typeof__(dest) __tmp = (val); \
\
BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
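/*
 * Usage sketch (illustrative; the lowcore field shown is only an
 * example): assign a value in the absolute-zero lowcore, bypassing
 * this CPU's prefixing.
 */
#if 0
static inline void example_set_restart_psw(psw_t psw)
{
	mem_assign_absolute(S390_lowcore.restart_psw, psw);
}
#endif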
/*
* Helper macro for exception table entries
*/
#define EX_TABLE(_fault, _target) \
".section __ex_table,\"a\"\n" \
".align 4\n" \
".long (" #_fault ") - .\n" \
".long (" #_target ") - .\n" \
".previous\n"
#else /* __ASSEMBLY__ */
#define EX_TABLE(_fault, _target) \
.section __ex_table,"a" ; \
.align 4 ; \
.long (_fault) - . ; \
.long (_target) - . ; \
.previous
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */

View file

@@ -0,0 +1,181 @@
/*
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
#ifndef _S390_PTRACE_H
#define _S390_PTRACE_H
#include <uapi/asm/ptrace.h>
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
#define _PIF_SYSCALL (1<<PIF_SYSCALL)
#define _PIF_PER_TRAP (1<<PIF_PER_TRAP)
#ifndef __ASSEMBLY__
#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
PSW_MASK_EA | PSW_MASK_BA)
#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
struct psw_bits {
unsigned long long : 1;
unsigned long long r : 1; /* PER-Mask */
unsigned long long : 3;
unsigned long long t : 1; /* DAT Mode */
unsigned long long i : 1; /* Input/Output Mask */
unsigned long long e : 1; /* External Mask */
unsigned long long key : 4; /* PSW Key */
unsigned long long : 1;
unsigned long long m : 1; /* Machine-Check Mask */
unsigned long long w : 1; /* Wait State */
unsigned long long p : 1; /* Problem State */
unsigned long long as : 2; /* Address Space Control */
unsigned long long cc : 2; /* Condition Code */
unsigned long long pm : 4; /* Program Mask */
unsigned long long ri : 1; /* Runtime Instrumentation */
unsigned long long : 6;
unsigned long long eaba : 2; /* Addressing Mode */
#ifdef CONFIG_64BIT
unsigned long long : 31;
unsigned long long ia : 64;/* Instruction Address */
#else
unsigned long long ia : 31;/* Instruction Address */
#endif
};
enum {
PSW_AMODE_24BIT = 0,
PSW_AMODE_31BIT = 1,
PSW_AMODE_64BIT = 3
};
enum {
PSW_AS_PRIMARY = 0,
PSW_AS_ACCREG = 1,
PSW_AS_SECONDARY = 2,
PSW_AS_HOME = 3
};
#define psw_bits(__psw) (*({ \
typecheck(psw_t, __psw); \
&(*(struct psw_bits *)(&(__psw))); \
}))
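/*
 * Illustrative only: inspect a PSW field through the bit view above.
 */
#if 0
static inline int example_psw_is_home(psw_t psw)
{
	return psw_bits(psw).as == PSW_AS_HOME;
}
#endif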
/*
* The pt_regs struct defines the way the registers are stored on
* the stack during a system call.
*/
struct pt_regs
{
unsigned long args[1];
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned long orig_gpr2;
unsigned int int_code;
unsigned int int_parm;
unsigned long int_parm_long;
unsigned long flags;
};
/*
* Program event recording (PER) register set.
*/
struct per_regs {
unsigned long control; /* PER control bits */
unsigned long start; /* PER starting address */
unsigned long end; /* PER ending address */
};
/*
* PER event contains information about the cause of the last PER exception.
*/
struct per_event {
unsigned short cause; /* PER code, ATMID and AI */
unsigned long address; /* PER address */
unsigned char paid; /* PER access identification */
};
/*
* Simplified per_info structure used to decode the ptrace user space ABI.
*/
struct per_struct_kernel {
unsigned long cr9; /* PER control bits */
unsigned long cr10; /* PER starting address */
unsigned long cr11; /* PER ending address */
unsigned long bits; /* Obsolete software bits */
unsigned long starting_addr; /* User specified start address */
unsigned long ending_addr; /* User specified end address */
unsigned short perc_atmid; /* PER trap ATMID */
unsigned long address; /* PER trap instruction address */
unsigned char access_id; /* PER trap access identification */
};
#define PER_EVENT_MASK 0xEB000000UL
#define PER_EVENT_BRANCH 0x80000000UL
#define PER_EVENT_IFETCH 0x40000000UL
#define PER_EVENT_STORE 0x20000000UL
#define PER_EVENT_STORE_REAL 0x08000000UL
#define PER_EVENT_TRANSACTION_END 0x02000000UL
#define PER_EVENT_NULLIFICATION 0x01000000UL
#define PER_CONTROL_MASK 0x00e00000UL
#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
{
regs->flags |= (1U << flag);
}
static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
{
regs->flags &= ~(1U << flag);
}
static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
{
return !!(regs->flags & (1U << flag));
}
/*
* These are defined as per linux/ptrace.h; see that header.
*/
#define arch_has_single_step() (1)
#define arch_has_block_step() (1)
#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
#define user_stack_pointer(regs) ((regs)->gprs[15])
#define profile_pc(regs) instruction_pointer(regs)
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->gprs[2];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->psw.addr = val | PSW_ADDR_AMODE;
}
int regs_query_register_offset(const char *name);
const char *regs_query_register_name(unsigned int offset);
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->gprs[15] & PSW_ADDR_INSN;
}
#endif /* __ASSEMBLY__ */
#endif /* _S390_PTRACE_H */

View file

@@ -0,0 +1,440 @@
/*
* Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*
*/
#ifndef __QDIO_H__
#define __QDIO_H__
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
/* only use 4 queues to save some cachelines */
#define QDIO_MAX_QUEUES_PER_IRQ 4
#define QDIO_MAX_BUFFERS_PER_Q 128
#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#define QDIO_SBAL_SIZE 256
#define QDIO_QETH_QFMT 0
#define QDIO_ZFCP_QFMT 1
#define QDIO_IQDIO_QFMT 2
/**
* struct qdesfmt0 - queue descriptor, format 0
* @sliba: storage list information block address
* @sla: storage list address
* @slsba: storage list state block address
* @akey: access key for DLIB
* @bkey: access key for SL
* @ckey: access key for SBALs
* @dkey: access key for SLSB
*/
struct qdesfmt0 {
u64 sliba;
u64 sla;
u64 slsba;
u32 : 32;
u32 akey : 4;
u32 bkey : 4;
u32 ckey : 4;
u32 dkey : 4;
u32 : 16;
} __attribute__ ((packed));
#define QDR_AC_MULTI_BUFFER_ENABLE 0x01
/**
* struct qdr - queue description record (QDR)
* @qfmt: queue format
* @pfmt: implementation dependent parameter format
* @ac: adapter characteristics
* @iqdcnt: input queue descriptor count
* @oqdcnt: output queue descriptor count
* @iqdsz: input queue descriptor size
* @oqdsz: output queue descriptor size
* @qiba: queue information block address
* @qkey: queue information block key
* @qdf0: queue descriptions
*/
struct qdr {
u32 qfmt : 8;
u32 pfmt : 8;
u32 : 8;
u32 ac : 8;
u32 : 8;
u32 iqdcnt : 8;
u32 : 8;
u32 oqdcnt : 8;
u32 : 8;
u32 iqdsz : 8;
u32 : 8;
u32 oqdsz : 8;
/* private: */
u32 res[9];
/* public: */
u64 qiba;
u32 : 32;
u32 qkey : 4;
u32 : 28;
struct qdesfmt0 qdf0[126];
} __attribute__ ((packed, aligned(4096)));
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
#define QIB_RFLAGS_ENABLE_QEBSM 0x80
#define QIB_RFLAGS_ENABLE_DATA_DIV 0x02
/**
* struct qib - queue information block (QIB)
* @qfmt: queue format
* @pfmt: implementation dependent parameter format
* @rflags: QEBSM
* @ac: adapter characteristics
* @isliba: absolute address of first input SLIB
* @osliba: absolute address of first output SLIB
* @ebcnam: adapter identifier in EBCDIC
* @parm: implementation dependent parameters
*/
struct qib {
u32 qfmt : 8;
u32 pfmt : 8;
u32 rflags : 8;
u32 ac : 8;
u32 : 32;
u64 isliba;
u64 osliba;
u32 : 32;
u32 : 32;
u8 ebcnam[8];
/* private: */
u8 res[88];
/* public: */
u8 parm[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(256)));
/**
* struct slibe - storage list information block element (SLIBE)
* @parms: implementation dependent parameters
*/
struct slibe {
u64 parms;
};
/**
* struct qaob - queue asynchronous operation block
* @res0: reserved parameters
* @res1: reserved parameter
* @res2: reserved parameter
* @res3: reserved parameter
* @aorc: asynchronous operation return code
* @flags: internal flags
* @cbtbs: control block type
* @sb_count: number of storage blocks
* @sba: storage block element addresses
* @dcount: size of storage block elements
* @user0: user-definable value
* @res4: reserved parameter
* @user1: user-definable value
* @user2: user-definable value
*/
struct qaob {
u64 res0[6];
u8 res1;
u8 res2;
u8 res3;
u8 aorc;
u8 flags;
u16 cbtbs;
u8 sb_count;
u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
u64 user0;
u64 res4[2];
u64 user1;
u64 user2;
} __attribute__ ((packed, aligned(256)));
/**
* struct slib - storage list information block (SLIB)
* @nsliba: next SLIB address (if any)
* @sla: SL address
* @slsba: SLSB address
* @slibe: SLIB elements
*/
struct slib {
u64 nsliba;
u64 sla;
u64 slsba;
/* private: */
u8 res[1000];
/* public: */
struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(2048)));
#define SBAL_EFLAGS_LAST_ENTRY 0x40
#define SBAL_EFLAGS_CONTIGUOUS 0x20
#define SBAL_EFLAGS_FIRST_FRAG 0x04
#define SBAL_EFLAGS_MIDDLE_FRAG 0x08
#define SBAL_EFLAGS_LAST_FRAG 0x0c
#define SBAL_EFLAGS_MASK 0x6f
#define SBAL_SFLAGS0_PCI_REQ 0x40
#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
/* Awesome OpenFCP extensions */
#define SBAL_SFLAGS0_TYPE_STATUS 0x00
#define SBAL_SFLAGS0_TYPE_WRITE 0x08
#define SBAL_SFLAGS0_TYPE_READ 0x10
#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18
#define SBAL_SFLAGS0_MORE_SBALS 0x04
#define SBAL_SFLAGS0_COMMAND 0x02
#define SBAL_SFLAGS0_LAST_SBAL 0x00
#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND
#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS
#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
/**
* struct qdio_buffer_element - SBAL entry
* @eflags: SBAL entry flags
* @scount: SBAL count
* @sflags: whole SBAL flags
* @length: length
* @addr: address
*/
struct qdio_buffer_element {
u8 eflags;
/* private: */
u8 res1;
/* public: */
u8 scount;
u8 sflags;
u32 length;
#ifdef CONFIG_32BIT
/* private: */
void *res2;
/* public: */
#endif
void *addr;
} __attribute__ ((packed, aligned(16)));
/**
* struct qdio_buffer - storage block address list (SBAL)
* @element: SBAL entries
*/
struct qdio_buffer {
struct qdio_buffer_element element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed, aligned(256)));
/**
* struct sl_element - storage list entry
* @sbal: absolute SBAL address
*/
struct sl_element {
#ifdef CONFIG_32BIT
/* private: */
unsigned long reserved;
/* public: */
#endif
unsigned long sbal;
} __attribute__ ((packed));
/**
* struct sl - storage list (SL)
* @element: SL entries
*/
struct sl {
struct sl_element element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(1024)));
/**
* struct slsb - storage list state block (SLSB)
* @val: state per buffer
*/
struct slsb {
u8 val[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(256)));
/**
* struct qdio_outbuf_state - SBAL related asynchronous operation information
* (for communication with upper layer programs)
* (only required for use with completion queues)
* @flags: flags indicating state of buffer
* @aob: pointer to QAOB used for the particular SBAL
* @user: pointer to upper layer program's state information related to SBAL
* (stored in user1 data of QAOB)
*/
struct qdio_outbuf_state {
u8 flags;
struct qaob *aob;
void *user;
};
#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80
/* qdio adapter-characteristics-1 flag */
#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080
#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040
#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000
struct qdio_ssqd_desc {
u8 flags;
u8:8;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pcnt;
u8 icnt;
u8:8;
u8 ocnt;
u8:8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
u8 mro;
u8 mri;
u16 qdioac3;
u16:16;
u8:8;
u8 mmwc;
} __attribute__ ((packed));
/* params are: ccw_device, qdio_error, queue_number,
first element processed, number of elements processed, int_parm */
typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
int, int, unsigned long);
/* qdio errors reported to the upper-layer program */
#define QDIO_ERROR_ACTIVATE 0x0001
#define QDIO_ERROR_GET_BUF_STATE 0x0002
#define QDIO_ERROR_SET_BUF_STATE 0x0004
#define QDIO_ERROR_SLSB_STATE 0x0100
#define QDIO_ERROR_FATAL 0x00ff
#define QDIO_ERROR_TEMPORARY 0xff00
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/**
* struct qdio_initialize - qdio initialization data
* @cdev: associated ccw device
* @q_format: queue format
* @qdr_ac: adapter characteristics (feature flags) to set
* @adapter_name: name for the adapter
* @qib_param_field_format: format for qib_param_field
* @qib_param_field: pointer to 128 bytes or NULL, if no param field
* @qib_rflags: rflags to set
* @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
* @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
* @no_input_qs: number of input queues
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
* @queue_start_poll_array: polling handlers (one per input queue or NULL)
* @int_parm: interruption parameter
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
* @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
*/
struct qdio_initialize {
struct ccw_device *cdev;
unsigned char q_format;
unsigned char qdr_ac;
unsigned char adapter_name[8];
unsigned int qib_param_field_format;
unsigned char *qib_param_field;
unsigned char qib_rflags;
unsigned long *input_slib_elements;
unsigned long *output_slib_elements;
unsigned int no_input_qs;
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
void (**queue_start_poll_array) (struct ccw_device *, int,
unsigned long);
int scan_threshold;
unsigned long int_parm;
void **input_sbal_addr_array;
void **output_sbal_addr_array;
struct qdio_outbuf_state *output_sbal_state_array;
};
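/*
 * Minimal single-queue setup sketch (illustrative only; the function
 * name, handlers and SBAL arrays are assumptions supplied by the
 * caller).
 */
#if 0
static int example_qdio_setup(struct ccw_device *cdev,
			      void **in_sbals, void **out_sbals,
			      qdio_handler_t *in_handler,
			      qdio_handler_t *out_handler)
{
	struct qdio_initialize init = {
		.cdev			= cdev,
		.q_format		= QDIO_QETH_QFMT,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= in_handler,
		.output_handler		= out_handler,
		.input_sbal_addr_array	= in_sbals,
		.output_sbal_addr_array	= out_sbals,
	};
	int rc;

	rc = qdio_allocate(&init);
	if (rc)
		return rc;
	rc = qdio_establish(&init);
	if (rc) {
		qdio_free(cdev);
		return rc;
	}
	return qdio_activate(cdev);
}
#endif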
/**
* enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
* @l3_ipv6_addr: entry contains IPv6 address
* @l3_ipv4_addr: entry contains IPv4 address
* @l2_addr_lnid: entry contains MAC address and VLAN ID
*/
enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
/**
* struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
* @nit: Network interface token
* @addr: Address of one of the three types
*
* The struct is passed to the callback function by qdio_brinfo_desc()
*/
struct qdio_brinfo_entry_l3_ipv6 {
u64 nit;
struct { unsigned char _s6_addr[16]; } addr;
} __packed;
struct qdio_brinfo_entry_l3_ipv4 {
u64 nit;
struct { uint32_t _s_addr; } addr;
} __packed;
struct qdio_brinfo_entry_l2 {
u64 nit;
struct { u8 mac[6]; u16 lnid; } addr_lnid;
} __packed;
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_PCI_OUT 0x10
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count);
extern int qdio_allocate(struct qdio_initialize *);
extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);
extern void qdio_release_aob(struct qaob *);
extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
unsigned int);
extern int qdio_start_irq(struct ccw_device *, int);
extern int qdio_stop_irq(struct ccw_device *, int);
extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
extern int qdio_pnso_brinfo(struct subchannel_id schid,
int cnc, u16 *response,
void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
void *entry),
void *priv);
#endif /* __QDIO_H__ */

View file

@@ -0,0 +1,19 @@
/*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#ifndef _ASM_S390_RESET_H
#define _ASM_S390_RESET_H
#include <linux/list.h>
struct reset_call {
struct list_head list;
void (*fn)(void);
};
extern void register_reset_call(struct reset_call *reset);
extern void unregister_reset_call(struct reset_call *reset);
extern void s390_reset_system(void (*func)(void *), void *data);
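/*
 * Usage sketch (hypothetical driver hook, illustrative only):
 * callbacks registered this way are run by s390_reset_system() when
 * the system is torn down, e.g. before a re-IPL.
 */
#if 0
static void example_quiesce(void)
{
	/* stop hypothetical device activity before the reset */
}

static struct reset_call example_reset_call = {
	.fn = example_quiesce,
};

/* in init code: register_reset_call(&example_reset_call); */
#endif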
#endif /* _ASM_S390_RESET_H */

View file

@@ -0,0 +1,98 @@
#ifndef _RUNTIME_INSTR_H
#define _RUNTIME_INSTR_H
#define S390_RUNTIME_INSTR_START 0x1
#define S390_RUNTIME_INSTR_STOP 0x2
struct runtime_instr_cb {
__u64 buf_current;
__u64 buf_origin;
__u64 buf_limit;
__u32 valid : 1;
__u32 pstate : 1;
__u32 pstate_set_buf : 1;
__u32 home_space : 1;
__u32 altered : 1;
__u32 : 3;
__u32 pstate_sample : 1;
__u32 sstate_sample : 1;
__u32 pstate_collect : 1;
__u32 sstate_collect : 1;
__u32 : 1;
__u32 halted_int : 1;
__u32 int_requested : 1;
__u32 buffer_full_int : 1;
__u32 key : 4;
__u32 : 9;
__u32 rgs : 3;
__u32 mode : 4;
__u32 next : 1;
__u32 mae : 1;
__u32 : 2;
__u32 call_type_br : 1;
__u32 return_type_br : 1;
__u32 other_type_br : 1;
__u32 bc_other_type : 1;
__u32 emit : 1;
__u32 tx_abort : 1;
__u32 : 2;
__u32 bp_xn : 1;
__u32 bp_xt : 1;
__u32 bp_ti : 1;
__u32 bp_ni : 1;
__u32 suppr_y : 1;
__u32 suppr_z : 1;
__u32 dc_miss_extra : 1;
__u32 lat_lev_ignore : 1;
__u32 ic_lat_lev : 4;
__u32 dc_lat_lev : 4;
__u64 reserved1;
__u64 scaling_factor;
__u64 rsic;
__u64 reserved2;
} __packed __aligned(8);
extern struct runtime_instr_cb runtime_instr_empty_cb;
static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
{
asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
: : "Q" (*cb));
}
static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
{
asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
: "=Q" (*cb) : : "cc");
}
static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
{
#ifdef CONFIG_64BIT
if (cb_prev)
store_runtime_instr_cb(cb_prev);
#endif
}
static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
struct runtime_instr_cb *cb_prev)
{
#ifdef CONFIG_64BIT
if (cb_next)
load_runtime_instr_cb(cb_next);
else if (cb_prev)
load_runtime_instr_cb(&runtime_instr_empty_cb);
#endif
}
#ifdef CONFIG_64BIT
extern void exit_thread_runtime_instr(void);
#else
static inline void exit_thread_runtime_instr(void) { }
#endif
#endif /* _RUNTIME_INSTR_H */

View file

@@ -0,0 +1,318 @@
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H
/*
* S390 version
* Copyright IBM Corp. 2002
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
*/
/*
*
* The MSW of the count is the negated number of active writers and waiting
* lockers, and the LSW is the total number of active locks
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
* uncontended lock. This can be determined because XADD returns the old value.
* Readers increment by 1 and see a positive value when uncontended, and a
* negative value if there are writers (and possibly readers) waiting, in
* which case the reader goes to sleep.
*
* The value of WAITING_BIAS supports up to 32766 waiting processes. This can
* be extended to 65534 by manually checking the whole MSW rather than relying
* on the S flag.
*
* The value of ACTIVE_BIAS supports up to 65535 active processes.
*
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
* that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
#ifndef CONFIG_64BIT
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
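/*
 * Worked example for the 64-bit encoding (illustrative):
 *
 *	0x0000000000000000	unlocked
 *	0x0000000000000003	three active readers
 *	0xffffffff00000001	one active writer, nobody waiting
 *				(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS,
 *				 i.e. MSW = -1, LSW = 1)
 */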
/*
* lock for reading
*/
static inline void __down_read(struct rw_semaphore *sem)
{
signed long old, new;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
if (old < 0)
rwsem_down_read_failed(sem);
}
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
signed long old, new;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: ltr %1,%0\n"
" jm 1f\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b\n"
"1:"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
return old >= 0 ? 1 : 0;
}
/*
* lock for writing
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
signed long old, new, tmp;
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
if (old != 0)
rwsem_down_write_failed(sem);
}
static inline void __down_write(struct rw_semaphore *sem)
{
__down_write_nested(sem, 0);
}
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
signed long old;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%1\n"
"0: ltr %0,%0\n"
" jnz 1f\n"
" cs %0,%3,%1\n"
" jl 0b\n"
#else /* CONFIG_64BIT */
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
#endif /* CONFIG_64BIT */
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
: "cc", "memory");
return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}
/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
{
signed long old, new;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* unlock after writing
*/
static inline void __up_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* downgrade write lock to read lock
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;
tmp = -RWSEM_WAITING_BIAS;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
if (new > 1)
rwsem_downgrade_wake(sem);
}
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
signed long old, new;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ar %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
}
/*
* implement exchange and add functionality
*/
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
signed long old, new;
asm volatile(
#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ar %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
return new;
}
#endif /* _S390_RWSEM_H */

View file

@@ -0,0 +1,21 @@
#ifndef ASM_SCHID_H
#define ASM_SCHID_H
#include <linux/string.h>
#include <uapi/asm/schid.h>
/* Helper function for sane state of pre-allocated subchannel_id. */
static inline void
init_subchannel_id(struct subchannel_id *schid)
{
memset(schid, 0, sizeof(struct subchannel_id));
schid->one = 1;
}
static inline int
schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
{
return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
}
#endif /* ASM_SCHID_H */

View file

@@ -0,0 +1,71 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#ifndef _ASM_S390_SCLP_H
#define _ASM_S390_SCLP_H
#include <linux/types.h>
#include <asm/chpid.h>
#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
struct sclp_chp_info {
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
};
#define LOADPARM_LEN 8
struct sclp_ipl_info {
int is_valid;
int has_dump;
char loadparm[LOADPARM_LEN];
};
struct sclp_cpu_entry {
u8 address;
u8 reserved0[2];
u8 : 3;
u8 siif : 1;
u8 : 4;
u8 reserved2[10];
u8 type;
u8 reserved1;
} __attribute__((packed));
struct sclp_cpu_info {
unsigned int configured;
unsigned int standby;
unsigned int combined;
int has_cpu_type;
struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
};
int sclp_get_cpu_info(struct sclp_cpu_info *info);
int sclp_cpu_configure(u8 cpu);
int sclp_cpu_deconfigure(u8 cpu);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
unsigned int sclp_get_max_cpu(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
bool __init sclp_has_linemode(void);
bool __init sclp_has_vt220(void);
bool sclp_has_sprp(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
unsigned long sclp_get_hsa_size(void);
void sclp_early_detect(void);
int sclp_has_siif(void);
unsigned int sclp_get_ibc(void);
#endif /* _ASM_S390_SCLP_H */

View file

@@ -0,0 +1,988 @@
/*
* Helper functions for scsw access.
*
* Copyright IBM Corp. 2008, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef _ASM_S390_SCSW_H_
#define _ASM_S390_SCSW_H_
#include <linux/types.h>
#include <asm/css_chars.h>
#include <asm/cio.h>
/**
* struct cmd_scsw - command-mode subchannel status word
* @key: subchannel key
* @sctl: suspend control
* @eswf: esw format
* @cc: deferred condition code
* @fmt: format
* @pfch: prefetch
* @isic: initial-status interruption control
* @alcc: address-limit checking control
* @ssi: suppress-suspended interruption
* @zcc: zero condition code
* @ectl: extended control
* @pno: path not operational
* @res: reserved
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @cpa: channel program address
* @dstat: device status
* @cstat: subchannel status
* @count: residual count
*/
struct cmd_scsw {
__u32 key : 4;
__u32 sctl : 1;
__u32 eswf : 1;
__u32 cc : 2;
__u32 fmt : 1;
__u32 pfch : 1;
__u32 isic : 1;
__u32 alcc : 1;
__u32 ssi : 1;
__u32 zcc : 1;
__u32 ectl : 1;
__u32 pno : 1;
__u32 res : 1;
__u32 fctl : 3;
__u32 actl : 7;
__u32 stctl : 5;
__u32 cpa;
__u32 dstat : 8;
__u32 cstat : 8;
__u32 count : 16;
} __attribute__ ((packed));
/**
* struct tm_scsw - transport-mode subchannel status word
* @key: subchannel key
* @eswf: esw format
* @cc: deferred condition code
* @fmt: format
* @x: IRB-format control
* @q: interrogate-complete
* @ectl: extended control
* @pno: path not operational
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @tcw: TCW address
* @dstat: device status
* @cstat: subchannel status
* @fcxs: FCX status
* @schxs: subchannel-extended status
*/
struct tm_scsw {
u32 key:4;
u32 :1;
u32 eswf:1;
u32 cc:2;
u32 fmt:3;
u32 x:1;
u32 q:1;
u32 :1;
u32 ectl:1;
u32 pno:1;
u32 :1;
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
u32 tcw;
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
u32 schxs:8;
} __attribute__ ((packed));
/**
* struct eadm_scsw - subchannel status word for eadm subchannels
* @key: subchannel key
* @eswf: esw format
* @cc: deferred condition code
* @ectl: extended control
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @aob: AOB address
* @dstat: device status
* @cstat: subchannel status
*/
struct eadm_scsw {
u32 key:4;
u32:1;
u32 eswf:1;
u32 cc:2;
u32:6;
u32 ectl:1;
u32:2;
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
u32 aob;
u32 dstat:8;
u32 cstat:8;
u32:16;
} __packed;
/**
* union scsw - subchannel status word
* @cmd: command-mode SCSW
* @tm: transport-mode SCSW
* @eadm: eadm SCSW
*/
union scsw {
struct cmd_scsw cmd;
struct tm_scsw tm;
struct eadm_scsw eadm;
} __packed;
#define SCSW_FCTL_CLEAR_FUNC 0x1
#define SCSW_FCTL_HALT_FUNC 0x2
#define SCSW_FCTL_START_FUNC 0x4
#define SCSW_ACTL_SUSPENDED 0x1
#define SCSW_ACTL_DEVACT 0x2
#define SCSW_ACTL_SCHACT 0x4
#define SCSW_ACTL_CLEAR_PEND 0x8
#define SCSW_ACTL_HALT_PEND 0x10
#define SCSW_ACTL_START_PEND 0x20
#define SCSW_ACTL_RESUME_PEND 0x40
#define SCSW_STCTL_STATUS_PEND 0x1
#define SCSW_STCTL_SEC_STATUS 0x2
#define SCSW_STCTL_PRIM_STATUS 0x4
#define SCSW_STCTL_INTER_STATUS 0x8
#define SCSW_STCTL_ALERT_STATUS 0x10
#define DEV_STAT_ATTENTION 0x80
#define DEV_STAT_STAT_MOD 0x40
#define DEV_STAT_CU_END 0x20
#define DEV_STAT_BUSY 0x10
#define DEV_STAT_CHN_END 0x08
#define DEV_STAT_DEV_END 0x04
#define DEV_STAT_UNIT_CHECK 0x02
#define DEV_STAT_UNIT_EXCEP 0x01
#define SCHN_STAT_PCI 0x80
#define SCHN_STAT_INCORR_LEN 0x40
#define SCHN_STAT_PROG_CHECK 0x20
#define SCHN_STAT_PROT_CHECK 0x10
#define SCHN_STAT_CHN_DATA_CHK 0x08
#define SCHN_STAT_CHN_CTRL_CHK 0x04
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
/*
* architectured values for first sense byte
*/
#define SNS0_CMD_REJECT 0x80
#define SNS_CMD_REJECT SNS0_CMD_REJECT
#define SNS0_INTERVENTION_REQ 0x40
#define SNS0_BUS_OUT_CHECK 0x20
#define SNS0_EQUIPMENT_CHECK 0x10
#define SNS0_DATA_CHECK 0x08
#define SNS0_OVERRUN 0x04
#define SNS0_INCOMPL_DOMAIN 0x01
/*
* architectured values for second sense byte
*/
#define SNS1_PERM_ERR 0x80
#define SNS1_INV_TRACK_FORMAT 0x40
#define SNS1_EOC 0x20
#define SNS1_MESSAGE_TO_OPER 0x10
#define SNS1_NO_REC_FOUND 0x08
#define SNS1_FILE_PROTECTED 0x04
#define SNS1_WRITE_INHIBITED 0x02
#define SNS1_INPRECISE_END 0x01
/*
* architectured values for third sense byte
*/
#define SNS2_REQ_INH_WRITE 0x80
#define SNS2_CORRECTABLE 0x40
#define SNS2_FIRST_LOG_ERR 0x20
#define SNS2_ENV_DATA_PRESENT 0x10
#define SNS2_INPRECISE_END 0x04
/**
* scsw_is_tm - check for transport mode scsw
* @scsw: pointer to scsw
*
* Return non-zero if the specified scsw is a transport mode scsw, zero
* otherwise.
*/
static inline int scsw_is_tm(union scsw *scsw)
{
return css_general_characteristics.fcx && (scsw->tm.x == 1);
}
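/*
 * Illustrative sketch: the channel-program field differs by mode
 * (cpa vs. tcw), so callers dispatch on scsw_is_tm(), just as the
 * accessors below do for the common fields.
 */
#if 0
static inline u32 example_channel_prog_addr(union scsw *scsw)
{
	return scsw_is_tm(scsw) ? scsw->tm.tcw : scsw->cmd.cpa;
}
#endif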
/**
* scsw_key - return scsw key field
* @scsw: pointer to scsw
*
* Return the value of the key field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.key;
else
return scsw->cmd.key;
}
/**
* scsw_eswf - return scsw eswf field
* @scsw: pointer to scsw
*
* Return the value of the eswf field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.eswf;
else
return scsw->cmd.eswf;
}
/**
* scsw_cc - return scsw cc field
* @scsw: pointer to scsw
*
* Return the value of the cc field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cc;
else
return scsw->cmd.cc;
}
/**
* scsw_ectl - return scsw ectl field
* @scsw: pointer to scsw
*
* Return the value of the ectl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.ectl;
else
return scsw->cmd.ectl;
}
/**
* scsw_pno - return scsw pno field
* @scsw: pointer to scsw
*
* Return the value of the pno field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.pno;
else
return scsw->cmd.pno;
}
/**
* scsw_fctl - return scsw fctl field
* @scsw: pointer to scsw
*
* Return the value of the fctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.fctl;
else
return scsw->cmd.fctl;
}
/**
* scsw_actl - return scsw actl field
* @scsw: pointer to scsw
*
* Return the value of the actl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.actl;
else
return scsw->cmd.actl;
}
/**
* scsw_stctl - return scsw stctl field
* @scsw: pointer to scsw
*
* Return the value of the stctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.stctl;
else
return scsw->cmd.stctl;
}
/**
* scsw_dstat - return scsw dstat field
* @scsw: pointer to scsw
*
* Return the value of the dstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.dstat;
else
return scsw->cmd.dstat;
}
/**
* scsw_cstat - return scsw cstat field
* @scsw: pointer to scsw
*
* Return the value of the cstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
static inline u32 scsw_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cstat;
else
return scsw->cmd.cstat;
}
/**
* scsw_cmd_is_valid_key - check key field validity
* @scsw: pointer to scsw
*
* Return non-zero if the key field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_key(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_sctl - check sctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the sctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_eswf - check eswf field validity
* @scsw: pointer to scsw
*
* Return non-zero if the eswf field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
/**
* scsw_cmd_is_valid_cc - check cc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cc field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
/**
* scsw_cmd_is_valid_fmt - check fmt field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fmt field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_pfch - check pfch field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pfch field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_isic - check isic field validity
* @scsw: pointer to scsw
*
* Return non-zero if the isic field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_alcc - check alcc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the alcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_ssi - check ssi field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ssi field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_cmd_is_valid_zcc - check zcc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the zcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
}
/**
* scsw_cmd_is_valid_ectl - check ectl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ectl field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
}
/**
* scsw_cmd_is_valid_pno - check pno field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pno field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
{
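	/*
	 * Editor's note: pno is meaningful only while a function is in
	 * progress with status pending; an intermediate interrupt counts
	 * only if the subchannel is suspended.
	 */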
return (scsw->cmd.fctl != 0) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
}
/**
* scsw_cmd_is_valid_fctl - check fctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
return 1;
}
/**
* scsw_cmd_is_valid_actl - check actl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the actl field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
return 1;
}
/**
* scsw_cmd_is_valid_stctl - check stctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the stctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
return 1;
}
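
/*
 * Editor's note with a sketch: the three checks above cannot see pmcw.dnv
 * from the scsw alone, hence the unconditional 1. A caller holding the
 * full subchannel-information block can do the real test; struct schib
 * and its pmcw member are assumed here to come from <asm/schib.h>.
 */
static inline int example_cmd_fctl_really_valid(struct schib *schib)
{
	return schib->pmcw.dnv;
}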
/**
* scsw_cmd_is_valid_dstat - check dstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the dstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
/**
* scsw_cmd_is_valid_cstat - check cstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
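
/*
 * Editor's usage sketch: a command-mode unit-check test, gated on dstat
 * validity. DEV_STAT_UNIT_CHECK is assumed to come from <asm/cio.h>.
 */
static inline int example_cmd_unit_check(union scsw *scsw)
{
	return scsw_cmd_is_valid_dstat(scsw) &&
	       (scsw->cmd.dstat & DEV_STAT_UNIT_CHECK);
}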
/**
* scsw_tm_is_valid_key - check key field validity
* @scsw: pointer to scsw
*
* Return non-zero if the key field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_key(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
}
/**
* scsw_tm_is_valid_eswf - check eswf field validity
* @scsw: pointer to scsw
*
* Return non-zero if the eswf field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
/**
* scsw_tm_is_valid_cc - check cc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cc field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_cc(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
/**
* scsw_tm_is_valid_fmt - check fmt field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fmt field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
{
return 1;
}
/**
* scsw_tm_is_valid_x - check x field validity
* @scsw: pointer to scsw
*
* Return non-zero if the x field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_x(union scsw *scsw)
{
return 1;
}
/**
* scsw_tm_is_valid_q - check q field validity
* @scsw: pointer to scsw
*
* Return non-zero if the q field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_q(union scsw *scsw)
{
return 1;
}
/**
* scsw_tm_is_valid_ectl - check ectl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ectl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
/**
* scsw_tm_is_valid_pno - check pno field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pno field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_pno(union scsw *scsw)
{
return (scsw->tm.fctl != 0) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
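
/*
 * Editor's usage sketch: the transport-mode path-not-operational bit is
 * only trusted when the check above passes.
 */
static inline int example_tm_path_not_oper(union scsw *scsw)
{
	return scsw_tm_is_valid_pno(scsw) && scsw->tm.pno;
}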
/**
* scsw_tm_is_valid_fctl - check fctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
return 1;
}
/**
* scsw_tm_is_valid_actl - check actl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the actl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_actl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
return 1;
}
/**
* scsw_tm_is_valid_stctl - check stctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the stctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
{
	/* Only valid if pmcw.dnv == 1 */
return 1;
}
/**
* scsw_tm_is_valid_dstat - check dstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the dstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
/**
* scsw_tm_is_valid_cstat - check cstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
/**
* scsw_tm_is_valid_fcxs - check fcxs field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fcxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
return 1;
}
/**
* scsw_tm_is_valid_schxs - check schxs field validity
* @scsw: pointer to scsw
*
* Return non-zero if the schxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
{
return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_PROT_CHECK |
SCHN_STAT_CHN_DATA_CHK));
}
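
/*
 * Editor's usage sketch: fetch the extended-status byte only when one of
 * the qualifying channel checks is raised; -EINVAL is an assumed error
 * convention.
 */
static inline int example_tm_get_schxs(union scsw *scsw)
{
	if (!scsw_tm_is_valid_schxs(scsw))
		return -EINVAL;
	return scsw->tm.schxs;
}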
/**
* scsw_is_valid_actl - check actl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the actl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_actl(scsw);
else
return scsw_cmd_is_valid_actl(scsw);
}
/**
* scsw_is_valid_cc - check cc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cc field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cc(scsw);
else
return scsw_cmd_is_valid_cc(scsw);
}
/**
* scsw_is_valid_cstat - check cstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cstat field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cstat(scsw);
else
return scsw_cmd_is_valid_cstat(scsw);
}
/**
* scsw_is_valid_dstat - check dstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the dstat field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_dstat(scsw);
else
return scsw_cmd_is_valid_dstat(scsw);
}
/**
* scsw_is_valid_ectl - check ectl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ectl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_ectl(scsw);
else
return scsw_cmd_is_valid_ectl(scsw);
}
/**
* scsw_is_valid_eswf - check eswf field validity
* @scsw: pointer to scsw
*
* Return non-zero if the eswf field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_eswf(scsw);
else
return scsw_cmd_is_valid_eswf(scsw);
}
/**
* scsw_is_valid_fctl - check fctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fctl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_fctl(scsw);
else
return scsw_cmd_is_valid_fctl(scsw);
}
/**
* scsw_is_valid_key - check key field validity
* @scsw: pointer to scsw
*
* Return non-zero if the key field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_key(scsw);
else
return scsw_cmd_is_valid_key(scsw);
}
/**
* scsw_is_valid_pno - check pno field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pno field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_pno(scsw);
else
return scsw_cmd_is_valid_pno(scsw);
}
/**
* scsw_is_valid_stctl - check stctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the stctl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
static inline int scsw_is_valid_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_stctl(scsw);
else
return scsw_cmd_is_valid_stctl(scsw);
}
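
/*
 * Editor's usage sketch: the mode-independent validity wrappers pair
 * naturally with the accessors at the top of this file; a checked getter
 * for dstat might look like this (error convention assumed).
 */
static inline int example_get_dstat_checked(union scsw *scsw, u32 *dstat)
{
	if (!scsw_is_valid_dstat(scsw))
		return -EINVAL;
	*dstat = scsw_dstat(scsw);
	return 0;
}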
/**
* scsw_cmd_is_solicited - check for solicited scsw
* @scsw: pointer to scsw
*
* Return non-zero if the command mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
static inline int scsw_cmd_is_solicited(union scsw *scsw)
{
return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
/**
* scsw_tm_is_solicited - check for solicited scsw
* @scsw: pointer to scsw
*
* Return non-zero if the transport mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
static inline int scsw_tm_is_solicited(union scsw *scsw)
{
return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
/**
* scsw_is_solicited - check for solicited scsw
* @scsw: pointer to scsw
*
* Return non-zero if the transport or command mode scsw indicates that the
* associated status condition is solicited, zero if it is unsolicited.
*/
static inline int scsw_is_solicited(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_solicited(scsw);
else
return scsw_cmd_is_solicited(scsw);
}
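
/*
 * Editor's usage sketch: a driver's interrupt handler typically routes
 * solicited and unsolicited status differently; both consumers below are
 * hypothetical and declared only for the sketch.
 */
extern void example_handle_io_completion(union scsw *scsw);
extern void example_handle_unsolicited(union scsw *scsw);

static inline void example_dispatch_status(union scsw *scsw)
{
	if (scsw_is_solicited(scsw))
		example_handle_io_completion(scsw);
	else
		example_handle_unsolicited(scsw);
}
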
#endif /* _ASM_S390_SCSW_H_ */

View file

@ -0,0 +1,16 @@
#ifndef _ASM_S390_SECCOMP_H
#define _ASM_S390_SECCOMP_H

#include <linux/unistd.h>

#define __NR_seccomp_read	__NR_read
#define __NR_seccomp_write	__NR_write
#define __NR_seccomp_exit	__NR_exit
#define __NR_seccomp_sigreturn	__NR_sigreturn

#define __NR_seccomp_read_32	__NR_read
#define __NR_seccomp_write_32	__NR_write
#define __NR_seccomp_exit_32	__NR_exit
#define __NR_seccomp_sigreturn_32 __NR_sigreturn

#endif	/* _ASM_S390_SECCOMP_H */
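
/*
 * Editor's usage sketch: strict-mode seccomp checks the current syscall
 * against a fixed, zero-terminated list built from these aliases; the
 * table below mirrors the shape of the mode-1 list in kernel/seccomp.c.
 */
static const int example_mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write,
	__NR_seccomp_exit, __NR_seccomp_sigreturn,
	0,	/* zero-terminated */
};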
