mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-10-29 07:18:51 +01:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
10
arch/s390/lib/Makefile
Normal file
10
arch/s390/lib/Makefile
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
#
# Makefile for s390-specific library files.
#

lib-y += delay.o string.o uaccess.o find.o
# 31-bit kernels need the 64-bit division helpers and 31-bit mem ops
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o
# kprobes and uprobes share the common probe helpers in probes.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
|
||||
129
arch/s390/lib/delay.c
Normal file
129
arch/s390/lib/delay.c
Normal file
|
|
@ -0,0 +1,129 @@
|
|||
/*
|
||||
* Precise Delay Loops for S390
|
||||
*
|
||||
* Copyright IBM Corp. 1999, 2008
|
||||
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>,
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/irqflags.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/vtimer.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
/* Busy-wait for 'loops' iterations of a two-instruction brct loop. */
void __delay(unsigned long loops)
{
	/*
	 * To end the bloody stupid and useless discussion about the
	 * BogoMips number I took the liberty to define the __delay
	 * function in a way that that resulting BogoMips number will
	 * yield the megahertz number of the cpu. The important function
	 * is udelay and that is done using the tod clock. -- martin.
	 */
	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
}
|
||||
|
||||
/*
 * Delay while interrupts must stay logically disabled: temporarily
 * rewrite CR0/CR6 so that only the clock-comparator wakeup can get
 * through, then wait until the TOD clock passes the target value.
 */
static void __udelay_disabled(unsigned long long usecs)
{
	unsigned long cr0, cr6, new;
	u64 clock_saved, end;

	/* TOD clock units: usecs << 12 (4096 TOD ticks per microsecond) */
	end = get_tod_clock() + (usecs << 12);
	clock_saved = local_tick_disable();
	/* save control registers 0 and 6 for restoration below */
	__ctl_store(cr0, 0, 0);
	__ctl_store(cr6, 6, 6);
	/* enable the clock-comparator subclass in CR0 */
	new = (cr0 & 0xffff00e0) | 0x00000800;
	__ctl_load(new , 0, 0);
	/* mask all external interrupt subclasses in CR6 */
	new = 0;
	__ctl_load(new, 6, 6);
	/* enabled_wait() below briefly enables interrupts; keep lockdep quiet */
	lockdep_off();
	do {
		set_clock_comparator(end);
		enabled_wait();
	} while (get_tod_clock_fast() < end);
	lockdep_on();
	__ctl_load(cr0, 0, 0);
	__ctl_load(cr6, 6, 6);
	local_tick_enable(clock_saved);
}
|
||||
|
||||
/*
 * Delay with interrupts enabled: wait on the clock comparator, but
 * only reprogram it when our deadline is earlier than the one that
 * is already set (otherwise the pending wakeup suffices).
 */
static void __udelay_enabled(unsigned long long usecs)
{
	u64 clock_saved, end;

	/* TOD clock units: usecs << 12 (4096 TOD ticks per microsecond) */
	end = get_tod_clock_fast() + (usecs << 12);
	do {
		clock_saved = 0;
		if (end < S390_lowcore.clock_comparator) {
			/* our deadline is sooner: take over the comparator */
			clock_saved = local_tick_disable();
			set_clock_comparator(end);
		}
		enabled_wait();
		if (clock_saved)
			local_tick_enable(clock_saved);
	} while (get_tod_clock_fast() < end);
}
|
||||
|
||||
/*
 * Waits for 'usecs' microseconds using the TOD clock comparator.
 * Picks the enabled or disabled wait variant depending on the
 * interrupt context and state we were called in.
 */
void __udelay(unsigned long long usecs)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
	if (in_irq()) {
		/* hard interrupt context: never re-enable interrupts */
		__udelay_disabled(usecs);
		goto out;
	}
	if (in_softirq()) {
		if (raw_irqs_disabled_flags(flags))
			__udelay_disabled(usecs);
		else
			__udelay_enabled(usecs);
		goto out;
	}
	if (raw_irqs_disabled_flags(flags)) {
		/*
		 * Process context with interrupts disabled: keep softirqs
		 * from running while the disabled wait briefly opens up.
		 */
		local_bh_disable();
		__udelay_disabled(usecs);
		_local_bh_enable();
		goto out;
	}
	__udelay_enabled(usecs);
out:
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(__udelay);
|
||||
|
||||
/*
 * Simple udelay variant. To be used on startup and reboot
 * when the interrupt handler isn't working.
 */
void udelay_simple(unsigned long long usecs)
{
	u64 end;

	/* pure busy wait on the TOD clock, no interrupts involved */
	end = get_tod_clock_fast() + (usecs << 12);
	while (get_tod_clock_fast() < end)
		cpu_relax();
}
|
||||
|
||||
/* Nanosecond delay based on the TOD clock. */
void __ndelay(unsigned long long nsecs)
{
	u64 end;

	/* convert nanoseconds to TOD ticks: nsecs * 4096 / 1000 */
	nsecs <<= 9;
	do_div(nsecs, 125);
	end = get_tod_clock_fast() + nsecs;
	/* for delays of a microsecond or more use the udelay machinery */
	if (nsecs & ~0xfffUL)
		__udelay(nsecs >> 12);
	while (get_tod_clock_fast() < end)
		barrier();
}
EXPORT_SYMBOL(__ndelay);
|
||||
147
arch/s390/lib/div64.c
Normal file
147
arch/s390/lib/div64.c
Normal file
|
|
@ -0,0 +1,147 @@
|
|||
/*
|
||||
* __div64_32 implementation for 31 bit.
|
||||
*
|
||||
* Copyright IBM Corp. 2006
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#ifdef CONFIG_MARCH_G5
|
||||
|
||||
/*
 * Function to divide an unsigned 64 bit integer by an unsigned
 * 31 bit integer using signed 64/32 bit division.
 * On return *n holds the quotient, the remainder is returned.
 */
static uint32_t __div64_31(uint64_t *n, uint32_t base)
{
	/* 'dr' needs an even/odd register pair; pin r2/r3 explicitly */
	register uint32_t reg2 asm("2");
	register uint32_t reg3 asm("3");
	uint32_t *words = (uint32_t *) n;
	uint32_t tmp;

	/* Special case base==1, remainder = 0, quotient = n */
	if (base == 1)
		return 0;
	/*
	 * Special case base==0 will cause a fixed point divide exception
	 * on the dr instruction and may not happen anyway. For the
	 * following calculation we can assume base > 1. The first
	 * signed 64 / 32 bit division with an upper half of 0 will
	 * give the correct upper half of the 64 bit quotient.
	 */
	reg2 = 0UL;
	reg3 = words[0];
	asm volatile(
		"	dr %0,%2\n"
		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
	words[0] = reg3;
	reg3 = words[1];
	/*
	 * To get the lower half of the 64 bit quotient and the 32 bit
	 * remainder we have to use a little trick. Since we only have
	 * a signed division the quotient can get too big. To avoid this
	 * the 64 bit dividend is halved, then the signed division will
	 * work. Afterwards the quotient and the remainder are doubled.
	 * If the last bit of the dividend has been one the remainder
	 * is increased by one then checked against the base. If the
	 * remainder has overflown subtract base and increase the
	 * quotient. Simple, no ?
	 */
	/* operand %2 (tmp) starts as 1 and captures the dividend's last bit */
	asm volatile(
		"	nr %2,%1\n"
		"	srdl %0,1\n"
		"	dr %0,%3\n"
		"	alr %0,%0\n"
		"	alr %1,%1\n"
		"	alr %0,%2\n"
		"	clr %0,%3\n"
		"	jl 0f\n"
		"	slr %0,%3\n"
		"	ahi %1,1\n"
		"0:\n"
		: "+d" (reg2), "+d" (reg3), "=d" (tmp)
		: "d" (base), "2" (1UL) : "cc" );
	words[1] = reg3;
	return reg2;
}
|
||||
|
||||
/*
 * Function to divide an unsigned 64 bit integer by an unsigned
 * 32 bit integer using the unsigned 64/31 bit division.
 * On return *n holds the quotient, the remainder is returned.
 */
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	uint32_t r;

	/*
	 * If the most significant bit of base is set, divide n by
	 * (base/2). That allows to use 64/31 bit division and gives a
	 * good approximation of the result: n = (base/2)*q + r. The
	 * result needs to be corrected with two simple transformations.
	 * If base is already < 2^31-1 __div64_31 can be used directly.
	 */
	r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
	if ((signed) base < 0) {
		uint64_t q = *n;
		/*
		 * First transformation:
		 * n = (base/2)*q + r
		 *   = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
		 * Since r < (base/2), r + (base/2) < base.
		 * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
		 * n = ((base/2)*2)*q1 + r1 with r1 < base.
		 */
		if (q & 1)
			r += base/2;
		q >>= 1;
		/*
		 * Second transformation. ((base/2)*2) could have lost the
		 * last bit.
		 * n = ((base/2)*2)*q1 + r1
		 *   = base*q1 - ((base&1) ? q1 : 0) + r1
		 */
		if (base & 1) {
			int64_t rx = r - q;
			/*
			 * base is >= 2^31. The worst case for the while
			 * loop is n=2^64-1 base=2^31+1. That gives a
			 * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
			 * base >= 2^31 the loop is finished after a maximum
			 * of three iterations.
			 */
			while (rx < 0) {
				rx += base;
				q--;
			}
			r = rx;
		}
		*n = q;
	}
	return r;
}
|
||||
|
||||
#else /* MARCH_G5 */
|
||||
|
||||
/*
 * Non-G5 machines have the dlr (unsigned 64/32 bit divide)
 * instruction, so the division can be done directly in two steps:
 * first the upper, then the lower word of the dividend.
 * On return *n holds the quotient, the remainder is returned.
 */
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	/* 'dlr' needs an even/odd register pair; pin r2/r3 explicitly */
	register uint32_t reg2 asm("2");
	register uint32_t reg3 asm("3");
	uint32_t *words = (uint32_t *) n;

	reg2 = 0UL;
	reg3 = words[0];
	asm volatile(
		"	dlr %0,%2\n"
		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
	words[0] = reg3;
	reg3 = words[1];
	/* remainder of the first division carries into the second */
	asm volatile(
		"	dlr %0,%2\n"
		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
	words[1] = reg3;
	return reg2;
}
|
||||
|
||||
#endif /* MARCH_G5 */
|
||||
77
arch/s390/lib/find.c
Normal file
77
arch/s390/lib/find.c
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
/*
|
||||
* MSB0 numbered special bitops handling.
|
||||
*
|
||||
* On s390x the bits are numbered:
|
||||
* |0..............63|64............127|128...........191|192...........255|
|
||||
* and on s390:
|
||||
* |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
|
||||
*
|
||||
* The reason for this bit numbering is the fact that the hardware sets bits
|
||||
* in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
|
||||
* from the 'wrong end'.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
/*
 * Find the first set bit in an MSB0-numbered bitmap of 'size' bits.
 * Returns the bit number, or 'size' if no bit is set.
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
{
	const unsigned long *p = addr;
	unsigned long result = 0;
	unsigned long tmp;

	/* scan whole words first */
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = *(p++)))
			goto found;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	/* partial last word: keep only the 'size' most significant bits */
	tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
	if (!tmp)		/* Are any bits set? */
		return result + size;	/* Nope. */
found:
	/* MSB0 numbering: bit 0 is the most significant bit */
	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
}
EXPORT_SYMBOL(find_first_bit_inv);
|
||||
|
||||
/*
 * Find the next set bit at or after 'offset' in an MSB0-numbered
 * bitmap of 'size' bits. Returns the bit number, or 'size' if no
 * further bit is set.
 */
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset)
{
	const unsigned long *p = addr + (offset / BITS_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset %= BITS_PER_LONG;
	if (offset) {
		/* mask off the 'offset' most significant (lower-numbered) bits */
		tmp = *(p++);
		tmp &= (~0UL >> offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	/* scan whole words */
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	/* partial last word: keep only the 'size' most significant bits */
	tmp &= (~0UL << (BITS_PER_LONG - size));
	if (!tmp)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	/* MSB0 numbering: bit 0 is the most significant bit */
	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
}
EXPORT_SYMBOL(find_next_bit_inv);
|
||||
92
arch/s390/lib/mem32.S
Normal file
92
arch/s390/lib/mem32.S
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* String handling functions.
|
||||
*
|
||||
* Copyright IBM Corp. 2012
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/*
 * memset implementation (31 bit)
 *
 * This code corresponds to the C construct below. We do distinguish
 * between clearing (c == 0) and setting a memory array (c != 0) simply
 * because nearly all memset invocations in the kernel clear memory and
 * the xc instruction is preferred in such cases.
 *
 * void *memset(void *s, int c, size_t n)
 * {
 *	if (likely(c == 0))
 *		return __builtin_memset(s, 0, n);
 *	return __builtin_memset(s, c, n);
 * }
 */
ENTRY(memset)
	basr	%r5,%r0			# base register for ex relative addressing
.Lmemset_base:
	ltr	%r4,%r4			# n == 0 ?
	bzr	%r14			# yes -> nothing to do
	ltr	%r3,%r3			# c == 0 ?
	jnz	.Lmemset_fill		# no -> fill with the byte value
	ahi	%r4,-1			# ex uses "length - 1"
	lr	%r3,%r4
	srl	%r3,8			# number of full 256 byte chunks
	ltr	%r3,%r3
	lr	%r1,%r2
	je	.Lmemset_clear_rest
.Lmemset_clear_loop:
	xc	0(256,%r1),0(%r1)	# clear 256 bytes at a time
	la	%r1,256(%r1)
	brct	%r3,.Lmemset_clear_loop
.Lmemset_clear_rest:
	ex	%r4,.Lmemset_xc-.Lmemset_base(%r5)	# clear remaining bytes
	br	%r14
.Lmemset_fill:
	stc	%r3,0(%r2)		# store first byte
	chi	%r4,1			# n == 1 ?
	lr	%r1,%r2
	ber	%r14			# yes -> done
	ahi	%r4,-2			# first byte stored, ex uses "length - 1"
	lr	%r3,%r4
	srl	%r3,8			# number of full 256 byte chunks
	ltr	%r3,%r3
	je	.Lmemset_fill_rest
.Lmemset_fill_loop:
	mvc	1(256,%r1),0(%r1)	# propagate byte by overlapping copy
	la	%r1,256(%r1)
	brct	%r3,.Lmemset_fill_loop
.Lmemset_fill_rest:
	ex	%r4,.Lmemset_mvc-.Lmemset_base(%r5)	# fill remaining bytes
	br	%r14
.Lmemset_xc:
	xc	0(1,%r1),0(%r1)		# template, length patched by ex
.Lmemset_mvc:
	mvc	1(1,%r1),0(%r1)		# template, length patched by ex
|
||||
|
||||
/*
 * memcpy implementation (31 bit)
 *
 * void *memcpy(void *dest, const void *src, size_t n)
 */
ENTRY(memcpy)
	basr	%r5,%r0			# base register for ex relative addressing
.Lmemcpy_base:
	ltr	%r4,%r4			# n == 0 ?
	bzr	%r14			# yes -> nothing to do
	ahi	%r4,-1			# ex uses "length - 1"
	lr	%r0,%r4
	srl	%r0,8			# number of full 256 byte chunks
	ltr	%r0,%r0
	lr	%r1,%r2
	jnz	.Lmemcpy_loop
.Lmemcpy_rest:
	ex	%r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)	# copy remaining bytes
	br	%r14
.Lmemcpy_loop:
	mvc	0(256,%r1),0(%r3)	# copy 256 bytes at a time
	la	%r1,256(%r1)
	la	%r3,256(%r3)
	brct	%r0,.Lmemcpy_loop
	j	.Lmemcpy_rest
.Lmemcpy_mvc:
	mvc	0(1,%r1),0(%r3)		# template, length patched by ex
|
||||
88
arch/s390/lib/mem64.S
Normal file
88
arch/s390/lib/mem64.S
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* String handling functions.
|
||||
*
|
||||
* Copyright IBM Corp. 2012
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/*
 * memset implementation (64 bit)
 *
 * This code corresponds to the C construct below. We do distinguish
 * between clearing (c == 0) and setting a memory array (c != 0) simply
 * because nearly all memset invocations in the kernel clear memory and
 * the xc instruction is preferred in such cases.
 *
 * void *memset(void *s, int c, size_t n)
 * {
 *	if (likely(c == 0))
 *		return __builtin_memset(s, 0, n);
 *	return __builtin_memset(s, c, n);
 * }
 */
ENTRY(memset)
	ltgr	%r4,%r4			# n == 0 ?
	bzr	%r14			# yes -> nothing to do
	ltgr	%r3,%r3			# c == 0 ?
	jnz	.Lmemset_fill		# no -> fill with the byte value
	aghi	%r4,-1			# ex uses "length - 1"
	srlg	%r3,%r4,8		# number of full 256 byte chunks
	ltgr	%r3,%r3
	lgr	%r1,%r2
	jz	.Lmemset_clear_rest
.Lmemset_clear_loop:
	xc	0(256,%r1),0(%r1)	# clear 256 bytes at a time
	la	%r1,256(%r1)
	brctg	%r3,.Lmemset_clear_loop
.Lmemset_clear_rest:
	larl	%r3,.Lmemset_xc
	ex	%r4,0(%r3)		# clear remaining bytes
	br	%r14
.Lmemset_fill:
	stc	%r3,0(%r2)		# store first byte
	cghi	%r4,1			# n == 1 ?
	lgr	%r1,%r2
	ber	%r14			# yes -> done
	aghi	%r4,-2			# first byte stored, ex uses "length - 1"
	srlg	%r3,%r4,8		# number of full 256 byte chunks
	ltgr	%r3,%r3
	jz	.Lmemset_fill_rest
.Lmemset_fill_loop:
	mvc	1(256,%r1),0(%r1)	# propagate byte by overlapping copy
	la	%r1,256(%r1)
	brctg	%r3,.Lmemset_fill_loop
.Lmemset_fill_rest:
	larl	%r3,.Lmemset_mvc
	ex	%r4,0(%r3)		# fill remaining bytes
	br	%r14
.Lmemset_xc:
	xc	0(1,%r1),0(%r1)		# template, length patched by ex
.Lmemset_mvc:
	mvc	1(1,%r1),0(%r1)		# template, length patched by ex
|
||||
|
||||
/*
 * memcpy implementation (64 bit)
 *
 * void *memcpy(void *dest, const void *src, size_t n)
 */
ENTRY(memcpy)
	ltgr	%r4,%r4			# n == 0 ?
	bzr	%r14			# yes -> nothing to do
	aghi	%r4,-1			# ex uses "length - 1"
	srlg	%r5,%r4,8		# number of full 256 byte chunks
	ltgr	%r5,%r5
	lgr	%r1,%r2
	jnz	.Lmemcpy_loop
.Lmemcpy_rest:
	larl	%r5,.Lmemcpy_mvc
	ex	%r4,0(%r5)		# copy remaining bytes
	br	%r14
.Lmemcpy_loop:
	mvc	0(256,%r1),0(%r3)	# copy 256 bytes at a time
	la	%r1,256(%r1)
	la	%r3,256(%r3)
	brctg	%r5,.Lmemcpy_loop
	j	.Lmemcpy_rest
.Lmemcpy_mvc:
	mvc	0(1,%r1),0(%r3)		# template, length patched by ex
|
||||
159
arch/s390/lib/probes.c
Normal file
159
arch/s390/lib/probes.c
Normal file
|
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Common helper functions for kprobes and uprobes
|
||||
*
|
||||
* Copyright IBM Corp. 2014
|
||||
*/
|
||||
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/dis.h>
|
||||
|
||||
/*
 * Check whether the instruction may be probed at all.
 * Returns 0 if probing is allowed, -EINVAL for unknown or
 * prohibited opcodes.
 */
int probe_is_prohibited_opcode(u16 *insn)
{
	if (!is_known_insn((unsigned char *)insn))
		return -EINVAL;
	/* first: opcodes identified by their high byte */
	switch (insn[0] >> 8) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag  */
	case 0x44:	/* ex	 */
	case 0xac:	/* stnsm */
	case 0xad:	/* stosm */
		return -EINVAL;
	case 0xc6:
		switch (insn[0] & 0x0f) {
		case 0x00: /* exrl   */
			return -EINVAL;
		}
	}
	/* second: opcodes that need the full first halfword */
	switch (insn[0]) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr  */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
	case 0xb98d:	/* epsw  */
	case 0xe560:	/* tbegin */
	case 0xe561:	/* tbeginc */
	case 0xb2f8:	/* tend  */
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
/*
 * Determine how the PSW has to be fixed up after the instruction
 * has been single stepped out of line (branch targets, return
 * register updates, or no fixup at all).
 */
int probe_get_fixup_type(u16 *insn)
{
	/* default fixup method */
	int fixup = FIXUP_PSW_NORMAL;

	switch (insn[0] >> 8) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr */
		fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((insn[0] & 0x0f) == 0)
			fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if ((insn[0] & 0xff) == 0xb2)
			fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xa7:	/* bras	*/
		if ((insn[0] & 0x0f) == 0x05)
			fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xc0:
		if ((insn[0] & 0x0f) == 0x05)	/* brasl */
			fixup |= FIXUP_RETURN_REGISTER;
		break;
	/* 6-byte formats: the low byte of insn[2] selects the opcode */
	case 0xeb:
		switch (insn[2] & 0xff) {
		case 0x44: /* bxhg  */
		case 0x45: /* bxleg */
			fixup = FIXUP_BRANCH_NOT_TAKEN;
			break;
		}
		break;
	case 0xe3:	/* bctg	*/
		if ((insn[2] & 0xff) == 0x46)
			fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0xec:
		switch (insn[2] & 0xff) {
		case 0xe5: /* clgrb */
		case 0xe6: /* cgrb  */
		case 0xf6: /* crb   */
		case 0xf7: /* clrb  */
		case 0xfc: /* cgib  */
		case 0xfd: /* cglib */
		case 0xfe: /* cib   */
		case 0xff: /* clib  */
			fixup = FIXUP_BRANCH_NOT_TAKEN;
			break;
		}
		break;
	}
	return fixup;
}
|
||||
|
||||
/*
 * Returns true for instructions with a long relative displacement
 * (RIL-b / RIL-c format) that cannot simply be executed out of line.
 */
int probe_is_insn_relative_long(u16 *insn)
{
	/* Check if we have a RIL-b or RIL-c format instruction which
	 * we need to modify in order to avoid instruction emulation. */
	switch (insn[0] >> 8) {
	case 0xc0:
		if ((insn[0] & 0x0f) == 0x00) /* larl */
			return true;
		break;
	case 0xc4:
		switch (insn[0] & 0x0f) {
		case 0x02: /* llhrl  */
		case 0x04: /* lghrl  */
		case 0x05: /* lhrl   */
		case 0x06: /* llghrl */
		case 0x07: /* sthrl  */
		case 0x08: /* lgrl   */
		case 0x0b: /* stgrl  */
		case 0x0c: /* lgfrl  */
		case 0x0d: /* lrl    */
		case 0x0e: /* llgfrl */
		case 0x0f: /* strl   */
			return true;
		}
		break;
	case 0xc6:
		switch (insn[0] & 0x0f) {
		case 0x02: /* pfdrl  */
		case 0x04: /* cghrl  */
		case 0x05: /* chrl   */
		case 0x06: /* clghrl */
		case 0x07: /* clhrl  */
		case 0x08: /* cgrl   */
		case 0x0a: /* clgrl  */
		case 0x0c: /* cgfrl  */
		case 0x0d: /* crl    */
		case 0x0e: /* clgfrl */
		case 0x0f: /* clrl   */
			return true;
		}
		break;
	}
	return false;
}
|
||||
78
arch/s390/lib/qrnnd.S
Normal file
78
arch/s390/lib/qrnnd.S
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
# S/390 __udiv_qrnnd

#include <linux/linkage.h>

# r2 : &__r
# r3 : upper half of 64 bit word n
# r4 : lower half of 64 bit word n
# r5 : divisor d
# the remainder r of the division is to be stored to &__r and
# the quotient q is to be returned

	.text
ENTRY(__udiv_qrnnd)
	st	%r2,24(%r15)	# store pointer to remainder for later
	lr	%r0,%r3		# reload n
	lr	%r1,%r4
	ltr	%r2,%r5		# reload and test divisor
	jp	5f
	# divisor >= 0x80000000
	srdl	%r0,2		# n/4
	srl	%r2,1		# d/2
	slr	%r1,%r2		# special case if last bit of d is set
	brc	3,0f		# (n/4) div (n/2) can overflow by 1
	ahi	%r0,-1		# trick: subtract n/2, then divide
0:	dr	%r0,%r2		# signed division
	ahi	%r1,1		# trick part 2: add 1 to the quotient
	# now (n >> 2) = (d >> 1) * %r1 + %r0
	lhi	%r3,1
	nr	%r3,%r1		# test last bit of q
	jz	1f
	alr	%r0,%r2		# add (d>>1) to r
1:	srl	%r1,1		# q >>= 1
	# now (n >> 2) = (d&-2) * %r1 + %r0
	lhi	%r3,1
	nr	%r3,%r5		# test last bit of d
	jz	2f
	slr	%r0,%r1		# r -= q
	brc	3,2f		# borrow ?
	alr	%r0,%r5		# r += d
	ahi	%r1,-1
2:	# now (n >> 2) = d * %r1 + %r0
	alr	%r1,%r1		# q <<= 1
	alr	%r0,%r0		# r <<= 1
	brc	12,3f		# overflow on r ?
	slr	%r0,%r5		# r -= d
	ahi	%r1,1		# q += 1
3:	lhi	%r3,2
	nr	%r3,%r4		# test next to last bit of n
	jz	4f
	ahi	%r0,1		# r += 1
4:	clr	%r0,%r5		# r >= d ?
	jl	6f
	slr	%r0,%r5		# r -= d
	ahi	%r1,1		# q += 1
	# now (n >> 1) = d * %r1 + %r0
	j	6f
5:	# divisor < 0x80000000
	srdl	%r0,1
	dr	%r0,%r2		# signed division
	# now (n >> 1) = d * %r1 + %r0
6:	alr	%r1,%r1		# q <<= 1
	alr	%r0,%r0		# r <<= 1
	brc	12,7f		# overflow on r ?
	slr	%r0,%r5		# r -= d
	ahi	%r1,1		# q += 1
7:	lhi	%r3,1
	nr	%r3,%r4		# isolate last bit of n
	alr	%r0,%r3		# r += (n & 1)
	clr	%r0,%r5		# r >= d ?
	jl	8f
	slr	%r0,%r5		# r -= d
	ahi	%r1,1		# q += 1
8:	# now n = d * %r1 + %r0
	l	%r2,24(%r15)	# reload pointer to remainder
	st	%r0,0(%r2)	# store remainder
	lr	%r2,%r1		# return quotient
	br	%r14
	.end	__udiv_qrnnd
|
||||
234
arch/s390/lib/spinlock.c
Normal file
234
arch/s390/lib/spinlock.c
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
/*
|
||||
* Out of line spinlock code.
|
||||
*
|
||||
* Copyright IBM Corp. 2004, 2006
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
int spin_retry = 1000;
|
||||
|
||||
/**
 * spin_retry= parameter
 *
 * Number of loop iterations before a spinning CPU considers
 * yielding; settable on the kernel command line.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
|
||||
|
||||
/*
 * Out of line slow path of arch_spin_lock: spin on the lock word,
 * yielding to the (virtual) CPU that holds the lock when it does
 * not appear to be running.
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			/* lock word stores ~cpu of the owner */
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
|
||||
|
||||
/*
 * Same as arch_spin_lock_wait, but for spin_lock_irqsave: spin with
 * the saved interrupt state restored and only disable interrupts
 * again for the actual acquisition attempt.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;	/* acquired with interrupts disabled */
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
|
||||
|
||||
/*
 * Retry the trylock fast path a bounded number of times (spin_retry).
 * Returns 1 when the lock was acquired, 0 when all attempts failed.
 */
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int attempts = spin_retry;

	while (attempts-- > 0) {
		if (arch_spin_trylock_once(lp))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
|
||||
|
||||
/*
 * Slow path for read_lock: wait until no writer holds the lock
 * (sign bit clear) and bump the reader count.
 */
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* undo the optimistic increment done by the fast path */
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* spun long enough: yield to the last known writer */
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0)	/* writer active, keep spinning */
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
|
||||
|
||||
/*
 * Bounded retry loop for read_trylock. Returns 1 on success,
 * 0 when spin_retry attempts were exhausted.
 */
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)	/* writer active */
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
|
||||
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
|
||||
/*
 * Slow path for write_lock on machines with interlocked-access
 * facilities: 'prev' is the lock value returned by the fast path's
 * atomic OR of the writer bit. Wait until we both own the writer
 * bit and all readers have drained.
 */
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* spun long enough: yield to the last known owner */
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_rmb();
		if ((int) old >= 0) {
			/* writer bit was released, try to set it again */
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		/* done once no readers remain and we set the writer bit */
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
|
||||
|
||||
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
|
||||
|
||||
/*
 * Slow path for write_lock on pre-z196 machines (compare-and-swap
 * only): set the writer bit, then wait for all readers to drain.
 */
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* spun long enough: yield to the last known owner */
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_rmb();
		/* done once no readers remain and we set the writer bit */
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
|
||||
|
||||
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
|
||||
|
||||
/*
 * Bounded retry loop for write_trylock. Returns 1 on success,
 * 0 when spin_retry attempts were exhausted.
 */
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)	/* readers or a writer present */
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
|
||||
|
||||
void arch_lock_relax(unsigned int cpu)
|
||||
{
|
||||
if (!cpu)
|
||||
return;
|
||||
if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
|
||||
return;
|
||||
smp_yield_cpu(~cpu);
|
||||
}
|
||||
EXPORT_SYMBOL(arch_lock_relax);
|
||||
342
arch/s390/lib/string.c
Normal file
342
arch/s390/lib/string.c
Normal file
|
|
@ -0,0 +1,342 @@
|
|||
/*
|
||||
* Optimized string functions
|
||||
*
|
||||
* S390 version
|
||||
* Copyright IBM Corp. 2004
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
*/
|
||||
|
||||
#define IN_ARCH_STRING_C 1
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/*
 * Helper functions to find the end of a string
 */
static inline char *__strend(const char *s)
{
	/* srst searches for the byte in register 0 (here: '\0') */
	register unsigned long r0 asm("0") = 0;

	/* srst may stop early (cc 3) and must then be resumed */
	asm volatile ("0: srst %0,%1\n"
		      "   jo    0b"
		      : "+d" (r0), "+a" (s) :  : "cc" );
	return (char *) r0;
}
|
||||
|
||||
/*
 * Bounded variant of __strend: returns a pointer to the first '\0'
 * within the first n bytes of s, or s + n if none is found.
 */
static inline char *__strnend(const char *s, size_t n)
{
	/* srst searches for the byte in register 0 (here: '\0') */
	register unsigned long r0 asm("0") = 0;
	const char *p = s + n;	/* search end limit */

	asm volatile ("0: srst %0,%1\n"
		      "   jo    0b"
		      : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
	return (char *) p;
}
|
||||
|
||||
/**
 * strlen - Find the length of a string
 * @s: The string to be sized
 *
 * returns the length of @s
 */
size_t strlen(const char *s)
{
	return __strend(s) - s;
}
EXPORT_SYMBOL(strlen);
|
||||
|
||||
/**
 * strnlen - Find the length of a length-limited string
 * @s: The string to be sized
 * @n: The maximum number of bytes to search
 *
 * returns the minimum of the length of @s and @n
 */
size_t strnlen(const char * s, size_t n)
{
	return __strnend(s, n) - s;
}
EXPORT_SYMBOL(strnlen);
|
||||
|
||||
/**
 * strcpy - Copy a %NUL terminated string
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 *
 * returns a pointer to @dest
 */
char *strcpy(char *dest, const char *src)
{
	/* mvst copies up to and including the byte in register 0 ('\0') */
	register int r0 asm("0") = 0;
	char *ret = dest;

	/* mvst may stop early (cc 3) and must then be resumed */
	asm volatile ("0: mvst %0,%1\n"
		      "   jo   0b"
		      : "+&a" (dest), "+&a" (src) : "d" (r0)
		      : "cc", "memory" );
	return ret;
}
EXPORT_SYMBOL(strcpy);
|
||||
|
||||
/**
 * strlcpy - Copy a %NUL terminated string into a sized buffer
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @size: size of destination buffer
 *
 * Compatible with *BSD: the result is always a valid NUL-terminated
 * string that fits in the buffer (unless, of course, the buffer size
 * is zero). It does not pad out the result like strncpy() does.
 *
 * Returns the full length of @src; truncation occurred whenever the
 * return value is >= @size.
 */
size_t strlcpy(char *dest, const char *src, size_t size)
{
	size_t src_len = strlen(src);

	if (size) {
		size_t copy_len = src_len;

		/* Leave room for the terminator when truncating. */
		if (copy_len >= size)
			copy_len = size - 1;
		dest[copy_len] = '\0';
		memcpy(dest, src, copy_len);
	}
	return src_len;
}
|
||||
EXPORT_SYMBOL(strlcpy);
|
||||
|
||||
/**
 * strncpy - Copy a length-limited, %NUL-terminated string
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @n: The maximum number of bytes to copy
 *
 * The result is not %NUL-terminated if the source exceeds @n bytes.
 * The tail of @dest beyond the copied string is zero padded.
 */
char *strncpy(char *dest, const char *src, size_t n)
{
	const char *nul = memchr(src, '\0', n);
	size_t copied = nul ? (size_t)(nul - src) : n;

	memcpy(dest, src, copied);
	/* Zero pad the remainder of the buffer, as strncpy must. */
	memset(dest + copied, 0, n - copied);
	return dest;
}
|
||||
EXPORT_SYMBOL(strncpy);
|
||||
|
||||
/**
 * strcat - Append one %NUL-terminated string to another
 * @dest: The string to be appended to
 * @src: The string to append to it
 *
 * returns a pointer to @dest
 *
 * First locates the terminator of @dest with srst (r0 = 0), then copies
 * @src including its terminator with mvst; both instructions are resumed
 * via "jo" while they report cc 3 (incomplete).
 */
char *strcat(char *dest, const char *src)
{
	register int r0 asm("0") = 0;
	unsigned long dummy;	/* receives the srst/mvst cursor */
	char *ret = dest;

	asm volatile ("0: srst %0,%1\n"
		      " jo 0b\n"
		      "1: mvst %0,%2\n"
		      " jo 1b"
		      : "=&a" (dummy), "+a" (dest), "+a" (src)
		      : "d" (r0), "0" (0UL) : "cc", "memory" );
	return ret;
}
EXPORT_SYMBOL(strcat);
|
||||
|
||||
/**
 * strlcat - Append a length-limited, %NUL-terminated string to another
 * @dest: The string to be appended to
 * @src: The string to append to it
 * @n: The size of the destination buffer.
 *
 * Returns the total length the result would have had without
 * truncation, i.e. strlen(dest) + strlen(src).
 */
size_t strlcat(char *dest, const char *src, size_t n)
{
	size_t dest_len = strlen(dest);
	size_t src_len = strlen(src);
	size_t total = dest_len + src_len;

	if (dest_len < n) {
		char *tail = dest + dest_len;
		size_t room = n - dest_len;
		/* Truncate so the terminator still fits. */
		size_t copy_len = (src_len >= room) ? room - 1 : src_len;

		tail[copy_len] = '\0';
		memcpy(tail, src, copy_len);
	}
	return total;
}
|
||||
EXPORT_SYMBOL(strlcat);
|
||||
|
||||
/**
 * strncat - Append a length-limited, %NUL-terminated string to another
 * @dest: The string to be appended to
 * @src: The string to append to it
 * @n: The maximum numbers of bytes to copy
 *
 * returns a pointer to @dest
 *
 * Note that in contrast to strncpy, strncat ensures the result is
 * terminated.
 */
char *strncat(char *dest, const char *src, size_t n)
{
	const char *nul = memchr(src, '\0', n);
	size_t copy_len = nul ? (size_t)(nul - src) : n;
	char *tail = dest + strlen(dest);

	memcpy(tail, src, copy_len);
	tail[copy_len] = '\0';
	return dest;
}
|
||||
EXPORT_SYMBOL(strncat);
|
||||
|
||||
/**
 * strcmp - Compare two strings
 * @cs: One string
 * @ct: Another string
 *
 * returns 0 if @cs and @ct are equal,
 * < 0 if @cs is less than @ct
 * > 0 if @cs is greater than @ct
 *
 * Uses COMPARE LOGICAL STRING (clst); on inequality the differing bytes
 * are loaded (ic) and subtracted to produce the signed result.
 */
int strcmp(const char *cs, const char *ct)
{
	register int r0 asm("0") = 0;
	int ret = 0;

	asm volatile ("0: clst %2,%3\n"
		      " jo 0b\n"
		      " je 1f\n"
		      " ic %0,0(%2)\n"
		      " ic %1,0(%3)\n"
		      " sr %0,%1\n"
		      "1:"
		      : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
		      : : "cc" );
	return ret;
}
EXPORT_SYMBOL(strcmp);
|
||||
|
||||
/**
 * strrchr - Find the last occurrence of a character in a string
 * @s: The string to be searched
 * @c: The character to search for
 *
 * Returns a pointer to the last occurrence of @c in @s, or %NULL if
 * @c does not occur.  The terminating '\0' is considered part of the
 * string, so strrchr(s, '\0') returns a pointer to the terminator.
 */
char * strrchr(const char * s, int c)
{
	long len = strlen(s);

	/*
	 * Scan from the terminator down to and including s[0].  The old
	 * "if (len) ... while (--len > 0)" loop never examined s[0]
	 * (so a match at the start of the string returned NULL) and
	 * mishandled the empty string with c == '\0'.
	 */
	do {
		if (s[len] == (char) c)
			return (char *) s + len;
	} while (--len >= 0);
	return NULL;
}
|
||||
EXPORT_SYMBOL(strrchr);
|
||||
|
||||
/**
 * strstr - Find the first substring in a %NUL terminated string
 * @s1: The string to be searched
 * @s2: The string to search for
 *
 * Returns a pointer to the first occurrence of @s2 in @s1, the whole of
 * @s1 if @s2 is empty, or %NULL if @s2 does not occur.
 *
 * For each candidate position the l2-byte prefixes are compared with
 * COMPARE LOGICAL LONG EXTENDED (clcle); cc 0 (extracted via ipm/srl)
 * means the operands compared equal.
 */
char * strstr(const char * s1,const char * s2)
{
	int l1, l2;

	l2 = __strend(s2) - s2;
	if (!l2)
		return (char *) s1;
	l1 = __strend(s1) - s1;
	while (l1-- >= l2) {
		/* clcle takes its operands in even/odd register pairs */
		register unsigned long r2 asm("2") = (unsigned long) s1;
		register unsigned long r3 asm("3") = (unsigned long) l2;
		register unsigned long r4 asm("4") = (unsigned long) s2;
		register unsigned long r5 asm("5") = (unsigned long) l2;
		int cc;

		asm volatile ("0: clcle %1,%3,0\n"
			      " jo 0b\n"
			      " ipm %0\n"
			      " srl %0,28"
			      : "=&d" (cc), "+a" (r2), "+a" (r3),
				"+a" (r4), "+a" (r5) : : "cc" );
		if (!cc)
			return (char *) s1;
		s1++;
	}
	return NULL;
}
EXPORT_SYMBOL(strstr);
|
||||
|
||||
/**
 * memchr - Find a character in an area of memory.
 * @s: The memory area
 * @c: The byte to search for
 * @n: The size of the area.
 *
 * returns the address of the first occurrence of @c, or %NULL
 * if @c is not found
 *
 * srst searches [s, s+n); "jl" (found, cc 1) keeps the hit address,
 * otherwise "la %0,0" replaces the end address with NULL.
 */
void *memchr(const void *s, int c, size_t n)
{
	register int r0 asm("0") = (char) c;
	const void *ret = s + n;

	asm volatile ("0: srst %0,%1\n"
		      " jo 0b\n"
		      " jl 1f\n"
		      " la %0,0\n"
		      "1:"
		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
	return (void *) ret;
}
EXPORT_SYMBOL(memchr);
|
||||
|
||||
/**
 * memcmp - Compare two areas of memory
 * @cs: One area of memory
 * @ct: Another area of memory
 * @n: The size of the area.
 *
 * returns 0 if the areas are equal, < 0 if @cs sorts below @ct,
 * > 0 otherwise
 *
 * clcle compares the areas; on inequality the registers are left
 * pointing at the first differing bytes, which are then subtracted
 * to produce the signed result.
 */
int memcmp(const void *cs, const void *ct, size_t n)
{
	/* clcle takes its operands in even/odd register pairs */
	register unsigned long r2 asm("2") = (unsigned long) cs;
	register unsigned long r3 asm("3") = (unsigned long) n;
	register unsigned long r4 asm("4") = (unsigned long) ct;
	register unsigned long r5 asm("5") = (unsigned long) n;
	int ret;

	asm volatile ("0: clcle %1,%3,0\n"
		      " jo 0b\n"
		      " ipm %0\n"
		      " srl %0,28"
		      : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
		      : : "cc" );
	if (ret)
		ret = *(char *) r2 - *(char *) r4;
	return ret;
}
EXPORT_SYMBOL(memcmp);
|
||||
|
||||
/**
 * memscan - Find a character in an area of memory.
 * @s: The memory area
 * @c: The byte to search for
 * @n: The size of the area.
 *
 * returns the address of the first occurrence of @c, or 1 byte past
 * the area if @c is not found
 *
 * Like memchr() but without the NULL conversion: on a miss srst leaves
 * the end address (s + n) in the result register, which is exactly the
 * memscan contract.
 */
void *memscan(void *s, int c, size_t n)
{
	register int r0 asm("0") = (char) c;
	const void *ret = s + n;

	asm volatile ("0: srst %0,%1\n"
		      " jo 0b\n"
		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
	return (void *) ret;
}
EXPORT_SYMBOL(memscan);
|
||||
406
arch/s390/lib/uaccess.c
Normal file
406
arch/s390/lib/uaccess.c
Normal file
|
|
@ -0,0 +1,406 @@
|
|||
/*
|
||||
* Standard user space access functions based on mvcp/mvcs and doing
|
||||
* interesting things in the secondary space mode.
|
||||
*
|
||||
* Copyright IBM Corp. 2006,2014
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Gerald Schaefer (gerald.schaefer@de.ibm.com)
|
||||
*/
|
||||
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/facility.h>
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#define AHI "ahi"
|
||||
#define ALR "alr"
|
||||
#define CLR "clr"
|
||||
#define LHI "lhi"
|
||||
#define SLR "slr"
|
||||
#else
|
||||
#define AHI "aghi"
|
||||
#define ALR "algr"
|
||||
#define CLR "clgr"
|
||||
#define LHI "lghi"
|
||||
#define SLR "slgr"
|
||||
#endif
|
||||
|
||||
static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
|
||||
|
||||
/*
 * Copy @size bytes from user space @ptr to kernel buffer @x using the
 * mvcos instruction (.insn 0xc8...).  Returns the number of bytes that
 * could NOT be copied (0 on success); on a fault the remainder of the
 * kernel buffer is cleared by the xc-based "memset loop".
 * NOTE(review): reg0 = 0x81 presumably selects the user address space
 * for the mvcos source operand -- confirm against the z/Architecture
 * MOVE WITH OPTIONAL SPECIFICATIONS description.
 */
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
						 unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x81UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"9: jz 7f\n"
		"1:"ALR" %0,%3\n"
		" "SLR" %1,%3\n"
		" "SLR" %2,%3\n"
		" j 0b\n"
		"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
		" nr %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
		" jnh 4f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"10:"SLR" %0,%4\n"
		" "ALR" %2,%4\n"
		"4:"LHI" %4,-1\n"
		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
		" bras %3,6f\n"	/* memset loop */
		" xc 0(1,%2),0(%2)\n"
		"5: xc 0(256,%2),0(%2)\n"
		" la %2,256(%2)\n"
		"6:"AHI" %4,-256\n"
		" jnm 5b\n"
		" ex %4,0(%3)\n"
		" j 8f\n"
		"7:"SLR" %0,%0\n"
		"8:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Fallback copy from user space using mvcp (move to primary) while the
 * CPU is switched to secondary space mode (sacf 0 ... sacf 768).
 * Returns the number of bytes that could NOT be copied; on a fault the
 * remainder of the kernel buffer is cleared by the "memset loop".
 * NOTE(review): the 256-byte stride matches mvcp's per-execution limit
 * -- confirm against the z/Architecture MOVE TO PRIMARY description.
 */
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
						unsigned long size)
{
	unsigned long tmp1, tmp2;

	load_kernel_asce();
	tmp1 = -256UL;
	asm volatile(
		" sacf 0\n"
		"0: mvcp 0(%0,%2),0(%1),%3\n"
		"10:jz 8f\n"
		"1:"ALR" %0,%3\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcp 0(%0,%2),0(%1),%3\n"
		"11:jnz 1b\n"
		" j 8f\n"
		"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 5f\n"
		"4: mvcp 0(%4,%2),0(%1),%3\n"
		"12:"SLR" %0,%4\n"
		" "ALR" %2,%4\n"
		"5:"LHI" %4,-1\n"
		" "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
		" bras %3,7f\n" /* memset loop */
		" xc 0(1,%2),0(%2)\n"
		"6: xc 0(256,%2),0(%2)\n"
		" la %2,256(%2)\n"
		"7:"AHI" %4,-256\n"
		" jnm 6b\n"
		" ex %4,0(%3)\n"
		" j 9f\n"
		"8:"SLR" %0,%0\n"
		"9: sacf 768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Copy @n bytes from user space to kernel space, dispatching to the
 * mvcos fast path when available (see uaccess_init) and to the
 * mvcp-based fallback otherwise.  Returns the number of bytes that
 * could not be copied.
 */
unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return static_key_false(&have_mvcos) ?
		copy_from_user_mvcos(to, from, n) :
		copy_from_user_mvcp(to, from, n);
}
EXPORT_SYMBOL(__copy_from_user);
|
||||
|
||||
/*
 * Copy @size bytes from kernel buffer @x to user space @ptr using the
 * mvcos instruction.  Returns the number of bytes that could NOT be
 * copied (0 on success).
 * NOTE(review): reg0 = 0x810000 presumably selects the user address
 * space for the mvcos destination operand -- confirm against the
 * z/Architecture MOVE WITH OPTIONAL SPECIFICATIONS description.
 */
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x810000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"6: jz 4f\n"
		"1:"ALR" %0,%3\n"
		" "SLR" %1,%3\n"
		" "SLR" %2,%3\n"
		" j 0b\n"
		"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
		" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"7:"SLR" %0,%4\n"
		" j 5f\n"
		"4:"SLR" %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Fallback copy to user space using mvcs (move to secondary) while the
 * CPU is switched to secondary space mode (sacf 0 ... sacf 768).
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;

	load_kernel_asce();
	tmp1 = -256UL;
	asm volatile(
		" sacf 0\n"
		"0: mvcs 0(%0,%1),0(%2),%3\n"
		"7: jz 5f\n"
		"1:"ALR" %0,%3\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcs 0(%0,%1),0(%2),%3\n"
		"8: jnz 1b\n"
		" j 5f\n"
		"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 6f\n"
		"4: mvcs 0(%4,%1),0(%2),%3\n"
		"9:"SLR" %0,%4\n"
		" j 6f\n"
		"5:"SLR" %0,%0\n"
		"6: sacf 768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Copy @n bytes from kernel space to user space, dispatching to the
 * mvcos fast path when available and to the mvcs-based fallback
 * otherwise.  Returns the number of bytes that could not be copied.
 */
unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return static_key_false(&have_mvcos) ?
		copy_to_user_mvcos(to, from, n) :
		copy_to_user_mvcs(to, from, n);
}
EXPORT_SYMBOL(__copy_to_user);
|
||||
|
||||
/*
 * Copy @size bytes between two user space addresses using mvcos
 * (reg0 = 0x810081: user space selected for both operands --
 * NOTE(review): confirm against the MVCOS description).  Returns the
 * number of bytes that could NOT be copied.
 */
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
					       unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x810081UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		" jz 2f\n"
		"1:"ALR" %0,%3\n"
		" "SLR" %1,%3\n"
		" "SLR" %2,%3\n"
		" j 0b\n"
		"2:"SLR" %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Fallback user-to-user copy using mvc in 256-byte chunks while the
 * CPU runs in access-register/secondary mode (sacf 256 ... sacf 768).
 * The trailing partial chunk is done by executing (ex) the 1-byte mvc
 * template at label 1 with the remaining length.  Returns the number
 * of bytes that could NOT be copied (0 on success).
 */
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
					     unsigned long size)
{
	unsigned long tmp1;

	load_kernel_asce();
	asm volatile(
		" sacf 256\n"
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		"0:"AHI" %0,257\n"
		"1: mvc 0(1,%1),0(%2)\n"
		" la %1,1(%1)\n"
		" la %2,1(%2)\n"
		" "AHI" %0,-1\n"
		" jnz 1b\n"
		" j 5f\n"
		"2: mvc 0(256,%1),0(%2)\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,1b-0b(%3)\n"
		"5: "SLR" %0,%0\n"
		"6: sacf 768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Copy @n bytes from one user space address to another, preferring the
 * mvcos fast path.  Returns the number of bytes that could not be
 * copied.
 */
unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return static_key_false(&have_mvcos) ?
		copy_in_user_mvcos(to, from, n) :
		copy_in_user_mvc(to, from, n);
}
EXPORT_SYMBOL(__copy_in_user);
|
||||
|
||||
/*
 * Zero @size bytes of user space at @to with mvcos, using
 * empty_zero_page as the source.  Returns the number of bytes that
 * could NOT be cleared (0 on success).
 */
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
	register unsigned long reg0 asm("0") = 0x810000UL;
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		" jz 4f\n"
		"1:"ALR" %0,%2\n"
		" "SLR" %1,%2\n"
		" j 0b\n"
		"2: la %3,4095(%1)\n"/* %4 = to + 4095 */
		" nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
		" "SLR" %3,%1\n"
		" "CLR" %0,%3\n" /* copy crosses next page boundary? */
		" jnh 5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		" "SLR" %0,%3\n"
		" j 5f\n"
		"4:"SLR" %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Fallback: zero @size bytes of user space at @to with xc (exclusive or
 * the destination with itself) in 256-byte chunks, running in
 * access-register/secondary mode (sacf 256 ... sacf 768).  Partial
 * chunks execute (ex) the 1-byte xc template.  Returns the number of
 * bytes that could NOT be cleared (0 on success).
 */
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
	unsigned long tmp1, tmp2;

	load_kernel_asce();
	asm volatile(
		" sacf 256\n"
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		" xc 0(1,%1),0(%1)\n"
		"0:"AHI" %0,257\n"
		" la %2,255(%1)\n" /* %2 = ptr + 255 */
		" srl %2,12\n"
		" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
		" "SLR" %2,%1\n"
		" "CLR" %0,%2\n" /* clear crosses next page boundary? */
		" jnh 5f\n"
		" "AHI" %2,-1\n"
		"1: ex %2,0(%3)\n"
		" "AHI" %2,1\n"
		" "SLR" %0,%2\n"
		" j 5f\n"
		"2: xc 0(256,%1),0(%1)\n"
		" la %1,256(%1)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,0(%3)\n"
		"5: "SLR" %0,%0\n"
		"6: sacf 768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Zero @size bytes of user space at @to, preferring the mvcos fast
 * path.  Returns the number of bytes that could not be cleared.
 */
unsigned long __clear_user(void __user *to, unsigned long size)
{
	return static_key_false(&have_mvcos) ?
		clear_user_mvcos(to, size) :
		clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);
|
||||
|
||||
/*
 * Search a user space string with srst while in access-register mode
 * (sacf 256 ... sacf 768).  Returns the string length INCLUDING the
 * terminating '\0' (see the "la %0,1(%3)" below), @size if no
 * terminator was found within @size bytes, or 0 on fault (the
 * exception handler at label 1 leaves %0 cleared from the SLR).
 */
static inline unsigned long strnlen_user_srst(const char __user *src,
					      unsigned long size)
{
	register unsigned long reg0 asm("0") = 0;
	unsigned long tmp1, tmp2;

	asm volatile(
		" la %2,0(%1)\n"
		" la %3,0(%0,%1)\n"
		" "SLR" %0,%0\n"
		" sacf 256\n"
		"0: srst %3,%2\n"
		" jo 0b\n"
		" la %0,1(%3)\n" /* strnlen_user results includes \0 */
		" "SLR" %0,%1\n"
		"1: sacf 768\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}
|
||||
|
||||
/*
 * Return the length of a user space string including the terminating
 * '\0', limited to @size bytes; 0 for an empty limit or on fault.
 */
unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
	if (unlikely(!size))
		return 0;
	load_kernel_asce();
	return strnlen_user_srst(src, size);
}
EXPORT_SYMBOL(__strnlen_user);
|
||||
|
||||
/*
 * Copy a string from user space into @dst, at most @size bytes.
 * The copy proceeds page by page: each iteration copies up to the next
 * user page boundary, then checks whether a terminating '\0' was among
 * the copied bytes.  Returns the number of bytes copied (excluding the
 * terminator), 0 for size <= 0, or -EFAULT on fault.
 */
long __strncpy_from_user(char *dst, const char __user *src, long size)
{
	size_t done, len, offset, len_str;

	if (unlikely(size <= 0))
		return 0;
	done = 0;
	for (;;) {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(size - done, PAGE_SIZE - offset);
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		/* Stop on an embedded '\0' or when the limit is reached. */
		if (len_str != len || done >= size)
			break;
		src += len_str;
		dst += len_str;
	}
	return done;
}
EXPORT_SYMBOL(__strncpy_from_user);
|
||||
|
||||
/*
 * The "old" uaccess variant without mvcos can be enforced with the
 * uaccess_primary kernel parameter. This is mainly for debugging purposes.
 */
static int uaccess_primary __initdata;

/* early_param handler: any "uaccess_primary" on the command line sets
 * the flag; the argument value itself is ignored. */
static int __init parse_uaccess_pt(char *__unused)
{
	uaccess_primary = 1;
	return 0;
}
early_param("uaccess_primary", parse_uaccess_pt);
|
||||
|
||||
/*
 * Enable the mvcos fast path when running 64-bit, the user did not
 * force the primary-space variant via "uaccess_primary", and the
 * machine reports facility 27 (per the have_mvcos key this is the
 * move-with-optional-specifications facility -- NOTE(review): confirm
 * the facility number against the z/Architecture facility list).
 */
static int __init uaccess_init(void)
{
	if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
		static_key_slow_inc(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);
|
||||
26
arch/s390/lib/ucmpdi2.c
Normal file
26
arch/s390/lib/ucmpdi2.c
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
#include <linux/module.h>
|
||||
|
||||
/*
 * View of an unsigned long long as two 32-bit halves.  The most
 * significant word is the first struct member, which matches the
 * big-endian memory layout of s390 -- this type is not portable to
 * little-endian targets.
 */
union ull_union {
	unsigned long long ull;
	struct {
		unsigned int high;	/* most significant 32 bits */
		unsigned int low;	/* least significant 32 bits */
	} ui;
};
|
||||
|
||||
int __ucmpdi2(unsigned long long a, unsigned long long b)
|
||||
{
|
||||
union ull_union au = {.ull = a};
|
||||
union ull_union bu = {.ull = b};
|
||||
|
||||
if (au.ui.high < bu.ui.high)
|
||||
return 0;
|
||||
else if (au.ui.high > bu.ui.high)
|
||||
return 2;
|
||||
if (au.ui.low < bu.ui.low)
|
||||
return 0;
|
||||
else if (au.ui.low > bu.ui.low)
|
||||
return 2;
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(__ucmpdi2);
|
||||
Loading…
Add table
Add a link
Reference in a new issue