Fixed MTP to work with TWRP

commit f6dfaef42e
awab228 committed 2018-06-19 23:16:04 +02:00

50820 changed files with 20846062 additions and 0 deletions

arch/avr32/lib/Makefile Normal file

@@ -0,0 +1,11 @@
#
# Makefile for AVR32-specific library files
#
lib-y := copy_user.o clear_user.o
lib-y += strncpy_from_user.o strnlen_user.o
lib-y += delay.o memset.o memcpy.o findbit.o
lib-y += csum_partial.o csum_partial_copy_generic.o
lib-y += io-readsw.o io-readsl.o io-writesw.o io-writesl.o
lib-y += io-readsb.o io-writesb.o
lib-y += __avr32_lsl64.o __avr32_lsr64.o __avr32_asr64.o

arch/avr32/lib/__avr32_asr64.S Normal file

@@ -0,0 +1,31 @@
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* DWtype __avr32_asr64(DWtype u, word_type b)
*/
.text
.global __avr32_asr64
.type __avr32_asr64,@function
__avr32_asr64:
cp.w r12, 0
reteq r12
rsub r9, r12, 32
brle 1f
lsl r8, r11, r9
lsr r10, r10, r12
asr r11, r11, r12
or r10, r8
retal r12
1: neg r9
asr r10, r11, r9
asr r11, 31
retal r12
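
The helper keeps the 64-bit operand as a pair of 32-bit words (r11 high, r10 low, as the code above uses them) and the shift count in r12, splitting the work into a b < 32 case, where bits cross between the halves, and a b >= 32 case, where the low word is derived from the high one and the high word becomes pure sign fill. A C model of that split (a sketch of the algorithm, not the kernel code, assuming arithmetic >> on signed values as GCC provides); the logical shifts in the next two files follow the same pattern with zero fill:

#include <stdint.h>

/* Reference model of __avr32_asr64: hi/lo mirror r11/r10. */
static int64_t asr64_model(int64_t u, unsigned int b)
{
        int32_t  hi = (int32_t)(u >> 32);
        uint32_t lo = (uint32_t)u;

        if (b == 0)                     /* "reteq r12": nothing to do */
                return u;
        if (b < 32) {                   /* bits cross from hi into lo */
                lo = (lo >> b) | ((uint32_t)hi << (32 - b));
                hi >>= b;
        } else {                        /* lo comes from hi alone */
                lo = (uint32_t)(hi >> (b - 32));
                hi >>= 31;              /* sign fill: "asr r11, 31" */
        }
        return ((int64_t)hi << 32) | lo;
}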

arch/avr32/lib/__avr32_lsl64.S Normal file

@@ -0,0 +1,31 @@
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* DWtype __avr32_lsl64(DWtype u, word_type b)
*/
.text
.global __avr32_lsl64
.type __avr32_lsl64,@function
__avr32_lsl64:
cp.w r12, 0
reteq r12
rsub r9, r12, 32
brle 1f
lsr r8, r10, r9
lsl r10, r10, r12
lsl r11, r11, r12
or r11, r8
retal r12
1: neg r9
lsl r11, r10, r9
mov r10, 0
retal r12

arch/avr32/lib/__avr32_lsr64.S Normal file

@@ -0,0 +1,31 @@
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* DWtype __avr32_lsr64(DWtype u, word_type b)
*/
.text
.global __avr32_lsr64
.type __avr32_lsr64,@function
__avr32_lsr64:
cp.w r12, 0
reteq r12
rsub r9, r12, 32
brle 1f
lsl r8, r11, r9
lsr r11, r11, r12
lsr r10, r10, r12
or r10, r8
retal r12
1: neg r9
lsr r10, r11, r9
mov r11, 0
retal r12

arch/avr32/lib/clear_user.S Normal file

@@ -0,0 +1,76 @@
/*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
.text
.align 1
.global clear_user
.type clear_user, "function"
clear_user:
branch_if_kernel r8, __clear_user
ret_if_privileged r8, r12, r11, r11
.global __clear_user
.type __clear_user, "function"
__clear_user:
mov r9, r12
mov r8, 0
andl r9, 3, COH
brne 5f
1: sub r11, 4
brlt 2f
10: st.w r12++, r8
sub r11, 4
brge 10b
2: sub r11, -4
reteq 0
/* Unaligned count or address */
bld r11, 1
brcc 12f
11: st.h r12++, r8
sub r11, 2
reteq 0
12: st.b r12++, r8
retal 0
/* Unaligned address */
5: cp.w r11, 4
brlt 2b
lsl r9, 2
add pc, pc, r9
13: st.b r12++, r8
sub r11, 1
14: st.b r12++, r8
sub r11, 1
15: st.b r12++, r8
sub r11, 1
rjmp 1b
.size clear_user, . - clear_user
.size __clear_user, . - __clear_user
.section .fixup, "ax"
.align 1
18: sub r11, -4
19: retal r11
.section __ex_table, "a"
.align 2
.long 10b, 18b
.long 11b, 19b
.long 12b, 19b
.long 13b, 19b
.long 14b, 19b
.long 15b, 19b
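
__clear_user zeroes n bytes of user memory: word stores once the pointer is aligned, byte and halfword stores for the edges, and the fixup entries above make a faulting store return the number of bytes still unwritten. A rough C model of the strategy (a sketch, not the kernel code):

#include <stdint.h>

/* Model: returns 0 on success; on a faulting store the real code's
 * fixup returns the count still remaining instead. */
unsigned long clear_user_model(void *to, unsigned long n)
{
        unsigned char *p = to;

        while (((uintptr_t)p & 3) && n) {   /* lead-in bytes (label 5:) */
                *p++ = 0;
                n--;
        }
        while (n >= 4) {                    /* aligned words (label 10:) */
                *(uint32_t *)p = 0;
                p += 4;
                n -= 4;
        }
        while (n--)                         /* 0-3 tail bytes (11:/12:) */
                *p++ = 0;
        return 0;
}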

arch/avr32/lib/copy_user.S Normal file

@@ -0,0 +1,119 @@
/*
* Copy to/from userspace with optional address space checking.
*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
/*
* __kernel_size_t
* __copy_user(void *to, const void *from, __kernel_size_t n)
*
* Returns the number of bytes not copied. Might be off by
* max 3 bytes if we get a fault in the main loop.
*
* The address-space checking functions simply fall through to
* the non-checking version.
*/
.text
.align 1
.global copy_from_user
.type copy_from_user, @function
copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
.size copy_from_user, . - copy_from_user
.global copy_to_user
.type copy_to_user, @function
copy_to_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r12, r10, r10
.size copy_to_user, . - copy_to_user
.global __copy_user
.type __copy_user, @function
__copy_user:
mov r9, r11
andl r9, 3, COH
brne 6f
/* At this point, from is word-aligned */
1: sub r10, 4
brlt 3f
2:
10: ld.w r8, r11++
11: st.w r12++, r8
sub r10, 4
brge 2b
3: sub r10, -4
reteq 0
/*
* Handle unaligned count. Need to be careful with r10 here so
* that we return the correct value even if we get a fault
*/
4:
20: ld.ub r8, r11++
21: st.b r12++, r8
sub r10, 1
reteq 0
22: ld.ub r8, r11++
23: st.b r12++, r8
sub r10, 1
reteq 0
24: ld.ub r8, r11++
25: st.b r12++, r8
retal 0
/* Handle unaligned from-pointer */
6: cp.w r10, 4
brlt 4b
rsub r9, r9, 4
30: ld.ub r8, r11++
31: st.b r12++, r8
sub r10, 1
sub r9, 1
breq 1b
32: ld.ub r8, r11++
33: st.b r12++, r8
sub r10, 1
sub r9, 1
breq 1b
34: ld.ub r8, r11++
35: st.b r12++, r8
sub r10, 1
rjmp 1b
.size __copy_user, . - __copy_user
.section .fixup,"ax"
.align 1
19: sub r10, -4
29: retal r10
.section __ex_table,"a"
.align 2
.long 10b, 19b
.long 11b, 19b
.long 20b, 29b
.long 21b, 29b
.long 22b, 29b
.long 23b, 29b
.long 24b, 29b
.long 25b, 29b
.long 30b, 29b
.long 31b, 29b
.long 32b, 29b
.long 33b, 29b
.long 34b, 29b
.long 35b, 29b
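
Per the header comment, __copy_user returns the number of bytes not copied, and the fixup at 19:/29: rounds the remaining count back up to whole words, which is why the result can be off by up to 3 when the word loop faults. A C model of the contract (sketch, not the kernel code):

/* Copy n bytes; return how many could NOT be copied (0 on success). */
unsigned long copy_user_model(void *to, const void *from, unsigned long n)
{
        unsigned char *d = to;
        const unsigned char *s = from;

        while (n) {
                *d++ = *s++;    /* a fault here makes the real code
                                 * return the bytes still left in n */
                n--;
        }
        return 0;
}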

arch/avr32/lib/csum_partial.S Normal file

@@ -0,0 +1,47 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* unsigned int csum_partial(const unsigned char *buff,
* int len, unsigned int sum)
*/
.text
.global csum_partial
.type csum_partial,"function"
.align 1
csum_partial:
/* checksum complete words, aligned or not */
3: sub r11, 4
brlt 5f
4: ld.w r9, r12++
add r10, r9
acr r10
sub r11, 4
brge 4b
/* return if we had a whole number of words */
5: sub r11, -4
reteq r10
/* checksum any remaining bytes at the end */
mov r9, 0
mov r8, 0
cp r11, 2
brlt 6f
ld.uh r9, r12++
sub r11, 2
breq 7f
lsl r9, 16
6: ld.ub r8, r12++
lsl r8, 8
7: or r9, r8
add r10, r9
acr r10
retal r10
.size csum_partial, . - csum_partial
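
The loop is a 32-bit ones'-complement sum: each ld.w/add pair is followed by acr, which adds the carry flag back into the accumulator (end-around carry). A C model (a sketch, not the kernel code), assuming big-endian data as on the AVR32 and right-padding the 1-3 tail bytes with zeros, which is fold-equivalent to where the assembly's tail places them:

#include <stdint.h>

static uint32_t add_carry(uint32_t sum, uint32_t v)
{
        sum += v;
        return sum + (sum < v);         /* "acr r10" */
}

uint32_t csum_partial_model(const uint8_t *buf, int len, uint32_t sum)
{
        while (len >= 4) {
                uint32_t w = (uint32_t)buf[0] << 24 | (uint32_t)buf[1] << 16
                           | (uint32_t)buf[2] << 8  | buf[3];
                sum = add_carry(sum, w);
                buf += 4;
                len -= 4;
        }
        if (len > 0) {                  /* tail, padded with zero bytes */
                uint32_t w = 0;
                for (int i = 0; i < len; i++)
                        w |= (uint32_t)buf[i] << (24 - 8 * i);
                sum = add_carry(sum, w);
        }
        return sum;
}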

arch/avr32/lib/csum_partial_copy_generic.S Normal file

@@ -0,0 +1,99 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/errno.h>
#include <asm/asm.h>
/*
* unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
* int sum, int *src_err_ptr,
* int *dst_err_ptr)
*
* Copy src to dst while checksumming, otherwise like csum_partial.
*/
.macro ld_src size, reg, ptr
9999: ld.\size \reg, \ptr
.section __ex_table, "a"
.long 9999b, fixup_ld_src
.previous
.endm
.macro st_dst size, ptr, reg
9999: st.\size \ptr, \reg
.section __ex_table, "a"
.long 9999b, fixup_st_dst
.previous
.endm
.text
.global csum_partial_copy_generic
.type csum_partial_copy_generic,"function"
.align 1
csum_partial_copy_generic:
pushm r4-r7,lr
/* The inner loop */
1: sub r10, 4
brlt 5f
2: ld_src w, r5, r12++
st_dst w, r11++, r5
add r9, r5
acr r9
sub r10, 4
brge 2b
/* return if we had a whole number of words */
5: sub r10, -4
brne 7f
6: mov r12, r9
popm r4-r7,pc
/* handle additional bytes at the tail */
7: mov r5, 0
mov r4, 32
8: ld_src ub, r6, r12++
st_dst b, r11++, r6
lsl r5, 8
sub r4, 8
bfins r5, r6, 0, 8
sub r10, 1
brne 8b
lsl r5, r5, r4
add r9, r5
acr r9
rjmp 6b
/* Exception handler */
.section .fixup,"ax"
.align 1
fixup_ld_src:
mov r9, -EFAULT
cp.w r8, 0
breq 1f
st.w r8[0], r9
1: /*
* TODO: zero the complete destination - computing the rest
* is too much work
*/
mov r9, 0
rjmp 6b
fixup_st_dst:
mov r9, -EFAULT
lddsp r8, sp[20]
cp.w r8, 0
breq 1f
st.w r8[0], r9
1: mov r9, 0
rjmp 6b
.previous
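
The ld_src/st_dst macros wrap every user access in an __ex_table entry: a pair of addresses, the instruction that may fault (the local 9999 label) and the fixup to resume at. Conceptually, the fault handler searches that table; a minimal sketch of the idea (illustrative names, not the kernel's actual search code):

struct extable_entry {
        unsigned long insn;     /* address of the 9999: access */
        unsigned long fixup;    /* fixup_ld_src or fixup_st_dst */
};

/* Return the fixup for a faulting address, or 0 for a real bug. */
unsigned long search_extable_model(const struct extable_entry *tab,
                                   unsigned int n, unsigned long addr)
{
        for (unsigned int i = 0; i < n; i++)
                if (tab[i].insn == addr)
                        return tab[i].fixup;
        return 0;
}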

arch/avr32/lib/delay.c Normal file

@@ -0,0 +1,57 @@
/*
* Precise Delay Loops for avr32
*
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/timex.h>
#include <linux/param.h>
#include <linux/types.h>
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
int read_current_timer(unsigned long *timer_value)
{
*timer_value = sysreg_read(COUNT);
return 0;
}
void __delay(unsigned long loops)
{
unsigned bclock, now;
bclock = sysreg_read(COUNT);
do {
now = sysreg_read(COUNT);
} while ((now - bclock) < loops);
}
inline void __const_udelay(unsigned long xloops)
{
unsigned long long loops;
asm("mulu.d %0, %1, %2"
: "=r"(loops)
: "r"(current_cpu_data.loops_per_jiffy * HZ), "r"(xloops));
__delay(loops >> 32);
}
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
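
__const_udelay multiplies its argument by loops_per_jiffy * HZ (the COUNT ticks per second) as a 32x32->64 product (mulu.d) and keeps the high word, i.e. it computes ticks_per_second * xloops / 2^32. The magic constants are fractions scaled by 2^32; a quick check of the arithmetic, with a hypothetical 150 MHz COUNT clock:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 2^32 / 10^6 rounded up: matches the 0x000010c7 above */
        printf("%#llx\n", (1ULL << 32) / 1000000 + 1);

        /* __udelay(100) with an assumed 150 MHz clock source */
        uint64_t ticks_per_sec = 150000000;     /* lpj * HZ (assumed) */
        uint64_t xloops = 100 * 0x000010c7ULL;
        printf("%llu ticks\n",
               (unsigned long long)((xloops * ticks_per_sec) >> 32));
        /* prints ~15000, i.e. 100 us worth of 150 MHz ticks */
        return 0;
}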

arch/avr32/lib/findbit.S Normal file

@@ -0,0 +1,185 @@
/*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
.text
/*
* unsigned long find_first_zero_bit(const unsigned long *addr,
* unsigned long size)
*/
ENTRY(find_first_zero_bit)
cp.w r11, 0
reteq r11
mov r9, r11
1: ld.w r8, r12[0]
com r8
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
/*
* unsigned long find_next_zero_bit(const unsigned long *addr,
* unsigned long size,
* unsigned long offset)
*/
ENTRY(find_next_zero_bit)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ld.w r8, r12[0]
com r8
sub r12, -4
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ld.w r8, r12[0]
com r8
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
/* Common return path for when a bit is actually found. */
.L_found:
brev r8
clz r10, r8
rsub r9, r11
add r10, r9
/* XXX: If we don't have to return exactly "size" when the bit
is not found, we may drop this "min" thing */
min r12, r11, r10
retal r12
/*
* unsigned long find_first_bit(const unsigned long *addr,
* unsigned long size)
*/
ENTRY(find_first_bit)
cp.w r11, 0
reteq r11
mov r9, r11
1: ld.w r8, r12[0]
cp.w r8, 0
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
/*
* unsigned long find_next_bit(const unsigned long *addr,
* unsigned long size,
* unsigned long offset)
*/
ENTRY(find_next_bit)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ld.w r8, r12[0]
sub r12, -4
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ld.w r8, r12[0]
cp.w r8, 0
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
ENTRY(find_next_bit_le)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ldswp.w r8, r12[0]
sub r12, -4
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ldswp.w r8, r12[0]
cp.w r8, 0
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
ENTRY(find_next_zero_bit_le)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ldswp.w r8, r12[0]
sub r12, -4
com r8
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ldswp.w r8, r12[0]
com r8
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
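
All of these return size when no matching bit exists; the found path uses brev + clz to turn "lowest set bit" into a bit index, and min clamps a hit in a final partial word back to size (the XXX comment above). The contract in C (a model, not the implementation), assuming 32-bit longs as on the AVR32:

/* First zero bit at or after "offset"; "size" if there is none. */
unsigned long find_next_zero_bit_model(const unsigned long *addr,
                                       unsigned long size,
                                       unsigned long offset)
{
        for (unsigned long i = offset; i < size; i++)
                if (!(addr[i / 32] & (1UL << (i % 32))))
                        return i;
        return size;
}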

arch/avr32/lib/io-readsb.S Normal file

@@ -0,0 +1,49 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.text
.Lnot_word_aligned:
1: ld.ub r8, r12[0]
sub r10, 1
st.b r11++, r8
reteq r12
tst r11, r9
brne 1b
/* fall through */
.global __raw_readsb
.type __raw_readsb,@function
__raw_readsb:
cp.w r10, 0
mov r9, 3
reteq r12
tst r11, r9
brne .Lnot_word_aligned
sub r10, 4
brlt 2f
1: ldins.b r8:t, r12[0]
ldins.b r8:u, r12[0]
ldins.b r8:l, r12[0]
ldins.b r8:b, r12[0]
st.w r11++, r8
sub r10, 4
brge 1b
2: sub r10, -4
reteq r12
3: ld.ub r8, r12[0]
sub r10, 1
st.b r11++, r8
brne 3b
retal r12
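
The six io-reads*/io-writes* routines all move a block between a buffer and one fixed I/O address; they differ only in access width and in the packing tricks (ldins, bfextu, ldswp) used once the buffer is word-aligned. The underlying semantics of __raw_readsb in C (a sketch; volatile keeps every read of the unchanging port address):

#include <stdint.h>

void raw_readsb_model(const volatile uint8_t *port, void *buf,
                      unsigned int count)
{
        uint8_t *p = buf;

        while (count--)
                *p++ = *port;   /* same device register each time */
}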

arch/avr32/lib/io-readsl.S Normal file

@@ -0,0 +1,24 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.global __raw_readsl
.type __raw_readsl,@function
__raw_readsl:
cp.w r10, 0
reteq r12
/*
* If r11 isn't properly aligned, we might get an exception on
* some implementations. But there's not much we can do about it.
*/
1: ld.w r8, r12[0]
sub r10, 1
st.w r11++, r8
brne 1b
retal r12

arch/avr32/lib/io-readsw.S Normal file

@@ -0,0 +1,43 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.Lnot_word_aligned:
/*
* Bad alignment will cause a hardware exception, which is as
* good as anything. No need for us to check for proper alignment.
*/
ld.uh r8, r12[0]
sub r10, 1
st.h r11++, r8
/* fall through */
.global __raw_readsw
.type __raw_readsw,@function
__raw_readsw:
cp.w r10, 0
reteq r12
mov r9, 3
tst r11, r9
brne .Lnot_word_aligned
sub r10, 2
brlt 2f
1: ldins.h r8:t, r12[0]
ldins.h r8:b, r12[0]
st.w r11++, r8
sub r10, 2
brge 1b
2: sub r10, -2
reteq r12
ld.uh r8, r12[0]
st.h r11++, r8
retal r12

arch/avr32/lib/io-writesb.S Normal file

@@ -0,0 +1,52 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.text
.Lnot_word_aligned:
1: ld.ub r8, r11++
sub r10, 1
st.b r12[0], r8
reteq r12
tst r11, r9
brne 1b
/* fall through */
.global __raw_writesb
.type __raw_writesb,@function
__raw_writesb:
cp.w r10, 0
mov r9, 3
reteq r12
tst r11, r9
brne .Lnot_word_aligned
sub r10, 4
brlt 2f
1: ld.w r8, r11++
bfextu r9, r8, 24, 8
st.b r12[0], r9
bfextu r9, r8, 16, 8
st.b r12[0], r9
bfextu r9, r8, 8, 8
st.b r12[0], r9
st.b r12[0], r8
sub r10, 4
brge 1b
2: sub r10, -4
reteq r12
3: ld.ub r8, r11++
sub r10, 1
st.b r12[0], r8
brne 3b
retal r12

arch/avr32/lib/io-writesl.S Normal file

@@ -0,0 +1,20 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.global __raw_writesl
.type __raw_writesl,@function
__raw_writesl:
cp.w r10, 0
reteq r12
1: ld.w r8, r11++
sub r10, 1
st.w r12[0], r8
brne 1b
retal r12

arch/avr32/lib/io-writesw.S Normal file

@@ -0,0 +1,38 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.Lnot_word_aligned:
ld.uh r8, r11++
sub r10, 1
st.h r12[0], r8
.global __raw_writesw
.type __raw_writesw,@function
__raw_writesw:
cp.w r10, 0
mov r9, 3
reteq r12
tst r11, r9
brne .Lnot_word_aligned
sub r10, 2
brlt 2f
1: ld.w r8, r11++
bfextu r9, r8, 16, 16
st.h r12[0], r9
st.h r12[0], r8
sub r10, 2
brge 1b
2: sub r10, -2
reteq r12
ld.uh r8, r11++
st.h r12[0], r8
retal r12

arch/avr32/lib/memcpy.S Normal file

@@ -0,0 +1,72 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* void *memcpy(void *to, const void *from, unsigned long n)
*
* This implementation does word-aligned loads in the main loop,
* possibly sacrificing alignment of stores.
*
* Hopefully, in most cases, both "to" and "from" will be
* word-aligned to begin with.
*/
.text
.global memcpy
.type memcpy, @function
memcpy:
mov r9, r11
andl r9, 3, COH
brne 1f
/* At this point, "from" is word-aligned */
2: mov r9, r12
5: sub r10, 4
brlt 4f
3: ld.w r8, r11++
sub r10, 4
st.w r12++, r8
brge 3b
4: neg r10
reteq r9
/* Handle unaligned count */
lsl r10, 2
add pc, pc, r10
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
retal r9
/* Handle unaligned "from" pointer */
1: sub r10, 4
movlt r9, r12
brlt 4b
add r10, r9
lsl r9, 2
add pc, pc, r9
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
mov r8, r12
add pc, pc, r9
sub r8, 1
nop
sub r8, 1
nop
sub r8, 1
nop
mov r9, r8
rjmp 5b
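
The add pc, pc, r10 sequences are computed jumps into a run of byte copies, a jump table without the table. A C model of the overall strategy (sketch only; the unaligned word stores rely on the hardware tolerance the header comment mentions, which portable C cannot express):

#include <stdint.h>

void *memcpy_model(void *to, const void *from, unsigned long n)
{
        unsigned char *d = to;
        const unsigned char *s = from;

        while (((uintptr_t)s & 3) && n) {       /* align "from" (1:) */
                *d++ = *s++;
                n--;
        }
        while (n >= 4) {                        /* word loop (3:) */
                *(uint32_t *)d = *(const uint32_t *)s;
                d += 4;
                s += 4;
                n -= 4;
        }
        switch (n) {            /* tail: the "add pc, pc, r10" table */
        case 3: *d++ = *s++;    /* fall through */
        case 2: *d++ = *s++;    /* fall through */
        case 1: *d = *s;
        }
        return to;
}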

arch/avr32/lib/memset.S Normal file

@@ -0,0 +1,72 @@
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/arch/arm/lib/memset.S
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*/
#include <asm/asm.h>
/*
* r12: void *b
* r11: int c
* r10: size_t len
*
* Returns b in r12
*/
.text
.global memset
.type memset, @function
.align 5
memset:
mov r9, r12
mov r8, r12
or r11, r11, r11 << 8
andl r9, 3, COH
brne 1f
2: or r11, r11, r11 << 16
sub r10, 4
brlt 5f
/* Let's do some real work */
4: st.w r8++, r11
sub r10, 4
brge 4b
/*
* When we get here, we've got less than 4 bytes to set. r10
* might be negative.
*/
5: sub r10, -4
reteq r12
/* Fastpath ends here, exactly 32 bytes from memset */
/* Handle unaligned count or pointer */
bld r10, 1
brcc 6f
st.b r8++, r11
st.b r8++, r11
bld r10, 0
retcc r12
6: st.b r8++, r11
retal r12
/* Handle unaligned pointer */
1: sub r10, 4
brlt 5b
add r10, r9
lsl r9, 1
add pc, r9
st.b r8++, r11
st.b r8++, r11
st.b r8++, r11
rjmp 2b
.size memset, . - memset
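
The pair of or instructions splats the fill byte across a full word before the store loop, the same trick as multiplying the byte by 0x01010101. A C model of the routine (sketch, not the kernel code):

#include <stdint.h>

void *memset_model(void *b, int c, unsigned long len)
{
        unsigned char *p = b;
        uint32_t v = (uint8_t)c;

        v |= v << 8;            /* "or r11, r11, r11 << 8"  */
        v |= v << 16;           /* "or r11, r11, r11 << 16" */

        while (((uintptr_t)p & 3) && len) {     /* align (label 1:) */
                *p++ = (uint8_t)c;
                len--;
        }
        while (len >= 4) {                      /* word stores (4:) */
                *(uint32_t *)p = v;
                p += 4;
                len -= 4;
        }
        while (len--)                           /* 0-3 tail bytes */
                *p++ = (uint8_t)c;
        return b;
}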

arch/avr32/lib/strncpy_from_user.S Normal file

@@ -0,0 +1,60 @@
/*
* Copy to/from userspace with optional address space checking.
*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
/*
* long strncpy_from_user(char *dst, const char *src, long count)
*
* On success, returns the length of the string, not including
* the terminating NUL.
*
* If the string is longer than count, returns count
*
* If userspace access fails, returns -EFAULT
*/
.text
.align 1
.global strncpy_from_user
.type strncpy_from_user, "function"
strncpy_from_user:
mov r9, -EFAULT
branch_if_kernel r8, __strncpy_from_user
ret_if_privileged r8, r11, r10, r9
.global __strncpy_from_user
.type __strncpy_from_user, "function"
__strncpy_from_user:
cp.w r10, 0
reteq 0
mov r9, r10
1: ld.ub r8, r11++
st.b r12++, r8
cp.w r8, 0
breq 2f
sub r9, 1
brne 1b
2: sub r10, r9
retal r10
.section .fixup, "ax"
.align 1
3: mov r12, -EFAULT
retal r12
.section __ex_table, "a"
.align 2
.long 1b, 3b
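
A C model of the return contract spelled out in the header comment (the model itself cannot fault, so the -EFAULT path taken by the fixup above is only noted):

long strncpy_from_user_model(char *dst, const char *src, long count)
{
        long i = 0;

        while (i < count) {
                char c = src[i];        /* a faulting load => -EFAULT */
                dst[i++] = c;
                if (c == '\0')
                        return i - 1;   /* length without the NUL */
        }
        return count;                   /* no NUL within count bytes */
}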

arch/avr32/lib/strnlen_user.S Normal file

@@ -0,0 +1,67 @@
/*
* Copy to/from userspace with optional address space checking.
*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm.h>
.text
.align 1
.global strnlen_user
.type strnlen_user, "function"
strnlen_user:
branch_if_kernel r8, __strnlen_user
sub r8, r11, 1
add r8, r12
retcs 0
brmi adjust_length /* do a closer inspection */
.global __strnlen_user
.type __strnlen_user, "function"
__strnlen_user:
mov r10, r12
10: ld.ub r8, r12++
cp.w r8, 0
breq 2f
sub r11, 1
brne 10b
sub r12, -1
2: sub r12, r10
retal r12
.type adjust_length, "function"
adjust_length:
cp.w r12, 0 /* addr must always be < TASK_SIZE */
retmi 0
pushm lr
lddpc lr, _task_size
sub r11, lr, r12
mov r9, r11
call __strnlen_user
cp.w r12, r9
brgt 1f
popm pc
1: popm pc, r12=0
.align 2
_task_size:
.long TASK_SIZE
.section .fixup, "ax"
.align 1
19: retal 0
.section __ex_table, "a"
.align 2
.long 10b, 19b