Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@@ -0,0 +1,51 @@
# List of files in the vdso, has to be asm only for now
obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
# Build rules
# 'targets' tells kbuild which files participate in if_changed command-line
# checksumming, so a changed command re-triggers the build.
targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
# This object runs in userland; keep kernel GCOV instrumentation out of it.
GCOV_PROFILE := n
# Build a freestanding shared object: no libc, no compiler runtime.
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
asflags-y := -D__VDSO64__ -s
# The wrapper embeds the finished .so into the kernel image via .incbin.
obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
	$(call if_changed,vdso64ld)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S
	$(call if_changed_dep,vdso64as)
# actual build commands
# Link via $(CC) (not ld directly) so the right multilib/target flags apply.
quiet_cmd_vdso64ld = VDSO64L $@
      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdso64as = VDSO64A $@
      cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
vdso64.so: $(obj)/vdso64.so.dbg
	@mkdir -p $(MODLIB)/vdso
	$(call cmd,vdso_install)
vdso_install: vdso64.so

View file

@@ -0,0 +1,84 @@
/*
* vDSO provided cache flush routines
*
* Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
* IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
.text
/*
* Default "generic" version of __kernel_sync_dicache.
*
* void __kernel_sync_dicache(unsigned long start, unsigned long end)
*
* Flushes the data cache & invalidate the instruction cache for the
* provided range [start, end[
*/
V_FUNCTION_BEGIN(__kernel_sync_dicache)
  .cfi_startproc
	mflr	r12			/* save return address; bl clobbers LR */
  .cfi_register lr,r12
	mr	r11,r3			/* keep 'start' across the datapage call */
	bl	V_LOCAL_FUNC(__get_datapage)	/* r3 = vDSO data page */
	mtlr	r12			/* restore caller's return address */
	mr	r10,r3
	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)	/* D-cache line size in bytes */
	addi	r5,r7,-1
	andc	r6,r11,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)	/* log2(D-cache line size) */
	srw.	r8,r8,r9		/* compute line count */
	crclr	cr0*4+so		/* clear SO bit: signal success to caller */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6			/* push one D-cache line out to memory */
	add	r6,r6,r7
	bdnz	1b
	sync				/* order all dcbst before the icbi pass */
/* Now invalidate the instruction cache */
	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)	/* I-cache line size in bytes */
	addi	r5,r7,-1
	andc	r6,r11,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)	/* log2(I-cache line size) */
	srw.	r8,r8,r9		/* compute line count */
	crclr	cr0*4+so
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6			/* invalidate one I-cache line */
	add	r6,r6,r7
	bdnz	2b
	isync				/* discard any stale prefetched insns */
	li	r3,0			/* return 0 (success) */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache)
/*
* POWER5 version of __kernel_sync_dicache
*/
V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
  .cfi_startproc
	/* NOTE(review): no per-line dcbst/icbi loop here — presumably the
	 * POWER5 cache hierarchy keeps I/D coherent so a sync/isync pair
	 * suffices; confirm against the CPU feature selection that picks
	 * this variant.
	 */
	crclr	cr0*4+so		/* clear SO bit: signal success */
	sync
	isync
	li	r3,0			/* return 0 (success) */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache_p5)

View file

@@ -0,0 +1,85 @@
/*
* Access to the shared data page by the vDSO & syscall map
*
* Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
.text
V_FUNCTION_BEGIN(__get_datapage)
  .cfi_startproc
	/* We don't want that exposed or overridable as we want other objects
	 * to be able to bl directly to here
	 */
	.protected __get_datapage
	.hidden __get_datapage
	mflr	r0			/* preserve caller's LR; bcl clobbers it */
  .cfi_register lr,r0
	/* bcl 20,31,<label> is the classic "get PC" idiom: it branches to 1:
	 * with LR set to the address of the word that follows, i.e. the
	 * __kernel_datapage_offset slot below.
	 */
	bcl	20,31,1f
	.global	__kernel_datapage_offset;
__kernel_datapage_offset:
	/* Statically zero; presumably rewritten by the kernel when the vDSO
	 * is set up so it holds the offset to the data page — TODO confirm
	 * against the vdso mapping code.
	 */
	.long	0
1:
	mflr	r3			/* r3 = &__kernel_datapage_offset */
	mtlr	r0			/* restore caller's return address */
	lwz	r0,0(r3)		/* load the stored offset */
	add	r3,r0,r3		/* datapage = &offset + offset */
	blr
  .cfi_endproc
V_FUNCTION_END(__get_datapage)
/*
* void *__kernel_get_syscall_map(unsigned int *syscall_count) ;
*
* returns a pointer to the syscall map. the map is agnostic to the
* size of "long", unlike kernel bitops, it stores bits from top to
* bottom so that memory actually contains a linear bitmap
* check for syscall N by testing bit (0x80000000 >> (N & 0x1f)) of
* 32 bits int at N >> 5.
*/
V_FUNCTION_BEGIN(__kernel_get_syscall_map)
  .cfi_startproc
	mflr	r12			/* save return address around the bl */
  .cfi_register lr,r12
	mr	r4,r3			/* r4 = syscall_count out-pointer */
	bl	V_LOCAL_FUNC(__get_datapage)	/* r3 = vDSO data page */
	mtlr	r12
	addi	r3,r3,CFG_SYSCALL_MAP64	/* return &datapage->syscall map */
	cmpli	cr0,r4,0		/* NULL out-pointer is allowed */
	crclr	cr0*4+so		/* clear SO bit: signal success */
	beqlr
	li	r0,__NR_syscalls
	stw	r0,0(r4)		/* *syscall_count = __NR_syscalls */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_get_syscall_map)
/*
* void unsigned long __kernel_get_tbfreq(void);
*
* returns the timebase frequency in HZ
*/
V_FUNCTION_BEGIN(__kernel_get_tbfreq)
  .cfi_startproc
	mflr	r12			/* save return address around the bl */
  .cfi_register lr,r12
	bl	V_LOCAL_FUNC(__get_datapage)	/* r3 = vDSO data page */
	ld	r3,CFG_TB_TICKS_PER_SEC(r3)	/* timebase ticks per second */
	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit: signal success */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)

View file

@@ -0,0 +1,45 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2012
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
.text
/*
* Exact prototype of getcpu
*
* int __kernel_getcpu(unsigned *cpu, unsigned *node);
*
*/
V_FUNCTION_BEGIN(__kernel_getcpu)
  .cfi_startproc
	mfspr	r5,SPRN_SPRG_VDSO_READ	/* SPR holds cpu/node packed by kernel */
	cmpdi	cr0,r3,0		/* cpu out-pointer may be NULL */
	cmpdi	cr1,r4,0		/* node out-pointer may be NULL */
	clrlwi	r6,r5,16		/* r6 = low 16 bits -> stored to *cpu */
	rlwinm	r7,r5,16,31-15,31-0	/* r7 = high 16 bits -> stored to *node */
	beq	cr0,1f			/* skip store when cpu == NULL */
	stw	r6,0(r3)
1:	beq	cr1,2f			/* skip store when node == NULL */
	stw	r7,0(r4)
2:	crclr	cr0*4+so		/* clear SO bit: signal success */
	li	r3,0			/* always success */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_getcpu)

View file

@@ -0,0 +1,244 @@
/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* ppc64 kernel for use in the vDSO
*
* Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
* IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
.text
/*
* Exact prototype of gettimeofday
*
* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
*
*/
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12			/* save return address; bl clobbers LR */
  .cfi_register lr,r12
	mr	r11,r3			/* r11 holds tv */
	mr	r10,r4			/* r10 holds tz */
	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page */
	cmpldi	r11,0			/* check if tv is NULL */
	beq	2f
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so __do_get_tspec yields microseconds */
	bl	V_LOCAL_FUNC(__do_get_tspec)	/* get sec/us from tb & kernel */
	std	r4,TVAL64_TV_SEC(r11)	/* store sec in tv */
	std	r5,TVAL64_TV_USEC(r11)	/* store usec in tv */
	/* r3 still holds the datapage: __do_get_tspec doesn't clobber it */
2:	cmpldi	r10,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r3)
	stw	r4,TZONE_TZ_MINWEST(r10)
	stw	r5,TZONE_TZ_DSTTIME(r10)
1:	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit: signal success */
	li	r3,0			/* always success */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
/*
* Exact prototype of clock_gettime()
*
* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
*
*/
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* any other id: take syscall fallback */
	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	V_LOCAL_FUNC(__do_get_tspec)	/* get time from tb & kernel */
	bne	cr1,80f			/* if not monotonic, all done
					 * (cr1 still holds the MONOTONIC test) */
	/*
	 * CLOCK_MONOTONIC
	 */
	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_tspec.
	 * At this point, r4,r5 contain our sec/nsec values.
	 */
	lwa	r6,WTOM_CLOCK_SEC(r3)
	lwa	r9,WTOM_CLOCK_NSEC(r3)
	/* We now have our result in r6,r9. We create a fake dependency
	 * on that result and re-check the counter
	 */
	or	r0,r6,r9
	xor	r0,r0,r0		/* r0 = 0, but data-dependent on r6/r9 */
	add	r3,r3,r0		/* no-op add: orders the reload below
					 * after the wtom reads */
	ld	r0,CFG_TB_UPDATE_COUNT(r3)
	cmpld	cr0,r0,r8		/* check if updated */
	bne-	50b			/* sequence count moved: retry it all */
	/* Add wall->monotonic offset and check for overflow or underflow.
	 */
	add	r4,r4,r6
	add	r5,r5,r9
	cmpd	cr0,r5,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpdi	cr1,r5,0		/* nsec < 0 ? */
	blt	1f
	subf	r5,r7,r5		/* carry a whole second up */
	addi	r4,r4,1
1:	bge	cr1,80f
	addi	r4,r4,-1		/* borrow a whole second */
	add	r5,r5,r7
80:	std	r4,TSPC64_TV_SEC(r11)
	std	r5,TSPC64_TV_NSEC(r11)
	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit: signal success */
	li	r3,0
	blr
	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
/*
* Exact prototype of clock_getres()
*
* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
*
*/
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* unsupported id: syscall fallback */
	li	r3,0			/* return value 0 (success) */
	cmpli	cr0,r4,0		/* res == NULL is allowed */
	crclr	cr0*4+so		/* clear SO bit: signal success */
	beqlr
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	std	r3,TSPC64_TV_SEC(r4)	/* res->tv_sec = 0 */
	std	r5,TSPC64_TV_NSEC(r4)	/* res->tv_nsec = CLOCK_REALTIME_RES */
	blr
	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
/*
* Exact prototype of time()
*
* time_t time(time *t);
*
*/
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12			/* save return address; bl clobbers LR */
  .cfi_register lr,r12
	mr	r11,r3			/* r11 holds t */
	bl	V_LOCAL_FUNC(__get_datapage)
	/* Single 64-bit load of the second count; no seqcount loop here —
	 * presumably acceptable at one-second granularity (TODO confirm).
	 */
	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
	cmpldi	r11,0			/* check if t is NULL */
	beq	2f
	std	r4,0(r11)		/* store result at *t */
2:	mtlr	r12
	crclr	cr0*4+so		/* clear SO bit: signal success */
	mr	r3,r4			/* also return the seconds value */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)
/*
* This is the core of clock_gettime() and gettimeofday(),
* it returns the current time in r4 (seconds) and r5.
* On entry, r7 gives the resolution of r5, either USEC_PER_SEC
* or NSEC_PER_SEC, giving r5 in microseconds or nanoseconds.
* It expects the datapage ptr in r3 and doesn't clobber it.
* It clobbers r0, r6 and r9.
* On return, r8 contains the counter value that can be reused.
* This clobbers cr0 but not any other cr field.
*/
V_FUNCTION_BEGIN(__do_get_tspec)
  .cfi_startproc
	/* check for update count & load values */
1:	ld	r8,CFG_TB_UPDATE_COUNT(r3)
	andi.	r0,r8,1		/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8	/* create dependency (r0 = 0, depends on r8) */
	add	r3,r3,r0	/* no-op: forces loads below after count read */
	/* Get TB & offset it. We use the MFTB macro which will generate
	 * workaround code for Cell.
	 */
	MFTB(r6)
	ld	r9,CFG_TB_ORIG_STAMP(r3)
	subf	r6,r9,r6	/* ticks elapsed since the kernel's stamp */
	/* Scale result */
	ld	r5,CFG_TB_TO_XS(r3)
	sldi	r6,r6,12	/* compute time since stamp_xtime */
	mulhdu	r6,r6,r5	/* in units of 2^-32 seconds */
	/* Add stamp since epoch */
	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
	lwz	r5,STAMP_SEC_FRAC(r3)
	or	r0,r4,r5	/* fake dependency on everything loaded... */
	or	r0,r0,r6
	xor	r0,r0,r0	/* ...then fold r0 back to zero */
	add	r3,r3,r0	/* no-op: orders the count reload after loads */
	ld	r0,CFG_TB_UPDATE_COUNT(r3)
	cmpld	r0,r8		/* check if updated */
	bne-	1b		/* reload if so */
	/* convert to seconds & nanoseconds and add to stamp */
	add	r6,r6,r5	/* add on fractional seconds of xtime */
	mulhwu	r5,r6,r7	/* compute micro or nanoseconds and */
	srdi	r6,r6,32	/* seconds since stamp_xtime */
	clrldi	r5,r5,32
	add	r4,r4,r6
	blr
  .cfi_endproc
V_FUNCTION_END(__do_get_tspec)

View file

@@ -0,0 +1 @@
/* The ELF note content is shared with the 32-bit vDSO, so just reuse it. */
#include "../vdso32/note.S"

View file

@@ -0,0 +1,311 @@
/*
* Signal trampoline for 64 bits processes in a ppc64 kernel for
* use in the vDSO
*
* Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.
* Copyright (C) 2004 Alan Modra (amodra@au.ibm.com)), IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#include <asm/ptrace.h> /* XXX for __SIGNAL_FRAMESIZE */
	.text
/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
   the return address to get an address in the middle of the presumed
   call instruction. Since we don't have a call here, we artificially
   extend the range covered by the unwind info by padding before the
   real start. */
	nop
	.balign 8
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
.Lsigrt_start = . - 4		/* covers the nop, see the hack note above */
	addi	r1, r1, __SIGNAL_FRAMESIZE	/* pop the signal frame */
	li	r0,__NR_rt_sigreturn
	sc				/* kernel restores interrupted context */
.Lsigrt_end:
V_FUNCTION_END(__kernel_sigtramp_rt64)
/* The ".balign 8" above and the following zeros mimic the old stack
   trampoline layout. The last magic value is the ucontext pointer,
   chosen in such a way that older libgcc unwind code returns a zero
   for a sigcontext pointer. */
	.long 0,0,0
	.quad 0,-21*8
/* The macros below hand-assemble DWARF call-frame-information (CFI)
   expressions byte by byte; each /* DW_* *​/ comment names the opcode
   being emitted. */
/* Register r1 can be found at offset 8 of a pt_regs structure.
   A pointer to the pt_regs is stored in memory at the old sp plus PTREGS. */
#define cfa_save \
  .byte 0x0f; /* DW_CFA_def_cfa_expression */ \
  .uleb128 9f - 1f; /* length */ \
1: \
  .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \
  .byte 0x06; /* DW_OP_deref */ \
  .byte 0x23; .uleb128 RSIZE; /* DW_OP_plus_uconst */ \
  .byte 0x06; /* DW_OP_deref */ \
9:
/* Register REGNO can be found at offset OFS of a pt_regs structure.
   A pointer to the pt_regs is stored in memory at the old sp plus PTREGS. */
#define rsave(regno, ofs) \
  .byte 0x10; /* DW_CFA_expression */ \
  .uleb128 regno; /* regno */ \
  .uleb128 9f - 1f; /* length */ \
1: \
  .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \
  .byte 0x06; /* DW_OP_deref */ \
  .ifne ofs; \
    .byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst */ \
  .endif; \
9:
/* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16
   of the VMX reg struct. A pointer to the VMX reg struct is at VREGS in
   the pt_regs struct. This macro is for REGNO == 0, and contains
   'subroutines' that the other macros jump to. */
#define vsave_msr0(regno) \
  .byte 0x10; /* DW_CFA_expression */ \
  .uleb128 regno + 77; /* regno */ \
  .uleb128 9f - 1f; /* length */ \
1: \
  .byte 0x30 + regno; /* DW_OP_lit0 */ \
2: \
  .byte 0x40; /* DW_OP_lit16 */ \
  .byte 0x1e; /* DW_OP_mul */ \
3: \
  .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \
  .byte 0x06; /* DW_OP_deref */ \
  .byte 0x12; /* DW_OP_dup */ \
  .byte 0x23; /* DW_OP_plus_uconst */ \
  .uleb128 33*RSIZE; /* msr offset */ \
  .byte 0x06; /* DW_OP_deref */ \
  .byte 0x0c; .long 1 << 25; /* DW_OP_const4u */ \
  .byte 0x1a; /* DW_OP_and */ \
  .byte 0x12; /* DW_OP_dup, ret 0 if bra taken */ \
  .byte 0x30; /* DW_OP_lit0 */ \
  .byte 0x29; /* DW_OP_eq */ \
  .byte 0x28; .short 0x7fff; /* DW_OP_bra to end */ \
  .byte 0x13; /* DW_OP_drop, pop the 0 */ \
  .byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst */ \
  .byte 0x06; /* DW_OP_deref */ \
  .byte 0x22; /* DW_OP_plus */ \
  .byte 0x2f; .short 0x7fff; /* DW_OP_skip to end */ \
9:
/* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16
   of the VMX reg struct. REGNO is 1 thru 31. */
#define vsave_msr1(regno) \
  .byte 0x10; /* DW_CFA_expression */ \
  .uleb128 regno + 77; /* regno */ \
  .uleb128 9f - 1f; /* length */ \
1: \
  .byte 0x30 + regno; /* DW_OP_lit n */ \
  .byte 0x2f; .short 2b - 9f; /* DW_OP_skip */ \
9:
/* If msr bit 1<<25 is set, then VMX register REGNO is at offset OFS of
   the VMX save block. */
#define vsave_msr2(regno, ofs) \
  .byte 0x10; /* DW_CFA_expression */ \
  .uleb128 regno + 77; /* regno */ \
  .uleb128 9f - 1f; /* length */ \
1: \
  .byte 0x0a; .short ofs; /* DW_OP_const2u */ \
  .byte 0x2f; .short 3b - 9f; /* DW_OP_skip */ \
9:
/* VMX register REGNO is at offset OFS of the VMX save area. */
#define vsave(regno, ofs) \
  .byte 0x10; /* DW_CFA_expression */ \
  .uleb128 regno + 77; /* regno */ \
  .uleb128 9f - 1f; /* length */ \
1: \
  .byte 0x71; .sleb128 PTREGS; /* DW_OP_breg1 */ \
  .byte 0x06; /* DW_OP_deref */ \
  .byte 0x23; .uleb128 VREGS; /* DW_OP_plus_uconst */ \
  .byte 0x06; /* DW_OP_deref */ \
  .byte 0x23; .uleb128 ofs; /* DW_OP_plus_uconst */ \
9:
/* This is where the pt_regs pointer can be found on the stack. */
#define PTREGS 128+168+56
/* Size of regs. */
#define RSIZE 8
/* Size of CR reg in DWARF unwind info. */
#define CRSIZE 4
/* Offset of CR reg within a full word. */
#ifdef __LITTLE_ENDIAN__
#define CROFF 0
#else
#define CROFF (RSIZE - CRSIZE)
#endif
/* This is the offset of the VMX reg pointer. */
#define VREGS 48*RSIZE+33*8
/* Describe where general purpose regs are saved. */
/* (r1 itself is the frame base, described by cfa_save, so no rsave(1,...)) */
#define EH_FRAME_GEN \
  cfa_save; \
  rsave ( 0,  0*RSIZE); \
  rsave ( 2,  2*RSIZE); \
  rsave ( 3,  3*RSIZE); \
  rsave ( 4,  4*RSIZE); \
  rsave ( 5,  5*RSIZE); \
  rsave ( 6,  6*RSIZE); \
  rsave ( 7,  7*RSIZE); \
  rsave ( 8,  8*RSIZE); \
  rsave ( 9,  9*RSIZE); \
  rsave (10, 10*RSIZE); \
  rsave (11, 11*RSIZE); \
  rsave (12, 12*RSIZE); \
  rsave (13, 13*RSIZE); \
  rsave (14, 14*RSIZE); \
  rsave (15, 15*RSIZE); \
  rsave (16, 16*RSIZE); \
  rsave (17, 17*RSIZE); \
  rsave (18, 18*RSIZE); \
  rsave (19, 19*RSIZE); \
  rsave (20, 20*RSIZE); \
  rsave (21, 21*RSIZE); \
  rsave (22, 22*RSIZE); \
  rsave (23, 23*RSIZE); \
  rsave (24, 24*RSIZE); \
  rsave (25, 25*RSIZE); \
  rsave (26, 26*RSIZE); \
  rsave (27, 27*RSIZE); \
  rsave (28, 28*RSIZE); \
  rsave (29, 29*RSIZE); \
  rsave (30, 30*RSIZE); \
  rsave (31, 31*RSIZE); \
  rsave (67, 32*RSIZE); /* ap, used as temp for nip */ \
  rsave (65, 36*RSIZE); /* lr */ \
  rsave (68, 38*RSIZE + CROFF); /* cr fields */ \
  rsave (69, 38*RSIZE + CROFF); \
  rsave (70, 38*RSIZE + CROFF); \
  rsave (71, 38*RSIZE + CROFF); \
  rsave (72, 38*RSIZE + CROFF); \
  rsave (73, 38*RSIZE + CROFF); \
  rsave (74, 38*RSIZE + CROFF); \
  rsave (75, 38*RSIZE + CROFF)
/* Describe where the FP regs are saved. */
#define EH_FRAME_FP \
  rsave (32, 48*RSIZE +  0*8); \
  rsave (33, 48*RSIZE +  1*8); \
  rsave (34, 48*RSIZE +  2*8); \
  rsave (35, 48*RSIZE +  3*8); \
  rsave (36, 48*RSIZE +  4*8); \
  rsave (37, 48*RSIZE +  5*8); \
  rsave (38, 48*RSIZE +  6*8); \
  rsave (39, 48*RSIZE +  7*8); \
  rsave (40, 48*RSIZE +  8*8); \
  rsave (41, 48*RSIZE +  9*8); \
  rsave (42, 48*RSIZE + 10*8); \
  rsave (43, 48*RSIZE + 11*8); \
  rsave (44, 48*RSIZE + 12*8); \
  rsave (45, 48*RSIZE + 13*8); \
  rsave (46, 48*RSIZE + 14*8); \
  rsave (47, 48*RSIZE + 15*8); \
  rsave (48, 48*RSIZE + 16*8); \
  rsave (49, 48*RSIZE + 17*8); \
  rsave (50, 48*RSIZE + 18*8); \
  rsave (51, 48*RSIZE + 19*8); \
  rsave (52, 48*RSIZE + 20*8); \
  rsave (53, 48*RSIZE + 21*8); \
  rsave (54, 48*RSIZE + 22*8); \
  rsave (55, 48*RSIZE + 23*8); \
  rsave (56, 48*RSIZE + 24*8); \
  rsave (57, 48*RSIZE + 25*8); \
  rsave (58, 48*RSIZE + 26*8); \
  rsave (59, 48*RSIZE + 27*8); \
  rsave (60, 48*RSIZE + 28*8); \
  rsave (61, 48*RSIZE + 29*8); \
  rsave (62, 48*RSIZE + 30*8); \
  rsave (63, 48*RSIZE + 31*8)
/* Describe where the VMX regs are saved. */
#ifdef CONFIG_ALTIVEC
#define EH_FRAME_VMX \
  vsave_msr0 ( 0); \
  vsave_msr1 ( 1); \
  vsave_msr1 ( 2); \
  vsave_msr1 ( 3); \
  vsave_msr1 ( 4); \
  vsave_msr1 ( 5); \
  vsave_msr1 ( 6); \
  vsave_msr1 ( 7); \
  vsave_msr1 ( 8); \
  vsave_msr1 ( 9); \
  vsave_msr1 (10); \
  vsave_msr1 (11); \
  vsave_msr1 (12); \
  vsave_msr1 (13); \
  vsave_msr1 (14); \
  vsave_msr1 (15); \
  vsave_msr1 (16); \
  vsave_msr1 (17); \
  vsave_msr1 (18); \
  vsave_msr1 (19); \
  vsave_msr1 (20); \
  vsave_msr1 (21); \
  vsave_msr1 (22); \
  vsave_msr1 (23); \
  vsave_msr1 (24); \
  vsave_msr1 (25); \
  vsave_msr1 (26); \
  vsave_msr1 (27); \
  vsave_msr1 (28); \
  vsave_msr1 (29); \
  vsave_msr1 (30); \
  vsave_msr1 (31); \
  vsave_msr2 (33, 32*16+12); \
  vsave      (32, 33*16)
#else
#define EH_FRAME_VMX
#endif
	/* Hand-rolled .eh_frame: one CIE followed by one FDE covering the
	   trampoline, with register locations supplied by the macros above. */
	.section .eh_frame,"a",@progbits
.Lcie:
	.long .Lcie_end - .Lcie_start
.Lcie_start:
	.long 0			/* CIE ID */
	.byte 1			/* Version number */
	.string "zRS"		/* NUL-terminated augmentation string */
	.uleb128 4		/* Code alignment factor */
	.sleb128 -8		/* Data alignment factor */
	.byte 67		/* Return address register column, ap */
	.uleb128 1		/* Augmentation value length */
	.byte 0x14		/* DW_EH_PE_pcrel | DW_EH_PE_udata8. */
	.byte 0x0c,1,0		/* DW_CFA_def_cfa: r1 ofs 0 */
	.balign 8
.Lcie_end:
	.long .Lfde0_end - .Lfde0_start
.Lfde0_start:
	.long .Lfde0_start - .Lcie	/* CIE pointer. */
	.quad .Lsigrt_start - .		/* PC start, length */
	.quad .Lsigrt_end - .Lsigrt_start
	.uleb128 0			/* Augmentation */
	EH_FRAME_GEN
	EH_FRAME_FP
	EH_FRAME_VMX
# Do we really need to describe the frame at this point? ie. will
# we ever have some call chain that returns somewhere past the addi?
# I don't think so, since gcc doesn't support async signals.
#	.byte 0x41		/* DW_CFA_advance_loc 1*4 */
#undef PTREGS
#define PTREGS 168+56
#	EH_FRAME_GEN
#	EH_FRAME_FP
#	EH_FRAME_VMX
	.balign 8
.Lfde0_end:

View file

@@ -0,0 +1,158 @@
/*
 * This is the infamous ld script for the 64 bits vdso
 * library
 */
#include <asm/vdso.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
#else
OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
#endif
OUTPUT_ARCH(powerpc:common64)
ENTRY(_start)
SECTIONS
{
	. = VDSO64_LBASE + SIZEOF_HEADERS;
	/* ":text" etc. after a section assigns it to a program header
	   declared in PHDRS below; everything lives in the one PT_LOAD. */
	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }
	.note		: { *(.note.*) }		:text	:note
	. = ALIGN(16);
	.text : {
		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
		*(.sfpr .glink)
	}						:text
	PROVIDE(__etext = .);
	PROVIDE(_etext = .);
	PROVIDE(etext = .);
	/* CPU/MMU/firmware feature fixup tables, patched by the kernel
	   before the vDSO is handed to userland. */
	. = ALIGN(8);
	__ftr_fixup	: { *(__ftr_fixup) }
	. = ALIGN(8);
	__mmu_ftr_fixup	: { *(__mmu_ftr_fixup) }
	. = ALIGN(8);
	__lwsync_fixup	: { *(__lwsync_fixup) }
	. = ALIGN(8);
	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }
	/*
	 * Other stuff is appended to the text segment:
	 */
	.rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }
	.rodata1	: { *(.rodata1) }
	.dynamic	: { *(.dynamic) }		:text	:dynamic
	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
	.gcc_except_table : { *(.gcc_except_table) }
	.rela.dyn ALIGN(8) : { *(.rela.dyn) }
	.opd ALIGN(8)	: { KEEP (*(.opd)) }
	.got ALIGN(8)	: { *(.got .toc) }
	_end = .;
	PROVIDE(end = .);
	/*
	 * Stabs debugging sections are here too.
	 */
	.stab 0 : { *(.stab) }
	.stabstr 0 : { *(.stabstr) }
	.stab.excl 0 : { *(.stab.excl) }
	.stab.exclstr 0 : { *(.stab.exclstr) }
	.stab.index 0 : { *(.stab.index) }
	.stab.indexstr 0 : { *(.stab.indexstr) }
	.comment 0 : { *(.comment) }
	/*
	 * DWARF debug sections.
	 * Symbols in the DWARF debugging sections are relative to the beginning
	 * of the section so we begin them at 0.
	 */
	/* DWARF 1 */
	.debug 0 : { *(.debug) }
	.line 0 : { *(.line) }
	/* GNU DWARF 1 extensions */
	.debug_srcinfo 0 : { *(.debug_srcinfo) }
	.debug_sfnames 0 : { *(.debug_sfnames) }
	/* DWARF 1.1 and DWARF 2 */
	.debug_aranges 0 : { *(.debug_aranges) }
	.debug_pubnames 0 : { *(.debug_pubnames) }
	/* DWARF 2 */
	.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
	.debug_abbrev 0 : { *(.debug_abbrev) }
	.debug_line 0 : { *(.debug_line) }
	.debug_frame 0 : { *(.debug_frame) }
	.debug_str 0 : { *(.debug_str) }
	.debug_loc 0 : { *(.debug_loc) }
	.debug_macinfo 0 : { *(.debug_macinfo) }
	/* SGI/MIPS DWARF 2 extensions */
	.debug_weaknames 0 : { *(.debug_weaknames) }
	.debug_funcnames 0 : { *(.debug_funcnames) }
	.debug_typenames 0 : { *(.debug_typenames) }
	.debug_varnames 0 : { *(.debug_varnames) }
	/* No writable data belongs in a vDSO: drop it all. */
	/DISCARD/ : {
		*(.note.GNU-stack)
		*(.branch_lt)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
	}
}
/*
 * Very old versions of ld do not recognize this name token; use the constant.
 */
#define PT_GNU_EH_FRAME	0x6474e550
/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text		PT_LOAD FILEHDR PHDRS FLAGS(5);	/* PF_R|PF_X */
	dynamic		PT_DYNAMIC FLAGS(4);		/* PF_R */
	note		PT_NOTE FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}
/*
 * This controls what symbols we export from the DSO.
 */
VERSION
{
	VDSO_VERSION_STRING {
	global:
		/*
		 * Has to be there for the kernel to find
		 */
		__kernel_datapage_offset;
		__kernel_get_syscall_map;
		__kernel_gettimeofday;
		__kernel_clock_gettime;
		__kernel_clock_getres;
		__kernel_get_tbfreq;
		__kernel_sync_dicache;
		__kernel_sync_dicache_p5;
		__kernel_sigtramp_rt64;
		__kernel_getcpu;
		__kernel_time;
	local: *;
	};
}

View file

@@ -0,0 +1,13 @@
#include <linux/linkage.h>
#include <asm/page.h>
/* Embed the built vDSO image into the kernel's data, page-aligned so the
 * whole [vdso64_start, vdso64_end) range can be mapped into userland.
 */
	__PAGE_ALIGNED_DATA
	.globl vdso64_start, vdso64_end
	.balign PAGE_SIZE
vdso64_start:
	/* NOTE(review): this embeds the unstripped .so.dbg while the Makefile
	 * forces the dependency through vdso64.so — confirm that is intended.
	 */
	.incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
	.balign PAGE_SIZE
vdso64_end:
	.previous