mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 17:18:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
6
arch/arm64/mm/Makefile
Normal file
6
arch/arm64/mm/Makefile
Normal file
|
@ -0,0 +1,6 @@
|
|||
obj-y := dma-mapping.o extable.o fault.o init.o \
|
||||
cache.o copypage.o flush.o \
|
||||
ioremap.o mmap.o pgd.o mmu.o \
|
||||
context.o proc.o pageattr.o
|
||||
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
|
||||
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
|
284
arch/arm64/mm/cache.S
Normal file
284
arch/arm64/mm/cache.S
Normal file
|
@ -0,0 +1,284 @@
|
|||
/*
|
||||
* Cache maintenance
|
||||
*
|
||||
* Copyright (C) 2001 Deep Blue Solutions Ltd.
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/alternative-asm.h>
|
||||
|
||||
#include "proc-macros.S"
|
||||
|
||||
/*
|
||||
* __flush_dcache_all()
|
||||
*
|
||||
* Flush the whole D-cache.
|
||||
*
|
||||
* Corrupted registers: x0-x7, x9-x11
|
||||
*/
|
||||
__flush_dcache_all:
|
||||
dmb sy // ensure ordering with previous memory accesses
|
||||
mrs x0, clidr_el1 // read clidr
|
||||
and x3, x0, #0x7000000 // extract loc from clidr
|
||||
lsr x3, x3, #23 // left align loc bit field
|
||||
cbz x3, finished // if loc is 0, then no need to clean
|
||||
mov x10, #0 // start clean at cache level 0
|
||||
loop1:
|
||||
add x2, x10, x10, lsr #1 // work out 3x current cache level
|
||||
lsr x1, x0, x2 // extract cache type bits from clidr
|
||||
and x1, x1, #7 // mask of the bits for current cache only
|
||||
cmp x1, #2 // see what cache we have at this level
|
||||
b.lt skip // skip if no cache, or just i-cache
|
||||
save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
|
||||
msr csselr_el1, x10 // select current cache level in csselr
|
||||
isb // isb to sych the new cssr&csidr
|
||||
mrs x1, ccsidr_el1 // read the new ccsidr
|
||||
restore_irqs x9
|
||||
and x2, x1, #7 // extract the length of the cache lines
|
||||
add x2, x2, #4 // add 4 (line length offset)
|
||||
mov x4, #0x3ff
|
||||
and x4, x4, x1, lsr #3 // find maximum number on the way size
|
||||
clz w5, w4 // find bit position of way size increment
|
||||
mov x7, #0x7fff
|
||||
and x7, x7, x1, lsr #13 // extract max number of the index size
|
||||
loop2:
|
||||
mov x9, x4 // create working copy of max way size
|
||||
loop3:
|
||||
lsl x6, x9, x5
|
||||
orr x11, x10, x6 // factor way and cache number into x11
|
||||
lsl x6, x7, x2
|
||||
orr x11, x11, x6 // factor index number into x11
|
||||
dc cisw, x11 // clean & invalidate by set/way
|
||||
subs x9, x9, #1 // decrement the way
|
||||
b.ge loop3
|
||||
subs x7, x7, #1 // decrement the index
|
||||
b.ge loop2
|
||||
skip:
|
||||
add x10, x10, #2 // increment cache number
|
||||
cmp x3, x10
|
||||
b.gt loop1
|
||||
finished:
|
||||
mov x10, #0 // swith back to cache level 0
|
||||
msr csselr_el1, x10 // select current cache level in csselr
|
||||
dsb sy
|
||||
isb
|
||||
ret
|
||||
ENDPROC(__flush_dcache_all)
|
||||
|
||||
/*
|
||||
* flush_cache_all()
|
||||
*
|
||||
* Flush the entire cache system. The data cache flush is now achieved
|
||||
* using atomic clean / invalidates working outwards from L1 cache. This
|
||||
* is done using Set/Way based cache maintainance instructions. The
|
||||
* instruction cache can still be invalidated back to the point of
|
||||
* unification in a single instruction.
|
||||
*/
|
||||
ENTRY(flush_cache_all)
|
||||
mov x12, lr
|
||||
bl __flush_dcache_all
|
||||
mov x0, #0
|
||||
ic ialluis // I+BTB cache invalidate
|
||||
ret x12
|
||||
ENDPROC(flush_cache_all)
|
||||
|
||||
ENTRY(__flush_dcache_louis)
|
||||
dmb ish
|
||||
mrs x0, clidr_el1
|
||||
ands x3, x0, #(7 << 21)
|
||||
lsr x3, x3, #20 // w3 = LoUIS * 2
|
||||
b.eq level_is_zero
|
||||
mov x10, #0
|
||||
b loop1 // start flushing cache
|
||||
level_is_zero:
|
||||
ret
|
||||
ENDPROC(__flush_dcache_louis)
|
||||
|
||||
ENTRY(flush_cache_louis)
|
||||
mov x12, lr
|
||||
bl __flush_dcache_louis
|
||||
mov x0, #0
|
||||
ic ialluis
|
||||
ret x12
|
||||
ENDPROC(flush_cache_louis)
|
||||
|
||||
/*
|
||||
* flush_icache_range(start,end)
|
||||
*
|
||||
* Ensure that the I and D caches are coherent within specified region.
|
||||
* This is typically used when code has been written to a memory region,
|
||||
* and will be executed.
|
||||
*
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
ENTRY(flush_icache_range)
|
||||
/* FALLTHROUGH */
|
||||
|
||||
/*
|
||||
* __flush_cache_user_range(start,end)
|
||||
*
|
||||
* Ensure that the I and D caches are coherent within specified region.
|
||||
* This is typically used when code has been written to a memory region,
|
||||
* and will be executed.
|
||||
*
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
ENTRY(__flush_cache_user_range)
|
||||
dcache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
bic x4, x0, x3
|
||||
1:
|
||||
USER(9f, dc cvau, x4 ) // clean D line to PoU
|
||||
add x4, x4, x2
|
||||
cmp x4, x1
|
||||
b.lo 1b
|
||||
dsb ish
|
||||
|
||||
icache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
bic x4, x0, x3
|
||||
1:
|
||||
USER(9f, ic ivau, x4 ) // invalidate I line PoU
|
||||
add x4, x4, x2
|
||||
cmp x4, x1
|
||||
b.lo 1b
|
||||
9: // ignore any faulting cache operation
|
||||
dsb ish
|
||||
isb
|
||||
ret
|
||||
ENDPROC(flush_icache_range)
|
||||
ENDPROC(__flush_cache_user_range)
|
||||
|
||||
/*
|
||||
* __flush_dcache_area(kaddr, size)
|
||||
*
|
||||
* Ensure that the data held in the page kaddr is written back to the
|
||||
* page in question.
|
||||
*
|
||||
* - kaddr - kernel address
|
||||
* - size - size in question
|
||||
*/
|
||||
ENTRY(__flush_dcache_area)
|
||||
dcache_line_size x2, x3
|
||||
add x1, x0, x1
|
||||
sub x3, x2, #1
|
||||
bic x0, x0, x3
|
||||
1: dc civac, x0 // clean & invalidate D line / unified line
|
||||
add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 1b
|
||||
dsb sy
|
||||
ret
|
||||
ENDPROC(__flush_dcache_area)
|
||||
|
||||
/*
|
||||
* __inval_cache_range(start, end)
|
||||
* - start - start address of region
|
||||
* - end - end address of region
|
||||
*/
|
||||
ENTRY(__inval_cache_range)
|
||||
/* FALLTHROUGH */
|
||||
|
||||
/*
|
||||
* __dma_inv_range(start, end)
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
__dma_inv_range:
|
||||
dcache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
tst x1, x3 // end cache line aligned?
|
||||
bic x1, x1, x3
|
||||
b.eq 1f
|
||||
dc civac, x1 // clean & invalidate D / U line
|
||||
1: tst x0, x3 // start cache line aligned?
|
||||
bic x0, x0, x3
|
||||
b.eq 2f
|
||||
dc civac, x0 // clean & invalidate D / U line
|
||||
b 3f
|
||||
2: dc ivac, x0 // invalidate D / U line
|
||||
3: add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 2b
|
||||
dsb sy
|
||||
ret
|
||||
ENDPROC(__inval_cache_range)
|
||||
ENDPROC(__dma_inv_range)
|
||||
|
||||
/*
|
||||
* __dma_clean_range(start, end)
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
__dma_clean_range:
|
||||
dcache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
bic x0, x0, x3
|
||||
1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
|
||||
add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 1b
|
||||
dsb sy
|
||||
ret
|
||||
ENDPROC(__dma_clean_range)
|
||||
|
||||
/*
|
||||
* __dma_flush_range(start, end)
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
ENTRY(__dma_flush_range)
|
||||
dcache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
bic x0, x0, x3
|
||||
1: dc civac, x0 // clean & invalidate D / U line
|
||||
add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 1b
|
||||
dsb sy
|
||||
ret
|
||||
ENDPROC(__dma_flush_range)
|
||||
|
||||
/*
|
||||
* __dma_map_area(start, size, dir)
|
||||
* - start - kernel virtual start address
|
||||
* - size - size of region
|
||||
* - dir - DMA direction
|
||||
*/
|
||||
ENTRY(__dma_map_area)
|
||||
add x1, x1, x0
|
||||
cmp w2, #DMA_FROM_DEVICE
|
||||
b.eq __dma_inv_range
|
||||
b __dma_clean_range
|
||||
ENDPROC(__dma_map_area)
|
||||
|
||||
/*
|
||||
* __dma_unmap_area(start, size, dir)
|
||||
* - start - kernel virtual start address
|
||||
* - size - size of region
|
||||
* - dir - DMA direction
|
||||
*/
|
||||
ENTRY(__dma_unmap_area)
|
||||
add x1, x1, x0
|
||||
cmp w2, #DMA_TO_DEVICE
|
||||
b.ne __dma_inv_range
|
||||
ret
|
||||
ENDPROC(__dma_unmap_area)
|
159
arch/arm64/mm/context.c
Normal file
159
arch/arm64/mm/context.c
Normal file
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Based on arch/arm/mm/context.c
|
||||
*
|
||||
* Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/percpu.h>
|
||||
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cachetype.h>
|
||||
|
||||
#define asid_bits(reg) \
|
||||
(((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)
|
||||
|
||||
#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS)
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
|
||||
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
|
||||
|
||||
/*
|
||||
* We fork()ed a process, and we need a new context for the child to run in.
|
||||
*/
|
||||
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
mm->context.id = 0;
|
||||
raw_spin_lock_init(&mm->context.id_lock);
|
||||
}
|
||||
|
||||
static void flush_context(void)
|
||||
{
|
||||
/* set the reserved TTBR0 before flushing the TLB */
|
||||
cpu_set_reserved_ttbr0();
|
||||
flush_tlb_all();
|
||||
if (icache_is_aivivt())
|
||||
__flush_icache_all();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* Locking needed for multi-threaded applications where the same
|
||||
* mm->context.id could be set from different CPUs during the
|
||||
* broadcast. This function is also called via IPI so the
|
||||
* mm->context.id_lock has to be IRQ-safe.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&mm->context.id_lock, flags);
|
||||
if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
|
||||
/*
|
||||
* Old version of ASID found. Set the new one and reset
|
||||
* mm_cpumask(mm).
|
||||
*/
|
||||
mm->context.id = asid;
|
||||
cpumask_clear(mm_cpumask(mm));
|
||||
}
|
||||
raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
|
||||
|
||||
/*
|
||||
* Set the mm_cpumask(mm) bit for the current CPU.
|
||||
*/
|
||||
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset the ASID on the current CPU. This function call is broadcast from the
|
||||
* CPU handling the ASID rollover and holding cpu_asid_lock.
|
||||
*/
|
||||
static void reset_context(void *info)
|
||||
{
|
||||
unsigned int asid;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct mm_struct *mm = current->active_mm;
|
||||
|
||||
smp_rmb();
|
||||
asid = cpu_last_asid + cpu;
|
||||
|
||||
flush_context();
|
||||
set_mm_context(mm, asid);
|
||||
|
||||
/* set the new ASID */
|
||||
cpu_switch_mm(mm->pgd, mm);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
|
||||
{
|
||||
mm->context.id = asid;
|
||||
cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void __new_context(struct mm_struct *mm)
|
||||
{
|
||||
unsigned int asid;
|
||||
unsigned int bits = asid_bits();
|
||||
|
||||
raw_spin_lock(&cpu_asid_lock);
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* Check the ASID again, in case the change was broadcast from another
|
||||
* CPU before we acquired the lock.
|
||||
*/
|
||||
if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
|
||||
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
|
||||
raw_spin_unlock(&cpu_asid_lock);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
* At this point, it is guaranteed that the current mm (with an old
|
||||
* ASID) isn't active on any other CPU since the ASIDs are changed
|
||||
* simultaneously via IPI.
|
||||
*/
|
||||
asid = ++cpu_last_asid;
|
||||
|
||||
/*
|
||||
* If we've used up all our ASIDs, we need to start a new version and
|
||||
* flush the TLB.
|
||||
*/
|
||||
if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
|
||||
/* increment the ASID version */
|
||||
cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
|
||||
if (cpu_last_asid == 0)
|
||||
cpu_last_asid = ASID_FIRST_VERSION;
|
||||
asid = cpu_last_asid + smp_processor_id();
|
||||
flush_context();
|
||||
#ifdef CONFIG_SMP
|
||||
smp_wmb();
|
||||
smp_call_function(reset_context, NULL, 1);
|
||||
#endif
|
||||
cpu_last_asid += NR_CPUS - 1;
|
||||
}
|
||||
|
||||
set_mm_context(mm, asid);
|
||||
raw_spin_unlock(&cpu_asid_lock);
|
||||
}
|
36
arch/arm64/mm/copypage.c
Normal file
36
arch/arm64/mm/copypage.c
Normal file
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* Based on arch/arm/mm/copypage.c
|
||||
*
|
||||
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
|
||||
{
|
||||
copy_page(kto, kfrom);
|
||||
__flush_dcache_area(kto, PAGE_SIZE);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
|
||||
|
||||
void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
|
||||
{
|
||||
clear_page(kaddr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
|
487
arch/arm64/mm/dma-mapping.c
Normal file
487
arch/arm64/mm/dma-mapping.c
Normal file
|
@ -0,0 +1,487 @@
|
|||
/*
|
||||
* SWIOTLB-based DMA API implementation
|
||||
*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
* Author: Catalin Marinas <catalin.marinas@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/swiotlb.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
struct dma_map_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
|
||||
bool coherent)
|
||||
{
|
||||
if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
|
||||
return pgprot_writecombine(prot);
|
||||
return prot;
|
||||
}
|
||||
|
||||
static struct gen_pool *atomic_pool;
|
||||
|
||||
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
|
||||
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
|
||||
|
||||
static int __init early_coherent_pool(char *p)
|
||||
{
|
||||
atomic_pool_size = memparse(p, &p);
|
||||
return 0;
|
||||
}
|
||||
early_param("coherent_pool", early_coherent_pool);
|
||||
|
||||
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
|
||||
{
|
||||
unsigned long val;
|
||||
void *ptr = NULL;
|
||||
|
||||
if (!atomic_pool) {
|
||||
WARN(1, "coherent pool not initialised!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
val = gen_pool_alloc(atomic_pool, size);
|
||||
if (val) {
|
||||
phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
|
||||
|
||||
*ret_page = phys_to_page(phys);
|
||||
ptr = (void *)val;
|
||||
memset(ptr, 0, size);
|
||||
}
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static bool __in_atomic_pool(void *start, size_t size)
|
||||
{
|
||||
return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
|
||||
}
|
||||
|
||||
static int __free_from_pool(void *start, size_t size)
|
||||
{
|
||||
if (!__in_atomic_pool(start, size))
|
||||
return 0;
|
||||
|
||||
gen_pool_free(atomic_pool, (unsigned long)start, size);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void *__dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t flags,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
if (dev == NULL) {
|
||||
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
|
||||
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
|
||||
flags |= GFP_DMA;
|
||||
if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
|
||||
struct page *page;
|
||||
void *addr;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
|
||||
get_order(size));
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
*dma_handle = phys_to_dma(dev, page_to_phys(page));
|
||||
addr = page_address(page);
|
||||
memset(addr, 0, size);
|
||||
return addr;
|
||||
} else {
|
||||
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
|
||||
}
|
||||
}
|
||||
|
||||
static void __dma_free_coherent(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
bool freed;
|
||||
phys_addr_t paddr = dma_to_phys(dev, dma_handle);
|
||||
|
||||
if (dev == NULL) {
|
||||
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
|
||||
return;
|
||||
}
|
||||
|
||||
freed = dma_release_from_contiguous(dev,
|
||||
phys_to_page(paddr),
|
||||
size >> PAGE_SHIFT);
|
||||
if (!freed)
|
||||
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
|
||||
}
|
||||
|
||||
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t flags,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct page *page;
|
||||
void *ptr, *coherent_ptr;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
if (!(flags & __GFP_WAIT)) {
|
||||
struct page *page = NULL;
|
||||
void *addr = __alloc_from_pool(size, &page, flags);
|
||||
|
||||
if (addr)
|
||||
*dma_handle = phys_to_dma(dev, page_to_phys(page));
|
||||
|
||||
return addr;
|
||||
|
||||
}
|
||||
|
||||
ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
|
||||
if (!ptr)
|
||||
goto no_mem;
|
||||
|
||||
/* remove any dirty cache lines on the kernel alias */
|
||||
__dma_flush_range(ptr, ptr + size);
|
||||
|
||||
/* create a coherent mapping */
|
||||
page = virt_to_page(ptr);
|
||||
coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
|
||||
__get_dma_pgprot(attrs,
|
||||
__pgprot(PROT_NORMAL_NC), false),
|
||||
NULL);
|
||||
if (!coherent_ptr)
|
||||
goto no_map;
|
||||
|
||||
return coherent_ptr;
|
||||
|
||||
no_map:
|
||||
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
|
||||
no_mem:
|
||||
*dma_handle = DMA_ERROR_CODE;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void __dma_free_noncoherent(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
|
||||
|
||||
if (__free_from_pool(vaddr, size))
|
||||
return;
|
||||
vunmap(vaddr);
|
||||
__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
|
||||
}
|
||||
|
||||
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
dma_addr_t dev_addr;
|
||||
|
||||
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
|
||||
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
|
||||
|
||||
return dev_addr;
|
||||
}
|
||||
|
||||
|
||||
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
|
||||
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
|
||||
}
|
||||
|
||||
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
int nelems, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i, ret;
|
||||
|
||||
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
|
||||
for_each_sg(sgl, sg, ret, i)
|
||||
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
|
||||
sg->length, dir);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __swiotlb_unmap_sg_attrs(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
|
||||
sg->length, dir);
|
||||
swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
|
||||
}
|
||||
|
||||
static void __swiotlb_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dev_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
|
||||
swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
|
||||
}
|
||||
|
||||
static void __swiotlb_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t dev_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
|
||||
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
|
||||
}
|
||||
|
||||
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
|
||||
sg->length, dir);
|
||||
swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
|
||||
}
|
||||
|
||||
static void __swiotlb_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
|
||||
sg->length, dir);
|
||||
}
|
||||
|
||||
/* vma->vm_page_prot must be set appropriately before calling this function */
|
||||
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size)
|
||||
{
|
||||
int ret = -ENXIO;
|
||||
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
|
||||
PAGE_SHIFT;
|
||||
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
|
||||
unsigned long off = vma->vm_pgoff;
|
||||
|
||||
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
|
||||
return ret;
|
||||
|
||||
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
|
||||
ret = remap_pfn_range(vma, vma->vm_start,
|
||||
pfn + off,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __swiotlb_mmap_noncoherent(struct device *dev,
|
||||
struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
|
||||
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
|
||||
}
|
||||
|
||||
static int __swiotlb_mmap_coherent(struct device *dev,
|
||||
struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
/* Just use whatever page_prot attributes were specified */
|
||||
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
|
||||
}
|
||||
|
||||
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
|
||||
.alloc = __dma_alloc_noncoherent,
|
||||
.free = __dma_free_noncoherent,
|
||||
.mmap = __swiotlb_mmap_noncoherent,
|
||||
.map_page = __swiotlb_map_page,
|
||||
.unmap_page = __swiotlb_unmap_page,
|
||||
.map_sg = __swiotlb_map_sg_attrs,
|
||||
.unmap_sg = __swiotlb_unmap_sg_attrs,
|
||||
.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
|
||||
.sync_single_for_device = __swiotlb_sync_single_for_device,
|
||||
.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = __swiotlb_sync_sg_for_device,
|
||||
.dma_supported = swiotlb_dma_supported,
|
||||
.mapping_error = swiotlb_dma_mapping_error,
|
||||
};
|
||||
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
|
||||
|
||||
struct dma_map_ops coherent_swiotlb_dma_ops = {
|
||||
.alloc = __dma_alloc_coherent,
|
||||
.free = __dma_free_coherent,
|
||||
.mmap = __swiotlb_mmap_coherent,
|
||||
.map_page = swiotlb_map_page,
|
||||
.unmap_page = swiotlb_unmap_page,
|
||||
.map_sg = swiotlb_map_sg_attrs,
|
||||
.unmap_sg = swiotlb_unmap_sg_attrs,
|
||||
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
|
||||
.sync_single_for_device = swiotlb_sync_single_for_device,
|
||||
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = swiotlb_sync_sg_for_device,
|
||||
.dma_supported = swiotlb_dma_supported,
|
||||
.mapping_error = swiotlb_dma_mapping_error,
|
||||
};
|
||||
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
|
||||
|
||||
static void *arm_exynos_dma_mcode_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
|
||||
static void arm_exynos_dma_mcode_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, struct dma_attrs *attrs);
|
||||
|
||||
struct dma_map_ops arm_exynos_dma_mcode_ops = {
|
||||
.alloc = arm_exynos_dma_mcode_alloc,
|
||||
.free = arm_exynos_dma_mcode_free,
|
||||
};
|
||||
EXPORT_SYMBOL(arm_exynos_dma_mcode_ops);
|
||||
|
||||
static void *arm_exynos_dma_mcode_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
||||
{
|
||||
void *addr;
|
||||
|
||||
if (!*handle)
|
||||
return NULL;
|
||||
|
||||
addr = ioremap(*handle, size);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static void arm_exynos_dma_mcode_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, struct dma_attrs *attrs)
|
||||
{
|
||||
iounmap(cpu_addr);
|
||||
}
|
||||
|
||||
extern int swiotlb_late_init_with_default_size(size_t default_size);
|
||||
|
||||
static int __init atomic_pool_init(void)
|
||||
{
|
||||
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
|
||||
unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
|
||||
struct page *page;
|
||||
void *addr;
|
||||
unsigned int pool_size_order = get_order(atomic_pool_size);
|
||||
|
||||
if (dev_get_cma_area(NULL))
|
||||
page = dma_alloc_from_contiguous(NULL, nr_pages,
|
||||
pool_size_order);
|
||||
else
|
||||
page = alloc_pages(GFP_DMA, pool_size_order);
|
||||
|
||||
if (page) {
|
||||
int ret;
|
||||
void *page_addr = page_address(page);
|
||||
|
||||
memset(page_addr, 0, atomic_pool_size);
|
||||
__dma_flush_range(page_addr, page_addr + atomic_pool_size);
|
||||
|
||||
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
|
||||
if (!atomic_pool)
|
||||
goto free_page;
|
||||
|
||||
addr = dma_common_contiguous_remap(page, atomic_pool_size,
|
||||
VM_USERMAP, prot, atomic_pool_init);
|
||||
|
||||
if (!addr)
|
||||
goto destroy_genpool;
|
||||
|
||||
ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
|
||||
page_to_phys(page),
|
||||
atomic_pool_size, -1);
|
||||
if (ret)
|
||||
goto remove_mapping;
|
||||
|
||||
gen_pool_set_algo(atomic_pool,
|
||||
gen_pool_first_fit_order_align,
|
||||
(void *)PAGE_SHIFT);
|
||||
|
||||
pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
|
||||
atomic_pool_size / 1024);
|
||||
return 0;
|
||||
}
|
||||
goto out;
|
||||
|
||||
remove_mapping:
|
||||
dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
|
||||
destroy_genpool:
|
||||
gen_pool_destroy(atomic_pool);
|
||||
atomic_pool = NULL;
|
||||
free_page:
|
||||
if (!dma_release_from_contiguous(NULL, page, nr_pages))
|
||||
__free_pages(page, pool_size_order);
|
||||
out:
|
||||
pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
|
||||
atomic_pool_size / 1024);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int __init swiotlb_late_init(void)
|
||||
{
|
||||
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
|
||||
|
||||
dma_ops = &noncoherent_swiotlb_dma_ops;
|
||||
|
||||
return swiotlb_late_init_with_default_size(swiotlb_size);
|
||||
}
|
||||
|
||||
static int __init arm64_dma_init(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret |= swiotlb_late_init();
|
||||
ret |= atomic_pool_init();
|
||||
|
||||
return ret;
|
||||
}
|
||||
arch_initcall(arm64_dma_init);
|
||||
|
||||
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
|
||||
|
||||
static int __init dma_debug_do_init(void)
|
||||
{
|
||||
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
|
||||
return 0;
|
||||
}
|
||||
fs_initcall(dma_debug_do_init);
|
332
arch/arm64/mm/dump.c
Normal file
332
arch/arm64/mm/dump.c
Normal file
|
@ -0,0 +1,332 @@
|
|||
/*
|
||||
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
|
||||
* Debug helper to dump the current kernel pagetables of the system
|
||||
* so that we can see what the various memory ranges are set to.
|
||||
*
|
||||
* Derived from x86 and arm implementation:
|
||||
* (C) Copyright 2008 Intel Corporation
|
||||
*
|
||||
* Author: Arjan van de Ven <arjan@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; version 2
|
||||
* of the License.
|
||||
*/
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS)
|
||||
|
||||
struct addr_marker {
|
||||
unsigned long start_address;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
enum address_markers_idx {
|
||||
VMALLOC_START_NR = 0,
|
||||
VMALLOC_END_NR,
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
VMEMMAP_START_NR,
|
||||
VMEMMAP_END_NR,
|
||||
#endif
|
||||
PCI_START_NR,
|
||||
PCI_END_NR,
|
||||
FIXADDR_START_NR,
|
||||
FIXADDR_END_NR,
|
||||
MODULES_START_NR,
|
||||
MODUELS_END_NR,
|
||||
KERNEL_SPACE_NR,
|
||||
};
|
||||
|
||||
static struct addr_marker address_markers[] = {
|
||||
{ VMALLOC_START, "vmalloc() Area" },
|
||||
{ VMALLOC_END, "vmalloc() End" },
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
{ 0, "vmemmap start" },
|
||||
{ 0, "vmemmap end" },
|
||||
#endif
|
||||
{ (unsigned long) PCI_IOBASE, "PCI I/O start" },
|
||||
{ (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" },
|
||||
{ FIXADDR_START, "Fixmap start" },
|
||||
{ FIXADDR_TOP, "Fixmap end" },
|
||||
{ MODULES_VADDR, "Modules start" },
|
||||
{ MODULES_END, "Modules end" },
|
||||
{ PAGE_OFFSET, "Kernel Mapping" },
|
||||
{ -1, NULL },
|
||||
};
|
||||
|
||||
struct pg_state {
|
||||
struct seq_file *seq;
|
||||
const struct addr_marker *marker;
|
||||
unsigned long start_address;
|
||||
unsigned level;
|
||||
u64 current_prot;
|
||||
};
|
||||
|
||||
struct prot_bits {
|
||||
u64 mask;
|
||||
u64 val;
|
||||
const char *set;
|
||||
const char *clear;
|
||||
};
|
||||
|
||||
static const struct prot_bits pte_bits[] = {
|
||||
{
|
||||
.mask = PTE_USER,
|
||||
.val = PTE_USER,
|
||||
.set = "USR",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = PTE_RDONLY,
|
||||
.val = PTE_RDONLY,
|
||||
.set = "ro",
|
||||
.clear = "RW",
|
||||
}, {
|
||||
.mask = PTE_PXN,
|
||||
.val = PTE_PXN,
|
||||
.set = "NX",
|
||||
.clear = "x ",
|
||||
}, {
|
||||
.mask = PTE_SHARED,
|
||||
.val = PTE_SHARED,
|
||||
.set = "SHD",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = PTE_AF,
|
||||
.val = PTE_AF,
|
||||
.set = "AF",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = PTE_NG,
|
||||
.val = PTE_NG,
|
||||
.set = "NG",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = PTE_UXN,
|
||||
.val = PTE_UXN,
|
||||
.set = "UXN",
|
||||
}, {
|
||||
.mask = PTE_ATTRINDX_MASK,
|
||||
.val = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
|
||||
.set = "DEVICE/nGnRnE",
|
||||
}, {
|
||||
.mask = PTE_ATTRINDX_MASK,
|
||||
.val = PTE_ATTRINDX(MT_DEVICE_nGnRE),
|
||||
.set = "DEVICE/nGnRE",
|
||||
}, {
|
||||
.mask = PTE_ATTRINDX_MASK,
|
||||
.val = PTE_ATTRINDX(MT_DEVICE_GRE),
|
||||
.set = "DEVICE/GRE",
|
||||
}, {
|
||||
.mask = PTE_ATTRINDX_MASK,
|
||||
.val = PTE_ATTRINDX(MT_NORMAL_NC),
|
||||
.set = "MEM/NORMAL-NC",
|
||||
}, {
|
||||
.mask = PTE_ATTRINDX_MASK,
|
||||
.val = PTE_ATTRINDX(MT_NORMAL),
|
||||
.set = "MEM/NORMAL",
|
||||
}
|
||||
};
|
||||
|
||||
struct pg_level {
|
||||
const struct prot_bits *bits;
|
||||
size_t num;
|
||||
u64 mask;
|
||||
};
|
||||
|
||||
static struct pg_level pg_level[] = {
|
||||
{
|
||||
}, { /* pgd */
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
}, { /* pud */
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
}, { /* pmd */
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
}, { /* pte */
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
},
|
||||
};
|
||||
|
||||
static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
|
||||
size_t num)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < num; i++, bits++) {
|
||||
const char *s;
|
||||
|
||||
if ((st->current_prot & bits->mask) == bits->val)
|
||||
s = bits->set;
|
||||
else
|
||||
s = bits->clear;
|
||||
|
||||
if (s)
|
||||
seq_printf(st->seq, " %s", s);
|
||||
}
|
||||
}
|
||||
|
||||
static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
|
||||
u64 val)
|
||||
{
|
||||
static const char units[] = "KMGTPE";
|
||||
u64 prot = val & pg_level[level].mask;
|
||||
|
||||
if (addr < LOWEST_ADDR)
|
||||
return;
|
||||
|
||||
if (!st->level) {
|
||||
st->level = level;
|
||||
st->current_prot = prot;
|
||||
st->start_address = addr;
|
||||
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
|
||||
} else if (prot != st->current_prot || level != st->level ||
|
||||
addr >= st->marker[1].start_address) {
|
||||
const char *unit = units;
|
||||
unsigned long delta;
|
||||
|
||||
if (st->current_prot) {
|
||||
seq_printf(st->seq, "0x%16lx-0x%16lx ",
|
||||
st->start_address, addr);
|
||||
|
||||
delta = (addr - st->start_address) >> 10;
|
||||
while (!(delta & 1023) && unit[1]) {
|
||||
delta >>= 10;
|
||||
unit++;
|
||||
}
|
||||
seq_printf(st->seq, "%9lu%c", delta, *unit);
|
||||
if (pg_level[st->level].bits)
|
||||
dump_prot(st, pg_level[st->level].bits,
|
||||
pg_level[st->level].num);
|
||||
seq_puts(st->seq, "\n");
|
||||
}
|
||||
|
||||
if (addr >= st->marker[1].start_address) {
|
||||
st->marker++;
|
||||
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
|
||||
}
|
||||
|
||||
st->start_address = addr;
|
||||
st->current_prot = prot;
|
||||
st->level = level;
|
||||
}
|
||||
|
||||
if (addr >= st->marker[1].start_address) {
|
||||
st->marker++;
|
||||
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
|
||||
{
|
||||
pte_t *pte = pte_offset_kernel(pmd, 0);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
|
||||
addr = start + i * PAGE_SIZE;
|
||||
note_page(st, addr, 4, pte_val(*pte));
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
|
||||
{
|
||||
pmd_t *pmd = pmd_offset(pud, 0);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
|
||||
addr = start + i * PMD_SIZE;
|
||||
if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd))
|
||||
note_page(st, addr, 3, pmd_val(*pmd));
|
||||
else
|
||||
walk_pte(st, pmd, addr);
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
|
||||
{
|
||||
pud_t *pud = pud_offset(pgd, 0);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
|
||||
addr = start + i * PUD_SIZE;
|
||||
if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud))
|
||||
note_page(st, addr, 2, pud_val(*pud));
|
||||
else
|
||||
walk_pmd(st, pud, addr);
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
|
||||
{
|
||||
pgd_t *pgd = pgd_offset(mm, 0);
|
||||
unsigned i;
|
||||
unsigned long addr;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
|
||||
addr = start + i * PGDIR_SIZE;
|
||||
if (pgd_none(*pgd) || pgd_bad(*pgd))
|
||||
note_page(st, addr, 1, pgd_val(*pgd));
|
||||
else
|
||||
walk_pud(st, pgd, addr);
|
||||
}
|
||||
}
|
||||
|
||||
static int ptdump_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct pg_state st = {
|
||||
.seq = m,
|
||||
.marker = address_markers,
|
||||
};
|
||||
|
||||
walk_pgd(&st, &init_mm, LOWEST_ADDR);
|
||||
|
||||
note_page(&st, 0, 0, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ptdump_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, ptdump_show, NULL);
|
||||
}
|
||||
|
||||
static const struct file_operations ptdump_fops = {
|
||||
.open = ptdump_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int ptdump_init(void)
|
||||
{
|
||||
struct dentry *pe;
|
||||
unsigned i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
|
||||
if (pg_level[i].bits)
|
||||
for (j = 0; j < pg_level[i].num; j++)
|
||||
pg_level[i].mask |= pg_level[i].bits[j].mask;
|
||||
|
||||
address_markers[VMEMMAP_START_NR].start_address =
|
||||
(unsigned long)virt_to_page(PAGE_OFFSET);
|
||||
address_markers[VMEMMAP_END_NR].start_address =
|
||||
(unsigned long)virt_to_page(high_memory);
|
||||
|
||||
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
|
||||
&ptdump_fops);
|
||||
return pe ? 0 : -ENOMEM;
|
||||
}
|
||||
device_initcall(ptdump_init);
|
17
arch/arm64/mm/extable.c
Normal file
17
arch/arm64/mm/extable.c
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
* Based on arch/arm/mm/extable.c
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
int fixup_exception(struct pt_regs *regs)
|
||||
{
|
||||
const struct exception_table_entry *fixup;
|
||||
|
||||
fixup = search_exception_tables(instruction_pointer(regs));
|
||||
if (fixup)
|
||||
regs->pc = fixup->fixup;
|
||||
|
||||
return fixup != NULL;
|
||||
}
|
533
arch/arm64/mm/fault.c
Normal file
533
arch/arm64/mm/fault.c
Normal file
|
@ -0,0 +1,533 @@
|
|||
/*
|
||||
* Based on arch/arm/mm/fault.c
|
||||
*
|
||||
* Copyright (C) 1995 Linus Torvalds
|
||||
* Copyright (C) 1995-2004 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/page-flags.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/perf_event.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/debug-monitors.h>
|
||||
#include <asm/esr.h>
|
||||
#include <asm/system_misc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
static const char *fault_name(unsigned int esr);
|
||||
|
||||
/*
|
||||
* Dump out the page tables associated with 'addr' in mm 'mm'.
|
||||
*/
|
||||
void show_pte(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
|
||||
if (!mm)
|
||||
mm = &init_mm;
|
||||
|
||||
pr_alert("pgd = %p\n", mm->pgd);
|
||||
pgd = pgd_offset(mm, addr);
|
||||
pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
|
||||
|
||||
do {
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if (pgd_none(*pgd) || pgd_bad(*pgd))
|
||||
break;
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
printk(", *pud=%016llx", pud_val(*pud));
|
||||
if (pud_none(*pud) || pud_bad(*pud))
|
||||
break;
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
printk(", *pmd=%016llx", pmd_val(*pmd));
|
||||
if (pmd_none(*pmd) || pmd_bad(*pmd))
|
||||
break;
|
||||
|
||||
pte = pte_offset_map(pmd, addr);
|
||||
printk(", *pte=%016llx", pte_val(*pte));
|
||||
pte_unmap(pte);
|
||||
} while(0);
|
||||
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* The kernel tried to access some page that wasn't present.
|
||||
*/
|
||||
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
|
||||
unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
/*
|
||||
* Are we prepared to handle this kernel fault?
|
||||
*/
|
||||
if (fixup_exception(regs))
|
||||
return;
|
||||
|
||||
/*
|
||||
* No handler, we'll have to terminate things with extreme prejudice.
|
||||
*/
|
||||
bust_spinlocks(1);
|
||||
pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
|
||||
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
|
||||
"paging request", addr);
|
||||
|
||||
show_pte(mm, addr);
|
||||
die("Oops", regs, esr);
|
||||
bust_spinlocks(0);
|
||||
do_exit(SIGKILL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Something tried to access memory that isn't in our memory map. User mode
|
||||
* accesses just cause a SIGSEGV
|
||||
*/
|
||||
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
|
||||
unsigned int esr, unsigned int sig, int code,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct siginfo si;
|
||||
|
||||
if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
|
||||
printk_ratelimit()) {
|
||||
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
|
||||
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
|
||||
addr, esr);
|
||||
show_pte(tsk->mm, addr);
|
||||
show_regs(regs);
|
||||
}
|
||||
|
||||
tsk->thread.fault_address = addr;
|
||||
tsk->thread.fault_code = esr;
|
||||
si.si_signo = sig;
|
||||
si.si_errno = 0;
|
||||
si.si_code = code;
|
||||
si.si_addr = (void __user *)addr;
|
||||
force_sig_info(sig, &si, tsk);
|
||||
}
|
||||
|
||||
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->active_mm;
|
||||
|
||||
/*
|
||||
* If we are in kernel mode at this point, we have no context to
|
||||
* handle this fault with.
|
||||
*/
|
||||
if (user_mode(regs))
|
||||
__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
|
||||
else
|
||||
__do_kernel_fault(mm, addr, esr, regs);
|
||||
}
|
||||
|
||||
#define VM_FAULT_BADMAP 0x010000
|
||||
#define VM_FAULT_BADACCESS 0x020000
|
||||
|
||||
#define ESR_LNX_EXEC (1 << 24)
|
||||
|
||||
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
|
||||
unsigned int mm_flags, unsigned long vm_flags,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
int fault;
|
||||
|
||||
vma = find_vma(mm, addr);
|
||||
fault = VM_FAULT_BADMAP;
|
||||
if (unlikely(!vma))
|
||||
goto out;
|
||||
if (unlikely(vma->vm_start > addr))
|
||||
goto check_stack;
|
||||
|
||||
/*
|
||||
* Ok, we have a good vm_area for this memory access, so we can handle
|
||||
* it.
|
||||
*/
|
||||
good_area:
|
||||
/*
|
||||
* Check that the permissions on the VMA allow for the fault which
|
||||
* occurred. If we encountered a write or exec fault, we must have
|
||||
* appropriate permissions, otherwise we allow any permission.
|
||||
*/
|
||||
if (!(vma->vm_flags & vm_flags)) {
|
||||
fault = VM_FAULT_BADACCESS;
|
||||
goto out;
|
||||
}
|
||||
|
||||
return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
|
||||
|
||||
check_stack:
|
||||
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
|
||||
goto good_area;
|
||||
out:
|
||||
return fault;
|
||||
}
|
||||
|
||||
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
struct mm_struct *mm;
|
||||
int fault, sig, code;
|
||||
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
|
||||
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
|
||||
tsk = current;
|
||||
mm = tsk->mm;
|
||||
|
||||
/* Enable interrupts if they were enabled in the parent context. */
|
||||
if (interrupts_enabled(regs))
|
||||
local_irq_enable();
|
||||
|
||||
/*
|
||||
* If we're in an interrupt or have no user context, we must not take
|
||||
* the fault.
|
||||
*/
|
||||
if (in_atomic() || !mm)
|
||||
goto no_context;
|
||||
|
||||
if (user_mode(regs))
|
||||
mm_flags |= FAULT_FLAG_USER;
|
||||
|
||||
if (esr & ESR_LNX_EXEC) {
|
||||
vm_flags = VM_EXEC;
|
||||
} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
|
||||
vm_flags = VM_WRITE;
|
||||
mm_flags |= FAULT_FLAG_WRITE;
|
||||
}
|
||||
|
||||
/*
|
||||
* As per x86, we may deadlock here. However, since the kernel only
|
||||
* validly references user space from well defined areas of the code,
|
||||
* we can bug out early if this is from code which shouldn't.
|
||||
*/
|
||||
if (!down_read_trylock(&mm->mmap_sem)) {
|
||||
if (!user_mode(regs) && !search_exception_tables(regs->pc))
|
||||
goto no_context;
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
} else {
|
||||
/*
|
||||
* The above down_read_trylock() might have succeeded in which
|
||||
* case, we'll have missed the might_sleep() from down_read().
|
||||
*/
|
||||
might_sleep();
|
||||
#ifdef CONFIG_DEBUG_VM
|
||||
if (!user_mode(regs) && !search_exception_tables(regs->pc))
|
||||
goto no_context;
|
||||
#endif
|
||||
}
|
||||
|
||||
fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
|
||||
|
||||
/*
|
||||
* If we need to retry but a fatal signal is pending, handle the
|
||||
* signal first. We do not need to release the mmap_sem because it
|
||||
* would already be released in __lock_page_or_retry in mm/filemap.c.
|
||||
*/
|
||||
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Major/minor page fault accounting is only done on the initial
|
||||
* attempt. If we go through a retry, it is extremely likely that the
|
||||
* page will be found in page cache at that point.
|
||||
*/
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
|
||||
if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
if (fault & VM_FAULT_MAJOR) {
|
||||
tsk->maj_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
|
||||
addr);
|
||||
} else {
|
||||
tsk->min_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
|
||||
addr);
|
||||
}
|
||||
if (fault & VM_FAULT_RETRY) {
|
||||
/*
|
||||
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
|
||||
* starvation.
|
||||
*/
|
||||
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
||||
mm_flags |= FAULT_FLAG_TRIED;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/*
|
||||
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
|
||||
*/
|
||||
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
|
||||
VM_FAULT_BADACCESS))))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If we are in kernel mode at this point, we have no context to
|
||||
* handle this fault with.
|
||||
*/
|
||||
if (!user_mode(regs))
|
||||
goto no_context;
|
||||
|
||||
if (fault & VM_FAULT_OOM) {
|
||||
/*
|
||||
* We ran out of memory, call the OOM killer, and return to
|
||||
* userspace (which will retry the fault, or kill us if we got
|
||||
* oom-killed).
|
||||
*/
|
||||
pagefault_out_of_memory();
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (fault & VM_FAULT_SIGBUS) {
|
||||
/*
|
||||
* We had some memory, but were unable to successfully fix up
|
||||
* this page fault.
|
||||
*/
|
||||
sig = SIGBUS;
|
||||
code = BUS_ADRERR;
|
||||
} else {
|
||||
/*
|
||||
* Something tried to access memory that isn't in our memory
|
||||
* map.
|
||||
*/
|
||||
sig = SIGSEGV;
|
||||
code = fault == VM_FAULT_BADACCESS ?
|
||||
SEGV_ACCERR : SEGV_MAPERR;
|
||||
}
|
||||
|
||||
__do_user_fault(tsk, addr, esr, sig, code, regs);
|
||||
return 0;
|
||||
|
||||
no_context:
|
||||
__do_kernel_fault(mm, addr, esr, regs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* First Level Translation Fault Handler
|
||||
*
|
||||
* We enter here because the first level page table doesn't contain a valid
|
||||
* entry for the address.
|
||||
*
|
||||
* If the address is in kernel space (>= TASK_SIZE), then we are probably
|
||||
* faulting in the vmalloc() area.
|
||||
*
|
||||
* If the init_task's first level page tables contains the relevant entry, we
|
||||
* copy the it to this task. If not, we send the process a signal, fixup the
|
||||
* exception, or oops the kernel.
|
||||
*
|
||||
* NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
|
||||
* or a critical region, and should only copy the information from the master
|
||||
* page table, nothing more.
|
||||
*/
|
||||
static int __kprobes do_translation_fault(unsigned long addr,
|
||||
unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if (addr < TASK_SIZE)
|
||||
return do_page_fault(addr, esr, regs);
|
||||
|
||||
do_bad_area(addr, esr, regs);
|
||||
return 0;
|
||||
}
|
/*
 * This abort handler always returns "fault".
 */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1;
}

static struct fault_info {
	int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
	int sig;
	int code;
	const char *name;
} fault_info[] = {
	{ do_bad, SIGBUS, 0, "ttbr address size fault" },
	{ do_bad, SIGBUS, 0, "level 1 address size fault" },
	{ do_bad, SIGBUS, 0, "level 2 address size fault" },
	{ do_bad, SIGBUS, 0, "level 3 address size fault" },
	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" },
	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
	{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
	{ do_bad, SIGBUS, 0, "reserved access flag fault" },
	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
	{ do_bad, SIGBUS, 0, "reserved permission fault" },
	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
	{ do_bad, SIGBUS, 0, "synchronous external abort" },
	{ do_bad, SIGBUS, 0, "asynchronous external abort" },
	{ do_bad, SIGBUS, 0, "unknown 18" },
	{ do_bad, SIGBUS, 0, "unknown 19" },
	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous parity error" },
	{ do_bad, SIGBUS, 0, "asynchronous parity error" },
	{ do_bad, SIGBUS, 0, "unknown 26" },
	{ do_bad, SIGBUS, 0, "unknown 27" },
	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
	{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
	{ do_bad, SIGBUS, 0, "unknown 32" },
	{ do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
	{ do_bad, SIGBUS, 0, "debug event" },
	{ do_bad, SIGBUS, 0, "unknown 35" },
	{ do_bad, SIGBUS, 0, "unknown 36" },
	{ do_bad, SIGBUS, 0, "unknown 37" },
	{ do_bad, SIGBUS, 0, "unknown 38" },
	{ do_bad, SIGBUS, 0, "unknown 39" },
	{ do_bad, SIGBUS, 0, "unknown 40" },
	{ do_bad, SIGBUS, 0, "unknown 41" },
	{ do_bad, SIGBUS, 0, "unknown 42" },
	{ do_bad, SIGBUS, 0, "unknown 43" },
	{ do_bad, SIGBUS, 0, "unknown 44" },
	{ do_bad, SIGBUS, 0, "unknown 45" },
	{ do_bad, SIGBUS, 0, "unknown 46" },
	{ do_bad, SIGBUS, 0, "unknown 47" },
	{ do_bad, SIGBUS, 0, "unknown 48" },
	{ do_bad, SIGBUS, 0, "unknown 49" },
	{ do_bad, SIGBUS, 0, "unknown 50" },
	{ do_bad, SIGBUS, 0, "unknown 51" },
	{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
	{ do_bad, SIGBUS, 0, "unknown 53" },
	{ do_bad, SIGBUS, 0, "unknown 54" },
	{ do_bad, SIGBUS, 0, "unknown 55" },
	{ do_bad, SIGBUS, 0, "unknown 56" },
	{ do_bad, SIGBUS, 0, "unknown 57" },
	{ do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
	{ do_bad, SIGBUS, 0, "unknown 59" },
	{ do_bad, SIGBUS, 0, "unknown 60" },
	{ do_bad, SIGBUS, 0, "unknown 61" },
	{ do_bad, SIGBUS, 0, "unknown 62" },
	{ do_bad, SIGBUS, 0, "unknown 63" },
};

static const char *fault_name(unsigned int esr)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	return inf->name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

static struct fault_info debug_fault_info[] = {
	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
	{ do_bad, SIGBUS, 0, "unknown 3" },
	{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
	{ do_bad, SIGTRAP, 0, "aarch32 vector catch" },
	{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
	{ do_bad, SIGBUS, 0, "unknown 7" },
};

void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn = fn;
	debug_fault_info[nr].sig = sig;
	debug_fault_info[nr].code = code;
	debug_fault_info[nr].name = name;
}

asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return 1;

	pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm64_notify_die("", regs, &info, 0);

	return 0;
}
|
122 arch/arm64/mm/flush.c Normal file
@@ -0,0 +1,122 @@
/*
|
||||
* Based on arch/arm/mm/flush.c
|
||||
*
|
||||
* Copyright (C) 1995-2002 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cachetype.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#include "mm.h"
|
||||
|
||||
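/* The arm64 data caches do not alias, so only the I-cache needs maintenance for executable ranges. */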
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
__flush_icache_all();
|
||||
}
|
||||
|
||||
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long uaddr, void *kaddr,
|
||||
unsigned long len)
|
||||
{
|
||||
if (vma->vm_flags & VM_EXEC) {
|
||||
unsigned long addr = (unsigned long)kaddr;
|
||||
if (icache_is_aliasing()) {
|
||||
__flush_dcache_area(kaddr, len);
|
||||
__flush_icache_all();
|
||||
} else {
|
||||
flush_icache_range(addr, addr + len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy user data from/to a page which is mapped into a different process's
|
||||
* address space. Really, we want to allow our "user space" model to handle
|
||||
* this.
|
||||
*
|
||||
* Note that this code needs to run on the current CPU.
|
||||
*/
|
||||
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long uaddr, void *dst, const void *src,
|
||||
unsigned long len)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_disable();
|
||||
#endif
|
||||
memcpy(dst, src, len);
|
||||
flush_ptrace_access(vma, page, uaddr, dst, len);
|
||||
#ifdef CONFIG_SMP
|
||||
preempt_enable();
|
||||
#endif
|
||||
}
|
||||
|
||||
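/* Make the I-cache coherent with the D-cache for a mapped page: flush on first use, and always for aliasing (AIVIVT) I-caches. */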
void __sync_icache_dcache(pte_t pte, unsigned long addr)
|
||||
{
|
||||
struct page *page = pte_page(pte);
|
||||
|
||||
/* no flushing needed for anonymous pages */
|
||||
if (!page_mapping(page))
|
||||
return;
|
||||
|
||||
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
|
||||
__flush_dcache_area(page_address(page),
|
||||
PAGE_SIZE << compound_order(page));
|
||||
__flush_icache_all();
|
||||
} else if (icache_is_aivivt()) {
|
||||
__flush_icache_all();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called when a page has been modified by the kernel. Mark
|
||||
* it as dirty for later flushing when mapped in user space (if executable,
|
||||
* see __sync_icache_dcache).
|
||||
*/
|
||||
void flush_dcache_page(struct page *page)
|
||||
{
|
||||
if (test_bit(PG_dcache_clean, &page->flags))
|
||||
clear_bit(PG_dcache_clean, &page->flags);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_dcache_page);
|
||||
|
||||
/*
|
||||
* Additional functions defined in assembly.
|
||||
*/
|
||||
EXPORT_SYMBOL(flush_cache_all);
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
|
||||
pmd_t *pmdp)
|
||||
{
|
||||
pmd_t pmd = pmd_mksplitting(*pmdp);
|
||||
|
||||
VM_BUG_ON(address & ~PMD_MASK);
|
||||
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
|
||||
|
||||
/* dummy IPI to serialise against fast_gup */
|
||||
kick_all_cpus_sync();
|
||||
}
|
||||
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
68 arch/arm64/mm/hugetlbpage.c Normal file
@@ -0,0 +1,68 @@
/*
|
||||
* arch/arm64/mm/hugetlbpage.c
|
||||
*
|
||||
* Copyright (C) 2013 Linaro Ltd.
|
||||
*
|
||||
* Based on arch/x86/mm/hugetlbpage.c.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <asm/mman.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
|
||||
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
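/* A huge entry at this level is a block descriptor, i.e. the table bit is clear. */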
int pmd_huge(pmd_t pmd)
|
||||
{
|
||||
return !(pmd_val(pmd) & PMD_TABLE_BIT);
|
||||
}
|
||||
|
||||
int pud_huge(pud_t pud)
|
||||
{
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
return !(pud_val(pud) & PUD_TABLE_BIT);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
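/* Parse the "hugepagesz=" boot parameter; only PMD- and PUD-sized huge pages are accepted. */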
static __init int setup_hugepagesz(char *opt)
|
||||
{
|
||||
unsigned long ps = memparse(opt, &opt);
|
||||
if (ps == PMD_SIZE) {
|
||||
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
|
||||
} else if (ps == PUD_SIZE) {
|
||||
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
|
||||
} else {
|
||||
pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
__setup("hugepagesz=", setup_hugepagesz);
|
357 arch/arm64/mm/init.c Normal file
@@ -0,0 +1,357 @@
/*
|
||||
* Based on arch/arm/mm/init.c
|
||||
*
|
||||
* Copyright (C) 1995-2005 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/nodemask.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/sort.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/efi.h>
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/sizes.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/alternative.h>
|
||||
|
||||
#include "mm.h"
|
||||
|
||||
phys_addr_t memstart_addr __read_mostly = 0;
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
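/* Parse the "initrd=<start>,<size>" boot parameter giving the physical start address and size. */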
static int __init early_initrd(char *p)
|
||||
{
|
||||
unsigned long start, size;
|
||||
char *endp;
|
||||
|
||||
start = memparse(p, &endp);
|
||||
if (*endp == ',') {
|
||||
size = memparse(endp + 1, NULL);
|
||||
|
||||
initrd_start = (unsigned long)__va(start);
|
||||
initrd_end = (unsigned long)__va(start + size);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
early_param("initrd", early_initrd);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
|
||||
* currently assumes that for memory starting above 4G, 32-bit devices will
|
||||
* use a DMA offset.
|
||||
*/
|
||||
static phys_addr_t max_zone_dma_phys(void)
|
||||
{
|
||||
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
|
||||
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
|
||||
}
|
||||
|
||||
static void __init zone_sizes_init(unsigned long min, unsigned long max)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
|
||||
unsigned long max_dma = min;
|
||||
|
||||
memset(zone_size, 0, sizeof(zone_size));
|
||||
|
||||
/* 4GB maximum for 32-bit only capable devices */
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
|
||||
max_dma = PFN_DOWN(max_zone_dma_phys());
|
||||
zone_size[ZONE_DMA] = max_dma - min;
|
||||
}
|
||||
zone_size[ZONE_NORMAL] = max - max_dma;
|
||||
|
||||
memcpy(zhole_size, zone_size, sizeof(zhole_size));
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned long start = memblock_region_memory_base_pfn(reg);
|
||||
unsigned long end = memblock_region_memory_end_pfn(reg);
|
||||
|
||||
if (start >= max)
|
||||
continue;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
|
||||
unsigned long dma_end = min(end, max_dma);
|
||||
zhole_size[ZONE_DMA] -= dma_end - start;
|
||||
}
|
||||
|
||||
if (end > max_dma) {
|
||||
unsigned long normal_end = min(end, max);
|
||||
unsigned long normal_start = max(start, max_dma);
|
||||
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
|
||||
}
|
||||
}
|
||||
|
||||
free_area_init_node(0, zone_size, min, zhole_size);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
|
||||
#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
|
||||
|
||||
int pfn_valid(unsigned long pfn)
|
||||
{
|
||||
return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
|
||||
}
|
||||
EXPORT_SYMBOL(pfn_valid);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM
|
||||
static void arm64_memory_present(void)
|
||||
{
|
||||
}
|
||||
#else
|
||||
static void arm64_memory_present(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
|
||||
for_each_memblock(memory, reg)
|
||||
memory_present(0, memblock_region_memory_base_pfn(reg),
|
||||
memblock_region_memory_end_pfn(reg));
|
||||
}
|
||||
#endif
|
||||
|
||||
void __init arm64_memblock_init(void)
|
||||
{
|
||||
phys_addr_t dma_phys_limit = 0;
|
||||
|
||||
/*
|
||||
* Register the kernel text, kernel data, initrd, and initial
|
||||
* pagetables with memblock.
|
||||
*/
|
||||
memblock_reserve(__pa(_text), _end - _text);
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (initrd_start)
|
||||
memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
|
||||
#endif
|
||||
|
||||
early_init_fdt_scan_reserved_mem();
|
||||
|
||||
/* 4GB maximum for 32-bit only capable devices */
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA))
|
||||
dma_phys_limit = max_zone_dma_phys();
|
||||
dma_contiguous_reserve(dma_phys_limit);
|
||||
|
||||
memblock_allow_resize();
|
||||
memblock_dump_all();
|
||||
}
|
||||
|
||||
void __init bootmem_init(void)
|
||||
{
|
||||
unsigned long min, max;
|
||||
|
||||
min = PFN_UP(memblock_start_of_DRAM());
|
||||
max = PFN_DOWN(memblock_end_of_DRAM());
|
||||
|
||||
/*
|
||||
* Sparsemem tries to allocate bootmem in memory_present(), so must be
|
||||
* done after the fixed reservations.
|
||||
*/
|
||||
arm64_memory_present();
|
||||
|
||||
sparse_init();
|
||||
zone_sizes_init(min, max);
|
||||
|
||||
high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
|
||||
max_pfn = max_low_pfn = max;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
||||
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
|
||||
{
|
||||
struct page *start_pg, *end_pg;
|
||||
unsigned long pg, pgend;
|
||||
|
||||
/*
|
||||
* Convert start_pfn/end_pfn to a struct page pointer.
|
||||
*/
|
||||
start_pg = pfn_to_page(start_pfn - 1) + 1;
|
||||
end_pg = pfn_to_page(end_pfn - 1) + 1;
|
||||
|
||||
/*
|
||||
* Convert to physical addresses, and round start upwards and end
|
||||
* downwards.
|
||||
*/
|
||||
pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
|
||||
pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
|
||||
|
||||
/*
|
||||
* If there are free pages between these, free the section of the
|
||||
* memmap array.
|
||||
*/
|
||||
if (pg < pgend)
|
||||
free_bootmem(pg, pgend - pg);
|
||||
}
|
||||
|
||||
/*
|
||||
* The mem_map array can get very big. Free the unused area of the memory map.
|
||||
*/
|
||||
static void __init free_unused_memmap(void)
|
||||
{
|
||||
unsigned long start, prev_end = 0;
|
||||
struct memblock_region *reg;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
start = __phys_to_pfn(reg->base);
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
/*
|
||||
* Take care not to free memmap entries that don't exist due
|
||||
* to SPARSEMEM sections which aren't present.
|
||||
*/
|
||||
start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
|
||||
#endif
|
||||
/*
|
||||
* If we had a previous bank, and there is a space between the
|
||||
* current bank and the previous, free it.
|
||||
*/
|
||||
if (prev_end && prev_end < start)
|
||||
free_memmap(prev_end, start);
|
||||
|
||||
/*
|
||||
* Align up here since the VM subsystem insists that the
|
||||
* memmap entries are valid from the bank end aligned to
|
||||
* MAX_ORDER_NR_PAGES.
|
||||
*/
|
||||
prev_end = ALIGN(start + __phys_to_pfn(reg->size),
|
||||
MAX_ORDER_NR_PAGES);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
|
||||
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
|
||||
#endif
|
||||
}
|
||||
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
|
||||
|
||||
/*
|
||||
* mem_init() marks the free areas in the mem_map and tells us how much memory
|
||||
* is free. This is done after various parts of the system have claimed their
|
||||
* memory after the kernel image.
|
||||
*/
|
||||
void __init mem_init(void)
|
||||
{
|
||||
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
||||
free_unused_memmap();
|
||||
#endif
|
||||
/* this will put all unused low memory onto the freelists */
|
||||
free_all_bootmem();
|
||||
|
||||
mem_init_print_info(NULL);
|
||||
|
||||
#define MLK(b, t) b, t, ((t) - (b)) >> 10
|
||||
#define MLM(b, t) b, t, ((t) - (b)) >> 20
|
||||
#define MLG(b, t) b, t, ((t) - (b)) >> 30
|
||||
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
|
||||
|
||||
pr_notice("Virtual kernel memory layout:\n"
|
||||
" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n"
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
|
||||
" 0x%16lx - 0x%16lx (%6ld MB actual)\n"
|
||||
#endif
|
||||
" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
|
||||
" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
|
||||
" modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
|
||||
" memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
|
||||
" .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
|
||||
" .text : 0x%p" " - 0x%p" " (%6ld KB)\n"
|
||||
" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
|
||||
MLG(VMALLOC_START, VMALLOC_END),
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
MLG((unsigned long)vmemmap,
|
||||
(unsigned long)vmemmap + VMEMMAP_SIZE),
|
||||
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
|
||||
(unsigned long)virt_to_page(high_memory)),
|
||||
#endif
|
||||
MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M),
|
||||
MLK(FIXADDR_START, FIXADDR_TOP),
|
||||
MLM(MODULES_VADDR, MODULES_END),
|
||||
MLM(PAGE_OFFSET, (unsigned long)high_memory),
|
||||
MLK_ROUNDUP(__init_begin, __init_end),
|
||||
MLK_ROUNDUP(_text, _etext),
|
||||
MLK_ROUNDUP(_sdata, _edata));
|
||||
|
||||
#undef MLK
|
||||
#undef MLM
|
||||
#undef MLK_ROUNDUP
|
||||
|
||||
/*
|
||||
* Check boundaries twice: Some fundamental inconsistencies can be
|
||||
* detected at build time already.
|
||||
*/
|
||||
#ifdef CONFIG_COMPAT
|
||||
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
|
||||
#endif
|
||||
BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
|
||||
BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
|
||||
|
||||
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
|
||||
extern int sysctl_overcommit_memory;
|
||||
/*
|
||||
* On a machine this small we won't get anywhere without
|
||||
* overcommit, so turn it on by default.
|
||||
*/
|
||||
sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
|
||||
}
|
||||
}
|
||||
|
||||
void free_initmem(void)
|
||||
{
|
||||
free_initmem_default(0);
|
||||
free_alternatives_memory();
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if (boot_mode_security)
|
||||
#endif
|
||||
rkp_call(RKP_DEF_INIT, 0, 0, 0, 0, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
|
||||
static int keep_initrd;
|
||||
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
if (!keep_initrd)
|
||||
free_reserved_area((void *)start, (void *)end, 0, "initrd");
|
||||
}
|
||||
|
||||
static int __init keepinitrd_setup(char *__unused)
|
||||
{
|
||||
keep_initrd = 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("keepinitrd", keepinitrd_setup);
|
||||
#endif
|
199 arch/arm64/mm/ioremap.c Normal file
@@ -0,0 +1,199 @@
/*
|
||||
* Based on arch/arm/mm/ioremap.c
|
||||
*
|
||||
* (C) Copyright 1995 1996 Linus Torvalds
|
||||
* Hacked for ARM by Phil Blundell <philb@gnu.org>
|
||||
* Hacked to allow all architectures to build, and various cleanups
|
||||
* by Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
|
||||
pgprot_t prot, void *caller)
|
||||
{
|
||||
unsigned long last_addr;
|
||||
unsigned long offset = phys_addr & ~PAGE_MASK;
|
||||
int err;
|
||||
unsigned long addr;
|
||||
struct vm_struct *area;
|
||||
|
||||
/*
|
||||
* Page align the mapping address and size, taking account of any
|
||||
* offset.
|
||||
*/
|
||||
phys_addr &= PAGE_MASK;
|
||||
size = PAGE_ALIGN(size + offset);
|
||||
|
||||
/*
|
||||
* Don't allow wraparound, zero size or outside PHYS_MASK.
|
||||
*/
|
||||
last_addr = phys_addr + size - 1;
|
||||
if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Don't allow RAM to be mapped.
|
||||
*/
|
||||
if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
|
||||
return NULL;
|
||||
|
||||
area = get_vm_area_caller(size, VM_IOREMAP, caller);
|
||||
if (!area)
|
||||
return NULL;
|
||||
addr = (unsigned long)area->addr;
|
||||
|
||||
err = ioremap_page_range(addr, addr + size, phys_addr, prot);
|
||||
if (err) {
|
||||
vunmap((void *)addr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (void __iomem *)(offset + addr);
|
||||
}
|
||||
|
||||
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
|
||||
{
|
||||
return __ioremap_caller(phys_addr, size, prot,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
EXPORT_SYMBOL(__ioremap);
|
||||
|
||||
void __iounmap(volatile void __iomem *io_addr)
|
||||
{
|
||||
unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
|
||||
|
||||
/*
|
||||
* We could get an address outside vmalloc range in case
|
||||
* of ioremap_cache() reusing a RAM mapping.
|
||||
*/
|
||||
if (VMALLOC_START <= addr && addr < VMALLOC_END)
|
||||
vunmap((void *)addr);
|
||||
}
|
||||
EXPORT_SYMBOL(__iounmap);
|
||||
|
||||
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
|
||||
{
|
||||
/* For normal memory we already have a cacheable mapping. */
|
||||
if (pfn_valid(__phys_to_pfn(phys_addr)))
|
||||
return (void __iomem *)__phys_to_virt(phys_addr);
|
||||
|
||||
return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
EXPORT_SYMBOL(ioremap_cache);
|
||||
|
||||
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
|
||||
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
|
||||
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
|
||||
#endif
|
||||
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
|
||||
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
|
||||
#endif
|
||||
|
||||
static inline pud_t * __init early_ioremap_pud(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
|
||||
|
||||
return pud_offset(pgd, addr);
|
||||
}
|
||||
|
||||
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
|
||||
{
|
||||
pud_t *pud = early_ioremap_pud(addr);
|
||||
|
||||
BUG_ON(pud_none(*pud) || pud_bad(*pud));
|
||||
|
||||
return pmd_offset(pud, addr);
|
||||
}
|
||||
|
||||
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
|
||||
{
|
||||
pmd_t *pmd = early_ioremap_pmd(addr);
|
||||
|
||||
BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
|
||||
|
||||
return pte_offset_kernel(pmd, addr);
|
||||
}
|
||||
|
||||
void __init early_ioremap_init(void)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
pgd_populate(&init_mm, pgd, bm_pud);
|
||||
pud = pud_offset(pgd, addr);
|
||||
pud_populate(&init_mm, pud, bm_pmd);
|
||||
pmd = pmd_offset(pud, addr);
|
||||
pmd_populate_kernel(&init_mm, pmd, bm_pte);
|
||||
|
||||
/*
|
||||
* The boot-ioremap range spans multiple pmds, for which
|
||||
* we are not prepared:
|
||||
*/
|
||||
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
|
||||
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
|
||||
|
||||
if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
|
||||
WARN_ON(1);
|
||||
pr_warn("pmd %p != %p\n",
|
||||
pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
|
||||
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
|
||||
fix_to_virt(FIX_BTMAP_BEGIN));
|
||||
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
|
||||
fix_to_virt(FIX_BTMAP_END));
|
||||
|
||||
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
|
||||
pr_warn("FIX_BTMAP_BEGIN: %d\n",
|
||||
FIX_BTMAP_BEGIN);
|
||||
}
|
||||
|
||||
early_ioremap_setup();
|
||||
}
|
||||
|
||||
void __init __early_set_fixmap(enum fixed_addresses idx,
|
||||
phys_addr_t phys, pgprot_t flags)
|
||||
{
|
||||
unsigned long addr = __fix_to_virt(idx);
|
||||
pte_t *pte;
|
||||
|
||||
if (idx >= __end_of_fixed_addresses) {
|
||||
BUG();
|
||||
return;
|
||||
}
|
||||
|
||||
pte = early_ioremap_pte(addr);
|
||||
|
||||
if (pgprot_val(flags))
|
||||
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
|
||||
else {
|
||||
pte_clear(&init_mm, addr, pte);
|
||||
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
|
||||
}
|
||||
}
|
34 arch/arm64/mm/mm.h Normal file
@@ -0,0 +1,34 @@
extern void __init bootmem_init(void);
extern void __init arm64_swiotlb_init(void);

/* For Exynos compatibility */

#include <linux/list.h>
#include <linux/vmalloc.h>

/*
 * ARM specific vm_struct->flags bits.
 */

/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
#define VM_ARM_SECTION_MAPPING 0x80000000

/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000

/* empty mapping */
#define VM_ARM_EMPTY_MAPPING 0x20000000

/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)

/* consistent regions used by dma_alloc_attrs() */
#define VM_ARM_DMA_CONSISTENT 0x20000000

struct static_vm {
	struct vm_struct vm;
	struct list_head list;
};
extern struct list_head static_vmlist;
|
142 arch/arm64/mm/mmap.c Normal file
@@ -0,0 +1,142 @@
/*
|
||||
* Based on arch/arm/mm/mmap.c
|
||||
*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/elf.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/shm.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/random.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
|
||||
/*
|
||||
* Leave enough space between the mmap area and the stack to honour ulimit in
|
||||
* the face of randomisation.
|
||||
*/
|
||||
#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
|
||||
#define MAX_GAP (STACK_TOP/6*5)
|
||||
|
||||
static int mmap_is_legacy(void)
|
||||
{
|
||||
if (current->personality & ADDR_COMPAT_LAYOUT)
|
||||
return 1;
|
||||
|
||||
if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
|
||||
return 1;
|
||||
|
||||
return sysctl_legacy_va_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since get_random_int() returns the same value within a 1 jiffy window, we
|
||||
* will almost always get the same randomisation for the stack and mmap
|
||||
* region. This will mean the relative distance between stack and mmap will be
|
||||
* the same.
|
||||
*
|
||||
* To avoid this we can shift the randomness by 1 bit.
|
||||
*/
|
||||
static unsigned long mmap_rnd(void)
|
||||
{
|
||||
unsigned long rnd = 0;
|
||||
|
||||
if (current->flags & PF_RANDOMIZE)
|
||||
rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
|
||||
|
||||
return rnd << (PAGE_SHIFT + 1);
|
||||
}
|
||||
|
||||
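/* Base of the top-down mmap area: STACK_TOP minus the clamped stack gap and a random offset. */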
static unsigned long mmap_base(void)
|
||||
{
|
||||
unsigned long gap = rlimit(RLIMIT_STACK);
|
||||
|
||||
if (gap < MIN_GAP)
|
||||
gap = MIN_GAP;
|
||||
else if (gap > MAX_GAP)
|
||||
gap = MAX_GAP;
|
||||
|
||||
return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
|
||||
}
|
||||
|
||||
/*
|
||||
* This function, called very early during the creation of a new process VM
|
||||
* image, sets up which VM layout function to use:
|
||||
*/
|
||||
void arch_pick_mmap_layout(struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* Fall back to the standard layout if the personality bit is set, or
|
||||
* if the expected stack growth is unlimited:
|
||||
*/
|
||||
if (mmap_is_legacy()) {
|
||||
mm->mmap_base = TASK_UNMAPPED_BASE;
|
||||
mm->get_unmapped_area = arch_get_unmapped_area;
|
||||
} else {
|
||||
mm->mmap_base = mmap_base();
|
||||
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
|
||||
|
||||
|
||||
/*
|
||||
* You really shouldn't be using read() or write() on /dev/mem. This might go
|
||||
* away in the future.
|
||||
*/
|
||||
int valid_phys_addr_range(phys_addr_t addr, size_t size)
|
||||
{
|
||||
if (addr < PHYS_OFFSET)
|
||||
return 0;
|
||||
if (addr + size > __pa(high_memory - 1) + 1)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do not allow /dev/mem mappings beyond the supported physical range.
|
||||
*/
|
||||
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
|
||||
{
|
||||
return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_STRICT_DEVMEM
|
||||
|
||||
#include <linux/ioport.h>
|
||||
|
||||
/*
|
||||
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
|
||||
* is valid. The argument is a physical page number. We mimic x86 here by
|
||||
* disallowing access to system RAM as well as device-exclusive MMIO regions.
|
||||
* This effectively disables read()/write() on /dev/mem.
|
||||
*/
|
||||
int devmem_is_allowed(unsigned long pfn)
|
||||
{
|
||||
if (iomem_is_exclusive(pfn << PAGE_SHIFT))
|
||||
return 0;
|
||||
if (!page_is_ram(pfn))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
729 arch/arm64/mm/mmu.c Normal file
@@ -0,0 +1,729 @@
/*
|
||||
* Based on arch/arm/mm/mmu.c
|
||||
*
|
||||
* Copyright (C) 1995-2005 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/nodemask.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/sizes.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/memblock.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/map.h>
|
||||
|
||||
#include "mm.h"
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#if defined(CONFIG_ECT)
|
||||
#include <soc/samsung/ect_parser.h>
|
||||
#endif
|
||||
|
||||
static int iotable_on;
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
extern int boot_mode_security;
|
||||
#endif
|
||||
/*
|
||||
* Empty_zero_page is a special page that is used for zero-initialized data
|
||||
* and COW.
|
||||
*/
|
||||
struct page *empty_zero_page;
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
struct cachepolicy {
|
||||
const char policy[16];
|
||||
u64 mair;
|
||||
u64 tcr;
|
||||
};
|
||||
|
||||
static struct cachepolicy cache_policies[] __initdata = {
|
||||
{
|
||||
.policy = "uncached",
|
||||
.mair = 0x44, /* inner, outer non-cacheable */
|
||||
.tcr = TCR_IRGN_NC | TCR_ORGN_NC,
|
||||
}, {
|
||||
.policy = "writethrough",
|
||||
.mair = 0xaa, /* inner, outer write-through, read-allocate */
|
||||
.tcr = TCR_IRGN_WT | TCR_ORGN_WT,
|
||||
}, {
|
||||
.policy = "writeback",
|
||||
.mair = 0xee, /* inner, outer write-back, read-allocate */
|
||||
.tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* These are useful for identifying cache coherency problems by allowing the
|
||||
* cache or the cache and writebuffer to be turned off. It changes the Normal
|
||||
* memory caching attributes in the MAIR_EL1 register.
|
||||
*/
|
||||
static int __init early_cachepolicy(char *p)
|
||||
{
|
||||
int i;
|
||||
u64 tmp;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
|
||||
int len = strlen(cache_policies[i].policy);
|
||||
|
||||
if (memcmp(p, cache_policies[i].policy, len) == 0)
|
||||
break;
|
||||
}
|
||||
if (i == ARRAY_SIZE(cache_policies)) {
|
||||
pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
flush_cache_all();
|
||||
|
||||
/*
|
||||
* Modify MT_NORMAL attributes in MAIR_EL1.
|
||||
*/
|
||||
asm volatile(
|
||||
" mrs %0, mair_el1\n"
|
||||
" bfi %0, %1, %2, #8\n"
|
||||
" msr mair_el1, %0\n"
|
||||
" isb\n"
|
||||
: "=&r" (tmp)
|
||||
: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
|
||||
|
||||
/*
|
||||
* Modify TCR PTW cacheability attributes.
|
||||
*/
|
||||
asm volatile(
|
||||
" mrs %0, tcr_el1\n"
|
||||
" bic %0, %0, %2\n"
|
||||
" orr %0, %0, %1\n"
|
||||
" msr tcr_el1, %0\n"
|
||||
" isb\n"
|
||||
: "=&r" (tmp)
|
||||
: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
|
||||
|
||||
flush_cache_all();
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_param("cachepolicy", early_cachepolicy);
|
||||
|
||||
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
|
||||
unsigned long size, pgprot_t vma_prot)
|
||||
{
|
||||
if (!pfn_valid(pfn))
|
||||
return pgprot_noncached(vma_prot);
|
||||
else if (file->f_flags & O_SYNC)
|
||||
return pgprot_writecombine(vma_prot);
|
||||
return vma_prot;
|
||||
}
|
||||
EXPORT_SYMBOL(phys_mem_access_prot);
|
||||
|
||||
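/* Grab zeroed memory from memblock for early page table (and similar) allocations. */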
static void __init *early_alloc(unsigned long sz)
|
||||
{
|
||||
void *ptr = __va(memblock_alloc(sz, sz));
|
||||
memset(ptr, 0, sz);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
/* Extra memory needed by VMM */
|
||||
void* vmm_extra_mem = 0;
|
||||
spinlock_t ro_rkp_pages_lock = __SPIN_LOCK_UNLOCKED();
|
||||
char ro_pages_stat[RO_PAGES] = {0};
|
||||
unsigned ro_alloc_last = 0;
|
||||
int rkp_ro_mapped = 0;
|
||||
int ro_buf_done = 0;
|
||||
|
||||
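/* Hand out one page from the RKP read-only buffer; returns NULL until the buffer has been set up. */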
void *rkp_ro_alloc(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
int i, j;
|
||||
void * alloc_addr = NULL;
|
||||
if (!ro_buf_done)
|
||||
return alloc_addr;
|
||||
|
||||
spin_lock_irqsave(&ro_rkp_pages_lock,flags);
|
||||
|
||||
for (i = 0, j = ro_alloc_last; i < (RO_PAGES) ; i++) {
|
||||
j = (j+i) %(RO_PAGES);
|
||||
if (!ro_pages_stat[j]) {
|
||||
ro_pages_stat[j] = 1;
|
||||
ro_alloc_last = j+1;
|
||||
alloc_addr = (void*) ((u64)RKP_RBUF_VA + (j << PAGE_SHIFT));
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&ro_rkp_pages_lock,flags);
|
||||
|
||||
return alloc_addr;
|
||||
}
|
||||
|
||||
void rkp_ro_free(void *free_addr)
|
||||
{
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
i = ((u64)free_addr - (u64)RKP_RBUF_VA) >> PAGE_SHIFT;
|
||||
spin_lock_irqsave(&ro_rkp_pages_lock,flags);
|
||||
ro_pages_stat[i] = 0;
|
||||
ro_alloc_last = i;
|
||||
spin_unlock_irqrestore(&ro_rkp_pages_lock,flags);
|
||||
}
|
||||
|
||||
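/* Test whether a virtual address falls inside the RKP read-only buffer. */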
unsigned int is_rkp_ro_page(u64 addr)
|
||||
{
|
||||
if( (addr >= (u64)RKP_RBUF_VA)
|
||||
&& (addr < (u64)(RKP_RBUF_VA+ TIMA_ROBUF_SIZE)))
|
||||
return 1;
|
||||
else return 0;
|
||||
}
|
||||
/* We assume that a block being remapped to page tables starts on a block boundary. */
|
||||
static inline void __init block_to_pages(pmd_t *pmd, unsigned long addr,
|
||||
unsigned long end, unsigned long pfn)
|
||||
{
|
||||
pte_t *old_pte;
|
||||
pmd_t new ;
|
||||
pte_t *pte = rkp_ro_alloc();
|
||||
|
||||
if (!pte)
|
||||
pte = (pte_t *)early_alloc(PAGE_SIZE);
|
||||
|
||||
old_pte = pte;
|
||||
|
||||
__pmd_populate(&new, __pa(pte), PMD_TYPE_TABLE); /* populate to temporary pmd */
|
||||
|
||||
pte = pte_offset_kernel(&new, addr);
|
||||
|
||||
do {
|
||||
if (iotable_on == 1)
|
||||
set_pte(pte, pfn_pte(pfn, PROT_NORMAL_NC));
|
||||
else
|
||||
set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
|
||||
pfn++;
|
||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||
|
||||
__pmd_populate(pmd, __pa(old_pte), PMD_TYPE_TABLE);
|
||||
}
|
||||
#endif
|
||||
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
|
||||
unsigned long end, unsigned long pfn,
|
||||
pgprot_t prot)
|
||||
{
|
||||
pte_t *pte;
|
||||
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if(boot_mode_security)
|
||||
#endif
|
||||
if(pmd_block(*pmd))
|
||||
return block_to_pages(pmd, addr, end, pfn);
|
||||
#endif
|
||||
if (pmd_none(*pmd)) {
|
||||
pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
|
||||
__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
|
||||
}
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if (!boot_mode_security)
|
||||
BUG_ON(pmd_bad(*pmd));
|
||||
#endif
|
||||
#else
|
||||
BUG_ON(pmd_bad(*pmd));
|
||||
#endif
|
||||
|
||||
pte = pte_offset_kernel(pmd, addr);
|
||||
do {
|
||||
if (iotable_on == 1)
|
||||
set_pte(pte, pfn_pte(pfn, pgprot_iotable_init(PAGE_KERNEL_EXEC)));
|
||||
else
|
||||
set_pte(pte, pfn_pte(pfn, prot));
|
||||
pfn++;
|
||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||
}
|
||||
|
||||
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
|
||||
unsigned long end, phys_addr_t phys,
|
||||
int map_io)
|
||||
{
|
||||
pmd_t *pmd;
|
||||
unsigned long next;
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
int rkp_do = 0;
|
||||
#endif
|
||||
pmdval_t prot_sect;
|
||||
pgprot_t prot_pte;
|
||||
|
||||
if (map_io) {
|
||||
prot_sect = PROT_SECT_DEVICE_nGnRE;
|
||||
prot_pte = __pgprot(PROT_DEVICE_nGnRE);
|
||||
} else {
|
||||
prot_sect = PROT_SECT_NORMAL_EXEC;
|
||||
prot_pte = PAGE_KERNEL_EXEC;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for initial section mappings in the pgd/pud and remove them.
|
||||
*/
|
||||
if (pud_none(*pud) || pud_bad(*pud)) {
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if (boot_mode_security)
|
||||
#endif
|
||||
rkp_do = 1;
|
||||
|
||||
if( rkp_do && ro_buf_done ){
|
||||
pmd = rkp_ro_alloc();
|
||||
}else{
|
||||
pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
|
||||
}
|
||||
#else /* !CONFIG_TIMA_RKP */
|
||||
pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
|
||||
#endif
|
||||
pud_populate(&init_mm, pud, pmd);
|
||||
}
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
do {
|
||||
next = pmd_addr_end(addr, end);
|
||||
/* try section mapping first */
|
||||
if (((addr | next | phys) & ~SECTION_MASK) == 0) {
|
||||
pmd_t old_pmd =*pmd;
|
||||
set_pmd(pmd, __pmd(phys | prot_sect));
|
||||
if (iotable_on == 1)
|
||||
set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_NC));
|
||||
else
|
||||
set_pmd(pmd, __pmd(phys | prot_sect));
|
||||
/*
|
||||
* Check for previous table entries created during
|
||||
* boot (__create_page_tables) and flush them.
|
||||
*/
|
||||
if (!pmd_none(old_pmd))
|
||||
flush_tlb_all();
|
||||
} else {
|
||||
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
|
||||
prot_pte);
|
||||
}
|
||||
phys += next - addr;
|
||||
} while (pmd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
|
||||
unsigned long end, phys_addr_t phys,
|
||||
int map_io)
|
||||
{
|
||||
pud_t *pud;
|
||||
unsigned long next;
|
||||
|
||||
if (pgd_none(*pgd)) {
|
||||
pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
|
||||
pgd_populate(&init_mm, pgd, pud);
|
||||
}
|
||||
BUG_ON(pgd_bad(*pgd));
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
do {
|
||||
next = pud_addr_end(addr, end);
|
||||
|
||||
/*
|
||||
* For 4K granule only, attempt to put down a 1GB block
|
||||
*/
|
||||
if (!map_io && (PAGE_SHIFT == 12) &&
|
||||
((addr | next | phys) & ~PUD_MASK) == 0) {
|
||||
pud_t old_pud = *pud;
|
||||
set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
|
||||
|
||||
/*
|
||||
* If we have an old value for a pud, it will
|
||||
* be pointing to a pmd table that we no longer
|
||||
* need (from swapper_pg_dir).
|
||||
*
|
||||
* Look up the old pmd table and free it.
|
||||
*/
|
||||
if (!pud_none(old_pud)) {
|
||||
phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
|
||||
memblock_free(table, PAGE_SIZE);
|
||||
flush_tlb_all();
|
||||
}
|
||||
} else {
|
||||
alloc_init_pmd(pud, addr, next, phys, map_io);
|
||||
}
|
||||
phys += next - addr;
|
||||
} while (pud++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create the page directory entries and any necessary page tables for the
|
||||
* mapping specified by 'md'.
|
||||
*/
|
||||
static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
|
||||
unsigned long virt, phys_addr_t size,
|
||||
int map_io)
|
||||
{
|
||||
unsigned long addr, length, end, next;
|
||||
|
||||
addr = virt & PAGE_MASK;
|
||||
length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
|
||||
|
||||
end = addr + length;
|
||||
do {
|
||||
next = pgd_addr_end(addr, end);
|
||||
alloc_init_pud(pgd, addr, next, phys, map_io);
|
||||
phys += next - addr;
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
|
||||
phys_addr_t size)
|
||||
{
|
||||
if (virt < VMALLOC_START) {
|
||||
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
|
||||
&phys, virt);
|
||||
return;
|
||||
}
|
||||
__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
|
||||
}
|
||||
|
||||
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
|
||||
{
|
||||
if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
|
||||
pr_warn("BUG: not creating id mapping for %pa\n", &addr);
|
||||
return;
|
||||
}
|
||||
__create_mapping(&idmap_pg_dir[pgd_index(addr)],
|
||||
addr, addr, size, map_io);
|
||||
}
|
||||
|
||||
static void __init map_mem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t limit;
|
||||
phys_addr_t start;
|
||||
phys_addr_t end;
|
||||
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
phys_addr_t mid = 0x60000000;
|
||||
int rkp_do = 0;
|
||||
#endif
|
||||
/*
|
||||
* Temporarily limit the memblock range. We need to do this as
|
||||
* create_mapping requires puds, pmds and ptes to be allocated from
|
||||
* memory addressable from the initial direct kernel mapping.
|
||||
*
|
||||
* The initial direct kernel mapping, located at swapper_pg_dir, gives
|
||||
* us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
|
||||
* PHYS_OFFSET (which must be aligned to 2MB as per
|
||||
* Documentation/arm64/booting.txt).
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
|
||||
limit = PHYS_OFFSET + PMD_SIZE;
|
||||
else
|
||||
limit = PHYS_OFFSET + PUD_SIZE;
|
||||
memblock_set_current_limit(limit);
|
||||
|
||||
/* map all the memory banks */
|
||||
for_each_memblock(memory, reg) {
|
||||
start = reg->base;
|
||||
end = start + reg->size;
|
||||
|
||||
if (start >= end)
|
||||
break;
|
||||
|
||||
#ifndef CONFIG_ARM64_64K_PAGES
|
||||
/*
|
||||
* For the first memory bank align the start address and
|
||||
* current memblock limit to prevent create_mapping() from
|
||||
* allocating pte page tables from unmapped memory.
|
||||
* When 64K pages are enabled, the pte page table for the
|
||||
* first PGDIR_SIZE is already present in swapper_pg_dir.
|
||||
*/
|
||||
if (start < limit)
|
||||
start = ALIGN(start, PMD_SIZE);
|
||||
if (end < limit) {
|
||||
limit = end & PMD_MASK;
|
||||
memblock_set_current_limit(limit);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if (boot_mode_security)
|
||||
#endif
|
||||
rkp_do = 1;
|
||||
|
||||
if( rkp_do ){
|
||||
if (((u64)start < TIMA_ROBUF_START) && ((u64)end > TIMA_ROBUF_START)) {
|
||||
create_mapping(start, __phys_to_virt(start), mid - start);
|
||||
memset((void*)RKP_RBUF_VA, 0, TIMA_ROBUF_SIZE);
|
||||
ro_buf_done = 1;
|
||||
create_mapping(mid, __phys_to_virt(mid), end - mid);
|
||||
}
|
||||
else {
|
||||
create_mapping(start, __phys_to_virt(start), end - start);
|
||||
}
|
||||
}else{
|
||||
create_mapping(start, __phys_to_virt(start), end - start);
|
||||
}
|
||||
#else /* !CONFIG_TIMA_RKP */
|
||||
create_mapping(start, __phys_to_virt(start), end - start);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if (boot_mode_security) {
|
||||
#endif
|
||||
vmm_extra_mem = early_alloc(0x600000);
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
/* Limit no longer required. */
|
||||
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
|
||||
}
|
||||
|
||||
/*
|
||||
* paging_init() sets up the page tables, initialises the zone memory
|
||||
* maps and sets up the zero page.
|
||||
*/
|
||||
void __init paging_init(void)
|
||||
{
|
||||
void *zero_page;
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
int rkp_do = 0;
|
||||
#endif
|
||||
map_mem();
|
||||
|
||||
#if defined(CONFIG_ECT)
|
||||
ect_init_map_io();
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Finally flush the caches and tlb to ensure that we're in a
|
||||
* consistent state.
|
||||
*/
|
||||
flush_cache_all();
|
||||
flush_tlb_all();
|
||||
|
||||
/* allocate the zero page. */
|
||||
#ifdef CONFIG_TIMA_RKP
|
||||
#ifdef CONFIG_KNOX_KAP
|
||||
if (boot_mode_security)
|
||||
#endif
|
||||
rkp_do = 1;
|
||||
if (rkp_do && ro_buf_done)
|
||||
zero_page = rkp_ro_alloc();
|
||||
else
|
||||
zero_page = early_alloc(PAGE_SIZE);
|
||||
#else /* !CONFIG_TIMA_RKP */
|
||||
zero_page = early_alloc(PAGE_SIZE);
|
||||
#endif
|
||||
|
||||
bootmem_init();
|
||||
|
||||
empty_zero_page = virt_to_page(zero_page);
|
||||
|
||||
/*
|
||||
* TTBR0 is only used for the identity mapping at this stage. Make it
|
||||
* point to zero page to avoid speculatively fetching new entries.
|
||||
*/
|
||||
cpu_set_reserved_ttbr0();
|
||||
flush_tlb_all();
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable the identity mapping to allow the MMU disabling.
|
||||
*/
|
||||
void setup_mm_for_reboot(void)
|
||||
{
|
||||
cpu_switch_mm(idmap_pg_dir, &init_mm);
|
||||
flush_tlb_all();
|
||||
}
|
||||
|
||||
/*
|
||||
* Check whether a kernel address is valid (derived from arch/x86/).
|
||||
*/
|
||||
int kern_addr_valid(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if ((((long)addr) >> VA_BITS) != -1UL)
|
||||
return 0;
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
if (pgd_none(*pgd))
|
||||
return 0;
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (pud_none(*pud))
|
||||
return 0;
|
||||
|
||||
if (pud_sect(*pud))
|
||||
return pfn_valid(pud_pfn(*pud));
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd))
|
||||
return 0;
|
||||
|
||||
if (pmd_sect(*pmd))
|
||||
return pfn_valid(pmd_pfn(*pmd));
|
||||
|
||||
pte = pte_offset_kernel(pmd, addr);
|
||||
if (pte_none(*pte))
|
||||
return 0;
|
||||
|
||||
return pfn_valid(pte_pfn(*pte));
|
||||
}
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
|
||||
{
|
||||
return vmemmap_populate_basepages(start, end, node);
|
||||
}
|
||||
#else /* !CONFIG_ARM64_64K_PAGES */
|
||||
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
|
||||
{
|
||||
unsigned long addr = start;
|
||||
unsigned long next;
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
|
||||
do {
|
||||
next = pmd_addr_end(addr, end);
|
||||
|
||||
pgd = vmemmap_pgd_populate(addr, node);
|
||||
if (!pgd)
|
||||
return -ENOMEM;
|
||||
|
||||
pud = vmemmap_pud_populate(pgd, addr, node);
|
||||
if (!pud)
|
||||
return -ENOMEM;
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd)) {
|
||||
void *p = NULL;
|
||||
|
||||
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
||||
set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
|
||||
} else
|
||||
vmemmap_verify((pte_t *)pmd, node, addr, next);
|
||||
} while (addr = next, addr != end);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_ARM64_64K_PAGES */
|
||||
void vmemmap_free(unsigned long start, unsigned long end)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
|
||||
|
||||
/* For compatibility with Exynos */
|
||||
|
||||
LIST_HEAD(static_vmlist);
|
||||
|
||||
void __init add_static_vm_early(struct static_vm *svm)
|
||||
{
|
||||
struct static_vm *curr_svm;
|
||||
struct vm_struct *vm;
|
||||
void *vaddr;
|
||||
|
||||
vm = &svm->vm;
|
||||
vm_area_add_early(vm);
|
||||
vaddr = vm->addr;
|
||||
|
||||
list_for_each_entry(curr_svm, &static_vmlist, list) {
|
||||
vm = &curr_svm->vm;
|
||||
|
||||
if (vm->addr > vaddr)
|
||||
break;
|
||||
}
|
||||
list_add_tail(&svm->list, &curr_svm->list);
|
||||
}
|
||||
|
||||
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
|
||||
{
|
||||
void *ptr = __va(memblock_alloc(sz, align));
|
||||
memset(ptr, 0, sz);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create the architecture specific mappings
|
||||
*/
|
||||
static void __init __iotable_init(struct map_desc *io_desc, int nr, int exec)
|
||||
{
|
||||
struct map_desc *md;
|
||||
struct vm_struct *vm;
|
||||
struct static_vm *svm;
|
||||
phys_addr_t phys;
|
||||
pgprot_t prot;
|
||||
void *vm_caller;
|
||||
|
||||
if (!nr)
|
||||
return;
|
||||
|
||||
svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
|
||||
|
||||
if (!exec) {
|
||||
iotable_on = 1;
|
||||
prot = pgprot_iotable_init(PAGE_KERNEL_EXEC);
|
||||
vm_caller = iotable_init;
|
||||
} else {
|
||||
iotable_on = 0;
|
||||
prot = PAGE_KERNEL_EXEC;
|
||||
vm_caller = iotable_init_exec;
|
||||
}
|
||||
|
||||
for (md = io_desc; nr; md++, nr--) {
|
||||
phys = __pfn_to_phys(md->pfn);
|
||||
create_mapping(phys, md->virtual, md->length);
|
||||
vm = &svm->vm;
|
||||
vm->addr = (void *)(md->virtual & PAGE_MASK);
|
||||
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
|
||||
vm->phys_addr = __pfn_to_phys(md->pfn);
|
||||
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
|
||||
vm->flags |= VM_ARM_MTYPE(md->type);
|
||||
vm->caller = iotable_init;
|
||||
add_static_vm_early(svm++);
|
||||
}
|
||||
|
||||
iotable_on = 0;
|
||||
}
|
||||
|
||||
void __init iotable_init(struct map_desc *io_desc, int nr)
|
||||
{
|
||||
__iotable_init(io_desc, nr, 0);
|
||||
}
|
||||
|
||||
void __init iotable_init_exec(struct map_desc *io_desc, int nr)
|
||||
{
|
||||
__iotable_init(io_desc, nr, 1);
|
||||
}
|
97 arch/arm64/mm/pageattr.c Normal file
@@ -0,0 +1,97 @@
/*
|
||||
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
struct page_change_data {
|
||||
pgprot_t set_mask;
|
||||
pgprot_t clear_mask;
|
||||
};
|
||||
|
||||
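/* apply_to_page_range() callback: clear then set the requested attribute bits on a single pte. */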
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
|
||||
void *data)
|
||||
{
|
||||
struct page_change_data *cdata = data;
|
||||
pte_t pte = *ptep;
|
||||
|
||||
pte = clear_pte_bit(pte, cdata->clear_mask);
|
||||
pte = set_pte_bit(pte, cdata->set_mask);
|
||||
|
||||
set_pte(ptep, pte);
|
||||
return 0;
|
||||
}
|
||||
|
||||
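/* Common helper: change attributes on a page-aligned, module-space range and flush its TLB entries. */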
static int change_memory_common(unsigned long addr, int numpages,
|
||||
pgprot_t set_mask, pgprot_t clear_mask)
|
||||
{
|
||||
unsigned long start = addr;
|
||||
unsigned long size = PAGE_SIZE*numpages;
|
||||
unsigned long end = start + size;
|
||||
int ret;
|
||||
struct page_change_data data;
|
||||
|
||||
if (!IS_ALIGNED(addr, PAGE_SIZE)) {
|
||||
start &= PAGE_MASK;
|
||||
end = start + size;
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
|
||||
if (!is_module_address(start) || !is_module_address(end - 1))
|
||||
return -EINVAL;
|
||||
|
||||
data.set_mask = set_mask;
|
||||
data.clear_mask = clear_mask;
|
||||
|
||||
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
|
||||
&data);
|
||||
|
||||
flush_tlb_kernel_range(start, end);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int set_memory_ro(unsigned long addr, int numpages)
|
||||
{
|
||||
return change_memory_common(addr, numpages,
|
||||
__pgprot(PTE_RDONLY),
|
||||
__pgprot(PTE_WRITE));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_memory_ro);
|
||||
|
||||
int set_memory_rw(unsigned long addr, int numpages)
|
||||
{
|
||||
return change_memory_common(addr, numpages,
|
||||
__pgprot(PTE_WRITE),
|
||||
__pgprot(PTE_RDONLY));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_memory_rw);
|
||||
|
||||
int set_memory_nx(unsigned long addr, int numpages)
|
||||
{
|
||||
return change_memory_common(addr, numpages,
|
||||
__pgprot(PTE_PXN),
|
||||
__pgprot(0));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_memory_nx);
|
||||
|
||||
int set_memory_x(unsigned long addr, int numpages)
|
||||
{
|
||||
return change_memory_common(addr, numpages,
|
||||
__pgprot(0),
|
||||
__pgprot(PTE_PXN));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_memory_x);
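Because of the is_module_address() check in change_memory_common(), these helpers only act on module-space mappings, so a typical caller is a module toggling protection on one of its own pages. A rough, hypothetical sketch (not code from this tree):

/* Illustrative only: protect one page of module-owned data. */
static char demo_page[PAGE_SIZE] __aligned(PAGE_SIZE); /* lives in module .data */

static int __init pageattr_demo_init(void)
{
	/* Make the page read-only; writes through this mapping now fault. */
	return set_memory_ro((unsigned long)demo_page, 1);
}

static void __exit pageattr_demo_exit(void)
{
	set_memory_rw((unsigned long)demo_page, 1);
}

module_init(pageattr_demo_init);
module_exit(pageattr_demo_exit);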
109
arch/arm64/mm/pgd.c
Normal file

@@ -0,0 +1,109 @@
/*
 * PGD allocation/freeing
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_TIMA_RKP
#include <linux/rkp_entry.h>
extern u8 rkp_started;
#endif /* CONFIG_TIMA_RKP */
#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

static struct kmem_cache *pgd_cache;
#ifndef CONFIG_TIMA_RKP
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	if (PGD_SIZE == PAGE_SIZE)
		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
	else
		return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
}
#else
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret = NULL;
#ifdef CONFIG_KNOX_KAP
	if (boot_mode_security)
#endif
		ret = (pgd_t *) rkp_ro_alloc();
	if (!ret) {
		if (PGD_SIZE == PAGE_SIZE)
			ret = (pgd_t *)get_zeroed_page(GFP_KERNEL);
		else
			return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
	}
#ifdef CONFIG_KNOX_KAP
	if (boot_mode_security && rkp_started)
#endif /* CONFIG_KNOX_KAP */
		rkp_call(RKP_PGD_NEW, (unsigned long)ret, 0, 0, 0, 0);
	return ret;
}
#endif

#ifndef CONFIG_TIMA_RKP
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (PGD_SIZE == PAGE_SIZE)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	int rkp_do = 0;
#ifdef CONFIG_KNOX_KAP
	if (boot_mode_security)
#endif /* CONFIG_KNOX_KAP */
		rkp_do = 1;

	if (rkp_do)
		rkp_call(RKP_PGD_FREE, (unsigned long)pgd, 0, 0, 0, 0);
	/* if the pgd memory came from the read-only buffer, put it back */
	/* TODO: use a macro */
	if (rkp_do && (unsigned long)pgd >= (unsigned long)RKP_RBUF_VA &&
	    (unsigned long)pgd < ((unsigned long)RKP_RBUF_VA + TIMA_ROBUF_SIZE))
		rkp_ro_free((void *)pgd);
	else {
		if (PGD_SIZE == PAGE_SIZE)
			free_page((unsigned long)pgd);
		else
			kmem_cache_free(pgd_cache, pgd);
	}
}
#endif
static int __init pgd_cache_init(void)
{
	/*
	 * Naturally aligned pgds required by the architecture.
	 */
	if (PGD_SIZE != PAGE_SIZE)
		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
					      SLAB_PANIC, NULL);
	return 0;
}
core_initcall(pgd_cache_init);
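A quick sanity check of the sizing logic, assuming the 4K-granule, 39-bit-VA configuration this tree sets up in proc.S (an editorial note, not code from the tree):

/*
 * PTRS_PER_PGD = 1 << (39 - 30) = 512 entries, sizeof(pgd_t) = 8 bytes,
 * so PGD_SIZE = 512 * 8 = 4096 = PAGE_SIZE and pgd_alloc() takes the
 * get_zeroed_page() path; pgd_cache is only created when the two differ
 * (e.g. a 64K-granule build), which is exactly what pgd_cache_init() checks.
 */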
54
arch/arm64/mm/proc-macros.S
Normal file

@@ -0,0 +1,54 @@
/*
 * Based on arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
	.macro	dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the minimum I-cache line size from the CTR register.
 */
	.macro	icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
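The same CTR_EL0 decode expressed in C, for reference (a sketch based on the field layout the macros above rely on; the caller is assumed to pass in a raw CTR_EL0 value obtained via mrs):

/* DminLine = CTR_EL0[19:16], IminLine = CTR_EL0[3:0]; both are log2(words). */
static inline unsigned int dcache_min_line_bytes(u64 ctr_el0)
{
	return 4U << ((ctr_el0 >> 16) & 0xf);
}

static inline unsigned int icache_min_line_bytes(u64 ctr_el0)
{
	return 4U << (ctr_el0 & 0xf);
}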
257
arch/arm64/mm/proc.S
Normal file

@@ -0,0 +1,257 @@
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#else
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_SMP
#define TCR_SMP_FLAGS	TCR_SHARED
#else
#define TCR_SMP_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
 *	cpu_cache_off()
 *
 *	Turn the CPU D-cache off.
 */
ENTRY(cpu_cache_off)
	mrs	x0, sctlr_el1
	bic	x0, x0, #1 << 2			// clear SCTLR.C
	msr	sctlr_el1, x0
	isb
	ret
ENDPROC(cpu_cache_off)

/*
 *	cpu_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the same state
 *	as it would be if it had been reset, and branch to what would be the
 *	reset vector. It must be executed with the flat identity mapping.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_reset)
	mrs	x1, sctlr_el1
	bic	x1, x1, #1
	msr	sctlr_el1, x1			// disable the MMU
	isb
	ret	x0
ENDPROC(cpu_reset)

ENTRY(cpu_soft_restart)
	/* Save address of cpu_reset() and reset address */
	mov	x19, x0
	mov	x20, x1

	/* Turn D-cache off */
	bl	cpu_cache_off

	/* Push out all dirty data, and ensure cache is empty */
	bl	flush_cache_all

	mov	x0, x20
	ret	x19
ENDPROC(cpu_soft_restart)

/*
 *	cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt).
 */
ENTRY(cpu_do_idle)
	dsb	sy				// WFI may enter a low-power mode
	wfi
	ret
ENDPROC(cpu_do_idle)

#ifdef CONFIG_ARM64_CPU_SUSPEND
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 */
ENTRY(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, mair_el1
	mrs	x6, cpacr_el1
	mrs	x7, ttbr1_el1
	mrs	x8, tcr_el1
	mrs	x9, vbar_el1
	mrs	x10, mdscr_el1
	mrs	x11, oslsr_el1
	mrs	x12, sctlr_el1
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	str	x12, [x0, #80]
	ret
ENDPROC(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Physical address of context pointer
 * x1: ttbr0_el1 to be restored
 *
 * Returns:
 *	sctlr_el1 value in x0
 */
ENTRY(cpu_do_resume)
	/*
	 * Invalidate local tlb entries before turning on MMU
	 */
	tlbi	vmalle1
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x7, [x0, #32]
	ldp	x8, x9, [x0, #48]
	ldp	x10, x11, [x0, #64]
	ldr	x12, [x0, #80]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	mair_el1, x5
	msr	cpacr_el1, x6
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, x7
	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	mov	x0, x12
	dsb	nsh				// Make sure local tlb invalidation completed
	isb
	ret
ENDPROC(cpu_do_resume)
#endif
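Read together, the stp/ldp offsets in cpu_do_suspend()/cpu_do_resume() above imply the following save-area layout (a descriptive sketch; the struct name is invented here, the tree's real context type is defined in the suspend support code):

struct el1_suspend_regs {		/* hypothetical name */
	u64 tpidr_el0;			/* [x0, #0]  */
	u64 tpidrro_el0;		/* [x0, #8]  */
	u64 contextidr_el1;		/* [x0, #16] */
	u64 mair_el1;			/* [x0, #24] */
	u64 cpacr_el1;			/* [x0, #32] */
	u64 ttbr1_el1;			/* [x0, #40] */
	u64 tcr_el1;			/* [x0, #48] */
	u64 vbar_el1;			/* [x0, #56] */
	u64 mdscr_el1;			/* [x0, #64] */
	u64 oslsr_el1;			/* [x0, #72] */
	u64 sctlr_el1;			/* [x0, #80] */
};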

/*
 *	cpu_do_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys.
 *
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
	mmid	w1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
	isb
	ret
ENDPROC(cpu_do_switch_mm)
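In C terms, the switch packs the 16-bit ASID into bits [63:48] of TTBR0_EL1 next to the pgd's physical address (TCR_ASID16 is selected in __cpu_setup below); a minimal sketch, not the kernel's actual helper:

static inline u64 make_ttbr0_el1(u64 pgd_phys, u16 asid)
{
	/* Low 48 bits: translation table base; top 16 bits: ASID. */
	return (pgd_phys & ((1ULL << 48) - 1)) | ((u64)asid << 48);
}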

	.section ".text.init", #alloc, #execinstr

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.  Return in x0 the
 *	value of the SCTLR_EL1 register.
 */
ENTRY(__cpu_setup)
	ic	iallu				// I+BTB cache invalidate
	tlbi	vmalle1is			// invalidate I + D TLBs
	dsb	ish

	mov	x0, #3 << 20
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	msr	mdscr_el1, xzr			// Reset mdscr_el1
	/*
	 * Memory region attributes for LPAE:
	 *
	 *   n = AttrIndx[2:0]
	 *			n	MAIR
	 *   DEVICE_nGnRnE	000	00000000
	 *   DEVICE_nGnRE	001	00000100
	 *   DEVICE_GRE		010	00001100
	 *   NORMAL_NC		011	01000100
	 *   NORMAL		100	11111111
	 */
	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
		     MAIR(0x04, MT_DEVICE_nGnRE) | \
		     MAIR(0x0c, MT_DEVICE_GRE) | \
		     MAIR(0x44, MT_NORMAL_NC) | \
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x5
	/*
	 * Prepare SCTLR
	 */
	adr	x5, crval
	ldp	w5, w6, [x5]
	mrs	x0, sctlr_el1
	bic	x0, x0, x5			// clear bits
	orr	x0, x0, x6			// set bits
	/*
	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
	 * both user and kernel.
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
		      TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
	 * TCR_EL1.
	 */
	mrs	x9, ID_AA64MMFR0_EL1
	bfi	x10, x9, #32, #3
	msr	tcr_el1, x10
	ret					// return to head.S
ENDPROC(__cpu_setup)

	/*
	 *                 n n            T
	 *       U E      WT T UD     US IHBS
	 *       CE0      XWHW CZ     ME TEEA S
	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
	 * 0011 0... 1101 ..0. ..0. 10.. .... ....	< hardware reserved
	 * .... .1.. .... 01.1 11.1 ..01 0001 1101	< software settings
	 */
	.type	crval, #object
crval:
	.word	0x000802e2			// clear
	.word	0x0405d11d			// set