Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

6
arch/score/mm/Makefile Normal file
View file

@@ -0,0 +1,6 @@
#
# Makefile for the Linux/SCORE-specific parts of the memory manager.
#
# obj-y: objects always linked into the kernel for this architecture.
obj-y += cache.o extable.o fault.o init.o \
tlb-miss.o tlb-score.o pgtable.o

281
arch/score/mm/cache.c Normal file
View file

@@ -0,0 +1,281 @@
/*
* arch/score/mm/cache.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
/*
 * Write back and invalidate the D-cache lines covering one page.
 *
 * D-cache only: the caller must ensure the page contains no
 * instructions, because the I-cache is not touched.  @addr must be
 * cache-line aligned.
 *
 * Fix: the loop previously compared the byte offset i against
 * PAGE_SIZE / L1_CACHE_BYTES (the *line count*) while stepping i by
 * L1_CACHE_BYTES, so only 1/L1_CACHE_BYTES of the page was actually
 * flushed.  Iterate over the full PAGE_SIZE in line-sized steps.
 */
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"	/* write back D-cache line */
			"cache 0x1a, [%0, 0]\n"	/* invalidate D-cache line */
			"nop\n"
			: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
/*
 * Bring a page's D-cache state into line with the generic VM's
 * expectations.  Pages whose mapping is not mapped into user space are
 * merely tagged PG_dcache_dirty (the flush is deferred to
 * __update_cache()); everything else is flushed through its kernel
 * linear address immediately.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/* Highmem pages have no permanent kernel address to flush. */
	if (PageHighMem(page))
		return;

	if (mapping && !mapping_mapped(mapping)) {
		/* Defer the flush until the page is faulted into a TLB. */
		set_bit(PG_dcache_dirty, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	flush_data_cache_page((unsigned long) page_address(page));
}
EXPORT_SYMBOL(flush_dcache_page);
/* called by update_mmu_cache. */
/*
 * Complete a flush deferred by flush_dcache_page(): when a PTE is
 * (re)installed for @address and its page is tagged PG_dcache_dirty,
 * flush the page's kernel alias and clear the tag.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
		addr = (unsigned long) page_address(page);
		/* Only executable mappings need the data pushed to memory
		 * before instruction fetch can see it. */
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_dcache_dirty, &(page)->flags);
	}
}
static inline void setup_protection_map(void)
{
protection_map[0] = PAGE_NONE;
protection_map[1] = PAGE_READONLY;
protection_map[2] = PAGE_COPY;
protection_map[3] = PAGE_COPY;
protection_map[4] = PAGE_READONLY;
protection_map[5] = PAGE_READONLY;
protection_map[6] = PAGE_COPY;
protection_map[7] = PAGE_COPY;
protection_map[8] = PAGE_NONE;
protection_map[9] = PAGE_READONLY;
protection_map[10] = PAGE_SHARED;
protection_map[11] = PAGE_SHARED;
protection_map[12] = PAGE_READONLY;
protection_map[13] = PAGE_READONLY;
protection_map[14] = PAGE_SHARED;
protection_map[15] = PAGE_SHARED;
}
/*
 * Arch cache-initialisation hook: on score it only needs to fill in
 * the protection_map used by the generic mmap code.
 */
void cpu_cache_init(void)
{
	setup_protection_map();
}
/*
 * Flush the whole I-cache with a single cache op (0x10, also used by
 * flush_cache_all).  The address operand is just this function's own
 * address; the trailing nops let the operation complete before the
 * next fetch.
 */
void flush_icache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_icache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
/*
 * Flush the whole D-cache: op 0x1f followed by op 0x1a (the latter is
 * the same op the per-line D-cache routines use after writing back --
 * presumably invalidate/drain; confirm against the score cache-op
 * encodings).  Nops pad each op's completion time.
 */
void flush_dcache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_dcache_all\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
/*
 * Flush both caches: the I-cache op (0x10) followed by the two D-cache
 * ops (0x1f, 0x1a) -- the union of flush_icache_all() and
 * flush_dcache_all().
 */
void flush_cache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_cache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
/*
 * Flush all cache state on behalf of @mm.  An mm that never received
 * an ASID (context == 0) has nothing cached worth flushing.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context)
		flush_cache_all();
}
/*
 * Flush a user virtual-address range.
 *
 * Flushing a range precisely could take very long, so each page is
 * first checked for presence -- the range may straddle pages, some
 * present and some not -- and only resident portions are flushed.
 * The I-cache is flushed too when the VMA is executable.
 *
 * Cleanup: the page-table walk that used to precede the loop was dead
 * code (every iteration recomputes pgdp/pudp/pmdp/ptep), so it has
 * been removed.
 */
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* No ASID assigned: nothing of this mm can be cached. */
	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			/* Page not resident: skip to the next page. */
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		/* tmpend is inclusive: last byte to flush in this page. */
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
			end : (start | (PAGE_SIZE - 1));
		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}
/*
 * Flush the single page at @pfn through a kernel alias address.
 * NOTE(review): 0xa0000000 appears to be the base of score's unmapped
 * kernel segment (cf. KSEG1 use in tlb-score.c) -- confirm against the
 * architecture memory map.
 */
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
	/* Executable mappings must also drop stale I-cache lines. */
	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
/*
 * Make the signal trampoline at @addr coherent: operate on the two
 * words at offsets 0 and 4.  Op 0x02 matches the I-cache invalidate
 * used in flush_icache_range(); 0x0d is presumably a D-cache
 * write-back and 0x1a the drain/invalidate step used by the other
 * D-cache routines -- confirm against the score cache-op encodings.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
	"cache 0x02, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x02, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x0d, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x0d, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	: : "r" (addr));
}
/*
 * 1. WB and invalid a cache line of Dcache
 * 2. Drain Write Buffer
 * the range must be smaller than PAGE_SIZE
 *
 * Fix: @end used to be rounded *down* to a cache-line boundary, so a
 * range ending mid-line (e.g. the inclusive tmpend passed by
 * flush_cache_range()) left its final cache line unflushed.  Round up
 * instead; flushing at most one extra line is harmless.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;

	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
/*
 * Invalidate the I-cache lines covering [start, end).
 *
 * Fix: as in flush_dcache_range(), @end is now rounded *up* to a line
 * boundary rather than down, so a range ending mid-line also has its
 * final line invalidated.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;

	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x02, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
EXPORT_SYMBOL(flush_icache_range);

38
arch/score/mm/extable.c Normal file
View file

@@ -0,0 +1,38 @@
/*
* arch/score/mm/extable.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
/*
 * Try to recover from a kernel-mode fault: if the faulting epc has an
 * entry in the exception table, redirect execution to its fixup
 * address.  Returns 1 when a fixup was applied, 0 otherwise.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->cp0_epc);
	if (!entry)
		return 0;

	regs->cp0_epc = entry->fixup;
	return 1;
}

236
arch/score/mm/fault.c Normal file
View file

@@ -0,0 +1,236 @@
/*
* arch/score/mm/fault.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
/*
 * Architecture page-fault entry point (called from the TLB exception
 * paths; see tlb-miss.S do_fault).  @write is non-zero for write
 * accesses, @address is the faulting virtual address from cr6.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	/* Hex digit count for the %0*lx formats in the oops message. */
	const int field = sizeof(unsigned long) * 2;
	unsigned long flags = 0;
	siginfo_t info;
	int fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto vmalloc_fault;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto vmalloc_fault;
#endif
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	/* find_vma() returned the first VMA ending above address; it only
	 * contains address if it also starts at or below it. */
	if (vma->vm_start <= address)
		goto good_area;
	/* Otherwise the fault is only valid if it hit the guard region of
	 * a downward-growing stack that we can expand. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	/* Fault serviced: account it as major or minor against the task. */
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
			"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
			0, field, address, field, regs->cp0_epc,
			field, regs->regs[3]);
	die("Oops", regs);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 * (NOTE: this unbraced 'else' governs only the next statement;
	 * harmless here because the if-branch ends in a goto.)
	 */
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/* Entry exists in the reference table: nothing further to
		 * copy at PTE level, the PTE page itself is shared. */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}

109
arch/score/mm/init.c Normal file
View file

@@ -0,0 +1,109 @@
/*
* arch/score/mm/init.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/kcore.h>
#include <linux/sched.h>
#include <linux/initrd.h>
#include <asm/sections.h>
#include <asm/tlb.h>
/* Kernel virtual address of the shared all-zero page allocated in
 * setup_zero_page(); presumably consumed via the arch's ZERO_PAGE()
 * macro -- confirm in asm/pgtable.h. */
unsigned long empty_zero_page;
EXPORT_SYMBOL_GPL(empty_zero_page);
/*
 * Allocate the architecture's single zeroed page and mark it reserved
 * so it is never returned to the page allocator.  Panics on failure:
 * this runs at early boot where the allocation must succeed.
 */
static void setup_zero_page(void)
{
	struct page *pg;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	pg = virt_to_page((void *) empty_zero_page);
	mark_page_reserved(pg);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
 * Report whether @pagenr lies inside the RAM managed by the kernel,
 * i.e. within [min_low_pfn, max_low_pfn).
 */
int page_is_ram(unsigned long pagenr)
{
	return (pagenr >= min_low_pfn && pagenr < max_low_pfn) ? 1 : 0;
}
/*
 * Build the kernel page tables and hand the per-zone pfn limits to the
 * generic zone/allocator initialisation.
 *
 * Cleanup: removed the unused 'lastpfn' local, and zero-initialise
 * max_zone_pfns[] so any zone other than ZONE_NORMAL gets a
 * well-defined (empty) limit instead of stack garbage.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	pagetable_init();
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
/*
 * Late boot memory initialisation: establish high_memory, release all
 * bootmem pages to the buddy allocator, set up the zero page, and log
 * the memory layout.
 */
void __init mem_init(void)
{
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
	free_all_bootmem();
	setup_zero_page();	/* Setup zeroed pages. */
	mem_init_print_info(NULL);
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
#ifdef CONFIG_BLK_DEV_INITRD
/* Return the initrd image's pages to the page allocator, poisoning
 * them first to catch any late use. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
		"initrd");
}
#endif
/* Free the __init sections once boot is complete, poisoning the pages
 * to catch any stale references. */
void __init_refok free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}
/* Live PGD pointer consulted by the TLB refill handlers (tlb-miss.S)
 * and the vmalloc fault path. */
unsigned long pgd_current;

#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 */
/* swapper_pg_dir: the init/kernel PGD.  invalid_pte_table: shared
 * all-invalid PTE page that empty PGD slots point at (see pgd_init). */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PTE_ORDER);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);

52
arch/score/mm/pgtable.c Normal file
View file

@@ -0,0 +1,52 @@
/*
* arch/score/mm/pgtable.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/mm.h>
/*
 * Point every user-space slot of the PGD at @page to the shared
 * invalid_pte_table, so unpopulated ranges fault cleanly.
 *
 * Equivalent to the original 8-way hand-unrolled loop (which itself
 * assumed USER_PTRS_PER_PGD is a multiple of 8); a plain loop is left
 * for the compiler to unroll.
 */
void pgd_init(unsigned long page)
{
	unsigned long *entry = (unsigned long *) page;
	int idx;

	for (idx = 0; idx < USER_PTRS_PER_PGD; idx++)
		entry[idx] = (unsigned long) invalid_pte_table;
}
/* Boot-time page-table setup: only the kernel PGD needs initialising
 * (all user slots point at invalid_pte_table). */
void __init pagetable_init(void)
{
	/* Initialize the entire pgd. */
	pgd_init((unsigned long)swapper_pg_dir);
}

199
arch/score/mm/tlb-miss.S Normal file
View file

@@ -0,0 +1,199 @@
/*
* arch/score/mm/tlb-miss.S
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <asm/asmmacro.h>
#include <asm/pgtable-bits.h>
#include <asm/scoreregs.h>
/*
* After this macro runs, the pte faulted on is
* in register PTE, a ptr into the table in which
* the pte belongs is in PTR.
*/
/*
 * load_pte: walk the two-level page table for the faulting address
 * held in cr6.  On exit \pte holds the pte value and \ptr points at
 * the pte entry it was loaded from.
 */
.macro load_pte, pte, ptr
	la	\ptr, pgd_current	/* current PGD base */
	lw	\ptr, [\ptr, 0]
	mfcr	\pte, cr6
	srli	\pte, \pte, 22		/* PGDIR_SHIFT = 22 */
	slli	\pte, \pte, 2		/* word index -> byte offset */
	add	\ptr, \ptr, \pte
	lw	\ptr, [\ptr, 0]		/* PTE table base */
	mfcr	\pte, cr6
	srli	\pte, \pte, 10
	andi	\pte, 0xffc		/* PTE byte offset (bits 11..2) */
	add	\ptr, \ptr, \pte
	lw	\pte, [\ptr, 0]		/* load the pte entry */
.endm
/* pte_reload: re-read the (possibly updated) pte at \ptr into cr12;
 * the nops give the control-register write time to settle. */
.macro pte_reload, ptr
	lw	\ptr, [\ptr, 0]
	mtcr	\ptr, cr12
	nop
	nop
	nop
	nop
	nop
.endm
/* do_fault: call the C handler do_page_fault(regs, \write, cr6) and
 * resume via ret_from_exception. */
.macro do_fault, write
	SAVE_ALL
	mfcr	r6, cr6
	mv	r4, r0
	ldi	r5, \write
	la	r8, do_page_fault
	brl	r8
	j	ret_from_exception
.endm
/* pte_writable: branch to \label unless both bits of 0x280 are set in
 * \pte (presumably present+writable -- confirm against
 * asm/pgtable-bits.h); reloads \pte from \ptr on the fall-through. */
.macro pte_writable, pte, ptr, label
	andi	\pte, 0x280
	cmpi.c	\pte, 0x280
	bne	\label
	lw	\pte, [\ptr, 0]	/*reload PTE*/
.endm
/*
 * Make PTE writable, update software status bits as well,
 * then store at PTR.
 */
.macro pte_makewrite, pte, ptr
	ori	\pte, 0x426
	sw	\pte, [\ptr, 0]
.endm
	.text
/*
 * Fast TLB refill: walk the page table inline (same walk as load_pte,
 * but using cr9 for the pre-scaled PTE index) and write the pte into
 * the TLB via the random register.  Runs with only r30/r31 live.
 */
ENTRY(score7_FTLB_refill_Handler)
	la	r31, pgd_current	/* get pgd pointer */
	lw	r31, [r31, 0]		/* get the address of PGD */
	mfcr	r30, cr6
	srli	r30, r30, 22		/* PGDIR_SHIFT = 22*/
	slli	r30, r30, 2
	add	r31, r31, r30
	lw	r31, [r31, 0]		/* get the address of the start address of PTE table */
	mfcr	r30, cr9
	andi	r30, 0xfff		/* equivalent to get PET index and right shift 2 bits */
	add	r31, r31, r30
	lw	r30, [r31, 0]		/* load pte entry */
	mtcr	r30, cr12
	nop
	nop
	nop
	nop
	nop
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte				/* 6 cycles to make sure tlb entry works */
/*
 * Slow-path refill for kernel-segment addresses: same walk, but the
 * PTE index is derived from the bad VPN in cr6.
 */
ENTRY(score7_KSEG_refill_Handler)
	la	r31, pgd_current	/* get pgd pointer */
	lw	r31, [r31, 0]		/* get the address of PGD */
	mfcr	r30, cr6
	srli	r30, r30, 22		/* PGDIR_SHIFT = 22 */
	slli	r30, r30, 2
	add	r31, r31, r30
	lw	r31, [r31, 0]		/* get the address of the start address of PTE table */
	mfcr	r30, cr6		/* get Bad VPN */
	srli	r30, r30, 10
	andi	r30, 0xffc		/* PTE VPN mask (bit 11~2) */
	add	r31, r31, r30
	lw	r30, [r31, 0]		/* load pte entry */
	mtcr	r30, cr12
	nop
	nop
	nop
	nop
	nop
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte				/* 6 cycles to make sure tlb entry works */
/* NOTE(review): this label sits after an unconditional rte and is not
 * referenced by any branch in this file -- it appears to be dead code
 * inherited from the MIPS template; confirm before removing. */
nopage_tlbl:
	do_fault 0			/* Read */
/* Refill with write-permission check: fault out if not writable,
 * otherwise mark the pte accessed/dirty and install it (random). */
ENTRY(handle_tlb_refill)
	load_pte r30, r31
	pte_writable r30, r31, handle_tlb_refill_nopage
	pte_makewrite r30, r31		/* Access|Modify|Dirty|Valid */
	pte_reload r31
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte
handle_tlb_refill_nopage:
	do_fault 0			/* Read */
/* Invalid-entry fault ("invaild" spelling is the established symbol
 * name -- callers elsewhere use it, so it must stay).  Probe for the
 * faulting entry, then update it in place (indexed write). */
ENTRY(handle_tlb_invaild)
	load_pte r30, r31
	stlb				/* find faulting entry */
	pte_writable r30, r31, handle_tlb_invaild_nopage
	pte_makewrite r30, r31		/* Access|Modify|Dirty|Valid */
	pte_reload r31
	mtptlb
	nop
	nop
	nop
	nop
	nop
	rte
handle_tlb_invaild_nopage:
	do_fault 0			/* Read */
/* TLB-modified (write to clean page): if the pte is writable, set the
 * dirty/accessed bits and rewrite the existing entry; otherwise raise
 * a write fault. */
ENTRY(handle_mod)
	load_pte r30, r31
	stlb				/* find faulting entry */
	andi	r30, _PAGE_WRITE	/* Writable? */
	cmpz.c	r30
	beq	nowrite_mod
	lw	r30, [r31, 0]		/* reload into r30 */
	/* Present and writable bits set, set accessed and dirty bits. */
	pte_makewrite r30, r31
	/* Now reload the entry into the tlb. */
	pte_reload r31
	mtptlb
	nop
	nop
	nop
	nop
	nop
	rte
nowrite_mod:
	do_fault 1			/* Write */

251
arch/score/mm/tlb-score.c Normal file
View file

@@ -0,0 +1,251 @@
/*
* arch/score/mm/tlb-score.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <lennox.wu@sunplusct.com>
* Chen Liqin <liqin.chen@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#define TLBSIZE 32	/* number of TLB entries on this core */

/* ASID generation counter used when handing out new mm contexts
 * (get_new_mmu_context() in asm/mmu_context.h). */
unsigned long asid_cache = ASID_FIRST_VERSION;
EXPORT_SYMBOL(asid_cache);
/*
 * Invalidate every unlocked TLB entry on this CPU.  Entries below the
 * tlblock watermark are preserved.  Each victim slot is rewritten
 * with pectx 0 and a KSEG1 (unmapped-segment) VPN so it can never
 * match a translation.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ASID;
	int entry;

	local_irq_save(flags);
	old_ASID = pevn_get() & ASID_MASK;
	pectx_set(0);			/* invalid */
	entry = tlblock_get();		/* skip locked entries*/
	for (; entry < TLBSIZE; entry++) {
		tlbpt_set(entry);
		pevn_set(KSEG1);
		barrier();
		tlb_write_indexed();
	}
	pevn_set(old_ASID);		/* restore the live ASID */
	local_irq_restore(flags);
}
/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
/* Assigning a fresh ASID makes all old TLB entries for @mm
 * unmatchable, which is equivalent to flushing them. */
static inline void
drop_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	get_new_mmu_context(mm);
	pevn_set(mm->context & ASID_MASK);	/* activate the new ASID */
	local_irq_restore(flags);
}
/*
 * Flush all TLB state belonging to @mm by assigning it a fresh ASID.
 * An mm that never received a context (context == 0) has nothing to
 * flush.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context == 0)
		return;

	drop_mmu_context(mm);
}
/*
 * Flush the TLB entries covering [start, end) for @vma's mm.  Small
 * ranges (at most TLBSIZE pages) are probed and invalidated page by
 * page; larger ranges are handled wholesale by giving the mm a new
 * ASID.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long vma_mm_context = mm->context;
	if (mm->context != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= TLBSIZE) {
			int oldpid = pevn_get() & ASID_MASK;
			int newpid = vma_mm_context & ASID_MASK;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				/* Probe for (page, ASID); a hit leaves the
				 * entry index in tlbpt. */
				pevn_set(start | newpid);
				start += PAGE_SIZE;
				barrier();
				tlb_probe();
				idx = tlbpt_get();
				pectx_set(0);
				pevn_set(KSEG1);	/* unmatchable VPN */
				if (idx < 0)		/* miss: nothing to kill */
					continue;
				tlb_write_indexed();
			}
			pevn_set(oldpid);
		} else {
			/* Bigger than TLBSIZE, get new ASID directly */
			get_new_mmu_context(mm);
			if (mm == current->active_mm)
				pevn_set(vma_mm_context & ASID_MASK);
		}
		local_irq_restore(flags);
	}
}
/*
 * Flush TLB entries for a kernel virtual range (no ASID/mm involved).
 * Small ranges are probed page by page; larger ones fall back to a
 * full flush.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= TLBSIZE) {
		int pid = pevn_get();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			long idx;

			pevn_set(start);
			start += PAGE_SIZE;
			tlb_probe();
			idx = tlbpt_get();
			if (idx < 0)		/* not in the TLB */
				continue;
			/* Overwrite the hit with an unmatchable entry. */
			pectx_set(0);
			pevn_set(KSEG1);
			barrier();
			tlb_write_indexed();
		}
		pevn_set(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
/*
 * Flush the single TLB entry mapping @page (a virtual address) under
 * @vma's mm.  A probe miss means there is nothing to do.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma && vma->vm_mm->context != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;
		unsigned long vma_ASID = vma->vm_mm->context;

		newpid = vma_ASID & ASID_MASK;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = pevn_get() & ASID_MASK;
		pevn_set(page | newpid);
		barrier();
		tlb_probe();
		idx = tlbpt_get();
		pectx_set(0);
		pevn_set(KSEG1);	/* unmatchable replacement VPN */
		if (idx < 0)		/* p_bit(31) - 1: miss, 0: hit*/
			goto finish;
		barrier();
		tlb_write_indexed();
finish:
		pevn_set(oldpid);	/* restore the live ASID */
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = pevn_get();
	/* Drop the low VPN bit as well: global entries presumably map an
	 * even/odd page pair -- confirm against the TLB format. */
	page &= (PAGE_MASK << 1);
	pevn_set(page);
	barrier();
	tlb_probe();
	idx = tlbpt_get();
	pectx_set(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		pevn_set(KSEG1);
		barrier();
		tlb_write_indexed();
	}
	pevn_set(oldpid);
	local_irq_restore(flags);
}
/*
 * Install or refresh the TLB entry for @address with the new @pte:
 * probe for an existing entry, then write indexed (hit) or random
 * (miss).
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	/* NOTE(review): pid is sampled before local_irq_save(); looks
	 * benign since an interrupt would restore pevn, but confirm. */
	pid = pevn_get() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	pevn_set(address | pid);
	barrier();
	tlb_probe();
	idx = tlbpt_get();
	pectx_set(pte_val(pte));
	pevn_set(address | pid);
	if (idx < 0)			/* miss: allocate a slot */
		tlb_write_random();
	else				/* hit: overwrite in place */
		tlb_write_indexed();
	pevn_set(pid);
	local_irq_restore(flags);
}
/*
 * Boot-time TLB setup: unlock all entries, flush them, then copy the
 * fast refill handler into the exception vector area (offset 0x100,
 * 0xFC bytes) and invalidate the I-cache over it so the fresh code is
 * fetched.
 */
void tlb_init(void)
{
	tlblock_set(0);
	local_flush_tlb_all();
	memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100),
		&score7_FTLB_refill_Handler, 0xFC);
	flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100,
		EXCEPTION_VECTOR_BASE_ADDR + 0x1FC);
}