Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 17:18:05 -04:00)

Commit f6dfaef42e: Fixed MTP to work with TWRP

50820 changed files with 20846062 additions and 0 deletions

arch/sparc/mm/Makefile (new file, 19 lines added)
@@ -0,0 +1,19 @@
# Makefile for the linux Sparc-specific parts of the memory manager.
#

asflags-y := -ansi
ccflags-y := -Werror

obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += srmmu_access.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC32) += leon_mm.o

# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

# Only used by sparc32
obj-$(CONFIG_HIGHMEM) += highmem.o

arch/sparc/mm/extable.c (new file, 105 lines added)
@@ -0,0 +1,105 @@
/*
 * linux/arch/sparc/mm/extable.c
 */

#include <linux/module.h>
#include <asm/uaccess.h>

void sort_extable(struct exception_table_entry *start,
                  struct exception_table_entry *finish)
{
}

/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *start,
               const struct exception_table_entry *last,
               unsigned long value)
{
        const struct exception_table_entry *walk;

        /* Single insn entries are encoded as:
         *      word 1: insn address
         *      word 2: fixup code address
         *
         * Range entries are encoded as:
         *      word 1: first insn address
         *      word 2: 0
         *      word 3: last insn address + 4 bytes
         *      word 4: fixup code address
         *
         * Deleted entries are encoded as:
         *      word 1: unused
         *      word 2: -1
         *
         * See asm/uaccess.h for more details.
         */

        /* 1. Try to find an exact match. */
        for (walk = start; walk <= last; walk++) {
                if (walk->fixup == 0) {
                        /* A range entry, skip both parts. */
                        walk++;
                        continue;
                }

                /* A deleted entry; see trim_init_extable */
                if (walk->fixup == -1)
                        continue;

                if (walk->insn == value)
                        return walk;
        }

        /* 2. Try to find a range match. */
        for (walk = start; walk <= (last - 1); walk++) {
                if (walk->fixup)
                        continue;

                if (walk[0].insn <= value && walk[1].insn > value)
                        return walk;

                walk++;
        }

        return NULL;
}

#ifdef CONFIG_MODULES
/* We could memmove them around; easier to mark the trimmed ones. */
void trim_init_extable(struct module *m)
{
        unsigned int i;
        bool range;

        for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
                range = m->extable[i].fixup == 0;

                if (within_module_init(m->extable[i].insn, m)) {
                        m->extable[i].fixup = -1;
                        if (range)
                                m->extable[i+1].fixup = -1;
                }
                if (range)
                        i++;
        }
}
#endif /* CONFIG_MODULES */

/* Special extable search, which handles ranges.  Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
        const struct exception_table_entry *entry;

        entry = search_exception_tables(addr);
        if (!entry)
                return 0;

        /* Inside range?  Fix g2 and return correct fixup */
        if (!entry->fixup) {
                *g2 = (addr - entry->insn) / 4;
                return (entry + 1)->fixup;
        }

        return entry->fixup;
}
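
The encoding described in the comment inside search_extable() is easier to follow with a concrete table in front of you. The sketch below is illustrative only and is not part of the commit: the struct layout matches the insn/fixup accesses above, but the addresses are invented, and the arithmetic shows what search_extables_range() would hand back for a fault inside the range entry.

struct exception_table_entry {
        unsigned long insn, fixup;
};

static const struct exception_table_entry example_table[] = {
        /* single-insn entry: faulting insn address -> fixup code address */
        { 0xf0004000, 0xf0008000 },
        /* range entry, first half: first insn address, fixup == 0 marks a range */
        { 0xf0004100, 0x0 },
        /* range entry, second half: last insn address + 4, real fixup address */
        { 0xf0004140, 0xf0008100 },
};

/* For a fault at 0xf0004120, search_extable() matches the range pair
 * (0xf0004100 <= 0xf0004120 < 0xf0004140), so search_extables_range()
 * returns 0xf0008100 and sets *g2 = (0xf0004120 - 0xf0004100) / 4 = 8,
 * the number of instructions completed before the fault.
 */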

arch/sparc/mm/fault_32.c (new file, 468 lines added)

@@ -0,0 +1,468 @@
|
|||
/*
|
||||
* fault.c: Page fault handlers for the Sparc.
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <asm/head.h>
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kdebug.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/openprom.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "mm_32.h"
|
||||
|
||||
int show_unhandled_signals = 1;
|
||||
|
||||
static void __noreturn unhandled_fault(unsigned long address,
|
||||
struct task_struct *tsk,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if ((unsigned long) address < PAGE_SIZE) {
|
||||
printk(KERN_ALERT
|
||||
"Unable to handle kernel NULL pointer dereference\n");
|
||||
} else {
|
||||
printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
|
||||
address);
|
||||
}
|
||||
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
|
||||
(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
|
||||
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
|
||||
(tsk->mm ? (unsigned long) tsk->mm->pgd :
|
||||
(unsigned long) tsk->active_mm->pgd));
|
||||
die_if_kernel("Oops", regs);
|
||||
}
|
||||
|
||||
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
|
||||
unsigned long address)
|
||||
{
|
||||
struct pt_regs regs;
|
||||
unsigned long g2;
|
||||
unsigned int insn;
|
||||
int i;
|
||||
|
||||
i = search_extables_range(ret_pc, &g2);
|
||||
switch (i) {
|
||||
case 3:
|
||||
/* load & store will be handled by fixup */
|
||||
return 3;
|
||||
|
||||
case 1:
|
||||
/* store will be handled by fixup, load will bump out */
|
||||
/* for _to_ macros */
|
||||
insn = *((unsigned int *) pc);
|
||||
if ((insn >> 21) & 1)
|
||||
return 1;
|
||||
break;
|
||||
|
||||
case 2:
|
||||
/* load will be handled by fixup, store will bump out */
|
||||
/* for _from_ macros */
|
||||
insn = *((unsigned int *) pc);
|
||||
if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
|
||||
return 2;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
memset(&regs, 0, sizeof(regs));
|
||||
regs.pc = pc;
|
||||
regs.npc = pc + 4;
|
||||
__asm__ __volatile__(
|
||||
"rd %%psr, %0\n\t"
|
||||
"nop\n\t"
|
||||
"nop\n\t"
|
||||
"nop\n" : "=r" (regs.psr));
|
||||
unhandled_fault(address, current, &regs);
|
||||
|
||||
/* Not reached */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
show_signal_msg(struct pt_regs *regs, int sig, int code,
|
||||
unsigned long address, struct task_struct *tsk)
|
||||
{
|
||||
if (!unhandled_signal(tsk, sig))
|
||||
return;
|
||||
|
||||
if (!printk_ratelimit())
|
||||
return;
|
||||
|
||||
printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
|
||||
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
|
||||
tsk->comm, task_pid_nr(tsk), address,
|
||||
(void *)regs->pc, (void *)regs->u_regs[UREG_I7],
|
||||
(void *)regs->u_regs[UREG_FP], code);
|
||||
|
||||
print_vma_addr(KERN_CONT " in ", regs->pc);
|
||||
|
||||
printk(KERN_CONT "\n");
|
||||
}
|
||||
|
||||
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
|
||||
unsigned long addr)
|
||||
{
|
||||
siginfo_t info;
|
||||
|
||||
info.si_signo = sig;
|
||||
info.si_code = code;
|
||||
info.si_errno = 0;
|
||||
info.si_addr = (void __user *) addr;
|
||||
info.si_trapno = 0;
|
||||
|
||||
if (unlikely(show_unhandled_signals))
|
||||
show_signal_msg(regs, sig, info.si_code,
|
||||
addr, current);
|
||||
|
||||
force_sig_info (sig, &info, current);
|
||||
}
|
||||
|
||||
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
|
||||
{
|
||||
unsigned int insn;
|
||||
|
||||
if (text_fault)
|
||||
return regs->pc;
|
||||
|
||||
if (regs->psr & PSR_PS)
|
||||
insn = *(unsigned int *) regs->pc;
|
||||
else
|
||||
__get_user(insn, (unsigned int *) regs->pc);
|
||||
|
||||
return safe_compute_effective_address(regs, insn);
|
||||
}
|
||||
|
||||
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
|
||||
int text_fault)
|
||||
{
|
||||
unsigned long addr = compute_si_addr(regs, text_fault);
|
||||
|
||||
__do_fault_siginfo(code, sig, regs, addr);
|
||||
}
|
||||
|
||||
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
|
||||
unsigned long address)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->mm;
|
||||
unsigned int fixup;
|
||||
unsigned long g2;
|
||||
int from_user = !(regs->psr & PSR_PS);
|
||||
int fault, code;
|
||||
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
|
||||
if (text_fault)
|
||||
address = regs->pc;
|
||||
|
||||
/*
|
||||
* We fault-in kernel-space virtual memory on-demand. The
|
||||
* 'reference' page table is init_mm.pgd.
|
||||
*
|
||||
* NOTE! We MUST NOT take any locks for this case. We may
|
||||
* be in an interrupt or a critical region, and should
|
||||
* only copy the information from the master page table,
|
||||
* nothing more.
|
||||
*/
|
||||
code = SEGV_MAPERR;
|
||||
if (address >= TASK_SIZE)
|
||||
goto vmalloc_fault;
|
||||
|
||||
/*
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
if (in_atomic() || !mm)
|
||||
goto no_context;
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
if (!from_user && address >= PAGE_OFFSET)
|
||||
goto bad_area;
|
||||
|
||||
vma = find_vma(mm, address);
|
||||
if (!vma)
|
||||
goto bad_area;
|
||||
if (vma->vm_start <= address)
|
||||
goto good_area;
|
||||
if (!(vma->vm_flags & VM_GROWSDOWN))
|
||||
goto bad_area;
|
||||
if (expand_stack(vma, address))
|
||||
goto bad_area;
|
||||
/*
|
||||
* Ok, we have a good vm_area for this memory access, so
|
||||
* we can handle it..
|
||||
*/
|
||||
good_area:
|
||||
code = SEGV_ACCERR;
|
||||
if (write) {
|
||||
if (!(vma->vm_flags & VM_WRITE))
|
||||
goto bad_area;
|
||||
} else {
|
||||
/* Allow reads even for write-only mappings */
|
||||
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
|
||||
goto bad_area;
|
||||
}
|
||||
|
||||
if (from_user)
|
||||
flags |= FAULT_FLAG_USER;
|
||||
if (write)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
|
||||
/*
|
||||
* If for any reason at all we couldn't handle the fault,
|
||||
* make sure we exit gracefully rather than endlessly redo
|
||||
* the fault.
|
||||
*/
|
||||
fault = handle_mm_fault(mm, vma, address, flags);
|
||||
|
||||
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
|
||||
return;
|
||||
|
||||
if (unlikely(fault & VM_FAULT_ERROR)) {
|
||||
if (fault & VM_FAULT_OOM)
|
||||
goto out_of_memory;
|
||||
else if (fault & VM_FAULT_SIGSEGV)
|
||||
goto bad_area;
|
||||
else if (fault & VM_FAULT_SIGBUS)
|
||||
goto do_sigbus;
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
if (fault & VM_FAULT_MAJOR) {
|
||||
current->maj_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
|
||||
1, regs, address);
|
||||
} else {
|
||||
current->min_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
|
||||
1, regs, address);
|
||||
}
|
||||
if (fault & VM_FAULT_RETRY) {
|
||||
flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
|
||||
/* No need to up_read(&mm->mmap_sem) as we would
|
||||
* have already released it in __lock_page_or_retry
|
||||
* in mm/filemap.c.
|
||||
*/
|
||||
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
return;
|
||||
|
||||
/*
|
||||
* Something tried to access memory that isn't in our memory map..
|
||||
* Fix it, but check if it's kernel or user first..
|
||||
*/
|
||||
bad_area:
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
bad_area_nosemaphore:
|
||||
/* User mode accesses just cause a SIGSEGV */
|
||||
if (from_user) {
|
||||
do_fault_siginfo(code, SIGSEGV, regs, text_fault);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Is this in ex_table? */
|
||||
no_context:
|
||||
g2 = regs->u_regs[UREG_G2];
|
||||
if (!from_user) {
|
||||
fixup = search_extables_range(regs->pc, &g2);
|
||||
/* Values below 10 are reserved for other things */
|
||||
if (fixup > 10) {
|
||||
extern const unsigned __memset_start[];
|
||||
extern const unsigned __memset_end[];
|
||||
extern const unsigned __csum_partial_copy_start[];
|
||||
extern const unsigned __csum_partial_copy_end[];
|
||||
|
||||
#ifdef DEBUG_EXCEPTIONS
|
||||
printk("Exception: PC<%08lx> faddr<%08lx>\n",
|
||||
regs->pc, address);
|
||||
printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
|
||||
regs->pc, fixup, g2);
|
||||
#endif
|
||||
if ((regs->pc >= (unsigned long)__memset_start &&
|
||||
regs->pc < (unsigned long)__memset_end) ||
|
||||
(regs->pc >= (unsigned long)__csum_partial_copy_start &&
|
||||
regs->pc < (unsigned long)__csum_partial_copy_end)) {
|
||||
regs->u_regs[UREG_I4] = address;
|
||||
regs->u_regs[UREG_I5] = regs->pc;
|
||||
}
|
||||
regs->u_regs[UREG_G2] = g2;
|
||||
regs->pc = fixup;
|
||||
regs->npc = regs->pc + 4;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
unhandled_fault(address, tsk, regs);
|
||||
do_exit(SIGKILL);
|
||||
|
||||
/*
|
||||
* We ran out of memory, or some other thing happened to us that made
|
||||
* us unable to handle the page fault gracefully.
|
||||
*/
|
||||
out_of_memory:
|
||||
up_read(&mm->mmap_sem);
|
||||
if (from_user) {
|
||||
pagefault_out_of_memory();
|
||||
return;
|
||||
}
|
||||
goto no_context;
|
||||
|
||||
do_sigbus:
|
||||
up_read(&mm->mmap_sem);
|
||||
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
|
||||
if (!from_user)
|
||||
goto no_context;
|
||||
|
||||
vmalloc_fault:
|
||||
{
|
||||
/*
|
||||
* Synchronize this task's top level page-table
|
||||
* with the 'reference' page table.
|
||||
*/
|
||||
int offset = pgd_index(address);
|
||||
pgd_t *pgd, *pgd_k;
|
||||
pmd_t *pmd, *pmd_k;
|
||||
|
||||
pgd = tsk->active_mm->pgd + offset;
|
||||
pgd_k = init_mm.pgd + offset;
|
||||
|
||||
if (!pgd_present(*pgd)) {
|
||||
if (!pgd_present(*pgd_k))
|
||||
goto bad_area_nosemaphore;
|
||||
pgd_val(*pgd) = pgd_val(*pgd_k);
|
||||
return;
|
||||
}
|
||||
|
||||
pmd = pmd_offset(pgd, address);
|
||||
pmd_k = pmd_offset(pgd_k, address);
|
||||
|
||||
if (pmd_present(*pmd) || !pmd_present(*pmd_k))
|
||||
goto bad_area_nosemaphore;
|
||||
|
||||
*pmd = *pmd_k;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* This always deals with user addresses. */
|
||||
static void force_user_fault(unsigned long address, int write)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->mm;
|
||||
unsigned int flags = FAULT_FLAG_USER;
|
||||
int code;
|
||||
|
||||
code = SEGV_MAPERR;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
vma = find_vma(mm, address);
|
||||
if (!vma)
|
||||
goto bad_area;
|
||||
if (vma->vm_start <= address)
|
||||
goto good_area;
|
||||
if (!(vma->vm_flags & VM_GROWSDOWN))
|
||||
goto bad_area;
|
||||
if (expand_stack(vma, address))
|
||||
goto bad_area;
|
||||
good_area:
|
||||
code = SEGV_ACCERR;
|
||||
if (write) {
|
||||
if (!(vma->vm_flags & VM_WRITE))
|
||||
goto bad_area;
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
} else {
|
||||
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
|
||||
goto bad_area;
|
||||
}
|
||||
switch (handle_mm_fault(mm, vma, address, flags)) {
|
||||
case VM_FAULT_SIGBUS:
|
||||
case VM_FAULT_OOM:
|
||||
goto do_sigbus;
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
return;
|
||||
bad_area:
|
||||
up_read(&mm->mmap_sem);
|
||||
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
|
||||
return;
|
||||
|
||||
do_sigbus:
|
||||
up_read(&mm->mmap_sem);
|
||||
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
|
||||
}
|
||||
|
||||
static void check_stack_aligned(unsigned long sp)
|
||||
{
|
||||
if (sp & 0x7UL)
|
||||
force_sig(SIGILL, current);
|
||||
}
|
||||
|
||||
void window_overflow_fault(void)
|
||||
{
|
||||
unsigned long sp;
|
||||
|
||||
sp = current_thread_info()->rwbuf_stkptrs[0];
|
||||
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
|
||||
force_user_fault(sp + 0x38, 1);
|
||||
force_user_fault(sp, 1);
|
||||
|
||||
check_stack_aligned(sp);
|
||||
}
|
||||
|
||||
void window_underflow_fault(unsigned long sp)
|
||||
{
|
||||
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
|
||||
force_user_fault(sp + 0x38, 0);
|
||||
force_user_fault(sp, 0);
|
||||
|
||||
check_stack_aligned(sp);
|
||||
}
|
||||
|
||||
void window_ret_fault(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long sp;
|
||||
|
||||
sp = regs->u_regs[UREG_FP];
|
||||
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
|
||||
force_user_fault(sp + 0x38, 0);
|
||||
force_user_fault(sp, 0);
|
||||
|
||||
check_stack_aligned(sp);
|
||||
}
|

arch/sparc/mm/fault_64.c (new file, 544 lines added)

@@ -0,0 +1,544 @@
|
|||
/*
|
||||
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
|
||||
*
|
||||
* Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
|
||||
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
|
||||
*/
|
||||
|
||||
#include <asm/head.h>
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/context_tracking.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/openprom.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/lsu.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
int show_unhandled_signals = 1;
|
||||
|
||||
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* kprobe_running() needs smp_processor_id() */
|
||||
if (kprobes_built_in() && !user_mode(regs)) {
|
||||
preempt_disable();
|
||||
if (kprobe_running() && kprobe_fault_handler(regs, 0))
|
||||
ret = 1;
|
||||
preempt_enable();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __kprobes unhandled_fault(unsigned long address,
|
||||
struct task_struct *tsk,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if ((unsigned long) address < PAGE_SIZE) {
|
||||
printk(KERN_ALERT "Unable to handle kernel NULL "
|
||||
"pointer dereference\n");
|
||||
} else {
|
||||
printk(KERN_ALERT "Unable to handle kernel paging request "
|
||||
"at virtual address %016lx\n", (unsigned long)address);
|
||||
}
|
||||
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
|
||||
(tsk->mm ?
|
||||
CTX_HWBITS(tsk->mm->context) :
|
||||
CTX_HWBITS(tsk->active_mm->context)));
|
||||
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
|
||||
(tsk->mm ? (unsigned long) tsk->mm->pgd :
|
||||
(unsigned long) tsk->active_mm->pgd));
|
||||
die_if_kernel("Oops", regs);
|
||||
}
|
||||
|
||||
static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
|
||||
{
|
||||
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
|
||||
regs->tpc);
|
||||
printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
|
||||
printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
|
||||
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
|
||||
dump_stack();
|
||||
unhandled_fault(regs->tpc, current, regs);
|
||||
}
|
||||
|
||||
/*
|
||||
* We now make sure that mmap_sem is held in all paths that call
|
||||
* this. Additionally, to prevent kswapd from ripping ptes from
|
||||
* under us, raise interrupts around the time that we look at the
|
||||
* pte, kswapd will have to wait to get his smp ipi response from
|
||||
* us. vmtruncate likewise. This saves us having to get pte lock.
|
||||
*/
|
||||
static unsigned int get_user_insn(unsigned long tpc)
|
||||
{
|
||||
pgd_t *pgdp = pgd_offset(current->mm, tpc);
|
||||
pud_t *pudp;
|
||||
pmd_t *pmdp;
|
||||
pte_t *ptep, pte;
|
||||
unsigned long pa;
|
||||
u32 insn = 0;
|
||||
|
||||
if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
|
||||
goto out;
|
||||
pudp = pud_offset(pgdp, tpc);
|
||||
if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
|
||||
goto out;
|
||||
|
||||
/* This disables preemption for us as well. */
|
||||
local_irq_disable();
|
||||
|
||||
pmdp = pmd_offset(pudp, tpc);
|
||||
if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
|
||||
goto out_irq_enable;
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
if (pmd_trans_huge(*pmdp)) {
|
||||
if (pmd_trans_splitting(*pmdp))
|
||||
goto out_irq_enable;
|
||||
|
||||
pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
|
||||
pa += tpc & ~HPAGE_MASK;
|
||||
|
||||
/* Use phys bypass so we don't pollute dtlb/dcache. */
|
||||
__asm__ __volatile__("lduwa [%1] %2, %0"
|
||||
: "=r" (insn)
|
||||
: "r" (pa), "i" (ASI_PHYS_USE_EC));
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
ptep = pte_offset_map(pmdp, tpc);
|
||||
pte = *ptep;
|
||||
if (pte_present(pte)) {
|
||||
pa = (pte_pfn(pte) << PAGE_SHIFT);
|
||||
pa += (tpc & ~PAGE_MASK);
|
||||
|
||||
/* Use phys bypass so we don't pollute dtlb/dcache. */
|
||||
__asm__ __volatile__("lduwa [%1] %2, %0"
|
||||
: "=r" (insn)
|
||||
: "r" (pa), "i" (ASI_PHYS_USE_EC));
|
||||
}
|
||||
pte_unmap(ptep);
|
||||
}
|
||||
out_irq_enable:
|
||||
local_irq_enable();
|
||||
out:
|
||||
return insn;
|
||||
}
|
||||
|
||||
static inline void
|
||||
show_signal_msg(struct pt_regs *regs, int sig, int code,
|
||||
unsigned long address, struct task_struct *tsk)
|
||||
{
|
||||
if (!unhandled_signal(tsk, sig))
|
||||
return;
|
||||
|
||||
if (!printk_ratelimit())
|
||||
return;
|
||||
|
||||
printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
|
||||
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
|
||||
tsk->comm, task_pid_nr(tsk), address,
|
||||
(void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
|
||||
(void *)regs->u_regs[UREG_FP], code);
|
||||
|
||||
print_vma_addr(KERN_CONT " in ", regs->tpc);
|
||||
|
||||
printk(KERN_CONT "\n");
|
||||
}
|
||||
|
||||
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
|
||||
unsigned long fault_addr, unsigned int insn,
|
||||
int fault_code)
|
||||
{
|
||||
unsigned long addr;
|
||||
siginfo_t info;
|
||||
|
||||
info.si_code = code;
|
||||
info.si_signo = sig;
|
||||
info.si_errno = 0;
|
||||
if (fault_code & FAULT_CODE_ITLB) {
|
||||
addr = regs->tpc;
|
||||
} else {
|
||||
/* If we were able to probe the faulting instruction, use it
|
||||
* to compute a precise fault address. Otherwise use the fault
|
||||
* time provided address which may only have page granularity.
|
||||
*/
|
||||
if (insn)
|
||||
addr = compute_effective_address(regs, insn, 0);
|
||||
else
|
||||
addr = fault_addr;
|
||||
}
|
||||
info.si_addr = (void __user *) addr;
|
||||
info.si_trapno = 0;
|
||||
|
||||
if (unlikely(show_unhandled_signals))
|
||||
show_signal_msg(regs, sig, code, addr, current);
|
||||
|
||||
force_sig_info(sig, &info, current);
|
||||
}
|
||||
|
||||
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
|
||||
{
|
||||
if (!insn) {
|
||||
if (!regs->tpc || (regs->tpc & 0x3))
|
||||
return 0;
|
||||
if (regs->tstate & TSTATE_PRIV) {
|
||||
insn = *(unsigned int *) regs->tpc;
|
||||
} else {
|
||||
insn = get_user_insn(regs->tpc);
|
||||
}
|
||||
}
|
||||
return insn;
|
||||
}
|
||||
|
||||
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
|
||||
int fault_code, unsigned int insn,
|
||||
unsigned long address)
|
||||
{
|
||||
unsigned char asi = ASI_P;
|
||||
|
||||
if ((!insn) && (regs->tstate & TSTATE_PRIV))
|
||||
goto cannot_handle;
|
||||
|
||||
/* If user insn could be read (thus insn is zero), that
|
||||
* is fine. We will just gun down the process with a signal
|
||||
* in that case.
|
||||
*/
|
||||
|
||||
if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
|
||||
(insn & 0xc0800000) == 0xc0800000) {
|
||||
if (insn & 0x2000)
|
||||
asi = (regs->tstate >> 24);
|
||||
else
|
||||
asi = (insn >> 5);
|
||||
if ((asi & 0xf2) == 0x82) {
|
||||
if (insn & 0x1000000) {
|
||||
handle_ldf_stq(insn, regs);
|
||||
} else {
|
||||
/* This was a non-faulting load. Just clear the
|
||||
* destination register(s) and continue with the next
|
||||
* instruction. -jj
|
||||
*/
|
||||
handle_ld_nf(insn, regs);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Is this in ex_table? */
|
||||
if (regs->tstate & TSTATE_PRIV) {
|
||||
const struct exception_table_entry *entry;
|
||||
|
||||
entry = search_exception_tables(regs->tpc);
|
||||
if (entry) {
|
||||
regs->tpc = entry->fixup;
|
||||
regs->tnpc = regs->tpc + 4;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
/* The si_code was set to make clear whether
|
||||
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
|
||||
*/
|
||||
do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
|
||||
return;
|
||||
}
|
||||
|
||||
cannot_handle:
|
||||
unhandled_fault (address, current, regs);
|
||||
}
|
||||
|
||||
static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
|
||||
{
|
||||
static int times;
|
||||
|
||||
if (times++ < 10)
|
||||
printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
|
||||
"64-bit TPC [%lx]\n",
|
||||
current->comm, current->pid,
|
||||
regs->tpc);
|
||||
show_regs(regs);
|
||||
}
|
||||
|
||||
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
|
||||
{
|
||||
enum ctx_state prev_state = exception_enter();
|
||||
struct mm_struct *mm = current->mm;
|
||||
struct vm_area_struct *vma;
|
||||
unsigned int insn = 0;
|
||||
int si_code, fault_code, fault;
|
||||
unsigned long address, mm_rss;
|
||||
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
|
||||
fault_code = get_thread_fault_code();
|
||||
|
||||
if (notify_page_fault(regs))
|
||||
goto exit_exception;
|
||||
|
||||
si_code = SEGV_MAPERR;
|
||||
address = current_thread_info()->fault_address;
|
||||
|
||||
if ((fault_code & FAULT_CODE_ITLB) &&
|
||||
(fault_code & FAULT_CODE_DTLB))
|
||||
BUG();
|
||||
|
||||
if (test_thread_flag(TIF_32BIT)) {
|
||||
if (!(regs->tstate & TSTATE_PRIV)) {
|
||||
if (unlikely((regs->tpc >> 32) != 0)) {
|
||||
bogus_32bit_fault_tpc(regs);
|
||||
goto intr_or_no_mm;
|
||||
}
|
||||
}
|
||||
if (unlikely((address >> 32) != 0))
|
||||
goto intr_or_no_mm;
|
||||
}
|
||||
|
||||
if (regs->tstate & TSTATE_PRIV) {
|
||||
unsigned long tpc = regs->tpc;
|
||||
|
||||
/* Sanity check the PC. */
|
||||
if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
|
||||
(tpc >= MODULES_VADDR && tpc < MODULES_END)) {
|
||||
/* Valid, no problems... */
|
||||
} else {
|
||||
bad_kernel_pc(regs, address);
|
||||
goto exit_exception;
|
||||
}
|
||||
} else
|
||||
flags |= FAULT_FLAG_USER;
|
||||
|
||||
/*
|
||||
* If we're in an interrupt or have no user
|
||||
* context, we must not take the fault..
|
||||
*/
|
||||
if (in_atomic() || !mm)
|
||||
goto intr_or_no_mm;
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
|
||||
if (!down_read_trylock(&mm->mmap_sem)) {
|
||||
if ((regs->tstate & TSTATE_PRIV) &&
|
||||
!search_exception_tables(regs->tpc)) {
|
||||
insn = get_fault_insn(regs, insn);
|
||||
goto handle_kernel_fault;
|
||||
}
|
||||
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
}
|
||||
|
||||
if (fault_code & FAULT_CODE_BAD_RA)
|
||||
goto do_sigbus;
|
||||
|
||||
vma = find_vma(mm, address);
|
||||
if (!vma)
|
||||
goto bad_area;
|
||||
|
||||
/* Pure DTLB misses do not tell us whether the fault causing
|
||||
* load/store/atomic was a write or not, it only says that there
|
||||
* was no match. So in such a case we (carefully) read the
|
||||
* instruction to try and figure this out. It's an optimization
|
||||
* so it's ok if we can't do this.
|
||||
*
|
||||
* Special hack, window spill/fill knows the exact fault type.
|
||||
*/
|
||||
if (((fault_code &
|
||||
(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
|
||||
(vma->vm_flags & VM_WRITE) != 0) {
|
||||
insn = get_fault_insn(regs, 0);
|
||||
if (!insn)
|
||||
goto continue_fault;
|
||||
/* All loads, stores and atomics have bits 30 and 31 both set
|
||||
* in the instruction. Bit 21 is set in all stores, but we
|
||||
* have to avoid prefetches which also have bit 21 set.
|
||||
*/
|
||||
if ((insn & 0xc0200000) == 0xc0200000 &&
|
||||
(insn & 0x01780000) != 0x01680000) {
|
||||
/* Don't bother updating thread struct value,
|
||||
* because update_mmu_cache only cares which tlb
|
||||
* the access came from.
|
||||
*/
|
||||
fault_code |= FAULT_CODE_WRITE;
|
||||
}
|
||||
}
|
||||
continue_fault:
|
||||
|
||||
if (vma->vm_start <= address)
|
||||
goto good_area;
|
||||
if (!(vma->vm_flags & VM_GROWSDOWN))
|
||||
goto bad_area;
|
||||
if (!(fault_code & FAULT_CODE_WRITE)) {
|
||||
/* Non-faulting loads shouldn't expand stack. */
|
||||
insn = get_fault_insn(regs, insn);
|
||||
if ((insn & 0xc0800000) == 0xc0800000) {
|
||||
unsigned char asi;
|
||||
|
||||
if (insn & 0x2000)
|
||||
asi = (regs->tstate >> 24);
|
||||
else
|
||||
asi = (insn >> 5);
|
||||
if ((asi & 0xf2) == 0x82)
|
||||
goto bad_area;
|
||||
}
|
||||
}
|
||||
if (expand_stack(vma, address))
|
||||
goto bad_area;
|
||||
/*
|
||||
* Ok, we have a good vm_area for this memory access, so
|
||||
* we can handle it..
|
||||
*/
|
||||
good_area:
|
||||
si_code = SEGV_ACCERR;
|
||||
|
||||
/* If we took a ITLB miss on a non-executable page, catch
|
||||
* that here.
|
||||
*/
|
||||
if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
|
||||
BUG_ON(address != regs->tpc);
|
||||
BUG_ON(regs->tstate & TSTATE_PRIV);
|
||||
goto bad_area;
|
||||
}
|
||||
|
||||
if (fault_code & FAULT_CODE_WRITE) {
|
||||
if (!(vma->vm_flags & VM_WRITE))
|
||||
goto bad_area;
|
||||
|
||||
/* Spitfire has an icache which does not snoop
|
||||
* processor stores. Later processors do...
|
||||
*/
|
||||
if (tlb_type == spitfire &&
|
||||
(vma->vm_flags & VM_EXEC) != 0 &&
|
||||
vma->vm_file != NULL)
|
||||
set_thread_fault_code(fault_code |
|
||||
FAULT_CODE_BLKCOMMIT);
|
||||
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
} else {
|
||||
/* Allow reads even for write-only mappings */
|
||||
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
|
||||
goto bad_area;
|
||||
}
|
||||
|
||||
fault = handle_mm_fault(mm, vma, address, flags);
|
||||
|
||||
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
|
||||
goto exit_exception;
|
||||
|
||||
if (unlikely(fault & VM_FAULT_ERROR)) {
|
||||
if (fault & VM_FAULT_OOM)
|
||||
goto out_of_memory;
|
||||
else if (fault & VM_FAULT_SIGSEGV)
|
||||
goto bad_area;
|
||||
else if (fault & VM_FAULT_SIGBUS)
|
||||
goto do_sigbus;
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
if (fault & VM_FAULT_MAJOR) {
|
||||
current->maj_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
|
||||
1, regs, address);
|
||||
} else {
|
||||
current->min_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
|
||||
1, regs, address);
|
||||
}
|
||||
if (fault & VM_FAULT_RETRY) {
|
||||
flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
|
||||
/* No need to up_read(&mm->mmap_sem) as we would
|
||||
* have already released it in __lock_page_or_retry
|
||||
* in mm/filemap.c.
|
||||
*/
|
||||
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
mm_rss = get_mm_rss(mm);
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
|
||||
#endif
|
||||
if (unlikely(mm_rss >
|
||||
mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
|
||||
tsb_grow(mm, MM_TSB_BASE, mm_rss);
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
mm_rss = mm->context.huge_pte_count;
|
||||
if (unlikely(mm_rss >
|
||||
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
|
||||
if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
|
||||
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
|
||||
else
|
||||
hugetlb_setup(regs);
|
||||
|
||||
}
|
||||
#endif
|
||||
exit_exception:
|
||||
exception_exit(prev_state);
|
||||
return;
|
||||
|
||||
/*
|
||||
* Something tried to access memory that isn't in our memory map..
|
||||
* Fix it, but check if it's kernel or user first..
|
||||
*/
|
||||
bad_area:
|
||||
insn = get_fault_insn(regs, insn);
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
handle_kernel_fault:
|
||||
do_kernel_fault(regs, si_code, fault_code, insn, address);
|
||||
goto exit_exception;
|
||||
|
||||
/*
|
||||
* We ran out of memory, or some other thing happened to us that made
|
||||
* us unable to handle the page fault gracefully.
|
||||
*/
|
||||
out_of_memory:
|
||||
insn = get_fault_insn(regs, insn);
|
||||
up_read(&mm->mmap_sem);
|
||||
if (!(regs->tstate & TSTATE_PRIV)) {
|
||||
pagefault_out_of_memory();
|
||||
goto exit_exception;
|
||||
}
|
||||
goto handle_kernel_fault;
|
||||
|
||||
intr_or_no_mm:
|
||||
insn = get_fault_insn(regs, 0);
|
||||
goto handle_kernel_fault;
|
||||
|
||||
do_sigbus:
|
||||
insn = get_fault_insn(regs, insn);
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/*
|
||||
* Send a sigbus, regardless of whether we were in kernel
|
||||
* or user mode.
|
||||
*/
|
||||
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
|
||||
|
||||
/* Kernel mode? Handle exceptions or die */
|
||||
if (regs->tstate & TSTATE_PRIV)
|
||||
goto handle_kernel_fault;
|
||||
}
|

arch/sparc/mm/gup.c (new file, 267 lines added)
@@ -0,0 +1,267 @@
/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask, result;
        pte_t *ptep;

        if (tlb_type == hypervisor) {
                result = _PAGE_PRESENT_4V|_PAGE_P_4V;
                if (write)
                        result |= _PAGE_WRITE_4V;
        } else {
                result = _PAGE_PRESENT_4U|_PAGE_P_4U;
                if (write)
                        result |= _PAGE_WRITE_4U;
        }
        mask = result | _PAGE_SPECIAL;

        ptep = pte_offset_kernel(&pmd, addr);
        do {
                struct page *page, *head;
                pte_t pte = *ptep;

                if ((pte_val(pte) & mask) != result)
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

                /* The hugepage case is simplified on sparc64 because
                 * we encode the sub-page pfn offsets into the
                 * hugepage PTEs.  We could optimize this in the future
                 * use page_cache_add_speculative() for the hugepage case.
                 */
                page = pte_page(pte);
                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        return 0;
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(head);
                        return 0;
                }
                if (head != page)
                        get_huge_page_tail(page);

                pages[*nr] = page;
                (*nr)++;
        } while (ptep++, addr += PAGE_SIZE, addr != end);

        return 1;
}

static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages,
                int *nr)
{
        struct page *head, *page, *tail;
        int refs;

        if (!(pmd_val(pmd) & _PAGE_VALID))
                return 0;

        if (write && !pmd_write(pmd))
                return 0;

        refs = 0;
        head = pmd_page(pmd);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        /* Any tail page need their mapcount reference taken before we
         * return.
         */
        while (refs--) {
                if (PageTail(tail))
                        get_huge_page_tail(tail);
                tail++;
        }

        return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd))) {
                        if (!gup_huge_pmd(pmdp, pmd, addr, next,
                                          write, pages, nr))
                                return 0;
                } else if (!gup_pte_range(pmd, addr, next, write,
                                          pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);

        return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                        return 0;
        } while (pudp++, addr = next, addr != end);

        return 1;
}

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                          struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next, flags;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;

        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        break;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        break;
        } while (pgdp++, addr = next, addr != end);
        local_irq_restore(flags);

        return nr;
}

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables from being freed on sparc.
         *
         * So long as we atomically load page table pointers versus teardown,
         * we can follow the address down to the the page and take a ref on it.
         */
        local_irq_disable();

        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);

        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;

        {
                int ret;

slow:
                local_irq_enable();

                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);

                /* Have to be a bit careful with return values */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}
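
For context, this is how a caller typically drives the fast path defined above. The helper below is a minimal sketch rather than code from this tree: the function name, buffer handling, and error policy are invented for illustration, and it assumes a page-aligned user address.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Pin 'len' bytes of a page-aligned user buffer for a write by the kernel. */
static int pin_user_buffer(unsigned long uaddr, size_t len, struct page **pages)
{
        int nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int got, i;

        /* Tries the IRQ-off lockless walk first and falls back to the
         * mmap_sem-protected get_user_pages() slow path, as above. */
        got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
        if (got < 0)
                return got;
        if (got < nr_pages) {
                /* Partial pin: drop the references we did take. */
                for (i = 0; i < got; i++)
                        put_page(pages[i]);
                return -EFAULT;
        }
        return 0;
}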

arch/sparc/mm/highmem.c (new file, 130 lines added)
@@ -0,0 +1,130 @@
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>

pgprot_t kmap_prot;

static pte_t *kmap_pte;

void __init kmap_init(void)
{
        unsigned long address;
        pmd_t *dir;

        address = __fix_to_virt(FIX_KMAP_BEGIN);
        dir = pmd_offset(pgd_offset_k(address), address);

        /* cache the first kmap pte */
        kmap_pte = pte_offset_kernel(dir, address);
        kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}

void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        long idx, type;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

        /* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
        /* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif

        return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type;

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
        {
                unsigned long idx;

                idx = type + KM_TYPE_NR * smp_processor_id();
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

                /* XXX Fix - Anton */
#if 0
                __flush_cache_one(vaddr);
#else
                flush_cache_all();
#endif

                /*
                 * force other mappings to Oops if they'll try to access
                 * this pte without first remap it
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                /* XXX Fix - Anton */
#if 0
                __flush_tlb_one(vaddr);
#else
                flush_tlb_all();
#endif
        }
#endif

        kmap_atomic_idx_pop();
        pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
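
Since kmap_atomic() above disables pagefaults, callers must not sleep between the map and the unmap. A minimal sketch of the usual pairing follows; the helper name and the memcpy payload are invented for illustration.

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Copy up to one page of data into a (possibly highmem) page. */
static void fill_page(struct page *page, const void *src, size_t len)
{
        void *vaddr = kmap_atomic(page);

        memcpy(vaddr, src, min_t(size_t, len, PAGE_SIZE));
        kunmap_atomic(vaddr);
}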

arch/sparc/mm/hugetlbpage.c (new file, 226 lines added)
@@ -0,0 +1,226 @@
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, NULL, pmd, addr);
        }
        return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        addr &= HPAGE_MASK;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}
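
The hooks above are called from the generic hugetlb code rather than directly by drivers; the sketch below is only meant to show how huge_pte_alloc() and set_huge_pte_at() fit together on this port, with locking, accounting, and error handling stripped out and the helper name invented.

static int sketch_install_huge_page(struct mm_struct *mm, unsigned long addr,
                                    struct page *page, pgprot_t prot)
{
        pte_t *ptep = huge_pte_alloc(mm, addr, HPAGE_SIZE);
        pte_t entry;

        if (!ptep)
                return -ENOMEM;

        entry = mk_pte(page, prot);
        /* Writes (1 << HUGETLB_PAGE_ORDER) consecutive PTEs, bumping the
         * physical address by PAGE_SIZE at each step, as in the loop above. */
        set_huge_pte_at(mm, addr & HPAGE_MASK, ptep, entry);
        return 0;
}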

arch/sparc/mm/hypersparc.S (new file, 412 lines added)

@@ -0,0 +1,412 @@
|
|||
/*
|
||||
* hypersparc.S: High speed Hypersparc mmu/cache operations.
|
||||
*
|
||||
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
|
||||
*/
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/psr.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtsrmmu.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
.text
|
||||
.align 4
|
||||
|
||||
.globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm
|
||||
.globl hypersparc_flush_cache_range, hypersparc_flush_cache_page
|
||||
.globl hypersparc_flush_page_to_ram
|
||||
.globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
|
||||
.globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
|
||||
.globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page
|
||||
|
||||
hypersparc_flush_cache_all:
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
sethi %hi(vac_cache_size), %g4
|
||||
ld [%g4 + %lo(vac_cache_size)], %g5
|
||||
sethi %hi(vac_line_size), %g1
|
||||
ld [%g1 + %lo(vac_line_size)], %g2
|
||||
1:
|
||||
subcc %g5, %g2, %g5 ! hyper_flush_unconditional_combined
|
||||
bne 1b
|
||||
sta %g0, [%g5] ASI_M_FLUSH_CTX
|
||||
retl
|
||||
sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache
|
||||
|
||||
/* We expand the window flush to get maximum performance. */
|
||||
hypersparc_flush_cache_mm:
|
||||
#ifndef CONFIG_SMP
|
||||
ld [%o0 + AOFF_mm_context], %g1
|
||||
cmp %g1, -1
|
||||
be hypersparc_flush_cache_mm_out
|
||||
#endif
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
|
||||
sethi %hi(vac_line_size), %g1
|
||||
ld [%g1 + %lo(vac_line_size)], %o1
|
||||
sethi %hi(vac_cache_size), %g2
|
||||
ld [%g2 + %lo(vac_cache_size)], %o0
|
||||
add %o1, %o1, %g1
|
||||
add %o1, %g1, %g2
|
||||
add %o1, %g2, %g3
|
||||
add %o1, %g3, %g4
|
||||
add %o1, %g4, %g5
|
||||
add %o1, %g5, %o4
|
||||
add %o1, %o4, %o5
|
||||
|
||||
/* BLAMMO! */
|
||||
1:
|
||||
subcc %o0, %o5, %o0 ! hyper_flush_cache_user
|
||||
sta %g0, [%o0 + %g0] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o0 + %o1] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o0 + %g1] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o0 + %g2] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o0 + %g3] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o0 + %g4] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o0 + %g5] ASI_M_FLUSH_USER
|
||||
bne 1b
|
||||
sta %g0, [%o0 + %o4] ASI_M_FLUSH_USER
|
||||
hypersparc_flush_cache_mm_out:
|
||||
retl
|
||||
nop
|
||||
|
||||
/* The things we do for performance... */
|
||||
hypersparc_flush_cache_range:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
#ifndef CONFIG_SMP
|
||||
ld [%o0 + AOFF_mm_context], %g1
|
||||
cmp %g1, -1
|
||||
be hypersparc_flush_cache_range_out
|
||||
#endif
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
|
||||
sethi %hi(vac_line_size), %g1
|
||||
ld [%g1 + %lo(vac_line_size)], %o4
|
||||
sethi %hi(vac_cache_size), %g2
|
||||
ld [%g2 + %lo(vac_cache_size)], %o3
|
||||
|
||||
/* Here comes the fun part... */
|
||||
add %o2, (PAGE_SIZE - 1), %o2
|
||||
andn %o1, (PAGE_SIZE - 1), %o1
|
||||
add %o4, %o4, %o5
|
||||
andn %o2, (PAGE_SIZE - 1), %o2
|
||||
add %o4, %o5, %g1
|
||||
sub %o2, %o1, %g4
|
||||
add %o4, %g1, %g2
|
||||
sll %o3, 2, %g5
|
||||
add %o4, %g2, %g3
|
||||
cmp %g4, %g5
|
||||
add %o4, %g3, %g4
|
||||
blu 0f
|
||||
add %o4, %g4, %g5
|
||||
add %o4, %g5, %g7
|
||||
|
||||
/* Flush entire user space, believe it or not this is quicker
|
||||
* than page at a time flushings for range > (cache_size<<2).
|
||||
*/
|
||||
1:
|
||||
subcc %o3, %g7, %o3
|
||||
sta %g0, [%o3 + %g0] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o3 + %o4] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o3 + %o5] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o3 + %g1] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o3 + %g2] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o3 + %g3] ASI_M_FLUSH_USER
|
||||
sta %g0, [%o3 + %g4] ASI_M_FLUSH_USER
|
||||
bne 1b
|
||||
sta %g0, [%o3 + %g5] ASI_M_FLUSH_USER
|
||||
retl
|
||||
nop
|
||||
|
||||
/* Below our threshold, flush one page at a time. */
|
||||
0:
|
||||
ld [%o0 + AOFF_mm_context], %o0
|
||||
mov SRMMU_CTX_REG, %g7
|
||||
lda [%g7] ASI_M_MMUREGS, %o3
|
||||
sta %o0, [%g7] ASI_M_MMUREGS
|
||||
add %o2, -PAGE_SIZE, %o0
|
||||
1:
|
||||
or %o0, 0x400, %g7
|
||||
lda [%g7] ASI_M_FLUSH_PROBE, %g7
|
||||
orcc %g7, 0, %g0
|
||||
be,a 3f
|
||||
mov %o0, %o2
|
||||
add %o4, %g5, %g7
|
||||
2:
|
||||
sub %o2, %g7, %o2
|
||||
sta %g0, [%o2 + %g0] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o2 + %o4] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o2 + %o5] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o2 + %g1] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o2 + %g2] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o2 + %g3] ASI_M_FLUSH_PAGE
|
||||
andcc %o2, 0xffc, %g0
|
||||
sta %g0, [%o2 + %g4] ASI_M_FLUSH_PAGE
|
||||
bne 2b
|
||||
sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE
|
||||
3:
|
||||
cmp %o2, %o1
|
||||
bne 1b
|
||||
add %o2, -PAGE_SIZE, %o0
|
||||
mov SRMMU_FAULT_STATUS, %g5
|
||||
lda [%g5] ASI_M_MMUREGS, %g0
|
||||
mov SRMMU_CTX_REG, %g7
|
||||
sta %o3, [%g7] ASI_M_MMUREGS
|
||||
hypersparc_flush_cache_range_out:
|
||||
retl
|
||||
nop
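
A minimal C sketch (not part of the source) of the heuristic the two code paths above implement, assuming 4 kB pages; flush_user_line(), flush_user_page() and page_is_mapped() are hypothetical placeholders for the ASI_M_FLUSH_USER, ASI_M_FLUSH_PAGE and ASI_M_FLUSH_PROBE operations:

extern void flush_user_line(unsigned long addr);   /* placeholder: ASI_M_FLUSH_USER store */
extern void flush_user_page(unsigned long addr);   /* placeholder: ASI_M_FLUSH_PAGE loop */
extern int page_is_mapped(unsigned long addr);     /* placeholder: ASI_M_FLUSH_PROBE check */

#define SKETCH_PAGE_SIZE 4096UL

/* Whole-cache flush wins once the range exceeds four times the cache size,
 * otherwise probe and flush one page at a time, as the assembly above does. */
static void hypersparc_range_flush_sketch(unsigned long start, unsigned long end,
                                          unsigned long cache_size,
                                          unsigned long line_size)
{
        unsigned long addr;

        start &= ~(SKETCH_PAGE_SIZE - 1);
        end = (end + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1);

        if (end - start >= (cache_size << 2)) {
                for (addr = 0; addr < cache_size; addr += line_size)
                        flush_user_line(addr);
        } else {
                for (addr = start; addr < end; addr += SKETCH_PAGE_SIZE)
                        if (page_is_mapped(addr))
                                flush_user_page(addr);
        }
}
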
|
||||
|
||||
/* HyperSparc requires a valid mapping where we are about to flush
|
||||
* in order to check for a physical tag match during the flush.
|
||||
*/
|
||||
/* Verified, my ass... */
|
||||
hypersparc_flush_cache_page:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
ld [%o0 + AOFF_mm_context], %g2
|
||||
#ifndef CONFIG_SMP
|
||||
cmp %g2, -1
|
||||
be hypersparc_flush_cache_page_out
|
||||
#endif
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
|
||||
sethi %hi(vac_line_size), %g1
|
||||
ld [%g1 + %lo(vac_line_size)], %o4
|
||||
mov SRMMU_CTX_REG, %o3
|
||||
andn %o1, (PAGE_SIZE - 1), %o1
|
||||
lda [%o3] ASI_M_MMUREGS, %o2
|
||||
sta %g2, [%o3] ASI_M_MMUREGS
|
||||
or %o1, 0x400, %o5
|
||||
lda [%o5] ASI_M_FLUSH_PROBE, %g1
|
||||
orcc %g0, %g1, %g0
|
||||
be 2f
|
||||
add %o4, %o4, %o5
|
||||
sub %o1, -PAGE_SIZE, %o1
|
||||
add %o4, %o5, %g1
|
||||
add %o4, %g1, %g2
|
||||
add %o4, %g2, %g3
|
||||
add %o4, %g3, %g4
|
||||
add %o4, %g4, %g5
|
||||
add %o4, %g5, %g7
|
||||
|
||||
/* BLAMMO! */
|
||||
1:
|
||||
sub %o1, %g7, %o1
|
||||
sta %g0, [%o1 + %g0] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g1] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g2] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
|
||||
andcc %o1, 0xffc, %g0
|
||||
sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
|
||||
bne 1b
|
||||
sta %g0, [%o1 + %g5] ASI_M_FLUSH_PAGE
|
||||
2:
|
||||
mov SRMMU_FAULT_STATUS, %g7
|
||||
mov SRMMU_CTX_REG, %g4
|
||||
lda [%g7] ASI_M_MMUREGS, %g0
|
||||
sta %o2, [%g4] ASI_M_MMUREGS
|
||||
hypersparc_flush_cache_page_out:
|
||||
retl
|
||||
nop
|
||||
|
||||
hypersparc_flush_sig_insns:
|
||||
flush %o1
|
||||
retl
|
||||
flush %o1 + 4
|
||||
|
||||
/* HyperSparc is copy-back. */
|
||||
hypersparc_flush_page_to_ram:
|
||||
sethi %hi(vac_line_size), %g1
|
||||
ld [%g1 + %lo(vac_line_size)], %o4
|
||||
andn %o0, (PAGE_SIZE - 1), %o0
|
||||
add %o4, %o4, %o5
|
||||
or %o0, 0x400, %g7
|
||||
lda [%g7] ASI_M_FLUSH_PROBE, %g5
|
||||
add %o4, %o5, %g1
|
||||
orcc %g5, 0, %g0
|
||||
be 2f
|
||||
add %o4, %g1, %g2
|
||||
add %o4, %g2, %g3
|
||||
sub %o0, -PAGE_SIZE, %o0
|
||||
add %o4, %g3, %g4
|
||||
add %o4, %g4, %g5
|
||||
add %o4, %g5, %g7
|
||||
|
||||
/* BLAMMO! */
|
||||
1:
|
||||
sub %o0, %g7, %o0
|
||||
sta %g0, [%o0 + %g0] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o0 + %o4] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o0 + %o5] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o0 + %g1] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o0 + %g2] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o0 + %g3] ASI_M_FLUSH_PAGE
|
||||
andcc %o0, 0xffc, %g0
|
||||
sta %g0, [%o0 + %g4] ASI_M_FLUSH_PAGE
|
||||
bne 1b
|
||||
sta %g0, [%o0 + %g5] ASI_M_FLUSH_PAGE
|
||||
2:
|
||||
mov SRMMU_FAULT_STATUS, %g1
|
||||
retl
|
||||
lda [%g1] ASI_M_MMUREGS, %g0
|
||||
|
||||
/* HyperSparc is IO cache coherent. */
|
||||
hypersparc_flush_page_for_dma:
|
||||
retl
|
||||
nop
|
||||
|
||||
/* It was noted that at boot time a TLB flush all in a delay slot
|
||||
* can deliver an illegal instruction to the processor if the timing
|
||||
* is just right...
|
||||
*/
|
||||
hypersparc_flush_tlb_all:
|
||||
mov 0x400, %g1
|
||||
sta %g0, [%g1] ASI_M_FLUSH_PROBE
|
||||
retl
|
||||
nop
|
||||
|
||||
hypersparc_flush_tlb_mm:
|
||||
mov SRMMU_CTX_REG, %g1
|
||||
ld [%o0 + AOFF_mm_context], %o1
|
||||
lda [%g1] ASI_M_MMUREGS, %g5
|
||||
#ifndef CONFIG_SMP
|
||||
cmp %o1, -1
|
||||
be hypersparc_flush_tlb_mm_out
|
||||
#endif
|
||||
mov 0x300, %g2
|
||||
sta %o1, [%g1] ASI_M_MMUREGS
|
||||
sta %g0, [%g2] ASI_M_FLUSH_PROBE
|
||||
hypersparc_flush_tlb_mm_out:
|
||||
retl
|
||||
sta %g5, [%g1] ASI_M_MMUREGS
|
||||
|
||||
hypersparc_flush_tlb_range:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
mov SRMMU_CTX_REG, %g1
|
||||
ld [%o0 + AOFF_mm_context], %o3
|
||||
lda [%g1] ASI_M_MMUREGS, %g5
|
||||
#ifndef CONFIG_SMP
|
||||
cmp %o3, -1
|
||||
be hypersparc_flush_tlb_range_out
|
||||
#endif
|
||||
sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
|
||||
sta %o3, [%g1] ASI_M_MMUREGS
|
||||
and %o1, %o4, %o1
|
||||
add %o1, 0x200, %o1
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PROBE
|
||||
1:
|
||||
sub %o1, %o4, %o1
|
||||
cmp %o1, %o2
|
||||
blu,a 1b
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PROBE
|
||||
hypersparc_flush_tlb_range_out:
|
||||
retl
|
||||
sta %g5, [%g1] ASI_M_MMUREGS
|
||||
|
||||
hypersparc_flush_tlb_page:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
mov SRMMU_CTX_REG, %g1
|
||||
ld [%o0 + AOFF_mm_context], %o3
|
||||
andn %o1, (PAGE_SIZE - 1), %o1
|
||||
#ifndef CONFIG_SMP
|
||||
cmp %o3, -1
|
||||
be hypersparc_flush_tlb_page_out
|
||||
#endif
|
||||
lda [%g1] ASI_M_MMUREGS, %g5
|
||||
sta %o3, [%g1] ASI_M_MMUREGS
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PROBE
|
||||
hypersparc_flush_tlb_page_out:
|
||||
retl
|
||||
sta %g5, [%g1] ASI_M_MMUREGS
|
||||
|
||||
__INIT
|
||||
|
||||
/* High speed page clear/copy. */
|
||||
hypersparc_bzero_1page:
|
||||
/* NOTE: This routine has to be shorter than 40insns --jj */
|
||||
clr %g1
|
||||
mov 32, %g2
|
||||
mov 64, %g3
|
||||
mov 96, %g4
|
||||
mov 128, %g5
|
||||
mov 160, %g7
|
||||
mov 192, %o2
|
||||
mov 224, %o3
|
||||
mov 16, %o1
|
||||
1:
|
||||
stda %g0, [%o0 + %g0] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %g2] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %g3] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %g4] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %g5] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %g7] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %o2] ASI_M_BFILL
|
||||
stda %g0, [%o0 + %o3] ASI_M_BFILL
|
||||
subcc %o1, 1, %o1
|
||||
bne 1b
|
||||
add %o0, 256, %o0
|
||||
|
||||
retl
|
||||
nop
|
||||
|
||||
hypersparc_copy_1page:
|
||||
/* NOTE: This routine has to be shorter than 70insns --jj */
|
||||
sub %o1, %o0, %o2 ! difference
|
||||
mov 16, %g1
|
||||
1:
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
add %o0, 32, %o0
|
||||
sta %o0, [%o0 + %o2] ASI_M_BCOPY
|
||||
subcc %g1, 1, %g1
|
||||
bne 1b
|
||||
add %o0, 32, %o0
|
||||
|
||||
retl
|
||||
nop
|
||||
|
||||
.globl hypersparc_setup_blockops
|
||||
hypersparc_setup_blockops:
|
||||
sethi %hi(bzero_1page), %o0
|
||||
or %o0, %lo(bzero_1page), %o0
|
||||
sethi %hi(hypersparc_bzero_1page), %o1
|
||||
or %o1, %lo(hypersparc_bzero_1page), %o1
|
||||
sethi %hi(hypersparc_copy_1page), %o2
|
||||
or %o2, %lo(hypersparc_copy_1page), %o2
|
||||
ld [%o1], %o4
|
||||
1:
|
||||
add %o1, 4, %o1
|
||||
st %o4, [%o0]
|
||||
add %o0, 4, %o0
|
||||
cmp %o1, %o2
|
||||
bne 1b
|
||||
ld [%o1], %o4
|
||||
sethi %hi(__copy_1page), %o0
|
||||
or %o0, %lo(__copy_1page), %o0
|
||||
sethi %hi(hypersparc_setup_blockops), %o2
|
||||
or %o2, %lo(hypersparc_setup_blockops), %o2
|
||||
ld [%o1], %o4
|
||||
1:
|
||||
add %o1, 4, %o1
|
||||
st %o4, [%o0]
|
||||
add %o0, 4, %o0
|
||||
cmp %o1, %o2
|
||||
bne 1b
|
||||
ld [%o1], %o4
|
||||
sta %g0, [%g0] ASI_M_FLUSH_IWHOLE
|
||||
retl
|
||||
nop
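
What hypersparc_setup_blockops does, rendered as a C sketch (not from the source): it copies the instruction words of the HyperSparc block-fill and block-copy routines over the generic bzero_1page and __copy_1page entry points, then flushes the instruction cache; the 40/70-instruction notes above keep the replacement bodies from overrunning the routines they patch. sketch_flush_icache() is a hypothetical stand-in for the ASI_M_FLUSH_IWHOLE store.

#include <stdint.h>

extern void sketch_flush_icache(void);          /* placeholder: ASI_M_FLUSH_IWHOLE store */

/* Copy [src, src_end) instruction words over dst, as the two patch loops above do. */
static void patch_blockop(uint32_t *dst, const uint32_t *src, const uint32_t *src_end)
{
        while (src < src_end)
                *dst++ = *src++;
}

static void setup_blockops_sketch(uint32_t *bzero_1page, uint32_t *copy_1page,
                                  uint32_t *hs_bzero, uint32_t *hs_copy,
                                  uint32_t *hs_copy_end)
{
        patch_blockop(bzero_1page, hs_bzero, hs_copy);   /* bzero body ends where the copy body begins */
        patch_blockop(copy_1page, hs_copy, hs_copy_end);
        sketch_flush_icache();                           /* make the patched code visible */
}
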
|
359
arch/sparc/mm/init_32.c
Normal file
@@ -0,0 +1,359 @@
/*
|
||||
* linux/arch/sparc/mm/init.c
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
* Copyright (C) 2000 Anton Blanchard (anton@samba.org)
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/poison.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include <asm/sections.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/vaddrs.h>
|
||||
#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
|
||||
#include <asm/setup.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/leon.h>
|
||||
|
||||
#include "mm_32.h"
|
||||
|
||||
unsigned long *sparc_valid_addr_bitmap;
|
||||
EXPORT_SYMBOL(sparc_valid_addr_bitmap);
|
||||
|
||||
unsigned long phys_base;
|
||||
EXPORT_SYMBOL(phys_base);
|
||||
|
||||
unsigned long pfn_base;
|
||||
EXPORT_SYMBOL(pfn_base);
|
||||
|
||||
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
|
||||
|
||||
/* Initial ramdisk setup */
|
||||
extern unsigned int sparc_ramdisk_image;
|
||||
extern unsigned int sparc_ramdisk_size;
|
||||
|
||||
unsigned long highstart_pfn, highend_pfn;
|
||||
|
||||
void show_mem(unsigned int filter)
|
||||
{
|
||||
printk("Mem-info:\n");
|
||||
show_free_areas(filter);
|
||||
printk("Free swap: %6ldkB\n",
|
||||
get_nr_swap_pages() << (PAGE_SHIFT-10));
|
||||
printk("%ld pages of RAM\n", totalram_pages);
|
||||
printk("%ld free pages\n", nr_free_pages());
|
||||
}
|
||||
|
||||
|
||||
unsigned long last_valid_pfn;
|
||||
|
||||
unsigned long calc_highpages(void)
|
||||
{
|
||||
int i;
|
||||
int nr = 0;
|
||||
|
||||
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
|
||||
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
|
||||
unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
|
||||
|
||||
if (end_pfn <= max_low_pfn)
|
||||
continue;
|
||||
|
||||
if (start_pfn < max_low_pfn)
|
||||
start_pfn = max_low_pfn;
|
||||
|
||||
nr += end_pfn - start_pfn;
|
||||
}
|
||||
|
||||
return nr;
|
||||
}
|
||||
|
||||
static unsigned long calc_max_low_pfn(void)
|
||||
{
|
||||
int i;
|
||||
unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
|
||||
unsigned long curr_pfn, last_pfn;
|
||||
|
||||
last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
|
||||
for (i = 1; sp_banks[i].num_bytes != 0; i++) {
|
||||
curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
|
||||
|
||||
if (curr_pfn >= tmp) {
|
||||
if (last_pfn < tmp)
|
||||
tmp = last_pfn;
|
||||
break;
|
||||
}
|
||||
|
||||
last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
return tmp;
|
||||
}
|
||||
|
||||
unsigned long __init bootmem_init(unsigned long *pages_avail)
|
||||
{
|
||||
unsigned long bootmap_size, start_pfn;
|
||||
unsigned long end_of_phys_memory = 0UL;
|
||||
unsigned long bootmap_pfn, bytes_avail, size;
|
||||
int i;
|
||||
|
||||
bytes_avail = 0UL;
|
||||
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
|
||||
end_of_phys_memory = sp_banks[i].base_addr +
|
||||
sp_banks[i].num_bytes;
|
||||
bytes_avail += sp_banks[i].num_bytes;
|
||||
if (cmdline_memory_size) {
|
||||
if (bytes_avail > cmdline_memory_size) {
|
||||
unsigned long slack = bytes_avail - cmdline_memory_size;
|
||||
|
||||
bytes_avail -= slack;
|
||||
end_of_phys_memory -= slack;
|
||||
|
||||
sp_banks[i].num_bytes -= slack;
|
||||
if (sp_banks[i].num_bytes == 0) {
|
||||
sp_banks[i].base_addr = 0xdeadbeef;
|
||||
} else {
|
||||
sp_banks[i+1].num_bytes = 0;
|
||||
sp_banks[i+1].base_addr = 0xdeadbeef;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Start with page aligned address of last symbol in kernel
|
||||
* image.
|
||||
*/
|
||||
start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
|
||||
|
||||
/* Now shift down to get the real physical page frame number. */
|
||||
start_pfn >>= PAGE_SHIFT;
|
||||
|
||||
bootmap_pfn = start_pfn;
|
||||
|
||||
max_pfn = end_of_phys_memory >> PAGE_SHIFT;
|
||||
|
||||
max_low_pfn = max_pfn;
|
||||
highstart_pfn = highend_pfn = max_pfn;
|
||||
|
||||
if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
|
||||
highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
|
||||
max_low_pfn = calc_max_low_pfn();
|
||||
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
|
||||
calc_highpages() >> (20 - PAGE_SHIFT));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
|
||||
if (sparc_ramdisk_image) {
|
||||
if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
|
||||
sparc_ramdisk_image -= KERNBASE;
|
||||
initrd_start = sparc_ramdisk_image + phys_base;
|
||||
initrd_end = initrd_start + sparc_ramdisk_size;
|
||||
if (initrd_end > end_of_phys_memory) {
|
||||
printk(KERN_CRIT "initrd extends beyond end of memory "
|
||||
"(0x%016lx > 0x%016lx)\ndisabling initrd\n",
|
||||
initrd_end, end_of_phys_memory);
|
||||
initrd_start = 0;
|
||||
}
|
||||
if (initrd_start) {
|
||||
if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
|
||||
initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
|
||||
bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
/* Initialize the boot-time allocator. */
|
||||
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
|
||||
max_low_pfn);
|
||||
|
||||
/* Now register the available physical memory with the
|
||||
* allocator.
|
||||
*/
|
||||
*pages_avail = 0;
|
||||
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
|
||||
unsigned long curr_pfn, last_pfn;
|
||||
|
||||
curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
|
||||
if (curr_pfn >= max_low_pfn)
|
||||
break;
|
||||
|
||||
last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
|
||||
if (last_pfn > max_low_pfn)
|
||||
last_pfn = max_low_pfn;
|
||||
|
||||
/*
|
||||
* .. finally, did all the rounding and playing
|
||||
* around just make the area go away?
|
||||
*/
|
||||
if (last_pfn <= curr_pfn)
|
||||
continue;
|
||||
|
||||
size = (last_pfn - curr_pfn) << PAGE_SHIFT;
|
||||
*pages_avail += last_pfn - curr_pfn;
|
||||
|
||||
free_bootmem(sp_banks[i].base_addr, size);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (initrd_start) {
|
||||
/* Reserve the initrd image area. */
|
||||
size = initrd_end - initrd_start;
|
||||
reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
|
||||
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
|
||||
initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
|
||||
initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
|
||||
}
|
||||
#endif
|
||||
/* Reserve the kernel text/data/bss. */
|
||||
size = (start_pfn << PAGE_SHIFT) - phys_base;
|
||||
reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT);
|
||||
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
|
||||
/* Reserve the bootmem map. We do not account for it
|
||||
* in pages_avail because we will release that memory
|
||||
* in free_all_bootmem.
|
||||
*/
|
||||
size = bootmap_size;
|
||||
reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
|
||||
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
|
||||
return max_pfn;
|
||||
}
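
The cmdline_memory_size handling above trims the bank list once the running total passes the mem= limit; a small sketch under the same sp_banks[] layout (base_addr/num_bytes pairs terminated by num_bytes == 0), ignoring the 0xdeadbeef markers used in the real code:

struct bank { unsigned long base_addr, num_bytes; };

/* Shorten the bank that crosses the limit and terminate the list there;
 * returns the number of bytes actually kept. */
static unsigned long trim_banks_sketch(struct bank *banks, unsigned long limit)
{
        unsigned long total = 0;
        int i;

        for (i = 0; banks[i].num_bytes != 0; i++) {
                total += banks[i].num_bytes;
                if (limit && total > limit) {
                        unsigned long slack = total - limit;

                        banks[i].num_bytes -= slack;
                        total -= slack;
                        banks[i + 1].num_bytes = 0;
                        break;
                }
        }
        return total;
}
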
|
||||
|
||||
/*
|
||||
* paging_init() sets up the page tables: We call the MMU specific
|
||||
* init routine based upon the Sun model type on the Sparc.
|
||||
*
|
||||
*/
|
||||
void __init paging_init(void)
|
||||
{
|
||||
srmmu_paging_init();
|
||||
prom_build_devicetree();
|
||||
of_fill_in_cpu_data();
|
||||
device_scan();
|
||||
}
|
||||
|
||||
static void __init taint_real_pages(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; sp_banks[i].num_bytes; i++) {
|
||||
unsigned long start, end;
|
||||
|
||||
start = sp_banks[i].base_addr;
|
||||
end = start + sp_banks[i].num_bytes;
|
||||
|
||||
while (start < end) {
|
||||
set_bit(start >> 20, sparc_valid_addr_bitmap);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
||||
#ifdef CONFIG_DEBUG_HIGHMEM
|
||||
printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
|
||||
#endif
|
||||
|
||||
for (tmp = start_pfn; tmp < end_pfn; tmp++)
|
||||
free_highmem_page(pfn_to_page(tmp));
|
||||
}
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
|
||||
prom_printf("BUG: fixmap and pkmap areas overlap\n");
|
||||
prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
|
||||
PKMAP_BASE,
|
||||
(unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
|
||||
FIXADDR_START);
|
||||
prom_printf("Please mail sparclinux@vger.kernel.org.\n");
|
||||
prom_halt();
|
||||
}
|
||||
|
||||
|
||||
/* Saves us work later. */
|
||||
memset((void *)&empty_zero_page, 0, PAGE_SIZE);
|
||||
|
||||
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
|
||||
i += 1;
|
||||
sparc_valid_addr_bitmap = (unsigned long *)
|
||||
__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
|
||||
|
||||
if (sparc_valid_addr_bitmap == NULL) {
|
||||
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
|
||||
prom_halt();
|
||||
}
|
||||
memset(sparc_valid_addr_bitmap, 0, i << 2);
|
||||
|
||||
taint_real_pages();
|
||||
|
||||
max_mapnr = last_valid_pfn - pfn_base;
|
||||
high_memory = __va(max_low_pfn << PAGE_SHIFT);
|
||||
free_all_bootmem();
|
||||
|
||||
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
|
||||
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
|
||||
unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
|
||||
|
||||
if (end_pfn <= highstart_pfn)
|
||||
continue;
|
||||
|
||||
if (start_pfn < highstart_pfn)
|
||||
start_pfn = highstart_pfn;
|
||||
|
||||
map_high_region(start_pfn, end_pfn);
|
||||
}
|
||||
|
||||
mem_init_print_info(NULL);
|
||||
}
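
The sizing arithmetic for sparc_valid_addr_bitmap above allocates one bit per megabyte of physical address space, packed 32 bits per word. A sketch of the same computation, assuming PAGE_SHIFT == 12 (4 kB pages):

/* pfn -> MB index is a shift by (20 - PAGE_SHIFT); shifting by a further 5
 * converts bits to 32-bit words, and the "+ 1" rounds up, matching the
 * i << 2 bytes passed to __alloc_bootmem above. */
static unsigned long valid_bitmap_bytes_sketch(unsigned long last_valid_pfn)
{
        unsigned long words = (last_valid_pfn >> ((20 - 12) + 5)) + 1;

        return words << 2;      /* 4 bytes per 32-bit word */
}
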
|
||||
|
||||
void free_initmem (void)
|
||||
{
|
||||
free_initmem_default(POISON_FREE_INITMEM);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
|
||||
"initrd");
|
||||
}
|
||||
#endif
|
||||
|
||||
void sparc_flush_page_to_ram(struct page *page)
|
||||
{
|
||||
unsigned long vaddr = (unsigned long)page_address(page);
|
||||
|
||||
if (vaddr)
|
||||
__flush_page_to_ram(vaddr);
|
||||
}
|
||||
EXPORT_SYMBOL(sparc_flush_page_to_ram);
|
2846
arch/sparc/mm/init_64.c
Normal file
File diff suppressed because it is too large
34
arch/sparc/mm/init_64.h
Normal file
@@ -0,0 +1,34 @@
#ifndef _SPARC64_MM_INIT_H
|
||||
#define _SPARC64_MM_INIT_H
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
/* Most of the symbols in this file are defined in init.c and
|
||||
* marked non-static so that assembler code can get at them.
|
||||
*/
|
||||
|
||||
#define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS)
|
||||
|
||||
extern unsigned long kern_linear_pte_xor[4];
|
||||
extern unsigned int sparc64_highest_unlocked_tlb_ent;
|
||||
extern unsigned long sparc64_kern_pri_context;
|
||||
extern unsigned long sparc64_kern_pri_nuc_bits;
|
||||
extern unsigned long sparc64_kern_sec_context;
|
||||
void mmu_info(struct seq_file *m);
|
||||
|
||||
struct linux_prom_translation {
|
||||
unsigned long virt;
|
||||
unsigned long size;
|
||||
unsigned long data;
|
||||
};
|
||||
|
||||
/* Exported for kernel TLB miss handling in ktlb.S */
|
||||
extern struct linux_prom_translation prom_trans[512];
|
||||
extern unsigned int prom_trans_ents;
|
||||
|
||||
/* Exported for SMP bootup purposes. */
|
||||
extern unsigned long kern_locked_tte_data;
|
||||
|
||||
void prom_world(int enter);
|
||||
|
||||
#endif /* _SPARC64_MM_INIT_H */
|
262
arch/sparc/mm/io-unit.c
Normal file
@@ -0,0 +1,262 @@
/*
|
||||
* io-unit.c: IO-UNIT specific routines for memory management.
|
||||
*
|
||||
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/io-unit.h>
|
||||
#include <asm/mxcc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/oplib.h>
|
||||
|
||||
#include "mm_32.h"
|
||||
|
||||
/* #define IOUNIT_DEBUG */
|
||||
#ifdef IOUNIT_DEBUG
|
||||
#define IOD(x) printk(x)
|
||||
#else
|
||||
#define IOD(x) do { } while (0)
|
||||
#endif
|
||||
|
||||
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
|
||||
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
|
||||
|
||||
static void __init iounit_iommu_init(struct platform_device *op)
|
||||
{
|
||||
struct iounit_struct *iounit;
|
||||
iopte_t __iomem *xpt;
|
||||
iopte_t __iomem *xptend;
|
||||
|
||||
iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
|
||||
if (!iounit) {
|
||||
prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
|
||||
prom_halt();
|
||||
}
|
||||
|
||||
iounit->limit[0] = IOUNIT_BMAP1_START;
|
||||
iounit->limit[1] = IOUNIT_BMAP2_START;
|
||||
iounit->limit[2] = IOUNIT_BMAPM_START;
|
||||
iounit->limit[3] = IOUNIT_BMAPM_END;
|
||||
iounit->rotor[1] = IOUNIT_BMAP2_START;
|
||||
iounit->rotor[2] = IOUNIT_BMAPM_START;
|
||||
|
||||
xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
|
||||
if (!xpt) {
|
||||
prom_printf("SUN4D: Cannot map External Page Table.");
|
||||
prom_halt();
|
||||
}
|
||||
|
||||
op->dev.archdata.iommu = iounit;
|
||||
iounit->page_table = xpt;
|
||||
spin_lock_init(&iounit->lock);
|
||||
|
||||
xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
|
||||
for (; xpt < xptend; xpt++)
|
||||
sbus_writel(0, xpt);
|
||||
}
|
||||
|
||||
static int __init iounit_init(void)
|
||||
{
|
||||
extern void sun4d_init_sbi_irq(void);
|
||||
struct device_node *dp;
|
||||
|
||||
for_each_node_by_name(dp, "sbi") {
|
||||
struct platform_device *op = of_find_device_by_node(dp);
|
||||
|
||||
iounit_iommu_init(op);
|
||||
of_propagate_archdata(op);
|
||||
}
|
||||
|
||||
sun4d_init_sbi_irq();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
subsys_initcall(iounit_init);
|
||||
|
||||
/* One has to hold iounit->lock to call this */
|
||||
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
|
||||
{
|
||||
int i, j, k, npages;
|
||||
unsigned long rotor, scan, limit;
|
||||
iopte_t iopte;
|
||||
|
||||
npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
|
||||
/* A tiny bit of a magic ingredient :) */
|
||||
switch (npages) {
|
||||
case 1: i = 0x0231; break;
|
||||
case 2: i = 0x0132; break;
|
||||
default: i = 0x0213; break;
|
||||
}
|
||||
|
||||
IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
|
||||
|
||||
next: j = (i & 15);
|
||||
rotor = iounit->rotor[j - 1];
|
||||
limit = iounit->limit[j];
|
||||
scan = rotor;
|
||||
nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
|
||||
if (scan + npages > limit) {
|
||||
if (limit != rotor) {
|
||||
limit = rotor;
|
||||
scan = iounit->limit[j - 1];
|
||||
goto nexti;
|
||||
}
|
||||
i >>= 4;
|
||||
if (!(i & 15))
|
||||
panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
|
||||
goto next;
|
||||
}
|
||||
for (k = 1, scan++; k < npages; k++)
|
||||
if (test_bit(scan++, iounit->bmap))
|
||||
goto nexti;
|
||||
iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
|
||||
scan -= npages;
|
||||
iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
|
||||
vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
|
||||
for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
|
||||
set_bit(scan, iounit->bmap);
|
||||
sbus_writel(iopte, &iounit->page_table[scan]);
|
||||
}
|
||||
IOD(("%08lx\n", vaddr));
|
||||
return vaddr;
|
||||
}
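
The constants chosen above (0x0231, 0x0132, 0x0213) pack the order in which the three bitmap regions are tried into nibbles: each i & 15 names the next region, and i >>= 4 moves on when that region has no room. A sketch of that dispatch, with try_region() as a hypothetical stand-in for the rotor/bitmap scan:

extern long try_region(int j, int npages);      /* placeholder: returns a slot or -1 if the region is full */

/* Walk the packed region order until one region has room; a zero nibble
 * means the order is exhausted (the real code panics at that point). */
static long pick_area_sketch(unsigned int order, int npages)
{
        while (order & 15) {
                long slot = try_region(order & 15, npages);

                if (slot >= 0)
                        return slot;
                order >>= 4;
        }
        return -1;
}
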
|
||||
|
||||
static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long ret, flags;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long flags;
|
||||
|
||||
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
|
||||
sg->dma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
|
||||
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
|
||||
for (len += vaddr; vaddr < len; vaddr++)
|
||||
clear_bit(vaddr, iounit->bmap);
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long flags;
|
||||
unsigned long vaddr, len;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
|
||||
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
|
||||
for (len += vaddr; vaddr < len; vaddr++)
|
||||
clear_bit(vaddr, iounit->bmap);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long page, end;
|
||||
pgprot_t dvma_prot;
|
||||
iopte_t __iomem *iopte;
|
||||
|
||||
*pba = addr;
|
||||
|
||||
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
|
||||
end = PAGE_ALIGN((addr + len));
|
||||
while(addr < end) {
|
||||
page = va;
|
||||
{
|
||||
pgd_t *pgdp;
|
||||
pmd_t *pmdp;
|
||||
pte_t *ptep;
|
||||
long i;
|
||||
|
||||
pgdp = pgd_offset(&init_mm, addr);
|
||||
pmdp = pmd_offset(pgdp, addr);
|
||||
ptep = pte_offset_map(pmdp, addr);
|
||||
|
||||
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
|
||||
|
||||
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
|
||||
|
||||
iopte = iounit->page_table + i;
|
||||
sbus_writel(MKIOPTE(__pa(page)), iopte);
|
||||
}
|
||||
addr += PAGE_SIZE;
|
||||
va += PAGE_SIZE;
|
||||
}
|
||||
flush_cache_all();
|
||||
flush_tlb_all();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
|
||||
{
|
||||
/* XXX Somebody please fill this in */
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct sparc32_dma_ops iounit_dma_ops = {
|
||||
.get_scsi_one = iounit_get_scsi_one,
|
||||
.get_scsi_sgl = iounit_get_scsi_sgl,
|
||||
.release_scsi_one = iounit_release_scsi_one,
|
||||
.release_scsi_sgl = iounit_release_scsi_sgl,
|
||||
#ifdef CONFIG_SBUS
|
||||
.map_dma_area = iounit_map_dma_area,
|
||||
.unmap_dma_area = iounit_unmap_dma_area,
|
||||
#endif
|
||||
};
|
||||
|
||||
void __init ld_mmu_iounit(void)
|
||||
{
|
||||
sparc32_dma_ops = &iounit_dma_ops;
|
||||
}
|
451
arch/sparc/mm/iommu.c
Normal file
@@ -0,0 +1,451 @@
/*
|
||||
* iommu.c: IOMMU specific routines for memory management.
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
* Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
|
||||
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
|
||||
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/mxcc.h>
|
||||
#include <asm/mbus.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/bitext.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/dma.h>
|
||||
|
||||
#include "mm_32.h"
|
||||
|
||||
/*
|
||||
* This can be sized dynamically, but we will do this
* only when we have guidance about actual I/O pressures.
|
||||
*/
|
||||
#define IOMMU_RNGE IOMMU_RNGE_256MB
|
||||
#define IOMMU_START 0xF0000000
|
||||
#define IOMMU_WINSIZE (256*1024*1024U)
|
||||
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
|
||||
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
|
||||
|
||||
static int viking_flush;
|
||||
/* viking.S */
|
||||
extern void viking_flush_page(unsigned long page);
|
||||
extern void viking_mxcc_flush_page(unsigned long page);
|
||||
|
||||
/*
|
||||
* Values precomputed according to CPU type.
|
||||
*/
|
||||
static unsigned int ioperm_noc; /* Consistent mapping iopte flags */
|
||||
static pgprot_t dvma_prot; /* Consistent mapping pte flags */
|
||||
|
||||
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
|
||||
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
|
||||
|
||||
static void __init sbus_iommu_init(struct platform_device *op)
|
||||
{
|
||||
struct iommu_struct *iommu;
|
||||
unsigned int impl, vers;
|
||||
unsigned long *bitmap;
|
||||
unsigned long control;
|
||||
unsigned long base;
|
||||
unsigned long tmp;
|
||||
|
||||
iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
|
||||
if (!iommu) {
|
||||
prom_printf("Unable to allocate iommu structure\n");
|
||||
prom_halt();
|
||||
}
|
||||
|
||||
iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
|
||||
"iommu_regs");
|
||||
if (!iommu->regs) {
|
||||
prom_printf("Cannot map IOMMU registers\n");
|
||||
prom_halt();
|
||||
}
|
||||
|
||||
control = sbus_readl(&iommu->regs->control);
|
||||
impl = (control & IOMMU_CTRL_IMPL) >> 28;
|
||||
vers = (control & IOMMU_CTRL_VERS) >> 24;
|
||||
control &= ~(IOMMU_CTRL_RNGE);
|
||||
control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
|
||||
sbus_writel(control, &iommu->regs->control);
|
||||
|
||||
iommu_invalidate(iommu->regs);
|
||||
iommu->start = IOMMU_START;
|
||||
iommu->end = 0xffffffff;
|
||||
|
||||
/* Allocate IOMMU page table */
|
||||
/* Stupid alignment constraints give me a headache.
We need a 256K, 512K, 1M or 2M area aligned to
its size, and the current gfp allocator will
fortunately give it to us. */
|
||||
tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
|
||||
if (!tmp) {
|
||||
prom_printf("Unable to allocate iommu table [0x%lx]\n",
|
||||
IOMMU_NPTES * sizeof(iopte_t));
|
||||
prom_halt();
|
||||
}
|
||||
iommu->page_table = (iopte_t *)tmp;
|
||||
|
||||
/* Initialize new table. */
|
||||
memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
|
||||
flush_cache_all();
|
||||
flush_tlb_all();
|
||||
|
||||
base = __pa((unsigned long)iommu->page_table) >> 4;
|
||||
sbus_writel(base, &iommu->regs->base);
|
||||
iommu_invalidate(iommu->regs);
|
||||
|
||||
bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
|
||||
if (!bitmap) {
|
||||
prom_printf("Unable to allocate iommu bitmap [%d]\n",
|
||||
(int)(IOMMU_NPTES>>3));
|
||||
prom_halt();
|
||||
}
|
||||
bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
|
||||
/* To be coherent on HyperSparc, the page color of DVMA
|
||||
* and physical addresses must match.
|
||||
*/
|
||||
if (srmmu_modtype == HyperSparc)
|
||||
iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
|
||||
else
|
||||
iommu->usemap.num_colors = 1;
|
||||
|
||||
printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
|
||||
impl, vers, iommu->page_table,
|
||||
(int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
|
||||
|
||||
op->dev.archdata.iommu = iommu;
|
||||
}
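
The colouring set up above follows the comment that on HyperSparc the page colour of the DVMA address and of the physical address must match; a sketch of the colour computation, with the number of colours derived from vac_cache_size exactly as in the code:

/* Number of page colours = cache size / page size; the allocator keeps the
 * DVMA colour equal to the physical-page colour, as the comment above
 * requires, by passing the pfn as the colour hint. */
static unsigned int page_colour_sketch(unsigned long paddr,
                                       unsigned long cache_size,
                                       unsigned long page_size)
{
        unsigned long colours = cache_size / page_size;

        return (paddr / page_size) % colours;
}
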
|
||||
|
||||
static int __init iommu_init(void)
|
||||
{
|
||||
struct device_node *dp;
|
||||
|
||||
for_each_node_by_name(dp, "iommu") {
|
||||
struct platform_device *op = of_find_device_by_node(dp);
|
||||
|
||||
sbus_iommu_init(op);
|
||||
of_propagate_archdata(op);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
subsys_initcall(iommu_init);
|
||||
|
||||
/* Flush the iotlb entries to ram. */
|
||||
/* This could be better if we didn't have to flush whole pages. */
|
||||
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
|
||||
{
|
||||
unsigned long start;
|
||||
unsigned long end;
|
||||
|
||||
start = (unsigned long)iopte;
|
||||
end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
|
||||
start &= PAGE_MASK;
|
||||
if (viking_mxcc_present) {
|
||||
while(start < end) {
|
||||
viking_mxcc_flush_page(start);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
} else if (viking_flush) {
|
||||
while(start < end) {
|
||||
viking_flush_page(start);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
} else {
|
||||
while(start < end) {
|
||||
__flush_page_to_ram(start);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
|
||||
{
|
||||
struct iommu_struct *iommu = dev->archdata.iommu;
|
||||
int ioptex;
|
||||
iopte_t *iopte, *iopte0;
|
||||
unsigned int busa, busa0;
|
||||
int i;
|
||||
|
||||
/* page color = pfn of page */
|
||||
ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
|
||||
if (ioptex < 0)
|
||||
panic("iommu out");
|
||||
busa0 = iommu->start + (ioptex << PAGE_SHIFT);
|
||||
iopte0 = &iommu->page_table[ioptex];
|
||||
|
||||
busa = busa0;
|
||||
iopte = iopte0;
|
||||
for (i = 0; i < npages; i++) {
|
||||
iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
|
||||
iommu_invalidate_page(iommu->regs, busa);
|
||||
busa += PAGE_SIZE;
|
||||
iopte++;
|
||||
page++;
|
||||
}
|
||||
|
||||
iommu_flush_iotlb(iopte0, npages);
|
||||
|
||||
return busa0;
|
||||
}
|
||||
|
||||
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
|
||||
{
|
||||
unsigned long off;
|
||||
int npages;
|
||||
struct page *page;
|
||||
u32 busa;
|
||||
|
||||
off = (unsigned long)vaddr & ~PAGE_MASK;
|
||||
npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
|
||||
busa = iommu_get_one(dev, page, npages);
|
||||
return busa + off;
|
||||
}
|
||||
|
||||
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
|
||||
{
|
||||
flush_page_for_dma(0);
|
||||
return iommu_get_scsi_one(dev, vaddr, len);
|
||||
}
|
||||
|
||||
static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
|
||||
{
|
||||
unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
|
||||
|
||||
while(page < ((unsigned long)(vaddr + len))) {
|
||||
flush_page_for_dma(page);
|
||||
page += PAGE_SIZE;
|
||||
}
|
||||
return iommu_get_scsi_one(dev, vaddr, len);
|
||||
}
|
||||
|
||||
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
|
||||
{
|
||||
int n;
|
||||
|
||||
flush_page_for_dma(0);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
|
||||
sg->dma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
|
||||
{
|
||||
unsigned long page, oldpage = 0;
|
||||
int n, i;
|
||||
|
||||
while(sz != 0) {
|
||||
--sz;
|
||||
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
|
||||
/*
|
||||
* We expect unmapped highmem pages not to be in the cache.
|
||||
* XXX Is this a good assumption?
|
||||
* XXX What if someone else unmaps it here and races us?
|
||||
*/
|
||||
if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
|
||||
for (i = 0; i < n; i++) {
|
||||
if (page != oldpage) { /* Already flushed? */
|
||||
flush_page_for_dma(page);
|
||||
oldpage = page;
|
||||
}
|
||||
page += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
|
||||
sg->dma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
static void iommu_release_one(struct device *dev, u32 busa, int npages)
|
||||
{
|
||||
struct iommu_struct *iommu = dev->archdata.iommu;
|
||||
int ioptex;
|
||||
int i;
|
||||
|
||||
BUG_ON(busa < iommu->start);
|
||||
ioptex = (busa - iommu->start) >> PAGE_SHIFT;
|
||||
for (i = 0; i < npages; i++) {
|
||||
iopte_val(iommu->page_table[ioptex + i]) = 0;
|
||||
iommu_invalidate_page(iommu->regs, busa);
|
||||
busa += PAGE_SIZE;
|
||||
}
|
||||
bit_map_clear(&iommu->usemap, ioptex, npages);
|
||||
}
|
||||
|
||||
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
|
||||
{
|
||||
unsigned long off;
|
||||
int npages;
|
||||
|
||||
off = vaddr & ~PAGE_MASK;
|
||||
npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
iommu_release_one(dev, vaddr & PAGE_MASK, npages);
|
||||
}
|
||||
|
||||
static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
|
||||
{
|
||||
int n;
|
||||
|
||||
while(sz != 0) {
|
||||
--sz;
|
||||
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
|
||||
sg->dma_address = 0x21212121;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
|
||||
unsigned long addr, int len)
|
||||
{
|
||||
struct iommu_struct *iommu = dev->archdata.iommu;
|
||||
unsigned long page, end;
|
||||
iopte_t *iopte = iommu->page_table;
|
||||
iopte_t *first;
|
||||
int ioptex;
|
||||
|
||||
BUG_ON((va & ~PAGE_MASK) != 0);
|
||||
BUG_ON((addr & ~PAGE_MASK) != 0);
|
||||
BUG_ON((len & ~PAGE_MASK) != 0);
|
||||
|
||||
/* page color = physical address */
|
||||
ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
|
||||
addr >> PAGE_SHIFT);
|
||||
if (ioptex < 0)
|
||||
panic("iommu out");
|
||||
|
||||
iopte += ioptex;
|
||||
first = iopte;
|
||||
end = addr + len;
|
||||
while(addr < end) {
|
||||
page = va;
|
||||
{
|
||||
pgd_t *pgdp;
|
||||
pmd_t *pmdp;
|
||||
pte_t *ptep;
|
||||
|
||||
if (viking_mxcc_present)
|
||||
viking_mxcc_flush_page(page);
|
||||
else if (viking_flush)
|
||||
viking_flush_page(page);
|
||||
else
|
||||
__flush_page_to_ram(page);
|
||||
|
||||
pgdp = pgd_offset(&init_mm, addr);
|
||||
pmdp = pmd_offset(pgdp, addr);
|
||||
ptep = pte_offset_map(pmdp, addr);
|
||||
|
||||
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
|
||||
}
|
||||
iopte_val(*iopte++) =
|
||||
MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
|
||||
addr += PAGE_SIZE;
|
||||
va += PAGE_SIZE;
|
||||
}
|
||||
/* P3: why do we need this?
|
||||
*
|
||||
* DAVEM: Because there are several aspects, none of which
|
||||
* are handled by a single interface. Some cpus are
|
||||
* completely not I/O DMA coherent, and some have
|
||||
* virtually indexed caches. The driver DMA flushing
|
||||
* methods handle the former case, but here during
|
||||
* IOMMU page table modifications, and usage of non-cacheable
|
||||
* cpu mappings of pages potentially in the cpu caches, we have
|
||||
* to handle the latter case as well.
|
||||
*/
|
||||
flush_cache_all();
|
||||
iommu_flush_iotlb(first, len >> PAGE_SHIFT);
|
||||
flush_tlb_all();
|
||||
iommu_invalidate(iommu->regs);
|
||||
|
||||
*pba = iommu->start + (ioptex << PAGE_SHIFT);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
|
||||
{
|
||||
struct iommu_struct *iommu = dev->archdata.iommu;
|
||||
iopte_t *iopte = iommu->page_table;
|
||||
unsigned long end;
|
||||
int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
|
||||
|
||||
BUG_ON((busa & ~PAGE_MASK) != 0);
|
||||
BUG_ON((len & ~PAGE_MASK) != 0);
|
||||
|
||||
iopte += ioptex;
|
||||
end = busa + len;
|
||||
while (busa < end) {
|
||||
iopte_val(*iopte++) = 0;
|
||||
busa += PAGE_SIZE;
|
||||
}
|
||||
flush_tlb_all();
|
||||
iommu_invalidate(iommu->regs);
|
||||
bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
|
||||
.get_scsi_one = iommu_get_scsi_one_gflush,
|
||||
.get_scsi_sgl = iommu_get_scsi_sgl_gflush,
|
||||
.release_scsi_one = iommu_release_scsi_one,
|
||||
.release_scsi_sgl = iommu_release_scsi_sgl,
|
||||
#ifdef CONFIG_SBUS
|
||||
.map_dma_area = iommu_map_dma_area,
|
||||
.unmap_dma_area = iommu_unmap_dma_area,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
|
||||
.get_scsi_one = iommu_get_scsi_one_pflush,
|
||||
.get_scsi_sgl = iommu_get_scsi_sgl_pflush,
|
||||
.release_scsi_one = iommu_release_scsi_one,
|
||||
.release_scsi_sgl = iommu_release_scsi_sgl,
|
||||
#ifdef CONFIG_SBUS
|
||||
.map_dma_area = iommu_map_dma_area,
|
||||
.unmap_dma_area = iommu_unmap_dma_area,
|
||||
#endif
|
||||
};
|
||||
|
||||
void __init ld_mmu_iommu(void)
|
||||
{
|
||||
if (flush_page_for_dma_global) {
|
||||
/* flush_page_for_dma flushes everything, no matter what page it is */
|
||||
sparc32_dma_ops = &iommu_dma_gflush_ops;
|
||||
} else {
|
||||
sparc32_dma_ops = &iommu_dma_pflush_ops;
|
||||
}
|
||||
|
||||
if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
|
||||
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
|
||||
ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
|
||||
} else {
|
||||
dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
|
||||
ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
|
||||
}
|
||||
}
|
351
arch/sparc/mm/leon_mm.c
Normal file
@@ -0,0 +1,351 @@
/*
|
||||
* linux/arch/sparc/mm/leon_mm.c
|
||||
*
|
||||
* Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
|
||||
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
|
||||
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
|
||||
*
|
||||
* do srmmu probe in software
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/leon.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#include "mm_32.h"
|
||||
|
||||
int leon_flush_during_switch = 1;
|
||||
static int srmmu_swprobe_trace;
|
||||
|
||||
static inline unsigned long leon_get_ctable_ptr(void)
|
||||
{
|
||||
unsigned int retval;
|
||||
|
||||
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
|
||||
"=r" (retval) :
|
||||
"r" (SRMMU_CTXTBL_PTR),
|
||||
"i" (ASI_LEON_MMUREGS));
|
||||
return (retval & SRMMU_CTX_PMASK) << 4;
|
||||
}
|
||||
|
||||
|
||||
unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
|
||||
{
|
||||
|
||||
unsigned int ctxtbl;
|
||||
unsigned int pgd, pmd, ped;
|
||||
unsigned int ptr;
|
||||
unsigned int lvl, pte, paddrbase;
|
||||
unsigned int ctx;
|
||||
unsigned int paddr_calc;
|
||||
|
||||
paddrbase = 0;
|
||||
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: trace on\n");
|
||||
|
||||
ctxtbl = leon_get_ctable_ptr();
|
||||
if (!(ctxtbl)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
|
||||
return 0;
|
||||
}
|
||||
if (!_pfn_valid(PFN(ctxtbl))) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO
|
||||
"swprobe: !_pfn_valid(%x)=>0\n",
|
||||
PFN(ctxtbl));
|
||||
return 0;
|
||||
}
|
||||
|
||||
ctx = srmmu_get_context();
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);
|
||||
|
||||
pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
|
||||
|
||||
if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
|
||||
lvl = 3;
|
||||
pte = pgd;
|
||||
paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
|
||||
goto ready;
|
||||
}
|
||||
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);
|
||||
|
||||
ptr = (pgd & SRMMU_PTD_PMASK) << 4;
|
||||
ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
|
||||
if (!_pfn_valid(PFN(ptr)))
|
||||
return 0;
|
||||
|
||||
pmd = LEON_BYPASS_LOAD_PA(ptr);
|
||||
if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
|
||||
lvl = 2;
|
||||
pte = pmd;
|
||||
paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
|
||||
goto ready;
|
||||
}
|
||||
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);
|
||||
|
||||
ptr = (pmd & SRMMU_PTD_PMASK) << 4;
|
||||
ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
|
||||
if (!_pfn_valid(PFN(ptr))) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
|
||||
PFN(ptr));
|
||||
return 0;
|
||||
}
|
||||
|
||||
ped = LEON_BYPASS_LOAD_PA(ptr);
|
||||
|
||||
if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: ped is entry level 1\n");
|
||||
lvl = 1;
|
||||
pte = ped;
|
||||
paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
|
||||
goto ready;
|
||||
}
|
||||
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: ped is invalid => 0\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);
|
||||
|
||||
ptr = (ped & SRMMU_PTD_PMASK) << 4;
|
||||
ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
|
||||
if (!_pfn_valid(PFN(ptr)))
|
||||
return 0;
|
||||
|
||||
ptr = LEON_BYPASS_LOAD_PA(ptr);
|
||||
if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
|
||||
lvl = 0;
|
||||
pte = ptr;
|
||||
paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
|
||||
goto ready;
|
||||
}
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
|
||||
return 0;
|
||||
|
||||
ready:
|
||||
switch (lvl) {
|
||||
case 0:
|
||||
paddr_calc =
|
||||
(vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
|
||||
break;
|
||||
case 1:
|
||||
paddr_calc =
|
||||
(vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
|
||||
break;
|
||||
case 2:
|
||||
paddr_calc =
|
||||
(vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
|
||||
break;
|
||||
default:
|
||||
case 3:
|
||||
paddr_calc = vaddr;
|
||||
break;
|
||||
}
|
||||
if (srmmu_swprobe_trace)
|
||||
printk(KERN_INFO "swprobe: paddr %x\n", paddr_calc);
|
||||
if (paddr)
|
||||
*paddr = paddr_calc;
|
||||
return pte;
|
||||
}
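
The switch at the end of leon_swprobe() composes the returned physical address from the PTE and the virtual address; a sketch of that composition, where shift is LEON_PTE_SH, LEON_PMD_SH or LEON_PGD_SH depending on the level at which the walk stopped:

/* Low bits (the offset within the page/region) come from the virtual
 * address; high bits come from the PTE's physical page field, shifted
 * left by 4, exactly as in the expressions above. */
static unsigned long compose_paddr_sketch(unsigned long vaddr,
                                          unsigned long pte,
                                          unsigned int shift)
{
        unsigned long offset_mask = (1UL << shift) - 1;

        return (vaddr & offset_mask) | ((pte & ~0xffUL) << 4);
}
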
|
||||
|
||||
void leon_flush_icache_all(void)
|
||||
{
|
||||
__asm__ __volatile__(" flush "); /*iflush*/
|
||||
}
|
||||
|
||||
void leon_flush_dcache_all(void)
|
||||
{
|
||||
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
|
||||
"i"(ASI_LEON_DFLUSH) : "memory");
|
||||
}
|
||||
|
||||
void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
|
||||
{
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
leon_flush_icache_all();
|
||||
leon_flush_dcache_all();
|
||||
}
|
||||
|
||||
void leon_flush_cache_all(void)
|
||||
{
|
||||
__asm__ __volatile__(" flush "); /*iflush*/
|
||||
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
|
||||
"i"(ASI_LEON_DFLUSH) : "memory");
|
||||
}
|
||||
|
||||
void leon_flush_tlb_all(void)
|
||||
{
|
||||
leon_flush_cache_all();
|
||||
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
|
||||
"i"(ASI_LEON_MMUFLUSH) : "memory");
|
||||
}
|
||||
|
||||
/* get all cache regs */
|
||||
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
|
||||
{
|
||||
unsigned long ccr, iccr, dccr;
|
||||
|
||||
if (!regs)
|
||||
return;
|
||||
/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
|
||||
__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
|
||||
"mov 0x08, %%g1\n\t"
|
||||
"lda [%%g1] %3, %1\n\t"
|
||||
"mov 0x0c, %%g1\n\t"
|
||||
"lda [%%g1] %3, %2\n\t"
|
||||
: "=r"(ccr), "=r"(iccr), "=r"(dccr)
|
||||
/* output */
|
||||
: "i"(ASI_LEON_CACHEREGS) /* input */
|
||||
: "g1" /* clobber list */
|
||||
);
|
||||
regs->ccr = ccr;
|
||||
regs->iccr = iccr;
|
||||
regs->dccr = dccr;
|
||||
}
|
||||
|
||||
/* Due to the virtual cache we need to check the cache configuration to
* see whether it is possible to skip flushing in some cases.
*
* LEON2 and LEON3 differ in the way they report cache information.
*/
|
||||
int __init leon_flush_needed(void)
|
||||
{
|
||||
int flush_needed = -1;
|
||||
unsigned int ssize, sets;
|
||||
char *setStr[4] =
|
||||
{ "direct mapped", "2-way associative", "3-way associative",
|
||||
"4-way associative"
|
||||
};
|
||||
/* leon 3 */
|
||||
struct leon3_cacheregs cregs;
|
||||
leon3_getCacheRegs(&cregs);
|
||||
sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
|
||||
/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
|
||||
ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
|
||||
|
||||
printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
|
||||
sets > 3 ? "unknown" : setStr[sets], ssize);
|
||||
if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
|
||||
/* Set Size <= Page size ==>
|
||||
flush on every context switch not needed. */
|
||||
flush_needed = 0;
|
||||
printk(KERN_INFO "CACHE: not flushing on every context switch\n");
|
||||
}
|
||||
return flush_needed;
|
||||
}
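
A short worked example of the decode in leon_flush_needed(): if the set-size field of the DCCR is 2, the set size is 1 << 2 = 4 kB; with a direct-mapped cache (sets == 0) and 4 kB pages the set size does not exceed the page size, so the per-context-switch flush can be skipped. The same decode as a sketch, with guessed field widths (the real code uses LEON3_XCCR_SETS_MASK and LEON3_XCCR_SSIZE_MASK, whose exact widths are assumptions here):

/* Assumed layout: a 2-bit sets field at bit 24 and a 3-bit log2(set size
 * in kB) field at bit 20.  Returns nonzero if a flush on every context
 * switch is still needed. */
static int flush_needed_sketch(unsigned long dccr, unsigned long page_kb)
{
        unsigned int sets  = (dccr >> 24) & 0x3;
        unsigned int ssize = 1U << ((dccr >> 20) & 0x7);        /* in kB */

        return !(ssize <= page_kb && sets == 0);
}
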
|
||||
|
||||
void leon_switch_mm(void)
|
||||
{
|
||||
flush_tlb_mm((void *)0);
|
||||
if (leon_flush_during_switch)
|
||||
leon_flush_cache_all();
|
||||
}
|
||||
|
||||
static void leon_flush_cache_mm(struct mm_struct *mm)
|
||||
{
|
||||
leon_flush_cache_all();
|
||||
}
|
||||
|
||||
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
|
||||
{
|
||||
leon_flush_pcache_all(vma, page);
|
||||
}
|
||||
|
||||
static void leon_flush_cache_range(struct vm_area_struct *vma,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
leon_flush_cache_all();
|
||||
}
|
||||
|
||||
static void leon_flush_tlb_mm(struct mm_struct *mm)
|
||||
{
|
||||
leon_flush_tlb_all();
|
||||
}
|
||||
|
||||
static void leon_flush_tlb_page(struct vm_area_struct *vma,
|
||||
unsigned long page)
|
||||
{
|
||||
leon_flush_tlb_all();
|
||||
}
|
||||
|
||||
static void leon_flush_tlb_range(struct vm_area_struct *vma,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
leon_flush_tlb_all();
|
||||
}
|
||||
|
||||
static void leon_flush_page_to_ram(unsigned long page)
|
||||
{
|
||||
leon_flush_cache_all();
|
||||
}
|
||||
|
||||
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
|
||||
{
|
||||
leon_flush_cache_all();
|
||||
}
|
||||
|
||||
static void leon_flush_page_for_dma(unsigned long page)
|
||||
{
|
||||
leon_flush_dcache_all();
|
||||
}
|
||||
|
||||
void __init poke_leonsparc(void)
|
||||
{
|
||||
}
|
||||
|
||||
static const struct sparc32_cachetlb_ops leon_ops = {
|
||||
.cache_all = leon_flush_cache_all,
|
||||
.cache_mm = leon_flush_cache_mm,
|
||||
.cache_page = leon_flush_cache_page,
|
||||
.cache_range = leon_flush_cache_range,
|
||||
.tlb_all = leon_flush_tlb_all,
|
||||
.tlb_mm = leon_flush_tlb_mm,
|
||||
.tlb_page = leon_flush_tlb_page,
|
||||
.tlb_range = leon_flush_tlb_range,
|
||||
.page_to_ram = leon_flush_page_to_ram,
|
||||
.sig_insns = leon_flush_sig_insns,
|
||||
.page_for_dma = leon_flush_page_for_dma,
|
||||
};
|
||||
|
||||
void __init init_leon(void)
|
||||
{
|
||||
srmmu_name = "LEON";
|
||||
sparc32_cachetlb_ops = &leon_ops;
|
||||
poke_srmmu = poke_leonsparc;
|
||||
|
||||
leon_flush_during_switch = leon_flush_needed();
|
||||
}
|
24
arch/sparc/mm/mm_32.h
Normal file
@@ -0,0 +1,24 @@
/* fault_32.c - visible as they are called from assembler */
|
||||
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
|
||||
unsigned long address);
|
||||
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
|
||||
unsigned long address);
|
||||
|
||||
void window_overflow_fault(void);
|
||||
void window_underflow_fault(unsigned long sp);
|
||||
void window_ret_fault(struct pt_regs *regs);
|
||||
|
||||
/* srmmu.c */
|
||||
extern char *srmmu_name;
|
||||
extern int viking_mxcc_present;
|
||||
extern int flush_page_for_dma_global;
|
||||
|
||||
extern void (*poke_srmmu)(void);
|
||||
|
||||
void __init srmmu_paging_init(void);
|
||||
|
||||
/* iommu.c */
|
||||
void ld_mmu_iommu(void);
|
||||
|
||||
/* io-unit.c */
|
||||
void ld_mmu_iounit(void);
|
1818
arch/sparc/mm/srmmu.c
Normal file
File diff suppressed because it is too large
82
arch/sparc/mm/srmmu_access.S
Normal file
@@ -0,0 +1,82 @@
/* Assembler variants of srmmu access functions.
 * Implemented in assembler to allow run-time patching.
 * LEON uses a different ASI for MMUREGS than SUN.
 *
 * The leon_1insn_patch infrastructure is used
 * for the run-time patching.
 */

#include <linux/linkage.h>

#include <asm/asmmacro.h>
#include <asm/pgtsrmmu.h>
#include <asm/asi.h>

/* unsigned int srmmu_get_mmureg(void) */
ENTRY(srmmu_get_mmureg)
	LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_mmureg)

/* void srmmu_set_mmureg(unsigned long regval) */
ENTRY(srmmu_set_mmureg)
	LEON_PI(sta %o0, [%g0] ASI_LEON_MMUREGS)
	SUN_PI_(sta %o0, [%g0] ASI_M_MMUREGS)
	retl
	 nop
ENDPROC(srmmu_set_mmureg)

/* void srmmu_set_ctable_ptr(unsigned long paddr) */
ENTRY(srmmu_set_ctable_ptr)
	/* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
	srl	%o0, 4, %g1
	and	%g1, SRMMU_CTX_PMASK, %g1

	mov	SRMMU_CTXTBL_PTR, %g2
	LEON_PI(sta %g1, [%g2] ASI_LEON_MMUREGS)
	SUN_PI_(sta %g1, [%g2] ASI_M_MMUREGS)
	retl
	 nop
ENDPROC(srmmu_set_ctable_ptr)


/* void srmmu_set_context(int context) */
ENTRY(srmmu_set_context)
	mov	SRMMU_CTX_REG, %g1
	LEON_PI(sta %o0, [%g1] ASI_LEON_MMUREGS)
	SUN_PI_(sta %o0, [%g1] ASI_M_MMUREGS)
	retl
	 nop
ENDPROC(srmmu_set_context)


/* int srmmu_get_context(void) */
ENTRY(srmmu_get_context)
	mov	SRMMU_CTX_REG, %o0
	LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_context)


/* unsigned int srmmu_get_fstatus(void) */
ENTRY(srmmu_get_fstatus)
	mov	SRMMU_FAULT_STATUS, %o0
	LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_fstatus)


/* unsigned int srmmu_get_faddr(void) */
ENTRY(srmmu_get_faddr)
	mov	SRMMU_FAULT_ADDR, %o0
	LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_faddr)
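The header comment above notes that these accessors are written in assembler so the MMUREGS ASI can be selected at boot via the leon_1insn_patch mechanism (LEON versus SUN hardware). The user-space C sketch below only illustrates the general idea of table-driven single-instruction patching; the struct layout, names, and instruction words are hypothetical stand-ins, not the kernel's actual leon_1insn_patch definitions.

/* Toy illustration of boot-time single-instruction patching.  All names and
 * values here are hypothetical; a real kernel patches live instruction text
 * and flushes the I-cache afterwards. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct one_insn_patch {          /* hypothetical: one site, one replacement word */
	uint32_t *addr;          /* instruction word to overwrite */
	uint32_t  insn;          /* replacement instruction word  */
};

/* Pretend "kernel text": two 32-bit instruction slots. */
static uint32_t text[2] = { 0xAAAAAAAAu, 0xBBBBBBBBu };

static struct one_insn_patch patches[] = {
	{ &text[0], 0x11111111u },   /* e.g. swap one ASI access for another */
};

static void apply_patches(struct one_insn_patch *p, size_t n)
{
	for (size_t i = 0; i < n; i++)
		*p[i].addr = p[i].insn;
}

int main(void)
{
	apply_patches(patches, sizeof(patches) / sizeof(patches[0]));
	printf("%08x %08x\n", (unsigned)text[0], (unsigned)text[1]);
	return 0;
}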
255
arch/sparc/mm/swift.S
Normal file
@@ -0,0 +1,255 @@
/*
|
||||
* swift.S: MicroSparc-II mmu/cache operations.
|
||||
*
|
||||
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
|
||||
*/
|
||||
|
||||
#include <asm/psr.h>
|
||||
#include <asm/asi.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtsrmmu.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
.text
|
||||
.align 4
|
||||
|
||||
#if 1 /* XXX screw this, I can't get the VAC flushes working
|
||||
* XXX reliably... -DaveM
|
||||
*/
|
||||
.globl swift_flush_cache_all, swift_flush_cache_mm
|
||||
.globl swift_flush_cache_range, swift_flush_cache_page
|
||||
.globl swift_flush_page_for_dma
|
||||
.globl swift_flush_page_to_ram
|
||||
|
||||
swift_flush_cache_all:
|
||||
swift_flush_cache_mm:
|
||||
swift_flush_cache_range:
|
||||
swift_flush_cache_page:
|
||||
swift_flush_page_for_dma:
|
||||
swift_flush_page_to_ram:
|
||||
sethi %hi(0x2000), %o0
|
||||
1: subcc %o0, 0x10, %o0
|
||||
add %o0, %o0, %o1
|
||||
sta %g0, [%o0] ASI_M_DATAC_TAG
|
||||
bne 1b
|
||||
sta %g0, [%o1] ASI_M_TXTC_TAG
|
||||
retl
|
||||
nop
|
||||
#else
|
||||
|
||||
.globl swift_flush_cache_all
|
||||
swift_flush_cache_all:
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
|
||||
/* Just clear out all the tags. */
|
||||
sethi %hi(16 * 1024), %o0
|
||||
1: subcc %o0, 16, %o0
|
||||
sta %g0, [%o0] ASI_M_TXTC_TAG
|
||||
bne 1b
|
||||
sta %g0, [%o0] ASI_M_DATAC_TAG
|
||||
retl
|
||||
nop
|
||||
|
||||
.globl swift_flush_cache_mm
|
||||
swift_flush_cache_mm:
|
||||
ld [%o0 + AOFF_mm_context], %g2
|
||||
cmp %g2, -1
|
||||
be swift_flush_cache_mm_out
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
rd %psr, %g1
|
||||
andn %g1, PSR_ET, %g3
|
||||
wr %g3, 0x0, %psr
|
||||
nop
|
||||
nop
|
||||
mov SRMMU_CTX_REG, %g7
|
||||
lda [%g7] ASI_M_MMUREGS, %g5
|
||||
sta %g2, [%g7] ASI_M_MMUREGS
|
||||
|
||||
#if 1
|
||||
sethi %hi(0x2000), %o0
|
||||
1: subcc %o0, 0x10, %o0
|
||||
sta %g0, [%o0] ASI_M_FLUSH_CTX
|
||||
bne 1b
|
||||
nop
|
||||
#else
|
||||
clr %o0
|
||||
or %g0, 2048, %g7
|
||||
or %g0, 2048, %o1
|
||||
add %o1, 2048, %o2
|
||||
add %o2, 2048, %o3
|
||||
mov 16, %o4
|
||||
add %o4, 2048, %o5
|
||||
add %o5, 2048, %g2
|
||||
add %g2, 2048, %g3
|
||||
1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX
|
||||
sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX
|
||||
subcc %g7, 32, %g7
|
||||
bne 1b
|
||||
add %o0, 32, %o0
|
||||
#endif
|
||||
|
||||
mov SRMMU_CTX_REG, %g7
|
||||
sta %g5, [%g7] ASI_M_MMUREGS
|
||||
wr %g1, 0x0, %psr
|
||||
nop
|
||||
nop
|
||||
swift_flush_cache_mm_out:
|
||||
retl
|
||||
nop
|
||||
|
||||
.globl swift_flush_cache_range
|
||||
swift_flush_cache_range:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
sub %o2, %o1, %o2
|
||||
sethi %hi(4096), %o3
|
||||
cmp %o2, %o3
|
||||
bgu swift_flush_cache_mm
|
||||
nop
|
||||
b 70f
|
||||
nop
|
||||
|
||||
.globl swift_flush_cache_page
|
||||
swift_flush_cache_page:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
70:
|
||||
ld [%o0 + AOFF_mm_context], %g2
|
||||
cmp %g2, -1
|
||||
be swift_flush_cache_page_out
|
||||
WINDOW_FLUSH(%g4, %g5)
|
||||
rd %psr, %g1
|
||||
andn %g1, PSR_ET, %g3
|
||||
wr %g3, 0x0, %psr
|
||||
nop
|
||||
nop
|
||||
mov SRMMU_CTX_REG, %g7
|
||||
lda [%g7] ASI_M_MMUREGS, %g5
|
||||
sta %g2, [%g7] ASI_M_MMUREGS
|
||||
|
||||
andn %o1, (PAGE_SIZE - 1), %o1
|
||||
#if 1
|
||||
sethi %hi(0x1000), %o0
|
||||
1: subcc %o0, 0x10, %o0
|
||||
sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
|
||||
bne 1b
|
||||
nop
|
||||
#else
|
||||
or %g0, 512, %g7
|
||||
or %g0, 512, %o0
|
||||
add %o0, 512, %o2
|
||||
add %o2, 512, %o3
|
||||
add %o3, 512, %o4
|
||||
add %o4, 512, %o5
|
||||
add %o5, 512, %g3
|
||||
add %g3, 512, %g4
|
||||
1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
|
||||
subcc %g7, 16, %g7
|
||||
bne 1b
|
||||
add %o1, 16, %o1
|
||||
#endif
|
||||
|
||||
mov SRMMU_CTX_REG, %g7
|
||||
sta %g5, [%g7] ASI_M_MMUREGS
|
||||
wr %g1, 0x0, %psr
|
||||
nop
|
||||
nop
|
||||
swift_flush_cache_page_out:
|
||||
retl
|
||||
nop
|
||||
|
||||
/* Swift is write-thru, however it is not
|
||||
* I/O nor TLB-walk coherent. Also it has
|
||||
* caches which are virtually indexed and tagged.
|
||||
*/
|
||||
.globl swift_flush_page_for_dma
|
||||
.globl swift_flush_page_to_ram
|
||||
swift_flush_page_for_dma:
|
||||
swift_flush_page_to_ram:
|
||||
andn %o0, (PAGE_SIZE - 1), %o1
|
||||
#if 1
|
||||
sethi %hi(0x1000), %o0
|
||||
1: subcc %o0, 0x10, %o0
|
||||
sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
|
||||
bne 1b
|
||||
nop
|
||||
#else
|
||||
or %g0, 512, %g7
|
||||
or %g0, 512, %o0
|
||||
add %o0, 512, %o2
|
||||
add %o2, 512, %o3
|
||||
add %o3, 512, %o4
|
||||
add %o4, 512, %o5
|
||||
add %o5, 512, %g3
|
||||
add %g3, 512, %g4
|
||||
1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
|
||||
sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
|
||||
subcc %g7, 16, %g7
|
||||
bne 1b
|
||||
add %o1, 16, %o1
|
||||
#endif
|
||||
retl
|
||||
nop
|
||||
#endif
|
||||
|
||||
.globl swift_flush_sig_insns
|
||||
swift_flush_sig_insns:
|
||||
flush %o1
|
||||
retl
|
||||
flush %o1 + 4
|
||||
|
||||
.globl swift_flush_tlb_mm
|
||||
.globl swift_flush_tlb_range
|
||||
.globl swift_flush_tlb_all
|
||||
swift_flush_tlb_range:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
swift_flush_tlb_mm:
|
||||
ld [%o0 + AOFF_mm_context], %g2
|
||||
cmp %g2, -1
|
||||
be swift_flush_tlb_all_out
|
||||
swift_flush_tlb_all:
|
||||
mov 0x400, %o1
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PROBE
|
||||
swift_flush_tlb_all_out:
|
||||
retl
|
||||
nop
|
||||
|
||||
.globl swift_flush_tlb_page
|
||||
swift_flush_tlb_page:
|
||||
ld [%o0 + VMA_VM_MM], %o0
|
||||
mov SRMMU_CTX_REG, %g1
|
||||
ld [%o0 + AOFF_mm_context], %o3
|
||||
andn %o1, (PAGE_SIZE - 1), %o1
|
||||
cmp %o3, -1
|
||||
be swift_flush_tlb_page_out
|
||||
nop
|
||||
#if 1
|
||||
mov 0x400, %o1
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PROBE
|
||||
#else
|
||||
lda [%g1] ASI_M_MMUREGS, %g5
|
||||
sta %o3, [%g1] ASI_M_MMUREGS
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */
|
||||
sta %g0, [%o1] ASI_M_FLUSH_PROBE
|
||||
sta %g5, [%g1] ASI_M_MMUREGS
|
||||
#endif
|
||||
swift_flush_tlb_page_out:
|
||||
retl
|
||||
nop
243
arch/sparc/mm/tlb.c
Normal file
@@ -0,0 +1,243 @@
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
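The tlb.c code above defers user TLB invalidations: while a lazy MMU window is open (arch_enter_lazy_mmu_mode), virtual addresses are collected into a per-CPU tlb_batch and only flushed once the batch fills (TLB_BATCH_NR entries) or the window is closed. The minimal user-space C sketch below shows that batching pattern in isolation; the names, fixed batch size, and the flush_one() stand-in are illustrative assumptions, not kernel interfaces.

/* Minimal sketch of deferred, batched invalidation: collect addresses while a
 * batching window is open, drain when the batch fills or the window closes. */
#include <stdio.h>
#include <stddef.h>

#define BATCH_NR 8                      /* stand-in for a small batch limit */

struct batch {
	int active;                     /* is a batching window open? */
	size_t nr;
	unsigned long vaddrs[BATCH_NR];
};

static void flush_one(unsigned long va)
{
	printf("flush %#lx\n", va);     /* a kernel would demap the TLB entry here */
}

static void flush_pending(struct batch *b)
{
	for (size_t i = 0; i < b->nr; i++)
		flush_one(b->vaddrs[i]);
	b->nr = 0;
}

static void batch_add(struct batch *b, unsigned long va)
{
	if (!b->active) {               /* no window open: flush immediately */
		flush_one(va);
		return;
	}
	b->vaddrs[b->nr++] = va;
	if (b->nr == BATCH_NR)          /* batch full: drain it now */
		flush_pending(b);
}

int main(void)
{
	struct batch b = { .active = 1 };

	for (unsigned long va = 0x1000; va < 0xb000; va += 0x1000)
		batch_add(&b, va);
	flush_pending(&b);              /* leaving the window drains the rest */
	return 0;
}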
538
arch/sparc/mm/tsb.c
Normal file
@@ -0,0 +1,538 @@
/* arch/sparc64/mm/tsb.c
|
||||
*
|
||||
* Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/tsb.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/oplib.h>
|
||||
|
||||
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
|
||||
|
||||
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
|
||||
{
|
||||
vaddr >>= hash_shift;
|
||||
return vaddr & (nentries - 1);
|
||||
}
|
||||
|
||||
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
|
||||
{
|
||||
return (tag == (vaddr >> 22));
|
||||
}
|
||||
|
||||
/* TSB flushes need only occur on the processor initiating the address
|
||||
* space modification, not on each cpu the address space has run on.
|
||||
* Only the TLB flush needs that treatment.
|
||||
*/
|
||||
|
||||
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long v;
|
||||
|
||||
for (v = start; v < end; v += PAGE_SIZE) {
|
||||
unsigned long hash = tsb_hash(v, PAGE_SHIFT,
|
||||
KERNEL_TSB_NENTRIES);
|
||||
struct tsb *ent = &swapper_tsb[hash];
|
||||
|
||||
if (tag_compare(ent->tag, v))
|
||||
ent->tag = (1UL << TSB_TAG_INVALID_BIT);
|
||||
}
|
||||
}
|
||||
|
||||
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
|
||||
unsigned long hash_shift,
|
||||
unsigned long nentries)
|
||||
{
|
||||
unsigned long tag, ent, hash;
|
||||
|
||||
v &= ~0x1UL;
|
||||
hash = tsb_hash(v, hash_shift, nentries);
|
||||
ent = tsb + (hash * sizeof(struct tsb));
|
||||
tag = (v >> 22UL);
|
||||
|
||||
tsb_flush(ent, tag);
|
||||
}
|
||||
|
||||
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
|
||||
unsigned long tsb, unsigned long nentries)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < tb->tlb_nr; i++)
|
||||
__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
|
||||
}
|
||||
|
||||
void flush_tsb_user(struct tlb_batch *tb)
|
||||
{
|
||||
struct mm_struct *mm = tb->mm;
|
||||
unsigned long nentries, base, flags;
|
||||
|
||||
spin_lock_irqsave(&mm->context.lock, flags);
|
||||
|
||||
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
|
||||
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
|
||||
base = __pa(base);
|
||||
__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
|
||||
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
|
||||
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
|
||||
base = __pa(base);
|
||||
__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
|
||||
}
|
||||
#endif
|
||||
spin_unlock_irqrestore(&mm->context.lock, flags);
|
||||
}
|
||||
|
||||
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
|
||||
{
|
||||
unsigned long nentries, base, flags;
|
||||
|
||||
spin_lock_irqsave(&mm->context.lock, flags);
|
||||
|
||||
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
|
||||
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
|
||||
base = __pa(base);
|
||||
__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
|
||||
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
|
||||
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
|
||||
base = __pa(base);
|
||||
__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
|
||||
}
|
||||
#endif
|
||||
spin_unlock_irqrestore(&mm->context.lock, flags);
|
||||
}
|
||||
|
||||
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
|
||||
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB
|
||||
#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB
|
||||
#endif
|
||||
|
||||
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
|
||||
{
|
||||
unsigned long tsb_reg, base, tsb_paddr;
|
||||
unsigned long page_sz, tte;
|
||||
|
||||
mm->context.tsb_block[tsb_idx].tsb_nentries =
|
||||
tsb_bytes / sizeof(struct tsb);
|
||||
|
||||
switch (tsb_idx) {
|
||||
case MM_TSB_BASE:
|
||||
base = TSBMAP_8K_BASE;
|
||||
break;
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
case MM_TSB_HUGE:
|
||||
base = TSBMAP_4M_BASE;
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
tte = pgprot_val(PAGE_KERNEL_LOCKED);
|
||||
tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
|
||||
BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
|
||||
|
||||
/* Use the smallest page size that can map the whole TSB
|
||||
* in one TLB entry.
|
||||
*/
|
||||
switch (tsb_bytes) {
|
||||
case 8192 << 0:
|
||||
tsb_reg = 0x0UL;
|
||||
#ifdef DCACHE_ALIASING_POSSIBLE
|
||||
base += (tsb_paddr & 8192);
|
||||
#endif
|
||||
page_sz = 8192;
|
||||
break;
|
||||
|
||||
case 8192 << 1:
|
||||
tsb_reg = 0x1UL;
|
||||
page_sz = 64 * 1024;
|
||||
break;
|
||||
|
||||
case 8192 << 2:
|
||||
tsb_reg = 0x2UL;
|
||||
page_sz = 64 * 1024;
|
||||
break;
|
||||
|
||||
case 8192 << 3:
|
||||
tsb_reg = 0x3UL;
|
||||
page_sz = 64 * 1024;
|
||||
break;
|
||||
|
||||
case 8192 << 4:
|
||||
tsb_reg = 0x4UL;
|
||||
page_sz = 512 * 1024;
|
||||
break;
|
||||
|
||||
case 8192 << 5:
|
||||
tsb_reg = 0x5UL;
|
||||
page_sz = 512 * 1024;
|
||||
break;
|
||||
|
||||
case 8192 << 6:
|
||||
tsb_reg = 0x6UL;
|
||||
page_sz = 512 * 1024;
|
||||
break;
|
||||
|
||||
case 8192 << 7:
|
||||
tsb_reg = 0x7UL;
|
||||
page_sz = 4 * 1024 * 1024;
|
||||
break;
|
||||
|
||||
default:
|
||||
printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
|
||||
current->comm, current->pid, tsb_bytes);
|
||||
do_exit(SIGSEGV);
|
||||
}
|
||||
tte |= pte_sz_bits(page_sz);
|
||||
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
|
||||
/* Physical mapping, no locked TLB entry for TSB. */
|
||||
tsb_reg |= tsb_paddr;
|
||||
|
||||
mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
|
||||
mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
|
||||
mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
|
||||
} else {
|
||||
tsb_reg |= base;
|
||||
tsb_reg |= (tsb_paddr & (page_sz - 1UL));
|
||||
tte |= (tsb_paddr & ~(page_sz - 1UL));
|
||||
|
||||
mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
|
||||
mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
|
||||
mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
|
||||
}
|
||||
|
||||
/* Setup the Hypervisor TSB descriptor. */
|
||||
if (tlb_type == hypervisor) {
|
||||
struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
|
||||
|
||||
switch (tsb_idx) {
|
||||
case MM_TSB_BASE:
|
||||
hp->pgsz_idx = HV_PGSZ_IDX_BASE;
|
||||
break;
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
case MM_TSB_HUGE:
|
||||
hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
hp->assoc = 1;
|
||||
hp->num_ttes = tsb_bytes / 16;
|
||||
hp->ctx_idx = 0;
|
||||
switch (tsb_idx) {
|
||||
case MM_TSB_BASE:
|
||||
hp->pgsz_mask = HV_PGSZ_MASK_BASE;
|
||||
break;
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
case MM_TSB_HUGE:
|
||||
hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
hp->tsb_base = tsb_paddr;
|
||||
hp->resv = 0;
|
||||
}
|
||||
}
|
||||
|
||||
struct kmem_cache *pgtable_cache __read_mostly;
|
||||
|
||||
static struct kmem_cache *tsb_caches[8] __read_mostly;
|
||||
|
||||
static const char *tsb_cache_names[8] = {
|
||||
"tsb_8KB",
|
||||
"tsb_16KB",
|
||||
"tsb_32KB",
|
||||
"tsb_64KB",
|
||||
"tsb_128KB",
|
||||
"tsb_256KB",
|
||||
"tsb_512KB",
|
||||
"tsb_1MB",
|
||||
};
|
||||
|
||||
void __init pgtable_cache_init(void)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
pgtable_cache = kmem_cache_create("pgtable_cache",
|
||||
PAGE_SIZE, PAGE_SIZE,
|
||||
0,
|
||||
_clear_page);
|
||||
if (!pgtable_cache) {
|
||||
prom_printf("pgtable_cache_init(): Could not create!\n");
|
||||
prom_halt();
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
|
||||
unsigned long size = 8192 << i;
|
||||
const char *name = tsb_cache_names[i];
|
||||
|
||||
tsb_caches[i] = kmem_cache_create(name,
|
||||
size, size,
|
||||
0, NULL);
|
||||
if (!tsb_caches[i]) {
|
||||
prom_printf("Could not create %s cache\n", name);
|
||||
prom_halt();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int sysctl_tsb_ratio = -2;
|
||||
|
||||
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
|
||||
{
|
||||
unsigned long num_ents = (new_size / sizeof(struct tsb));
|
||||
|
||||
if (sysctl_tsb_ratio < 0)
|
||||
return num_ents - (num_ents >> -sysctl_tsb_ratio);
|
||||
else
|
||||
return num_ents + (num_ents >> sysctl_tsb_ratio);
|
||||
}
|
||||
|
||||
/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
|
||||
* do_sparc64_fault() invokes this routine to try and grow it.
|
||||
*
|
||||
* When we reach the maximum TSB size supported, we stick ~0UL into
|
||||
* tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
|
||||
* will not trigger any longer.
|
||||
*
|
||||
* The TSB can be anywhere from 8K to 1MB in size, in increasing powers
|
||||
* of two. The TSB must be aligned to it's size, so f.e. a 512K TSB
|
||||
* must be 512K aligned. It also must be physically contiguous, so we
|
||||
* cannot use vmalloc().
|
||||
*
|
||||
* The idea here is to grow the TSB when the RSS of the process approaches
|
||||
* the number of entries that the current TSB can hold at once. Currently,
|
||||
* we trigger when the RSS hits 3/4 of the TSB capacity.
|
||||
*/
|
||||
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
|
||||
{
|
||||
unsigned long max_tsb_size = 1 * 1024 * 1024;
|
||||
unsigned long new_size, old_size, flags;
|
||||
struct tsb *old_tsb, *new_tsb;
|
||||
unsigned long new_cache_index, old_cache_index;
|
||||
unsigned long new_rss_limit;
|
||||
gfp_t gfp_flags;
|
||||
|
||||
if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
|
||||
max_tsb_size = (PAGE_SIZE << MAX_ORDER);
|
||||
|
||||
new_cache_index = 0;
|
||||
for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
|
||||
new_rss_limit = tsb_size_to_rss_limit(new_size);
|
||||
if (new_rss_limit > rss)
|
||||
break;
|
||||
new_cache_index++;
|
||||
}
|
||||
|
||||
if (new_size == max_tsb_size)
|
||||
new_rss_limit = ~0UL;
|
||||
|
||||
retry_tsb_alloc:
|
||||
gfp_flags = GFP_KERNEL;
|
||||
if (new_size > (PAGE_SIZE * 2))
|
||||
gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
|
||||
|
||||
new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
|
||||
gfp_flags, numa_node_id());
|
||||
if (unlikely(!new_tsb)) {
|
||||
/* Not being able to fork due to a high-order TSB
|
||||
* allocation failure is very bad behavior. Just back
|
||||
* down to a 0-order allocation and force no TSB
|
||||
* growing for this address space.
|
||||
*/
|
||||
if (mm->context.tsb_block[tsb_index].tsb == NULL &&
|
||||
new_cache_index > 0) {
|
||||
new_cache_index = 0;
|
||||
new_size = 8192;
|
||||
new_rss_limit = ~0UL;
|
||||
goto retry_tsb_alloc;
|
||||
}
|
||||
|
||||
/* If we failed on a TSB grow, we are under serious
|
||||
* memory pressure so don't try to grow any more.
|
||||
*/
|
||||
if (mm->context.tsb_block[tsb_index].tsb != NULL)
|
||||
mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Mark all tags as invalid. */
|
||||
tsb_init(new_tsb, new_size);
|
||||
|
||||
/* Ok, we are about to commit the changes. If we are
|
||||
* growing an existing TSB the locking is very tricky,
|
||||
* so WATCH OUT!
|
||||
*
|
||||
* We have to hold mm->context.lock while committing to the
|
||||
* new TSB, this synchronizes us with processors in
|
||||
* flush_tsb_user() and switch_mm() for this address space.
|
||||
*
|
||||
* But even with that lock held, processors run asynchronously
|
||||
* accessing the old TSB via TLB miss handling. This is OK
|
||||
* because those actions are just propagating state from the
|
||||
* Linux page tables into the TSB, page table mappings are not
|
||||
* being changed. If a real fault occurs, the processor will
|
||||
* synchronize with us when it hits flush_tsb_user(), this is
|
||||
* also true for the case where vmscan is modifying the page
|
||||
* tables. The only thing we need to be careful with is to
|
||||
* skip any locked TSB entries during copy_tsb().
|
||||
*
|
||||
* When we finish committing to the new TSB, we have to drop
|
||||
* the lock and ask all other cpus running this address space
|
||||
* to run tsb_context_switch() to see the new TSB table.
|
||||
*/
|
||||
spin_lock_irqsave(&mm->context.lock, flags);
|
||||
|
||||
old_tsb = mm->context.tsb_block[tsb_index].tsb;
|
||||
old_cache_index =
|
||||
(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
|
||||
old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
|
||||
sizeof(struct tsb));
|
||||
|
||||
|
||||
/* Handle multiple threads trying to grow the TSB at the same time.
|
||||
* One will get in here first, and bump the size and the RSS limit.
|
||||
* The others will get in here next and hit this check.
|
||||
*/
|
||||
if (unlikely(old_tsb &&
|
||||
(rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
|
||||
spin_unlock_irqrestore(&mm->context.lock, flags);
|
||||
|
||||
kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
|
||||
return;
|
||||
}
|
||||
|
||||
mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
|
||||
|
||||
if (old_tsb) {
|
||||
extern void copy_tsb(unsigned long old_tsb_base,
|
||||
unsigned long old_tsb_size,
|
||||
unsigned long new_tsb_base,
|
||||
unsigned long new_tsb_size);
|
||||
unsigned long old_tsb_base = (unsigned long) old_tsb;
|
||||
unsigned long new_tsb_base = (unsigned long) new_tsb;
|
||||
|
||||
if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
|
||||
old_tsb_base = __pa(old_tsb_base);
|
||||
new_tsb_base = __pa(new_tsb_base);
|
||||
}
|
||||
copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
|
||||
}
|
||||
|
||||
mm->context.tsb_block[tsb_index].tsb = new_tsb;
|
||||
setup_tsb_params(mm, tsb_index, new_size);
|
||||
|
||||
spin_unlock_irqrestore(&mm->context.lock, flags);
|
||||
|
||||
/* If old_tsb is NULL, we're being invoked for the first time
|
||||
* from init_new_context().
|
||||
*/
|
||||
if (old_tsb) {
|
||||
/* Reload it on the local cpu. */
|
||||
tsb_context_switch(mm);
|
||||
|
||||
/* Now force other processors to do the same. */
|
||||
preempt_disable();
|
||||
smp_tsb_sync(mm);
|
||||
preempt_enable();
|
||||
|
||||
/* Now it is safe to free the old tsb. */
|
||||
kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
|
||||
}
|
||||
}
|
||||
|
||||
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
unsigned long huge_pte_count;
|
||||
#endif
|
||||
unsigned int i;
|
||||
|
||||
spin_lock_init(&mm->context.lock);
|
||||
|
||||
mm->context.sparc64_ctx_val = 0UL;
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
/* We reset it to zero because the fork() page copying
|
||||
* will re-increment the counters as the parent PTEs are
|
||||
* copied into the child address space.
|
||||
*/
|
||||
huge_pte_count = mm->context.huge_pte_count;
|
||||
mm->context.huge_pte_count = 0;
|
||||
#endif
|
||||
|
||||
/* copy_mm() copies over the parent's mm_struct before calling
|
||||
* us, so we need to zero out the TSB pointer or else tsb_grow()
|
||||
* will be confused and think there is an older TSB to free up.
|
||||
*/
|
||||
for (i = 0; i < MM_NUM_TSBS; i++)
|
||||
mm->context.tsb_block[i].tsb = NULL;
|
||||
|
||||
/* If this is fork, inherit the parent's TSB size. We would
|
||||
* grow it to that size on the first page fault anyways.
|
||||
*/
|
||||
tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
||||
if (unlikely(huge_pte_count))
|
||||
tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
|
||||
#endif
|
||||
|
||||
if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tsb_destroy_one(struct tsb_config *tp)
|
||||
{
|
||||
unsigned long cache_index;
|
||||
|
||||
if (!tp->tsb)
|
||||
return;
|
||||
cache_index = tp->tsb_reg_val & 0x7UL;
|
||||
kmem_cache_free(tsb_caches[cache_index], tp->tsb);
|
||||
tp->tsb = NULL;
|
||||
tp->tsb_reg_val = 0UL;
|
||||
}
|
||||
|
||||
void destroy_context(struct mm_struct *mm)
|
||||
{
|
||||
unsigned long flags, i;
|
||||
|
||||
for (i = 0; i < MM_NUM_TSBS; i++)
|
||||
tsb_destroy_one(&mm->context.tsb_block[i]);
|
||||
|
||||
spin_lock_irqsave(&ctx_alloc_lock, flags);
|
||||
|
||||
if (CTX_VALID(mm->context)) {
|
||||
unsigned long nr = CTX_NRBITS(mm->context);
|
||||
mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&ctx_alloc_lock, flags);
|
||||
}
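The comment before tsb_grow() in tsb.c above describes the sizing policy: candidate TSB sizes run in powers of two from 8K to 1MB, and growth triggers once the address space's RSS approaches a fraction of the table's capacity (roughly 3/4 with the default sysctl_tsb_ratio of -2). The short C sketch below reproduces only that arithmetic; the 16-byte entry size and the helper names are assumptions made for illustration, not the kernel's definitions.

/* Sketch of the TSB sizing decision: pick the smallest power-of-two table
 * whose RSS limit exceeds the current RSS, capping at 1MB. */
#include <stdio.h>

#define ENTRY_SIZE 16UL                 /* assumed bytes per TSB entry */

static long tsb_ratio = -2;             /* mirrors the default sysctl_tsb_ratio */

static unsigned long rss_limit(unsigned long bytes)
{
	unsigned long ents = bytes / ENTRY_SIZE;

	if (tsb_ratio < 0)
		return ents - (ents >> -tsb_ratio);   /* e.g. 3/4 of capacity */
	return ents + (ents >> tsb_ratio);
}

static unsigned long pick_size(unsigned long rss)
{
	unsigned long size;

	for (size = 8192; size < (1UL << 20); size <<= 1)
		if (rss_limit(size) > rss)
			break;
	return size;                      /* falls through to 1MB at the cap */
}

int main(void)
{
	unsigned long rss[] = { 100, 1000, 10000, 100000 };

	for (int i = 0; i < 4; i++)
		printf("rss %6lu pages -> %7lu byte table\n",
		       rss[i], pick_size(rss[i]));
	return 0;
}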
131
arch/sparc/mm/tsunami.S
Normal file
@@ -0,0 +1,131 @@
/*
 * tsunami.S: High speed MicroSparc-I mmu/cache operations.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtsrmmu.h>

	.text
	.align	4

	.globl	tsunami_flush_cache_all, tsunami_flush_cache_mm
	.globl	tsunami_flush_cache_range, tsunami_flush_cache_page
	.globl	tsunami_flush_page_to_ram, tsunami_flush_page_for_dma
	.globl	tsunami_flush_sig_insns
	.globl	tsunami_flush_tlb_all, tsunami_flush_tlb_mm
	.globl	tsunami_flush_tlb_range, tsunami_flush_tlb_page

	/* Sliiick... */
tsunami_flush_cache_page:
tsunami_flush_cache_range:
	ld	[%o0 + VMA_VM_MM], %o0
tsunami_flush_cache_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	tsunami_flush_cache_out
tsunami_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
tsunami_flush_page_for_dma:
	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
tsunami_flush_cache_out:
tsunami_flush_page_to_ram:
	retl
	 nop

tsunami_flush_sig_insns:
	flush	%o1
	retl
	 flush	%o1 + 4

	/* More slick stuff... */
tsunami_flush_tlb_range:
	ld	[%o0 + VMA_VM_MM], %o0
tsunami_flush_tlb_mm:
	ld	[%o0 + AOFF_mm_context], %g2
	cmp	%g2, -1
	be	tsunami_flush_tlb_out
tsunami_flush_tlb_all:
	 mov	0x400, %o1
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	nop
	nop
	nop
	nop
	nop
tsunami_flush_tlb_out:
	retl
	 nop

	/* This one can be done in a fine grained manner... */
tsunami_flush_tlb_page:
	ld	[%o0 + VMA_VM_MM], %o0
	mov	SRMMU_CTX_REG, %g1
	ld	[%o0 + AOFF_mm_context], %o3
	andn	%o1, (PAGE_SIZE - 1), %o1
	cmp	%o3, -1
	be	tsunami_flush_tlb_page_out
	 lda	[%g1] ASI_M_MMUREGS, %g5
	sta	%o3, [%g1] ASI_M_MMUREGS
	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
	nop
	nop
	nop
	nop
	nop
tsunami_flush_tlb_page_out:
	retl
	 sta	%g5, [%g1] ASI_M_MMUREGS

#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
	ldd	[src + offset + 0x18], t0; \
	std	t0, [dst + offset + 0x18]; \
	ldd	[src + offset + 0x10], t2; \
	std	t2, [dst + offset + 0x10]; \
	ldd	[src + offset + 0x08], t0; \
	std	t0, [dst + offset + 0x08]; \
	ldd	[src + offset + 0x00], t2; \
	std	t2, [dst + offset + 0x00];

tsunami_copy_1page:
/* NOTE: This routine has to be shorter than 70insns --jj */
	or	%g0, (PAGE_SIZE >> 8), %g1
1:
	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
	subcc	%g1, 1, %g1
	add	%o0, 0x100, %o0
	bne	1b
	 add	%o1, 0x100, %o1

	.globl	tsunami_setup_blockops
tsunami_setup_blockops:
	sethi	%hi(__copy_1page), %o0
	or	%o0, %lo(__copy_1page), %o0
	sethi	%hi(tsunami_copy_1page), %o1
	or	%o1, %lo(tsunami_copy_1page), %o1
	sethi	%hi(tsunami_setup_blockops), %o2
	or	%o2, %lo(tsunami_setup_blockops), %o2
	ld	[%o1], %o4
1:	add	%o1, 4, %o1
	st	%o4, [%o0]
	add	%o0, 4, %o0
	cmp	%o1, %o2
	bne	1b
	 ld	[%o1], %o4
	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
	retl
	 nop
878
arch/sparc/mm/ultra.S
Normal file
@@ -0,0 +1,878 @@
/*
|
||||
* ultra.S: Don't expand these all over the place...
|
||||
*
|
||||
* Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
|
||||
*/
|
||||
|
||||
#include <asm/asi.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/spitfire.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/pil.h>
|
||||
#include <asm/head.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/cpudata.h>
|
||||
|
||||
/* Basically, most of the Spitfire vs. Cheetah madness
|
||||
* has to do with the fact that Cheetah does not support
|
||||
* IMMU flushes out of the secondary context. Someone needs
|
||||
* to throw a south lake birthday party for the folks
|
||||
* in Microelectronics who refused to fix this shit.
|
||||
*/
|
||||
|
||||
/* This file is meant to be read efficiently by the CPU, not humans.
|
||||
* Staraj sie tego nikomu nie pierdolnac...
|
||||
*/
|
||||
.text
|
||||
.align 32
|
||||
.globl __flush_tlb_mm
|
||||
__flush_tlb_mm: /* 18 insns */
|
||||
/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
|
||||
ldxa [%o1] ASI_DMMU, %g2
|
||||
cmp %g2, %o0
|
||||
bne,pn %icc, __spitfire_flush_tlb_mm_slow
|
||||
mov 0x50, %g3
|
||||
stxa %g0, [%g3] ASI_DMMU_DEMAP
|
||||
stxa %g0, [%g3] ASI_IMMU_DEMAP
|
||||
sethi %hi(KERNBASE), %g3
|
||||
flush %g3
|
||||
retl
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
.align 32
|
||||
.globl __flush_tlb_page
|
||||
__flush_tlb_page: /* 22 insns */
|
||||
/* %o0 = context, %o1 = vaddr */
|
||||
rdpr %pstate, %g7
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, %pstate
|
||||
mov SECONDARY_CONTEXT, %o4
|
||||
ldxa [%o4] ASI_DMMU, %g2
|
||||
stxa %o0, [%o4] ASI_DMMU
|
||||
andcc %o1, 1, %g0
|
||||
andn %o1, 1, %o3
|
||||
be,pn %icc, 1f
|
||||
or %o3, 0x10, %o3
|
||||
stxa %g0, [%o3] ASI_IMMU_DEMAP
|
||||
1: stxa %g0, [%o3] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
stxa %g2, [%o4] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o4
|
||||
flush %o4
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
.align 32
|
||||
.globl __flush_tlb_pending
|
||||
__flush_tlb_pending: /* 26 insns */
|
||||
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
|
||||
rdpr %pstate, %g7
|
||||
sllx %o1, 3, %o1
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, %pstate
|
||||
mov SECONDARY_CONTEXT, %o4
|
||||
ldxa [%o4] ASI_DMMU, %g2
|
||||
stxa %o0, [%o4] ASI_DMMU
|
||||
1: sub %o1, (1 << 3), %o1
|
||||
ldx [%o2 + %o1], %o3
|
||||
andcc %o3, 1, %g0
|
||||
andn %o3, 1, %o3
|
||||
be,pn %icc, 2f
|
||||
or %o3, 0x10, %o3
|
||||
stxa %g0, [%o3] ASI_IMMU_DEMAP
|
||||
2: stxa %g0, [%o3] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
brnz,pt %o1, 1b
|
||||
nop
|
||||
stxa %g2, [%o4] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o4
|
||||
flush %o4
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
.align 32
|
||||
.globl __flush_tlb_kernel_range
|
||||
__flush_tlb_kernel_range: /* 16 insns */
|
||||
/* %o0=start, %o1=end */
|
||||
cmp %o0, %o1
|
||||
be,pn %xcc, 2f
|
||||
sethi %hi(PAGE_SIZE), %o4
|
||||
sub %o1, %o0, %o3
|
||||
sub %o3, %o4, %o3
|
||||
or %o0, 0x20, %o0 ! Nucleus
|
||||
1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
|
||||
stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
|
||||
membar #Sync
|
||||
brnz,pt %o3, 1b
|
||||
sub %o3, %o4, %o3
|
||||
2: sethi %hi(KERNBASE), %o3
|
||||
flush %o3
|
||||
retl
|
||||
nop
|
||||
nop
|
||||
|
||||
__spitfire_flush_tlb_mm_slow:
|
||||
rdpr %pstate, %g1
|
||||
wrpr %g1, PSTATE_IE, %pstate
|
||||
stxa %o0, [%o1] ASI_DMMU
|
||||
stxa %g0, [%g3] ASI_DMMU_DEMAP
|
||||
stxa %g0, [%g3] ASI_IMMU_DEMAP
|
||||
flush %g6
|
||||
stxa %g2, [%o1] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o1
|
||||
flush %o1
|
||||
retl
|
||||
wrpr %g1, 0, %pstate
|
||||
|
||||
/*
|
||||
* The following code flushes one page_size worth.
|
||||
*/
|
||||
.section .kprobes.text, "ax"
|
||||
.align 32
|
||||
.globl __flush_icache_page
|
||||
__flush_icache_page: /* %o0 = phys_page */
|
||||
srlx %o0, PAGE_SHIFT, %o0
|
||||
sethi %hi(PAGE_OFFSET), %g1
|
||||
sllx %o0, PAGE_SHIFT, %o0
|
||||
sethi %hi(PAGE_SIZE), %g2
|
||||
ldx [%g1 + %lo(PAGE_OFFSET)], %g1
|
||||
add %o0, %g1, %o0
|
||||
1: subcc %g2, 32, %g2
|
||||
bne,pt %icc, 1b
|
||||
flush %o0 + %g2
|
||||
retl
|
||||
nop
|
||||
|
||||
#ifdef DCACHE_ALIASING_POSSIBLE
|
||||
|
||||
#if (PAGE_SHIFT != 13)
|
||||
#error only page shift of 13 is supported by dcache flush
|
||||
#endif
|
||||
|
||||
#define DTAG_MASK 0x3
|
||||
|
||||
/* This routine is Spitfire specific so the hardcoded
|
||||
* D-cache size and line-size are OK.
|
||||
*/
|
||||
.align 64
|
||||
.globl __flush_dcache_page
|
||||
__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
|
||||
sethi %hi(PAGE_OFFSET), %g1
|
||||
ldx [%g1 + %lo(PAGE_OFFSET)], %g1
|
||||
sub %o0, %g1, %o0 ! physical address
|
||||
srlx %o0, 11, %o0 ! make D-cache TAG
|
||||
sethi %hi(1 << 14), %o2 ! D-cache size
|
||||
sub %o2, (1 << 5), %o2 ! D-cache line size
|
||||
1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
|
||||
andcc %o3, DTAG_MASK, %g0 ! Valid?
|
||||
be,pn %xcc, 2f ! Nope, branch
|
||||
andn %o3, DTAG_MASK, %o3 ! Clear valid bits
|
||||
cmp %o3, %o0 ! TAG match?
|
||||
bne,pt %xcc, 2f ! Nope, branch
|
||||
nop
|
||||
stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
|
||||
membar #Sync
|
||||
2: brnz,pt %o2, 1b
|
||||
sub %o2, (1 << 5), %o2 ! D-cache line size
|
||||
|
||||
/* The I-cache does not snoop local stores so we
|
||||
* better flush that too when necessary.
|
||||
*/
|
||||
brnz,pt %o1, __flush_icache_page
|
||||
sllx %o0, 11, %o0
|
||||
retl
|
||||
nop
|
||||
|
||||
#endif /* DCACHE_ALIASING_POSSIBLE */
|
||||
|
||||
.previous
|
||||
|
||||
/* Cheetah specific versions, patched at boot time. */
|
||||
__cheetah_flush_tlb_mm: /* 19 insns */
|
||||
rdpr %pstate, %g7
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, 0x0, %pstate
|
||||
wrpr %g0, 1, %tl
|
||||
mov PRIMARY_CONTEXT, %o2
|
||||
mov 0x40, %g3
|
||||
ldxa [%o2] ASI_DMMU, %g2
|
||||
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
|
||||
sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
|
||||
or %o0, %o1, %o0 /* Preserve nucleus page size fields */
|
||||
stxa %o0, [%o2] ASI_DMMU
|
||||
stxa %g0, [%g3] ASI_DMMU_DEMAP
|
||||
stxa %g0, [%g3] ASI_IMMU_DEMAP
|
||||
stxa %g2, [%o2] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o2
|
||||
flush %o2
|
||||
wrpr %g0, 0, %tl
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
|
||||
__cheetah_flush_tlb_page: /* 22 insns */
|
||||
/* %o0 = context, %o1 = vaddr */
|
||||
rdpr %pstate, %g7
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, 0x0, %pstate
|
||||
wrpr %g0, 1, %tl
|
||||
mov PRIMARY_CONTEXT, %o4
|
||||
ldxa [%o4] ASI_DMMU, %g2
|
||||
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
|
||||
sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
|
||||
or %o0, %o3, %o0 /* Preserve nucleus page size fields */
|
||||
stxa %o0, [%o4] ASI_DMMU
|
||||
andcc %o1, 1, %g0
|
||||
be,pn %icc, 1f
|
||||
andn %o1, 1, %o3
|
||||
stxa %g0, [%o3] ASI_IMMU_DEMAP
|
||||
1: stxa %g0, [%o3] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
stxa %g2, [%o4] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o4
|
||||
flush %o4
|
||||
wrpr %g0, 0, %tl
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
|
||||
__cheetah_flush_tlb_pending: /* 27 insns */
|
||||
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
|
||||
rdpr %pstate, %g7
|
||||
sllx %o1, 3, %o1
|
||||
andn %g7, PSTATE_IE, %g2
|
||||
wrpr %g2, 0x0, %pstate
|
||||
wrpr %g0, 1, %tl
|
||||
mov PRIMARY_CONTEXT, %o4
|
||||
ldxa [%o4] ASI_DMMU, %g2
|
||||
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
|
||||
sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
|
||||
or %o0, %o3, %o0 /* Preserve nucleus page size fields */
|
||||
stxa %o0, [%o4] ASI_DMMU
|
||||
1: sub %o1, (1 << 3), %o1
|
||||
ldx [%o2 + %o1], %o3
|
||||
andcc %o3, 1, %g0
|
||||
be,pn %icc, 2f
|
||||
andn %o3, 1, %o3
|
||||
stxa %g0, [%o3] ASI_IMMU_DEMAP
|
||||
2: stxa %g0, [%o3] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
brnz,pt %o1, 1b
|
||||
nop
|
||||
stxa %g2, [%o4] ASI_DMMU
|
||||
sethi %hi(KERNBASE), %o4
|
||||
flush %o4
|
||||
wrpr %g0, 0, %tl
|
||||
retl
|
||||
wrpr %g7, 0x0, %pstate
|
||||
|
||||
#ifdef DCACHE_ALIASING_POSSIBLE
|
||||
__cheetah_flush_dcache_page: /* 11 insns */
|
||||
sethi %hi(PAGE_OFFSET), %g1
|
||||
ldx [%g1 + %lo(PAGE_OFFSET)], %g1
|
||||
sub %o0, %g1, %o0
|
||||
sethi %hi(PAGE_SIZE), %o4
|
||||
1: subcc %o4, (1 << 5), %o4
|
||||
stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
|
||||
membar #Sync
|
||||
bne,pt %icc, 1b
|
||||
nop
|
||||
retl /* I-cache flush never needed on Cheetah, see callers. */
|
||||
nop
|
||||
#endif /* DCACHE_ALIASING_POSSIBLE */
|
||||
|
||||
/* Hypervisor specific versions, patched at boot time. */
|
||||
__hypervisor_tlb_tl0_error:
|
||||
save %sp, -192, %sp
|
||||
mov %i0, %o0
|
||||
call hypervisor_tlbop_error
|
||||
mov %i1, %o1
|
||||
ret
|
||||
restore
|
||||
|
||||
__hypervisor_flush_tlb_mm: /* 10 insns */
|
||||
mov %o0, %o2 /* ARG2: mmu context */
|
||||
mov 0, %o0 /* ARG0: CPU lists unimplemented */
|
||||
mov 0, %o1 /* ARG1: CPU lists unimplemented */
|
||||
mov HV_MMU_ALL, %o3 /* ARG3: flags */
|
||||
mov HV_FAST_MMU_DEMAP_CTX, %o5
|
||||
ta HV_FAST_TRAP
|
||||
brnz,pn %o0, __hypervisor_tlb_tl0_error
|
||||
mov HV_FAST_MMU_DEMAP_CTX, %o1
|
||||
retl
|
||||
nop
|
||||
|
||||
__hypervisor_flush_tlb_page: /* 11 insns */
|
||||
/* %o0 = context, %o1 = vaddr */
|
||||
mov %o0, %g2
|
||||
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
|
||||
mov %g2, %o1 /* ARG1: mmu context */
|
||||
mov HV_MMU_ALL, %o2 /* ARG2: flags */
|
||||
srlx %o0, PAGE_SHIFT, %o0
|
||||
sllx %o0, PAGE_SHIFT, %o0
|
||||
ta HV_MMU_UNMAP_ADDR_TRAP
|
||||
brnz,pn %o0, __hypervisor_tlb_tl0_error
|
||||
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
|
||||
retl
|
||||
nop
|
||||
|
||||
__hypervisor_flush_tlb_pending: /* 16 insns */
|
||||
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
|
||||
sllx %o1, 3, %g1
|
||||
mov %o2, %g2
|
||||
mov %o0, %g3
|
||||
1: sub %g1, (1 << 3), %g1
|
||||
ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
|
||||
mov %g3, %o1 /* ARG1: mmu context */
|
||||
mov HV_MMU_ALL, %o2 /* ARG2: flags */
|
||||
srlx %o0, PAGE_SHIFT, %o0
|
||||
sllx %o0, PAGE_SHIFT, %o0
|
||||
ta HV_MMU_UNMAP_ADDR_TRAP
|
||||
brnz,pn %o0, __hypervisor_tlb_tl0_error
|
||||
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
|
||||
brnz,pt %g1, 1b
|
||||
nop
|
||||
retl
|
||||
nop
|
||||
|
||||
__hypervisor_flush_tlb_kernel_range: /* 16 insns */
|
||||
/* %o0=start, %o1=end */
|
||||
cmp %o0, %o1
|
||||
be,pn %xcc, 2f
|
||||
sethi %hi(PAGE_SIZE), %g3
|
||||
mov %o0, %g1
|
||||
sub %o1, %g1, %g2
|
||||
sub %g2, %g3, %g2
|
||||
1: add %g1, %g2, %o0 /* ARG0: virtual address */
|
||||
mov 0, %o1 /* ARG1: mmu context */
|
||||
mov HV_MMU_ALL, %o2 /* ARG2: flags */
|
||||
ta HV_MMU_UNMAP_ADDR_TRAP
|
||||
brnz,pn %o0, __hypervisor_tlb_tl0_error
|
||||
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
|
||||
brnz,pt %g2, 1b
|
||||
sub %g2, %g3, %g2
|
||||
2: retl
|
||||
nop
|
||||
|
||||
#ifdef DCACHE_ALIASING_POSSIBLE
|
||||
/* XXX Niagara and friends have an 8K cache, so no aliasing is
|
||||
* XXX possible, but nothing explicit in the Hypervisor API
|
||||
* XXX guarantees this.
|
||||
*/
|
||||
__hypervisor_flush_dcache_page: /* 2 insns */
|
||||
retl
|
||||
nop
|
||||
#endif
|
||||
|
||||
tlb_patch_one:
|
||||
1: lduw [%o1], %g1
|
||||
stw %g1, [%o0]
|
||||
flush %o0
|
||||
subcc %o2, 1, %o2
|
||||
add %o1, 4, %o1
|
||||
bne,pt %icc, 1b
|
||||
add %o0, 4, %o0
|
||||
retl
|
||||
nop
|
||||
|
||||
.globl cheetah_patch_cachetlbops
|
||||
cheetah_patch_cachetlbops:
|
||||
save %sp, -128, %sp
|
||||
|
||||
sethi %hi(__flush_tlb_mm), %o0
|
||||
or %o0, %lo(__flush_tlb_mm), %o0
|
||||
sethi %hi(__cheetah_flush_tlb_mm), %o1
|
||||
or %o1, %lo(__cheetah_flush_tlb_mm), %o1
|
||||
call tlb_patch_one
|
||||
mov 19, %o2
|
||||
|
||||
sethi %hi(__flush_tlb_page), %o0
|
||||
or %o0, %lo(__flush_tlb_page), %o0
|
||||
sethi %hi(__cheetah_flush_tlb_page), %o1
|
||||
or %o1, %lo(__cheetah_flush_tlb_page), %o1
|
||||
call tlb_patch_one
|
||||
mov 22, %o2
|
||||
|
||||
sethi %hi(__flush_tlb_pending), %o0
|
||||
or %o0, %lo(__flush_tlb_pending), %o0
|
||||
sethi %hi(__cheetah_flush_tlb_pending), %o1
|
||||
or %o1, %lo(__cheetah_flush_tlb_pending), %o1
|
||||
call tlb_patch_one
|
||||
mov 27, %o2
|
||||
|
||||
#ifdef DCACHE_ALIASING_POSSIBLE
|
||||
sethi %hi(__flush_dcache_page), %o0
|
||||
or %o0, %lo(__flush_dcache_page), %o0
|
||||
sethi %hi(__cheetah_flush_dcache_page), %o1
|
||||
or %o1, %lo(__cheetah_flush_dcache_page), %o1
|
||||
call tlb_patch_one
|
||||
mov 11, %o2
|
||||
#endif /* DCACHE_ALIASING_POSSIBLE */
|
||||
|
||||
ret
|
||||
restore
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* These are all called by the slaves of a cross call, at
|
||||
* trap level 1, with interrupts fully disabled.
|
||||
*
|
||||
* Register usage:
|
||||
* %g5 mm->context (all tlb flushes)
|
||||
* %g1 address arg 1 (tlb page and range flushes)
|
||||
* %g7 address arg 2 (tlb range flush only)
|
||||
*
|
||||
* %g6 scratch 1
|
||||
* %g2 scratch 2
|
||||
* %g3 scratch 3
|
||||
* %g4 scratch 4
|
||||
*/
|
||||
.align 32
|
||||
.globl xcall_flush_tlb_mm
|
||||
xcall_flush_tlb_mm: /* 21 insns */
|
||||
mov PRIMARY_CONTEXT, %g2
|
||||
ldxa [%g2] ASI_DMMU, %g3
|
||||
srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
|
||||
sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
|
||||
or %g5, %g4, %g5 /* Preserve nucleus page size fields */
|
||||
stxa %g5, [%g2] ASI_DMMU
|
||||
mov 0x40, %g4
|
||||
stxa %g0, [%g4] ASI_DMMU_DEMAP
|
||||
stxa %g0, [%g4] ASI_IMMU_DEMAP
|
||||
stxa %g3, [%g2] ASI_DMMU
|
||||
retry
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
.globl xcall_flush_tlb_page
|
||||
xcall_flush_tlb_page: /* 17 insns */
|
||||
/* %g5=context, %g1=vaddr */
|
||||
mov PRIMARY_CONTEXT, %g4
|
||||
ldxa [%g4] ASI_DMMU, %g2
|
||||
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
|
||||
sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
|
||||
or %g5, %g4, %g5
|
||||
mov PRIMARY_CONTEXT, %g4
|
||||
stxa %g5, [%g4] ASI_DMMU
|
||||
andcc %g1, 0x1, %g0
|
||||
be,pn %icc, 2f
|
||||
andn %g1, 0x1, %g5
|
||||
stxa %g0, [%g5] ASI_IMMU_DEMAP
|
||||
2: stxa %g0, [%g5] ASI_DMMU_DEMAP
|
||||
membar #Sync
|
||||
stxa %g2, [%g4] ASI_DMMU
|
||||
retry
|
||||
nop
|
||||
nop
|
||||
|
||||
.globl xcall_flush_tlb_kernel_range
|
||||
xcall_flush_tlb_kernel_range: /* 25 insns */
|
||||
sethi %hi(PAGE_SIZE - 1), %g2
|
||||
or %g2, %lo(PAGE_SIZE - 1), %g2
|
||||
andn %g1, %g2, %g1
|
||||
andn %g7, %g2, %g7
|
||||
sub %g7, %g1, %g3
|
||||
add %g2, 1, %g2
|
||||
sub %g3, %g2, %g3
|
||||
or %g1, 0x20, %g1 ! Nucleus
|
||||
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
|
||||
stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
|
||||
membar #Sync
|
||||
brnz,pt %g3, 1b
|
||||
sub %g3, %g2, %g3
|
||||
retry
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
/* This runs in a very controlled environment, so we do
|
||||
* not need to worry about BH races etc.
|
||||
*/
|
||||
.globl xcall_sync_tick
|
||||
xcall_sync_tick:
|
||||
|
||||
661: rdpr %pstate, %g2
|
||||
wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
|
||||
.section .sun4v_2insn_patch, "ax"
|
||||
.word 661b
|
||||
nop
|
||||
nop
|
||||
.previous
|
||||
|
||||
rdpr %pil, %g2
|
||||
wrpr %g0, PIL_NORMAL_MAX, %pil
|
||||
sethi %hi(109f), %g7
|
||||
b,pt %xcc, etrap_irq
|
||||
109: or %g7, %lo(109b), %g7
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
call trace_hardirqs_off
|
||||
nop
|
||||
#endif
|
||||
call smp_synchronize_tick_client
|
||||
nop
|
||||
b rtrap_xcall
|
||||
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
|
||||
|
||||
.globl xcall_fetch_glob_regs
|
||||
xcall_fetch_glob_regs:
|
||||
sethi %hi(global_cpu_snapshot), %g1
|
||||
or %g1, %lo(global_cpu_snapshot), %g1
|
||||
__GET_CPUID(%g2)
|
||||
sllx %g2, 6, %g3
|
||||
add %g1, %g3, %g1
|
||||
rdpr %tstate, %g7
|
||||
stx %g7, [%g1 + GR_SNAP_TSTATE]
|
||||
rdpr %tpc, %g7
|
||||
stx %g7, [%g1 + GR_SNAP_TPC]
|
||||
rdpr %tnpc, %g7
|
||||
stx %g7, [%g1 + GR_SNAP_TNPC]
|
||||
stx %o7, [%g1 + GR_SNAP_O7]
|
||||
stx %i7, [%g1 + GR_SNAP_I7]
|
||||
/* Don't try this at home kids... */
|
||||
rdpr %cwp, %g3
|
||||
sub %g3, 1, %g7
|
||||
wrpr %g7, %cwp
|
||||
mov %i7, %g7
|
||||
wrpr %g3, %cwp
|
||||
stx %g7, [%g1 + GR_SNAP_RPC]
|
||||
sethi %hi(trap_block), %g7
|
||||
or %g7, %lo(trap_block), %g7
|
||||
sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2
|
||||
add %g7, %g2, %g7
|
||||
ldx [%g7 + TRAP_PER_CPU_THREAD], %g3
|
||||
stx %g3, [%g1 + GR_SNAP_THREAD]
|
||||
retry
|
||||
|
||||
.globl xcall_fetch_glob_pmu
|
||||
xcall_fetch_glob_pmu:
|
||||
sethi %hi(global_cpu_snapshot), %g1
|
||||
or %g1, %lo(global_cpu_snapshot), %g1
|
||||
__GET_CPUID(%g2)
|
||||
sllx %g2, 6, %g3
|
||||
add %g1, %g3, %g1
|
||||
rd %pic, %g7
|
||||
stx %g7, [%g1 + (4 * 8)]
|
||||
rd %pcr, %g7
|
||||
stx %g7, [%g1 + (0 * 8)]
|
||||
retry
|
||||
|
||||
.globl xcall_fetch_glob_pmu_n4
|
||||
xcall_fetch_glob_pmu_n4:
|
||||
sethi %hi(global_cpu_snapshot), %g1
	or %g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx %g2, 6, %g3
	add %g1, %g3, %g1

	ldxa [%g0] ASI_PIC, %g7
	stx %g7, [%g1 + (4 * 8)]
	mov 0x08, %g3
	ldxa [%g3] ASI_PIC, %g7
	stx %g7, [%g1 + (5 * 8)]
	mov 0x10, %g3
	ldxa [%g3] ASI_PIC, %g7
	stx %g7, [%g1 + (6 * 8)]
	mov 0x18, %g3
	ldxa [%g3] ASI_PIC, %g7
	stx %g7, [%g1 + (7 * 8)]

	mov %o0, %g2
	mov %o1, %g3
	mov %o5, %g7

	mov HV_FAST_VT_GET_PERFREG, %o5
	mov 3, %o0
	ta HV_FAST_TRAP
	stx %o1, [%g1 + (3 * 8)]
	mov HV_FAST_VT_GET_PERFREG, %o5
	mov 2, %o0
	ta HV_FAST_TRAP
	stx %o1, [%g1 + (2 * 8)]
	mov HV_FAST_VT_GET_PERFREG, %o5
	mov 1, %o0
	ta HV_FAST_TRAP
	stx %o1, [%g1 + (1 * 8)]
	mov HV_FAST_VT_GET_PERFREG, %o5
	mov 0, %o0
	ta HV_FAST_TRAP
	stx %o1, [%g1 + (0 * 8)]

	mov %g2, %o0
	mov %g3, %o1
	mov %g7, %o5

	retry

#ifdef DCACHE_ALIASING_POSSIBLE
	.align 32
	.globl xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi %hi(PAGE_SIZE), %g3
1:	subcc %g3, (1 << 5), %g3
	stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar #Sync
	bne,pt %icc, 1b
	nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
					%g7 == kernel page virtual address
					%g5 == (page->mapping != NULL) */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx %g1, (13 - 2), %g1 ! Form tag comparitor
	sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
	sub %g3, (1 << 5), %g3 ! D$ linesize == 32
1:	ldxa [%g3] ASI_DCACHE_TAG, %g2
	andcc %g2, 0x3, %g0
	be,pn %xcc, 2f
	andn %g2, 0x3, %g2
	cmp %g2, %g1

	bne,pt %xcc, 2f
	nop
	stxa %g0, [%g3] ASI_DCACHE_TAG
	membar #Sync
2:	cmp %g3, 0
	bne,pt %xcc, 1b
	sub %g3, (1 << 5), %g3

	brz,pn %g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi %hi(PAGE_SIZE), %g3

1:	flush %g7
	subcc %g3, (1 << 5), %g3
	bne,pt %icc, 1b
	add %g7, (1 << 5), %g7

2:	retry
	nop
	nop

	/* %g5: error
	 * %g6: tlb op
	 */
__hypervisor_tlb_xcall_error:
	mov %g5, %g4
	mov %g6, %g5
	ba,pt %xcc, etrap
	rd %pc, %g7
	mov %l4, %o0
	call hypervisor_tlbop_error_xcall
	mov %l5, %o1
	ba,a,pt %xcc, rtrap

	.globl __hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov %o0, %g2
	mov %o1, %g3
	mov %o2, %g4
	mov %o3, %g1
	mov %o5, %g7
	clr %o0 /* ARG0: CPU lists unimplemented */
	clr %o1 /* ARG1: CPU lists unimplemented */
	mov %g5, %o2 /* ARG2: mmu context */
	mov HV_MMU_ALL, %o3 /* ARG3: flags */
	mov HV_FAST_MMU_DEMAP_CTX, %o5
	ta HV_FAST_TRAP
	mov HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn %o0, __hypervisor_tlb_xcall_error
	mov %o0, %g5
	mov %g2, %o0
	mov %g3, %o1
	mov %g4, %o2
	mov %g1, %o3
	mov %g7, %o5
	membar #Sync
	retry

	.globl __hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
	/* %g5=ctx, %g1=vaddr */
	mov %o0, %g2
	mov %o1, %g3
	mov %o2, %g4
	mov %g1, %o0 /* ARG0: virtual address */
	mov %g5, %o1 /* ARG1: mmu context */
	mov HV_MMU_ALL, %o2 /* ARG2: flags */
	srlx %o0, PAGE_SHIFT, %o0
	sllx %o0, PAGE_SHIFT, %o0
	ta HV_MMU_UNMAP_ADDR_TRAP
	mov HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn %o0, __hypervisor_tlb_xcall_error
	mov %o0, %g5
	mov %g2, %o0
	mov %g3, %o1
	mov %g4, %o2
	membar #Sync
	retry

	.globl __hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi %hi(PAGE_SIZE - 1), %g2
	or %g2, %lo(PAGE_SIZE - 1), %g2
	andn %g1, %g2, %g1
	andn %g7, %g2, %g7
	sub %g7, %g1, %g3
	add %g2, 1, %g2
	sub %g3, %g2, %g3
	mov %o0, %g2
	mov %o1, %g4
	mov %o2, %g7
1:	add %g1, %g3, %o0 /* ARG0: virtual address */
	mov 0, %o1 /* ARG1: mmu context */
	mov HV_MMU_ALL, %o2 /* ARG2: flags */
	ta HV_MMU_UNMAP_ADDR_TRAP
	mov HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn %o0, __hypervisor_tlb_xcall_error
	mov %o0, %g5
	sethi %hi(PAGE_SIZE), %o2
	brnz,pt %g3, 1b
	sub %g3, %o2, %g3
	mov %g2, %o0
	mov %g4, %o1
	mov %g7, %o2
	membar #Sync
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl xcall_call_function
xcall_call_function:
	wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl xcall_call_function_single
xcall_call_function_single:
	wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl xcall_receive_signal
xcall_receive_signal:
	wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl xcall_capture
xcall_capture:
	wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl xcall_kgdb_capture
xcall_kgdb_capture:
	wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */


	.globl hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save %sp, -128, %sp

	sethi %hi(__flush_tlb_mm), %o0
	or %o0, %lo(__flush_tlb_mm), %o0
	sethi %hi(__hypervisor_flush_tlb_mm), %o1
	or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call tlb_patch_one
	mov 10, %o2

	sethi %hi(__flush_tlb_page), %o0
	or %o0, %lo(__flush_tlb_page), %o0
	sethi %hi(__hypervisor_flush_tlb_page), %o1
	or %o1, %lo(__hypervisor_flush_tlb_page), %o1
	call tlb_patch_one
	mov 11, %o2

	sethi %hi(__flush_tlb_pending), %o0
	or %o0, %lo(__flush_tlb_pending), %o0
	sethi %hi(__hypervisor_flush_tlb_pending), %o1
	or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call tlb_patch_one
	mov 16, %o2

	sethi %hi(__flush_tlb_kernel_range), %o0
	or %o0, %lo(__flush_tlb_kernel_range), %o0
	sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
	or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call tlb_patch_one
	mov 16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi %hi(__flush_dcache_page), %o0
	or %o0, %lo(__flush_dcache_page), %o0
	sethi %hi(__hypervisor_flush_dcache_page), %o1
	or %o1, %lo(__hypervisor_flush_dcache_page), %o1
	call tlb_patch_one
	mov 2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi %hi(xcall_flush_tlb_mm), %o0
	or %o0, %lo(xcall_flush_tlb_mm), %o0
	sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call tlb_patch_one
	mov 21, %o2

	sethi %hi(xcall_flush_tlb_page), %o0
	or %o0, %lo(xcall_flush_tlb_page), %o0
	sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
	or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call tlb_patch_one
	mov 17, %o2

	sethi %hi(xcall_flush_tlb_kernel_range), %o0
	or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call tlb_patch_one
	mov 25, %o2
#endif /* CONFIG_SMP */

	ret
	restore
282
arch/sparc/mm/viking.S
Normal file
@@ -0,0 +1,282 @@
/*
 * viking.S: High speed Viking cache/mmu operations
 *
 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
 * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz)
 */

#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asm-offsets.h>
#include <asm/asi.h>
#include <asm/mxcc.h>
#include <asm/page.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>

#ifdef CONFIG_SMP
	.data
	.align 4
sun4dsmp_flush_tlb_spin:
	.word 0
#endif

	.text
	.align 4

	.globl viking_flush_cache_all, viking_flush_cache_mm
	.globl viking_flush_cache_range, viking_flush_cache_page
	.globl viking_flush_page, viking_mxcc_flush_page
	.globl viking_flush_page_for_dma, viking_flush_page_to_ram
	.globl viking_flush_sig_insns
	.globl viking_flush_tlb_all, viking_flush_tlb_mm
	.globl viking_flush_tlb_range, viking_flush_tlb_page

viking_flush_page:
	sethi %hi(PAGE_OFFSET), %g2
	sub %o0, %g2, %g3
	srl %g3, 12, %g1 ! ppage >> 12

	clr %o1 ! set counter, 0 - 127
	sethi %hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3
	sethi %hi(0x80000000), %o4
	sethi %hi(VIKING_PTAG_VALID), %o5
	sethi %hi(2*PAGE_SIZE), %o0
	sethi %hi(PAGE_SIZE), %g7
	clr %o2 ! block counter, 0 - 3
5:
	sll %o1, 5, %g4
	or %g4, %o4, %g4 ! 0x80000000 | (set << 5)

	sll %o2, 26, %g5 ! block << 26
6:
	or %g5, %g4, %g5
	ldda [%g5] ASI_M_DATAC_TAG, %g2
	cmp %g3, %g1 ! ptag == ppage?
	bne 7f
	inc %o2

	andcc %g2, %o5, %g0 ! ptag VALID?
	be 7f
	add %g4, %o3, %g2 ! (PAGE_OFFSET + PAGE_SIZE) | (set << 5)
	ld [%g2], %g3
	ld [%g2 + %g7], %g3
	add %g2, %o0, %g2
	ld [%g2], %g3
	ld [%g2 + %g7], %g3
	add %g2, %o0, %g2
	ld [%g2], %g3
	ld [%g2 + %g7], %g3
	add %g2, %o0, %g2
	ld [%g2], %g3
	b 8f
	ld [%g2 + %g7], %g3

7:
	cmp %o2, 3
	ble 6b
	sll %o2, 26, %g5 ! block << 26

8:	inc %o1
	cmp %o1, 0x7f
	ble 5b
	clr %o2

9:	retl
	nop

viking_mxcc_flush_page:
	sethi %hi(PAGE_OFFSET), %g2
	sub %o0, %g2, %g3
	sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE
	sethi %hi(MXCC_SRCSTREAM), %o3 ! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM)
	mov 0x10, %g2 ! set cacheable bit
	or %o3, %lo(MXCC_SRCSTREAM), %o2
	or %o3, %lo(MXCC_DESSTREAM), %o3
	sub %g3, MXCC_STREAM_SIZE, %g3
6:
	stda %g2, [%o2] ASI_M_MXCC
	stda %g2, [%o3] ASI_M_MXCC
	andncc %g3, PAGE_MASK, %g0
	bne 6b
	sub %g3, MXCC_STREAM_SIZE, %g3

9:	retl
	nop

viking_flush_cache_page:
viking_flush_cache_range:
#ifndef CONFIG_SMP
	ld [%o0 + VMA_VM_MM], %o0
#endif
viking_flush_cache_mm:
#ifndef CONFIG_SMP
	ld [%o0 + AOFF_mm_context], %g1
	cmp %g1, -1
	bne viking_flush_cache_all
	nop
	b,a viking_flush_cache_out
#endif
viking_flush_cache_all:
	WINDOW_FLUSH(%g4, %g5)
viking_flush_cache_out:
	retl
	nop

viking_flush_tlb_all:
	mov 0x400, %g1
	retl
	sta %g0, [%g1] ASI_M_FLUSH_PROBE

viking_flush_tlb_mm:
	mov SRMMU_CTX_REG, %g1
	ld [%o0 + AOFF_mm_context], %o1
	lda [%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp %o1, -1
	be 1f
#endif
	mov 0x300, %g2
	sta %o1, [%g1] ASI_M_MMUREGS
	sta %g0, [%g2] ASI_M_FLUSH_PROBE
	retl
	sta %g5, [%g1] ASI_M_MMUREGS
#ifndef CONFIG_SMP
1:	retl
	nop
#endif

viking_flush_tlb_range:
	ld [%o0 + VMA_VM_MM], %o0
	mov SRMMU_CTX_REG, %g1
	ld [%o0 + AOFF_mm_context], %o3
	lda [%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp %o3, -1
	be 2f
#endif
	sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta %o3, [%g1] ASI_M_MMUREGS
	and %o1, %o4, %o1
	add %o1, 0x200, %o1
	sta %g0, [%o1] ASI_M_FLUSH_PROBE
1:	sub %o1, %o4, %o1
	cmp %o1, %o2
	blu,a 1b
	sta %g0, [%o1] ASI_M_FLUSH_PROBE
	retl
	sta %g5, [%g1] ASI_M_MMUREGS
#ifndef CONFIG_SMP
2:	retl
	nop
#endif

viking_flush_tlb_page:
	ld [%o0 + VMA_VM_MM], %o0
	mov SRMMU_CTX_REG, %g1
	ld [%o0 + AOFF_mm_context], %o3
	lda [%g1] ASI_M_MMUREGS, %g5
#ifndef CONFIG_SMP
	cmp %o3, -1
	be 1f
#endif
	and %o1, PAGE_MASK, %o1
	sta %o3, [%g1] ASI_M_MMUREGS
	sta %g0, [%o1] ASI_M_FLUSH_PROBE
	retl
	sta %g5, [%g1] ASI_M_MMUREGS
#ifndef CONFIG_SMP
1:	retl
	nop
#endif

viking_flush_page_to_ram:
viking_flush_page_for_dma:
viking_flush_sig_insns:
	retl
	nop

#ifdef CONFIG_SMP
	.globl sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm
	.globl sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page
sun4dsmp_flush_tlb_all:
	sethi %hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst %g5
	bne 2f
	mov 0x400, %g1
	sta %g0, [%g1] ASI_M_FLUSH_PROBE
	retl
	stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
2:	tst %g5
	bne,a 2b
	ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a 1b

sun4dsmp_flush_tlb_mm:
	sethi %hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst %g5
	bne 2f
	mov SRMMU_CTX_REG, %g1
	ld [%o0 + AOFF_mm_context], %o1
	lda [%g1] ASI_M_MMUREGS, %g5
	mov 0x300, %g2
	sta %o1, [%g1] ASI_M_MMUREGS
	sta %g0, [%g2] ASI_M_FLUSH_PROBE
	sta %g5, [%g1] ASI_M_MMUREGS
	retl
	stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
2:	tst %g5
	bne,a 2b
	ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a 1b

sun4dsmp_flush_tlb_range:
	sethi %hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst %g5
	bne 3f
	mov SRMMU_CTX_REG, %g1
	ld [%o0 + VMA_VM_MM], %o0
	ld [%o0 + AOFF_mm_context], %o3
	lda [%g1] ASI_M_MMUREGS, %g5
	sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
	sta %o3, [%g1] ASI_M_MMUREGS
	and %o1, %o4, %o1
	add %o1, 0x200, %o1
	sta %g0, [%o1] ASI_M_FLUSH_PROBE
2:	sub %o1, %o4, %o1
	cmp %o1, %o2
	blu,a 2b
	sta %g0, [%o1] ASI_M_FLUSH_PROBE
	sta %g5, [%g1] ASI_M_MMUREGS
	retl
	stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
3:	tst %g5
	bne,a 3b
	ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a 1b

sun4dsmp_flush_tlb_page:
	sethi %hi(sun4dsmp_flush_tlb_spin), %g3
1:	ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	tst %g5
	bne 2f
	mov SRMMU_CTX_REG, %g1
	ld [%o0 + VMA_VM_MM], %o0
	ld [%o0 + AOFF_mm_context], %o3
	lda [%g1] ASI_M_MMUREGS, %g5
	and %o1, PAGE_MASK, %o1
	sta %o3, [%g1] ASI_M_MMUREGS
	sta %g0, [%o1] ASI_M_FLUSH_PROBE
	sta %g5, [%g1] ASI_M_MMUREGS
	retl
	stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
2:	tst %g5
	bne,a 2b
	ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
	b,a 1b
	nop
#endif