Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 17:18:05 -04:00)
Fixed MTP to work with TWRP
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
26  arch/mips/mm/Makefile  (new file)
@@ -0,0 +1,26 @@
#
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#

obj-y += cache.o dma-default.o extable.o fault.o \
	 gup.o init.o mmap.o page.o page-funcs.o \
	 tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o

obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o

obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o

obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
369  arch/mips/mm/c-octeon.c  (new file)
@@ -0,0 +1,369 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/**
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed
 *
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}


/**
 * Flush a range of kernel addresses out of the icache
 *
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
	up_read(&current->mm->mmap_sem);
}


/**
 * Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:
 * @end:
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

/**
 * Probe Octeon's caches
 *
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/**
 * Setup the Octeon cache flush routines
 *
 */
void octeon_cache_init(void)
{
	probe_octeon();

	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = octeon_flush_icache_all;
	__flush_cache_all = octeon_flush_icache_all;
	flush_cache_mm = octeon_flush_cache_mm;
	flush_cache_page = octeon_flush_cache_page;
	flush_cache_range = octeon_flush_cache_range;
	flush_cache_sigtramp = octeon_flush_cache_sigtramp;
	flush_icache_all = octeon_flush_icache_all;
	flush_data_cache_page = octeon_flush_data_cache_page;
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */

asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/**
 * Called when the exception is not recoverable
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}
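Editor's note: the co_cache_error notifier chain that c-octeon.c exports above lets other kernel code observe these cache-error exceptions before the default register dump runs; if every callback returns NOTIFY_OK, the dump in co_cache_error_call_notifiers() is skipped. A minimal sketch of a client, using only the standard <linux/notifier.h> API plus the two helpers exported above (the handler name and message are illustrative):

#include <linux/notifier.h>
#include <linux/printk.h>

/* Provided by the arch code above via EXPORT_SYMBOL_GPL. */
extern int register_co_cache_error_notifier(struct notifier_block *nb);
extern int unregister_co_cache_error_notifier(struct notifier_block *nb);

/* val is 0 for a recoverable error and 1 for a non-recoverable one,
 * matching the two asmlinkage entry points above; data is NULL. */
static int my_cache_err_event(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	pr_warn("octeon cache error observed, non-recoverable=%lu\n", val);
	return NOTIFY_OK;	/* suppresses the default register dump */
}

static struct notifier_block my_cache_err_nb = {
	.notifier_call = my_cache_err_event,
};

/* Then, e.g. from a platform driver's init/exit paths:
 *	register_co_cache_error_notifier(&my_cache_err_nb);
 *	unregister_co_cache_error_notifier(&my_cache_err_nb);
 */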
346  arch/mips/mm/c-r3k.c  (new file)
@@ -0,0 +1,346 @@
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2001, 2004, 2007 Maciej W. Rozycki
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

static unsigned long icache_size, dcache_size;		/* Size in bytes */
static unsigned long icache_lsize, dcache_lsize;	/* Size in bytes */

unsigned long r3k_cache_size(unsigned long ca_flags)
{
	unsigned long flags, status, dummy, size;
	volatile unsigned long *p;

	p = (volatile unsigned long *) KSEG0;

	flags = read_c0_status();

	/* isolate cache space */
	write_c0_status((ca_flags|flags)&~ST0_IEC);

	*p = 0xa5a55a5a;
	dummy = *p;
	status = read_c0_status();

	if (dummy != 0xa5a55a5a || (status & ST0_CM)) {
		size = 0;
	} else {
		for (size = 128; size <= 0x40000; size <<= 1)
			*(p + size) = 0;
		*p = -1;
		for (size = 128;
		     (size <= 0x40000) && (*(p + size) == 0);
		     size <<= 1)
			;
		if (size > 0x40000)
			size = 0;
	}

	write_c0_status(flags);

	return size * sizeof(*p);
}

unsigned long r3k_cache_lsize(unsigned long ca_flags)
{
	unsigned long flags, status, lsize, i;
	volatile unsigned long *p;

	p = (volatile unsigned long *) KSEG0;

	flags = read_c0_status();

	/* isolate cache space */
	write_c0_status((ca_flags|flags)&~ST0_IEC);

	for (i = 0; i < 128; i++)
		*(p + i) = 0;
	*(volatile unsigned char *)p = 0;
	for (lsize = 1; lsize < 128; lsize <<= 1) {
		*(p + lsize);
		status = read_c0_status();
		if (!(status & ST0_CM))
			break;
	}
	for (i = 0; i < 128; i += lsize)
		*(volatile unsigned char *)(p + i) = 0;

	write_c0_status(flags);

	return lsize * sizeof(*p);
}

static void r3k_probe_cache(void)
{
	dcache_size = r3k_cache_size(ST0_ISC);
	if (dcache_size)
		dcache_lsize = r3k_cache_lsize(ST0_ISC);

	icache_size = r3k_cache_size(ST0_ISC|ST0_SWC);
	if (icache_size)
		icache_lsize = r3k_cache_lsize(ST0_ISC|ST0_SWC);
}

static void r3k_flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long size, i, flags;
	volatile unsigned char *p;

	size = end - start;
	if (size > icache_size || KSEGX(start) != KSEG0) {
		start = KSEG0;
		size = icache_size;
	}
	p = (char *)start;

	flags = read_c0_status();

	/* isolate cache space */
	write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);

	for (i = 0; i < size; i += 0x080) {
		asm(	"sb\t$0, 0x000(%0)\n\t"
			"sb\t$0, 0x004(%0)\n\t"
			"sb\t$0, 0x008(%0)\n\t"
			"sb\t$0, 0x00c(%0)\n\t"
			"sb\t$0, 0x010(%0)\n\t"
			"sb\t$0, 0x014(%0)\n\t"
			"sb\t$0, 0x018(%0)\n\t"
			"sb\t$0, 0x01c(%0)\n\t"
			"sb\t$0, 0x020(%0)\n\t"
			"sb\t$0, 0x024(%0)\n\t"
			"sb\t$0, 0x028(%0)\n\t"
			"sb\t$0, 0x02c(%0)\n\t"
			"sb\t$0, 0x030(%0)\n\t"
			"sb\t$0, 0x034(%0)\n\t"
			"sb\t$0, 0x038(%0)\n\t"
			"sb\t$0, 0x03c(%0)\n\t"
			"sb\t$0, 0x040(%0)\n\t"
			"sb\t$0, 0x044(%0)\n\t"
			"sb\t$0, 0x048(%0)\n\t"
			"sb\t$0, 0x04c(%0)\n\t"
			"sb\t$0, 0x050(%0)\n\t"
			"sb\t$0, 0x054(%0)\n\t"
			"sb\t$0, 0x058(%0)\n\t"
			"sb\t$0, 0x05c(%0)\n\t"
			"sb\t$0, 0x060(%0)\n\t"
			"sb\t$0, 0x064(%0)\n\t"
			"sb\t$0, 0x068(%0)\n\t"
			"sb\t$0, 0x06c(%0)\n\t"
			"sb\t$0, 0x070(%0)\n\t"
			"sb\t$0, 0x074(%0)\n\t"
			"sb\t$0, 0x078(%0)\n\t"
			"sb\t$0, 0x07c(%0)\n\t"
			: : "r" (p) );
		p += 0x080;
	}

	write_c0_status(flags);
}

static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long size, i, flags;
	volatile unsigned char *p;

	size = end - start;
	if (size > dcache_size || KSEGX(start) != KSEG0) {
		start = KSEG0;
		size = dcache_size;
	}
	p = (char *)start;

	flags = read_c0_status();

	/* isolate cache space */
	write_c0_status((ST0_ISC|flags)&~ST0_IEC);

	for (i = 0; i < size; i += 0x080) {
		asm(	"sb\t$0, 0x000(%0)\n\t"
			"sb\t$0, 0x004(%0)\n\t"
			"sb\t$0, 0x008(%0)\n\t"
			"sb\t$0, 0x00c(%0)\n\t"
			"sb\t$0, 0x010(%0)\n\t"
			"sb\t$0, 0x014(%0)\n\t"
			"sb\t$0, 0x018(%0)\n\t"
			"sb\t$0, 0x01c(%0)\n\t"
			"sb\t$0, 0x020(%0)\n\t"
			"sb\t$0, 0x024(%0)\n\t"
			"sb\t$0, 0x028(%0)\n\t"
			"sb\t$0, 0x02c(%0)\n\t"
			"sb\t$0, 0x030(%0)\n\t"
			"sb\t$0, 0x034(%0)\n\t"
			"sb\t$0, 0x038(%0)\n\t"
			"sb\t$0, 0x03c(%0)\n\t"
			"sb\t$0, 0x040(%0)\n\t"
			"sb\t$0, 0x044(%0)\n\t"
			"sb\t$0, 0x048(%0)\n\t"
			"sb\t$0, 0x04c(%0)\n\t"
			"sb\t$0, 0x050(%0)\n\t"
			"sb\t$0, 0x054(%0)\n\t"
			"sb\t$0, 0x058(%0)\n\t"
			"sb\t$0, 0x05c(%0)\n\t"
			"sb\t$0, 0x060(%0)\n\t"
			"sb\t$0, 0x064(%0)\n\t"
			"sb\t$0, 0x068(%0)\n\t"
			"sb\t$0, 0x06c(%0)\n\t"
			"sb\t$0, 0x070(%0)\n\t"
			"sb\t$0, 0x074(%0)\n\t"
			"sb\t$0, 0x078(%0)\n\t"
			"sb\t$0, 0x07c(%0)\n\t"
			: : "r" (p) );
		p += 0x080;
	}

	write_c0_status(flags);
}

static inline void r3k_flush_cache_all(void)
{
}

static inline void r3k___flush_cache_all(void)
{
	r3k_flush_dcache_range(KSEG0, KSEG0 + dcache_size);
	r3k_flush_icache_range(KSEG0, KSEG0 + icache_size);
}

static void r3k_flush_cache_mm(struct mm_struct *mm)
{
}

static void r3k_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
{
}

static void r3k_flush_cache_page(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long pfn)
{
	unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pr_debug("cpage[%08lx,%08lx]\n",
		 cpu_context(smp_processor_id(), mm), addr);

	/* No ASID => no such page in the cache. */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/* Invalid => no such page in the cache. */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	r3k_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
	if (exec)
		r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

static void local_r3k_flush_data_cache_page(void *addr)
{
}

static void r3k_flush_data_cache_page(unsigned long addr)
{
}

static void r3k_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long flags;

	pr_debug("csigtramp[%08lx]\n", addr);

	flags = read_c0_status();

	write_c0_status(flags&~ST0_IEC);

	/* Fill the TLB to avoid an exception with caches isolated. */
	asm(	"lw\t$0, 0x000(%0)\n\t"
		"lw\t$0, 0x004(%0)\n\t"
		: : "r" (addr) );

	write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);

	asm(	"sb\t$0, 0x000(%0)\n\t"
		"sb\t$0, 0x004(%0)\n\t"
		: : "r" (addr) );

	write_c0_status(flags);
}

static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();
	r3k_flush_dcache_range(start, start + size);
}

void r3k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);

	r3k_probe_cache();

	flush_cache_all = r3k_flush_cache_all;
	__flush_cache_all = r3k___flush_cache_all;
	flush_cache_mm = r3k_flush_cache_mm;
	flush_cache_range = r3k_flush_cache_range;
	flush_cache_page = r3k_flush_cache_page;
	flush_icache_range = r3k_flush_icache_range;
	local_flush_icache_range = r3k_flush_icache_range;

	__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;

	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
	local_flush_data_cache_page = local_r3k_flush_data_cache_page;
	flush_data_cache_page = r3k_flush_data_cache_page;

	_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
	_dma_cache_wback = r3k_dma_cache_wback_inv;
	_dma_cache_inv = r3k_dma_cache_wback_inv;

	printk("Primary instruction cache %ldkB, linesize %ld bytes.\n",
		icache_size >> 10, icache_lsize);
	printk("Primary data cache %ldkB, linesize %ld bytes.\n",
		dcache_size >> 10, dcache_lsize);

	build_clear_page();
	build_copy_page();
}
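Editor's note: r3k_cache_size() above works because, with the cache isolated (ST0_ISC set), loads and stores touch only the cache, so the smallest power-of-two word offset that aliases the marker written at KSEG0 equals the cache size in words. A standalone simulation of that scan over a modeled direct-mapped cache; the 4 KiB size is an assumed example, not something taken from this kernel:

#include <stdio.h>

#define CACHE_WORDS 1024		/* model a 4 KiB cache of 4-byte words */
static long cache[CACHE_WORDS];		/* isolated cache: index = addr % size */

static void store(long addr, long v) { cache[addr % CACHE_WORDS] = v; }
static long load(long addr)          { return cache[addr % CACHE_WORDS]; }

int main(void)
{
	long size;

	/* Zero a word at every power-of-two offset, as r3k_cache_size() does. */
	for (size = 128; size <= 0x40000; size <<= 1)
		store(size, 0);
	store(0, -1);	/* clobbers whichever offset aliases word 0 */

	/* The first offset that no longer reads 0 aliases word 0, i.e. it is
	 * the cache size in words.  (The real code also treats a scan that
	 * runs past 0x40000 as "no cache found".) */
	for (size = 128; size <= 0x40000 && load(size) == 0; size <<= 1)
		;
	printf("detected %ld bytes\n", size * (long)sizeof(long));	/* 4096 */
	return 0;
}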
1709  arch/mips/mm/c-r4k.c  (new file)
File diff suppressed because it is too large.
440  arch/mips/mm/c-tx39.c  (new file)
@@ -0,0 +1,440 @@
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;	/* Size in bytes */

#include <asm/r4kcache.h>

extern int r3k_have_wired_reg;	/* in r3k-tlb.c */

/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set push\n\t" \
	".set noreorder\n\t" \
	"b 1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)

/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
	unsigned long flags, config;

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();
	blast_inv_dcache_range(addr, addr + size);
}


/* TX39H2,TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
	if (current_cpu_type() != CPU_TX3912)
		blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}

static inline void tx39_blast_icache_page(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page_indexed(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache(void)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39__flush_cache_vmap(void)
{
	tx39_blast_dcache();
}

static void tx39__flush_cache_vunmap(void)
{
	tx39_blast_dcache();
}

static inline void tx39_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	tx39_blast_dcache();
}

static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}

static void tx39_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	if (cpu_context(smp_processor_id(), mm) != 0)
		tx39_blast_dcache();
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;
	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	tx39_blast_dcache();
}

static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pudp = pud_offset(pgdp, page);
	pmdp = pmd_offset(pudp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation. So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || exec)
			tx39_blast_dcache_page(page);
		if (exec)
			tx39_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache_page_indexed(page);
	if (exec)
		tx39_blast_icache_page_indexed(page);
}

static void local_tx39_flush_data_cache_page(void * addr)
{
	tx39_blast_dcache_page((unsigned long)addr);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}

static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
	if (end - start > dcache_size)
		tx39_blast_dcache();
	else
		protected_blast_dcache_range(start, end);

	if (end - start > icache_size)
		tx39_blast_icache();
	else {
		unsigned long flags, config;
		/* disable icache (set ICE#) */
		local_irq_save(flags);
		config = read_c0_conf();
		write_c0_conf(config & ~TX39_CONF_ICE);
		TX39_STOP_STREAMING();
		protected_blast_icache_range(start, end);
		write_c0_conf(config);
		local_irq_restore(flags);
	}
}

static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while(addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_dcache_range(addr, addr + size);
	}
}

static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while(addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_inv_dcache_range(addr, addr + size);
	}
}

static void tx39_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long config;
	unsigned long flags;

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	write_c0_conf(config);
	local_irq_restore(flags);
}

static __init void tx39_probe_cache(void)
{
	unsigned long config;

	config = read_c0_conf();

	icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
				  TX39_CONF_ICS_SHIFT));
	dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
				  TX39_CONF_DCS_SHIFT));

	current_cpu_data.icache.linesz = 16;
	switch (current_cpu_type()) {
	case CPU_TX3912:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 4;
		break;

	case CPU_TX3927:
		current_cpu_data.icache.ways = 2;
		current_cpu_data.dcache.ways = 2;
		current_cpu_data.dcache.linesz = 16;
		break;

	case CPU_TX3922:
	default:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 16;
		break;
	}
}

void tx39_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	unsigned long config;

	config = read_c0_conf();
	config &= ~TX39_CONF_WBON;
	write_c0_conf(config);

	tx39_probe_cache();

	switch (current_cpu_type()) {
	case CPU_TX3912:
		/* TX39/H core (writethru direct-map cache) */
		__flush_cache_vmap	= tx39__flush_cache_vmap;
		__flush_cache_vunmap	= tx39__flush_cache_vunmap;
		flush_cache_all	= tx39h_flush_icache_all;
		__flush_cache_all	= tx39h_flush_icache_all;
		flush_cache_mm		= (void *) tx39h_flush_icache_all;
		flush_cache_range	= (void *) tx39h_flush_icache_all;
		flush_cache_page	= (void *) tx39h_flush_icache_all;
		flush_icache_range	= (void *) tx39h_flush_icache_all;
		local_flush_icache_range = (void *) tx39h_flush_icache_all;

		flush_cache_sigtramp	= (void *) tx39h_flush_icache_all;
		local_flush_data_cache_page	= (void *) tx39h_flush_icache_all;
		flush_data_cache_page	= (void *) tx39h_flush_icache_all;

		_dma_cache_wback_inv	= tx39h_dma_cache_wback_inv;

		shm_align_mask		= PAGE_SIZE - 1;

		break;

	case CPU_TX3922:
	case CPU_TX3927:
	default:
		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
		r3k_have_wired_reg = 1;
		write_c0_wired(0);	/* set 8 on reset... */
		/* board-dependent init code may set WBON */

		__flush_cache_vmap	= tx39__flush_cache_vmap;
		__flush_cache_vunmap	= tx39__flush_cache_vunmap;

		flush_cache_all = tx39_flush_cache_all;
		__flush_cache_all = tx39___flush_cache_all;
		flush_cache_mm = tx39_flush_cache_mm;
		flush_cache_range = tx39_flush_cache_range;
		flush_cache_page = tx39_flush_cache_page;
		flush_icache_range = tx39_flush_icache_range;
		local_flush_icache_range = tx39_flush_icache_range;

		__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;

		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
		flush_data_cache_page = tx39_flush_data_cache_page;

		_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
		_dma_cache_wback = tx39_dma_cache_wback_inv;
		_dma_cache_inv = tx39_dma_cache_inv;

		shm_align_mask = max_t(unsigned long,
				       (dcache_size / current_cpu_data.dcache.ways) - 1,
				       PAGE_SIZE - 1);

		break;
	}

	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

	current_cpu_data.icache.sets =
		current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
	current_cpu_data.dcache.sets =
		current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

	if (current_cpu_data.dcache.waysize > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	current_cpu_data.icache.waybit = 0;
	current_cpu_data.dcache.waybit = 0;

	printk("Primary instruction cache %ldkB, linesize %d bytes\n",
		icache_size >> 10, current_cpu_data.icache.linesz);
	printk("Primary data cache %ldkB, linesize %d bytes\n",
		dcache_size >> 10, current_cpu_data.dcache.linesz);

	build_clear_page();
	build_copy_page();
	tx39h_flush_icache_all();
}
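Editor's note: the shm_align_mask computed in the writeback branch above is what forces shared mappings onto addresses that cannot alias in the dcache (one way's worth of cache, or a page, whichever is larger). A worked example with assumed figures, an 8 KiB 2-way dcache and 4 KiB pages, roughly a TX3927-class setup:

#include <stdio.h>

int main(void)
{
	unsigned long dcache_size = 8192, ways = 2, page_size = 4096;
	unsigned long waysize = dcache_size / ways;		/* 4096 bytes */
	unsigned long mask = waysize - 1 > page_size - 1 ?
			     waysize - 1 : page_size - 1;	/* the max_t() above */

	printf("shm_align_mask = 0x%lx\n", mask);		/* 0xfff */
	/* waysize <= page_size here, so this configuration would not set
	 * MIPS_CACHE_ALIASES; a 16 KiB 2-way dcache (waysize 8 KiB) would. */
	return 0;
}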
239  arch/mips/mm/cache.c  (new file)
@@ -0,0 +1,239 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pteval);

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		unsigned long page_addr = (unsigned long) page_address(page);

		if (!cpu_has_ic_fills_f_dc ||
		    pages_do_alias(page_addr, address & PAGE_MASK))
			flush_data_cache_page(page_addr);
		ClearPageDcacheDirty(page);
	}
}

void set_pte_at(struct mm_struct *mm, unsigned long addr,
	pte_t *ptep, pte_t pteval)
{
	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
		if (pte_present(pteval))
			mips_flush_dcache_from_pte(pteval, addr);
	}

	set_pte(ptep, pteval);
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}
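Editor's note: the SYSCALL_DEFINE3(cacheflush, ...) above backs the MIPS cacheflush(2) system call that userspace JITs rely on after writing instructions; glibc on MIPS exposes it through <sys/cachectl.h>. A minimal sketch of the userspace side (the emitted instructions just return immediately; error handling kept minimal):

#include <stdio.h>
#include <sys/cachectl.h>	/* MIPS-specific: cacheflush(), ICACHE/DCACHE/BCACHE */
#include <sys/mman.h>

int main(void)
{
	/* Get a writable and executable page, then emit code into it. */
	unsigned int *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 0x03e00008;	/* jr $ra */
	buf[1] = 0x00000000;	/* nop (branch delay slot) */

	/* Make the new instructions visible to instruction fetch; this lands
	 * in flush_icache_range() via the syscall shown above. */
	if (cacheflush(buf, 2 * sizeof(*buf), BCACHE) != 0) {
		perror("cacheflush");
		return 1;
	}
	((void (*)(void))buf)();	/* safe to execute now */
	puts("ok");
	return 0;
}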
582
arch/mips/mm/cerr-sb1.c
Normal file
582
arch/mips/mm/cerr-sb1.c
Normal file
|
@ -0,0 +1,582 @@
|
|||
/*
|
||||
* Copyright (C) 2001,2002,2003 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/sibyte/sb1250.h>
|
||||
#include <asm/sibyte/sb1250_regs.h>
|
||||
|
||||
#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
|
||||
#include <asm/io.h>
|
||||
#include <asm/sibyte/sb1250_scd.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
|
||||
* that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
|
||||
*/
|
||||
#undef DUMP_L2_ECC_TAG_ON_ERROR
|
||||
|
||||
/* SB1 definitions */
|
||||
|
||||
/* XXX should come from config1 XXX */
|
||||
#define SB1_CACHE_INDEX_MASK 0x1fe0
|
||||
|
||||
#define CP0_ERRCTL_RECOVERABLE (1 << 31)
|
||||
#define CP0_ERRCTL_DCACHE (1 << 30)
|
||||
#define CP0_ERRCTL_ICACHE (1 << 29)
|
||||
#define CP0_ERRCTL_MULTIBUS (1 << 23)
|
||||
#define CP0_ERRCTL_MC_TLB (1 << 15)
|
||||
#define CP0_ERRCTL_MC_TIMEOUT (1 << 14)
|
||||
|
||||
#define CP0_CERRI_TAG_PARITY (1 << 29)
|
||||
#define CP0_CERRI_DATA_PARITY (1 << 28)
|
||||
#define CP0_CERRI_EXTERNAL (1 << 26)
|
||||
|
||||
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
|
||||
#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
|
||||
|
||||
#define CP0_CERRD_MULTIPLE (1 << 31)
|
||||
#define CP0_CERRD_TAG_STATE (1 << 30)
|
||||
#define CP0_CERRD_TAG_ADDRESS (1 << 29)
|
||||
#define CP0_CERRD_DATA_SBE (1 << 28)
|
||||
#define CP0_CERRD_DATA_DBE (1 << 27)
|
||||
#define CP0_CERRD_EXTERNAL (1 << 26)
|
||||
#define CP0_CERRD_LOAD (1 << 25)
|
||||
#define CP0_CERRD_STORE (1 << 24)
|
||||
#define CP0_CERRD_FILLWB (1 << 23)
|
||||
#define CP0_CERRD_COHERENCY (1 << 22)
|
||||
#define CP0_CERRD_DUPTAG (1 << 21)
|
||||
|
||||
#define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL))
|
||||
#define CP0_CERRD_IDX_VALID(c) \
|
||||
(((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0)
|
||||
#define CP0_CERRD_CAUSES \
|
||||
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
|
||||
#define CP0_CERRD_TYPES \
|
||||
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
|
||||
#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
|
||||
|
||||
static uint32_t extract_ic(unsigned short addr, int data);
|
||||
static uint32_t extract_dc(unsigned short addr, int data);
|
||||
|
||||
static inline void breakout_errctl(unsigned int val)
|
||||
{
|
||||
if (val & CP0_ERRCTL_RECOVERABLE)
|
||||
printk(" recoverable");
|
||||
if (val & CP0_ERRCTL_DCACHE)
|
||||
printk(" dcache");
|
||||
if (val & CP0_ERRCTL_ICACHE)
|
||||
printk(" icache");
|
||||
if (val & CP0_ERRCTL_MULTIBUS)
|
||||
printk(" multiple-buserr");
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
static inline void breakout_cerri(unsigned int val)
|
||||
{
|
||||
if (val & CP0_CERRI_TAG_PARITY)
|
||||
printk(" tag-parity");
|
||||
if (val & CP0_CERRI_DATA_PARITY)
|
||||
printk(" data-parity");
|
||||
if (val & CP0_CERRI_EXTERNAL)
|
||||
printk(" external");
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
static inline void breakout_cerrd(unsigned int val)
|
||||
{
|
||||
switch (val & CP0_CERRD_CAUSES) {
|
||||
case CP0_CERRD_LOAD:
|
||||
printk(" load,");
|
||||
break;
|
||||
case CP0_CERRD_STORE:
|
||||
printk(" store,");
|
||||
break;
|
||||
case CP0_CERRD_FILLWB:
|
||||
printk(" fill/wb,");
|
||||
break;
|
||||
case CP0_CERRD_COHERENCY:
|
||||
printk(" coherency,");
|
||||
break;
|
||||
case CP0_CERRD_DUPTAG:
|
||||
printk(" duptags,");
|
||||
break;
|
||||
default:
|
||||
printk(" NO CAUSE,");
|
||||
break;
|
||||
}
|
||||
if (!(val & CP0_CERRD_TYPES))
|
||||
printk(" NO TYPE");
|
||||
else {
|
||||
if (val & CP0_CERRD_MULTIPLE)
|
||||
printk(" multi-err");
|
||||
if (val & CP0_CERRD_TAG_STATE)
|
||||
printk(" tag-state");
|
||||
if (val & CP0_CERRD_TAG_ADDRESS)
|
||||
printk(" tag-address");
|
||||
if (val & CP0_CERRD_DATA_SBE)
|
||||
printk(" data-SBE");
|
||||
if (val & CP0_CERRD_DATA_DBE)
|
||||
printk(" data-DBE");
|
||||
if (val & CP0_CERRD_EXTERNAL)
|
||||
printk(" external");
|
||||
}
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
#ifndef CONFIG_SIBYTE_BUS_WATCHER
|
||||
|
||||
static void check_bus_watcher(void)
|
||||
{
|
||||
uint32_t status, l2_err, memio_err;
|
||||
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
|
||||
uint64_t l2_tag;
|
||||
#endif
|
||||
|
||||
/* Destructive read, clears register and interrupt */
|
||||
status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
|
||||
/* Bit 31 is always on, but there's no #define for that */
|
||||
if (status & ~(1UL << 31)) {
|
||||
l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
|
||||
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
|
||||
l2_tag = in64(IOADDR(A_L2_ECC_TAG));
|
||||
#endif
|
||||
memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
|
||||
printk("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
|
||||
printk("\nLast recorded signature:\n");
|
||||
printk("Request %02x from %d, answered by %d with Dcode %d\n",
|
||||
(unsigned int)(G_SCD_BERR_TID(status) & 0x3f),
|
||||
(int)(G_SCD_BERR_TID(status) >> 6),
|
||||
(int)G_SCD_BERR_RID(status),
|
||||
(int)G_SCD_BERR_DCODE(status));
|
||||
#ifdef DUMP_L2_ECC_TAG_ON_ERROR
|
||||
printk("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
|
||||
#endif
|
||||
} else {
|
||||
printk("Bus watcher indicates no error\n");
|
||||
}
|
||||
}
|
||||
#else
|
||||
extern void check_bus_watcher(void);
|
||||
#endif
|
||||
|
||||
asmlinkage void sb1_cache_error(void)
|
||||
{
|
||||
uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
|
||||
unsigned long long cerr_dpa;
|
||||
|
||||
#ifdef CONFIG_SIBYTE_BW_TRACE
|
||||
/* Freeze the trace buffer now */
|
||||
csr_out32(M_SCD_TRACE_CFG_FREEZE, IOADDR(A_SCD_TRACE_CFG));
|
||||
printk("Trace buffer frozen\n");
|
||||
#endif
|
||||
|
||||
printk("Cache error exception on CPU %x:\n",
|
||||
(read_c0_prid() >> 25) & 0x7);
|
||||
|
||||
__asm__ __volatile__ (
|
||||
" .set push\n\t"
|
||||
" .set mips64\n\t"
|
||||
" .set noat\n\t"
|
||||
" mfc0 %0, $26\n\t"
|
||||
" mfc0 %1, $27\n\t"
|
||||
" mfc0 %2, $27, 1\n\t"
|
||||
" dmfc0 $1, $27, 3\n\t"
|
||||
" dsrl32 %3, $1, 0 \n\t"
|
||||
" sll %4, $1, 0 \n\t"
|
||||
" mfc0 %5, $30\n\t"
|
||||
" .set pop"
|
||||
: "=r" (errctl), "=r" (cerr_i), "=r" (cerr_d),
|
||||
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
|
||||
|
||||
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
|
||||
printk(" c0_errorepc == %08x\n", eepc);
|
||||
printk(" c0_errctl == %08x", errctl);
|
||||
breakout_errctl(errctl);
|
||||
if (errctl & CP0_ERRCTL_ICACHE) {
|
||||
printk(" c0_cerr_i == %08x", cerr_i);
|
||||
breakout_cerri(cerr_i);
|
||||
if (CP0_CERRI_IDX_VALID(cerr_i)) {
|
||||
/* Check index of EPC, allowing for delay slot */
|
||||
if (((eepc & SB1_CACHE_INDEX_MASK) != (cerr_i & SB1_CACHE_INDEX_MASK)) &&
|
||||
((eepc & SB1_CACHE_INDEX_MASK) != ((cerr_i & SB1_CACHE_INDEX_MASK) - 4)))
|
||||
printk(" cerr_i idx doesn't match eepc\n");
|
||||
else {
|
||||
res = extract_ic(cerr_i & SB1_CACHE_INDEX_MASK,
|
||||
(cerr_i & CP0_CERRI_DATA) != 0);
|
||||
if (!(res & cerr_i))
|
||||
printk("...didn't see indicated icache problem\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (errctl & CP0_ERRCTL_DCACHE) {
|
||||
printk(" c0_cerr_d == %08x", cerr_d);
|
||||
breakout_cerrd(cerr_d);
|
||||
if (CP0_CERRD_DPA_VALID(cerr_d)) {
|
||||
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
|
||||
if (!CP0_CERRD_IDX_VALID(cerr_d)) {
|
||||
res = extract_dc(cerr_dpa & SB1_CACHE_INDEX_MASK,
|
||||
(cerr_d & CP0_CERRD_DATA) != 0);
|
||||
if (!(res & cerr_d))
|
||||
printk("...didn't see indicated dcache problem\n");
|
||||
} else {
|
||||
if ((cerr_dpa & SB1_CACHE_INDEX_MASK) != (cerr_d & SB1_CACHE_INDEX_MASK))
|
||||
printk(" cerr_d idx doesn't match cerr_dpa\n");
|
||||
else {
|
||||
res = extract_dc(cerr_d & SB1_CACHE_INDEX_MASK,
|
||||
(cerr_d & CP0_CERRD_DATA) != 0);
|
||||
if (!(res & cerr_d))
|
||||
printk("...didn't see indicated problem\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check_bus_watcher();
|
||||
|
||||
/*
|
||||
* Calling panic() when a fatal cache error occurs scrambles the
|
||||
* state of the system (and the cache), making it difficult to
|
||||
* investigate after the fact. However, if you just stall the CPU,
|
||||
* the other CPU may keep on running, which is typically very
|
||||
* undesirable.
|
||||
*/
|
||||
#ifdef CONFIG_SB1_CERR_STALL
|
||||
while (1)
|
||||
;
|
||||
#else
|
||||
panic("unhandled cache error");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
/* Parity lookup table. */
|
||||
static const uint8_t parity[256] = {
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
|
||||
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0
|
||||
};

/* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */
static const uint64_t mask_72_64[8] = {
	0x0738C808099264FFULL,
	0x38C808099264FF07ULL,
	0xC808099264FF0738ULL,
	0x08099264FF0738C8ULL,
	0x099264FF0738C808ULL,
	0x9264FF0738C80809ULL,
	0x64FF0738C8080992ULL,
	0xFF0738C808099264ULL
};

/* Calculate the parity on a range of bits */
static char range_parity(uint64_t dword, int max, int min)
{
	char parity = 0;
	int i;
	dword >>= min;
	for (i = max - min; i >= 0; i--) {
		if (dword & 0x1)
			parity = !parity;
		dword >>= 1;
	}
	return parity;
}

/* Calculate the 4-bit even byte-parity for an instruction */
static unsigned char inst_parity(uint32_t word)
{
	int i, j;
	char parity = 0;
	for (j = 0; j < 4; j++) {
		char byte_parity = 0;
		for (i = 0; i < 8; i++) {
			if (word & 0x80000000)
				byte_parity = !byte_parity;
			word <<= 1;
		}
		parity <<= 1;
		parity |= byte_parity;
	}
	return parity;
}

static uint32_t extract_ic(unsigned short addr, int data)
{
	unsigned short way;
	int valid;
	uint32_t taghi, taglolo, taglohi;
	unsigned long long taglo, va;
	uint64_t tlo_tmp;
	uint8_t lru;
	int res = 0;

	printk("Icache index 0x%04x  ", addr);
	for (way = 0; way < 4; way++) {
		/* Index-load-tag-I */
		__asm__ __volatile__ (
		"	.set	push		\n\t"
		"	.set	noreorder	\n\t"
		"	.set	mips64		\n\t"
		"	.set	noat		\n\t"
		"	cache	4, 0(%3)	\n\t"
		"	mfc0	%0, $29		\n\t"
		"	dmfc0	$1, $28		\n\t"
		"	dsrl32	%1, $1, 0	\n\t"
		"	sll	%2, $1, 0	\n\t"
		"	.set	pop"
		: "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
		: "r" ((way << 13) | addr));

		taglo = ((unsigned long long)taglohi << 32) | taglolo;
		if (way == 0) {
			lru = (taghi >> 14) & 0xff;
			printk("[Bank %d Set 0x%02x]  LRU > %d %d %d %d > MRU\n",
			       ((addr >> 5) & 0x3), /* bank */
			       ((addr >> 7) & 0x3f), /* index */
			       (lru & 0x3),
			       ((lru >> 2) & 0x3),
			       ((lru >> 4) & 0x3),
			       ((lru >> 6) & 0x3));
		}
		va = (taglo & 0xC0000FFFFFFFE000ULL) | addr;
		if ((taglo & (1 << 31)) && (((taglo >> 62) & 0x3) == 3))
			va |= 0x3FFFF00000000000ULL;
		valid = ((taghi >> 29) & 1);
		if (valid) {
			tlo_tmp = taglo & 0xfff3ff;
			if (((taglo >> 10) & 1) ^ range_parity(tlo_tmp, 23, 0)) {
				printk("   ** bad parity in VTag0/G/ASID\n");
				res |= CP0_CERRI_TAG_PARITY;
			}
			if (((taglo >> 11) & 1) ^ range_parity(taglo, 63, 24)) {
				printk("   ** bad parity in R/VTag1\n");
				res |= CP0_CERRI_TAG_PARITY;
			}
		}
		if (valid ^ ((taghi >> 27) & 1)) {
			printk("   ** bad parity for valid bit\n");
			res |= CP0_CERRI_TAG_PARITY;
		}
		printk(" %d  [VA %016llx]  [Vld? %d]  raw tags: %08X-%016llX\n",
		       way, va, valid, taghi, taglo);

		if (data) {
			uint32_t datahi, insta, instb;
			uint8_t predecode;
			int offset;

			/* (hit all banks and ways) */
			for (offset = 0; offset < 4; offset++) {
				/* Index-load-data-I */
				__asm__ __volatile__ (
				"	.set	push\n\t"
				"	.set	noreorder\n\t"
				"	.set	mips64\n\t"
				"	.set	noat\n\t"
				"	cache	6, 0(%3)	\n\t"
				"	mfc0	%0, $29, 1\n\t"
				"	dmfc0	$1, $28, 1\n\t"
				"	dsrl32	%1, $1, 0	\n\t"
				"	sll	%2, $1, 0	\n\t"
				"	.set	pop	\n"
				: "=r" (datahi), "=r" (insta), "=r" (instb)
				: "r" ((way << 13) | addr | (offset << 3)));
				predecode = (datahi >> 8) & 0xff;
				if (((datahi >> 16) & 1) != (uint32_t)range_parity(predecode, 7, 0)) {
					printk("   ** bad parity in predecode\n");
					res |= CP0_CERRI_DATA_PARITY;
				}
				/* XXXKW should/could check predecode bits themselves */
				if (((datahi >> 4) & 0xf) ^ inst_parity(insta)) {
					printk("   ** bad parity in instruction a\n");
					res |= CP0_CERRI_DATA_PARITY;
				}
				if ((datahi & 0xf) ^ inst_parity(instb)) {
					printk("   ** bad parity in instruction b\n");
					res |= CP0_CERRI_DATA_PARITY;
				}
				printk("    %05X-%08X%08X", datahi, insta, instb);
			}
			printk("\n");
		}
	}
	return res;
}

/* Compute the ECC for a data doubleword */
static uint8_t dc_ecc(uint64_t dword)
{
	uint64_t t;
	uint32_t w;
	uint8_t p;
	int i;

	p = 0;
	for (i = 7; i >= 0; i--) {
		p <<= 1;
		t = dword & mask_72_64[i];
		w = (uint32_t)(t >> 32);
		p ^= (parity[w >> 24] ^ parity[(w >> 16) & 0xFF]
		      ^ parity[(w >> 8) & 0xFF] ^ parity[w & 0xFF]);
		w = (uint32_t)(t & 0xFFFFFFFF);
		p ^= (parity[w >> 24] ^ parity[(w >> 16) & 0xFF]
		      ^ parity[(w >> 8) & 0xFF] ^ parity[w & 0xFF]);
	}
	return p;
}
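/*
 * extract_dc() below compares the check byte computed here against the
 * one stored alongside each doubleword in the cache; a syndrome with a
 * single set bit is reported as a single-bit error, anything else as a
 * double-bit error.
 */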

struct dc_state {
	unsigned char val;
	char *name;
};

static struct dc_state dc_states[] = {
	{ 0x00, "INVALID" },
	{ 0x0f, "COH-SHD" },
	{ 0x13, "NCO-E-C" },
	{ 0x19, "NCO-E-D" },
	{ 0x16, "COH-E-C" },
	{ 0x1c, "COH-E-D" },
	{ 0xff, "*ERROR*" }
};

#define DC_TAG_VALID(state) \
	(((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
	 ((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))

static char *dc_state_str(unsigned char state)
{
	struct dc_state *dsc = dc_states;
	while (dsc->val != 0xff) {
		if (dsc->val == state)
			break;
		dsc++;
	}
	return dsc->name;
}

static uint32_t extract_dc(unsigned short addr, int data)
{
	int valid, way;
	unsigned char state;
	uint32_t taghi, taglolo, taglohi;
	unsigned long long taglo, pa;
	uint8_t ecc, lru;
	int res = 0;

	printk("Dcache index 0x%04x  ", addr);
	for (way = 0; way < 4; way++) {
		__asm__ __volatile__ (
		"	.set	push\n\t"
		"	.set	noreorder\n\t"
		"	.set	mips64\n\t"
		"	.set	noat\n\t"
		"	cache	5, 0(%3)\n\t"	/* Index-load-tag-D */
		"	mfc0	%0, $29, 2\n\t"
		"	dmfc0	$1, $28, 2\n\t"
		"	dsrl32	%1, $1, 0\n\t"
		"	sll	%2, $1, 0\n\t"
		"	.set	pop"
		: "=r" (taghi), "=r" (taglohi), "=r" (taglolo)
		: "r" ((way << 13) | addr));

		taglo = ((unsigned long long)taglohi << 32) | taglolo;
		pa = (taglo & 0xFFFFFFE000ULL) | addr;
		if (way == 0) {
			lru = (taghi >> 14) & 0xff;
			printk("[Bank %d Set 0x%02x]  LRU > %d %d %d %d > MRU\n",
			       ((addr >> 11) & 0x2) | ((addr >> 5) & 1), /* bank */
			       ((addr >> 6) & 0x3f), /* index */
			       (lru & 0x3),
			       ((lru >> 2) & 0x3),
			       ((lru >> 4) & 0x3),
			       ((lru >> 6) & 0x3));
		}
		state = (taghi >> 25) & 0x1f;
		valid = DC_TAG_VALID(state);
		printk(" %d  [PA %010llx]  [state %s (%02x)]  raw tags: %08X-%016llX\n",
		       way, pa, dc_state_str(state), state, taghi, taglo);
		if (valid) {
			if (((taglo >> 11) & 1) ^ range_parity(taglo, 39, 26)) {
				printk("   ** bad parity in PTag1\n");
				res |= CP0_CERRD_TAG_ADDRESS;
			}
			if (((taglo >> 10) & 1) ^ range_parity(taglo, 25, 13)) {
				printk("   ** bad parity in PTag0\n");
				res |= CP0_CERRD_TAG_ADDRESS;
			}
		} else {
			res |= CP0_CERRD_TAG_STATE;
		}

		if (data) {
			uint32_t datalohi, datalolo, datahi;
			unsigned long long datalo;
			int offset;
			char bad_ecc = 0;

			for (offset = 0; offset < 4; offset++) {
				/* Index-load-data-D */
				__asm__ __volatile__ (
				"	.set	push\n\t"
				"	.set	noreorder\n\t"
				"	.set	mips64\n\t"
				"	.set	noat\n\t"
				"	cache	7, 0(%3)\n\t"	/* Index-load-data-D */
				"	mfc0	%0, $29, 3\n\t"
				"	dmfc0	$1, $28, 3\n\t"
				"	dsrl32	%1, $1, 0 \n\t"
				"	sll	%2, $1, 0 \n\t"
				"	.set	pop"
				: "=r" (datahi), "=r" (datalohi), "=r" (datalolo)
				: "r" ((way << 13) | addr | (offset << 3)));
				datalo = ((unsigned long long)datalohi << 32) | datalolo;
				ecc = dc_ecc(datalo);
				if (ecc != datahi) {
					int bits;
					bad_ecc |= 1 << (3 - offset);
					ecc ^= datahi;
					bits = hweight8(ecc);
					res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
				}
				printk("    %02X-%016llX", datahi, datalo);
			}
			printk("\n");
			if (bad_ecc)
				printk("  dwords w/ bad ECC: %d %d %d %d\n",
				       !!(bad_ecc & 8), !!(bad_ecc & 4),
				       !!(bad_ecc & 2), !!(bad_ecc & 1));
		}
	}
	return res;
}
42
arch/mips/mm/cex-gen.S
Normal file
@ -0,0 +1,42 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Cache error handler
 */
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

/*
 * Game over.  Go to the button.  Press gently.  Swear where allowed by
 * legislation.
 */
	LEAF(except_vec2_generic)
	.set	noreorder
	.set	noat
	.set	mips0
	/*
	 * This is a very bad place to be.  Our cache error
	 * detection has triggered.  If we have write-back data
	 * in the cache, we may not be able to recover.  As a
	 * first-order desperate measure, turn off KSEG0 caching.
	 */
	mfc0	k0,CP0_CONFIG
	li	k1,~CONF_CM_CMASK
	and	k0,k0,k1
	ori	k0,k0,CONF_CM_UNCACHED
	mtc0	k0,CP0_CONFIG
	/* Give it a few cycles to sink in... */
	nop
	nop
	nop

	j	cache_parity_error
	nop
	END(except_vec2_generic)
70
arch/mips/mm/cex-oct.S
Normal file
@ -0,0 +1,70 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Cavium Networks
 * Cache error handler
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

/*
 * Handle cache error. Indicate to the second level handler whether
 * the exception is recoverable.
 */
	LEAF(except_vec2_octeon)

	.set	push
	.set	mips64r2
	.set	noreorder
	.set	noat


	/* due to an erratum we need to read the COP0 CacheErr (Dcache)
	 * before any cache/DRAM access */

	rdhwr	k0, $0		/* get core_id */
	PTR_LA	k1, cache_err_dcache
	sll	k0, k0, 3
	PTR_ADDU k1, k0, k1	/* k1 = &cache_err_dcache[core_id] */

	dmfc0	k0, CP0_CACHEERR, 1
	sd	k0, (k1)
	dmtc0	$0, CP0_CACHEERR, 1

	/* check whether this is a nested exception */
	mfc0	k1, CP0_STATUS
	andi	k1, k1, ST0_EXL
	beqz	k1, 1f
	nop
	j	cache_parity_error_octeon_non_recoverable
	nop

	/* exception is recoverable */
1:	j	handle_cache_err
	nop

	.set	pop
	END(except_vec2_octeon)

/* We need to jump to handle_cache_err so that the previous handler
 * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
 * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
	LEAF(handle_cache_err)
	.set	push
	.set	noreorder
	.set	noat

	SAVE_ALL
	KMODE
	jal	cache_parity_error_octeon_recoverable
	nop
	j	ret_from_exception
	nop

	.set	pop
	END(handle_cache_err)
170
arch/mips/mm/cex-sb1.S
Normal file
@ -0,0 +1,170 @@
/*
 * Copyright (C) 2001,2002,2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/cacheops.h>
#include <asm/sibyte/board.h>

#define C0_ERRCTL	$26		/* CP0: Error info */
#define C0_CERR_I	$27		/* CP0: Icache error */
#define C0_CERR_D	$27,1		/* CP0: Dcache error */

/*
 * Based on SiByte sample software cache-err/cerr.S
 * CVS revision 1.8.  Only the 'unrecoverable' case
 * is changed.
 */

	.set	mips64
	.set	noreorder
	.set	noat

/*
 * sb1_cerr_vec: code to be copied to the Cache Error
 * Exception vector.  The code must be pushed out to memory
 * (either by copying to Kseg0 and Kseg1 both, or by flushing
 * the L1 and L2) since it is fetched as 0xa0000100.
 *
 * NOTE: Be sure this handler is at most 28 instructions long
 * since the final 16 bytes of the exception vector memory
 * (0x170-0x17f) are used to preserve k0, k1, and ra.
 */

	LEAF(except_vec2_sb1)
	/*
	 * If this error is recoverable, we need to exit the handler
	 * without having dirtied any registers.  To do this,
	 * save/restore k0 and k1 from low memory (Useg is direct
	 * mapped while ERL=1).  Note that we can't save to a
	 * CPU-specific location without ruining a register in the
	 * process.  This means we are vulnerable to data corruption
	 * whenever the handler is reentered by a second CPU.
	 */
	sd	k0,0x170($0)
	sd	k1,0x178($0)

#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
	j	handle_vec2_sb1
	nop
#else
	/*
	 * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell
	 * if we can fast-path out of here for a h/w-recovered error.
	 */
	mfc0	k1,C0_ERRCTL
	bgtz	k1,attempt_recovery
	sll	k0,k1,1

recovered_dcache:
	/*
	 * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA).
	 * Ought to log the occurrence of this recovered dcache error.
	 */
	b	recovered
	mtc0	$0,C0_CERR_D

attempt_recovery:
	/*
	 * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31.  Any
	 * Dcache errors we can recover from will take more extensive
	 * processing.  For now, they are considered "unrecoverable".
	 * Note that 'DC' becoming set (outside of ERL mode) will
	 * cause 'IC' to clear; so if there's an Icache error, we'll
	 * only find out about it if we recover from this error and
	 * continue executing.
	 */
	bltz	k0,unrecoverable
	sll	k0,1

	/*
	 * k0 has C0_ERRCTL << 2, which puts 'IC' at bit 31.  If an
	 * Icache error isn't indicated, I'm not sure why we got here.
	 * Consider that case "unrecoverable" for now.
	 */
	bgez	k0,unrecoverable

attempt_icache_recovery:
	/*
	 * External icache errors are due to uncorrectable ECC errors
	 * in the L2 cache or Memory Controller and cannot be
	 * recovered here.
	 */
	mfc0	k0,C0_CERR_I		/* delay slot */
	li	k1,1 << 26		/* ICACHE_EXTERNAL */
	and	k1,k0
	bnez	k1,unrecoverable
	andi	k0,0x1fe0

	/*
	 * Since the error is internal, the 'IDX' field from
	 * CacheErr-I is valid and we can just invalidate all blocks
	 * in that set.
	 */
	cache	Index_Invalidate_I,(0<<13)(k0)
	cache	Index_Invalidate_I,(1<<13)(k0)
	cache	Index_Invalidate_I,(2<<13)(k0)
	cache	Index_Invalidate_I,(3<<13)(k0)

	/* Ought to log this recovered icache error */

recovered:
	/* Restore the saved registers */
	ld	k0,0x170($0)
	ld	k1,0x178($0)
	eret

unrecoverable:
	/* Unrecoverable Icache or Dcache error; log it and/or fail */
	j	handle_vec2_sb1
	nop
#endif

	END(except_vec2_sb1)

	LEAF(handle_vec2_sb1)
	mfc0	k0,CP0_CONFIG
	li	k1,~CONF_CM_CMASK
	and	k0,k0,k1
	ori	k0,k0,CONF_CM_UNCACHED
	mtc0	k0,CP0_CONFIG

	SSNOP
	SSNOP
	SSNOP
	SSNOP
	bnezl	$0, 1f
1:
	mfc0	k0, CP0_STATUS
	sll	k0, k0, 3	# check CU0 (kernel?)
	bltz	k0, 2f
	nop

	/* Get a valid Kseg0 stack pointer.  Any task's stack pointer
	 * will do, although if we ever want to resume execution we
	 * better not have corrupted any state. */
	get_saved_sp
	move	sp, k1

2:
	j	sb1_cache_error
	nop

	END(handle_vec2_sb1)
389
arch/mips/mm/dma-default.c
Normal file
@ -0,0 +1,389 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
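/*
 * massage_gfp_flags() thus picks the most restrictive GFP zone the
 * device can address: __GFP_DMA when the coherent mask is below 32 bits
 * (or for a legacy ISA device with no struct device), __GFP_DMA32 when
 * the mask is below 64 bits and a DMA32 zone is configured, and no zone
 * restriction otherwise.
 */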

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		if (!hw_coherentio)
			ret = UNCAC_ADDR(ret);
	}

	return ret;
}


void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (cpu_needs_post_dma_flush(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (!plat_device_is_coherent(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);
25
arch/mips/mm/extable.c
Normal file
@ -0,0 +1,25 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/branch.h>
#include <asm/uaccess.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(exception_epc(regs));
	if (fixup) {
		regs->cp0_epc = fixup->nextinsn;

		return 1;
	}

	return 0;
}
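/*
 * fixup_exception() looks up the faulting EPC in the kernel's exception
 * table; if an entry exists, execution resumes at that entry's fixup
 * address instead of killing the kernel.  This is how uaccess routines
 * such as copy_to_user()/copy_from_user() survive bad user pointers.
 */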
331
arch/mips/mm/fault.c
Normal file
@ -0,0 +1,331 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct * vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (cpu_has_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
			tsk->maj_flt++;
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
			tsk->min_flt++;
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?	 */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field, regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}

asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	unsigned long write, unsigned long address)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, write, address);
	exception_exit(prev_state);
}
318
arch/mips/mm/gup.c
Normal file
@ -0,0 +1,318 @@
/*
 * Lockless get_user_pages_fast for MIPS
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 * Copyright (C) 2011 Ralf Baechle
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>

#include <asm/cpu-features.h>
#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#else
	return ACCESS_ONCE(*ptep);
#endif
}
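/*
 * On 32-bit kernels with 64-bit physical addresses a PTE is wider than
 * one native word, so the two halves are read separately with smp_rmb()
 * between them and pte_low is re-checked afterwards; if a concurrent
 * update tore the read, the load is simply retried.
 */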

static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	pte_t *ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if (!pte_present(pte) ||
		    pte_special(pte) || (write && !pte_write(pte))) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_unmap(ptep - 1);
	return 1;
}

static inline void get_head_page_multiple(struct page *page, int nr)
{
	VM_BUG_ON(page != compound_head(page));
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(nr, &page->_count);
	SetPageReferenced(page);
}

static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	pte_t pte = *(pte_t *)&pmd;
	struct page *head, *page;
	int refs;

	if (write && !pte_write(pte))
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(pte_special(pte));
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		if (PageTail(page))
			get_huge_page_tail(page);
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	get_head_page_multiple(head, refs);
	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		/*
		 * The pmd_trans_splitting() check below explains why
		 * pmdp_splitting_flush has to flush the tlb, to stop
		 * this gup-fast code from running while we set the
		 * splitting bit in the pmd. Returning zero will take
		 * the slow path that will call wait_split_huge_page()
		 * if the pmd is still in splitting state. gup-fast
		 * can't because it has irq disabled and
		 * wait_split_huge_page() would never return as the
		 * tlb flush IPI wouldn't run.
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
		}
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	pte_t pte = *(pte_t *)&pud;
	struct page *head, *page;
	int refs;

	if (write && !pte_write(pte))
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(pte_special(pte));
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		if (PageTail(page))
			get_huge_page_tail(page);
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	get_head_page_multiple(head, refs);
	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pmd_range(pud, addr, next, write, pages, nr))
				return 0;
		}
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch
	 * size will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int ret, nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	if (end < start || cpu_has_dc_aliases)
		goto slow_irqon;

	/* XXX: batch / limit 'nr' */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;
slow:
	local_irq_enable();

slow_irqon:
	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start,
			     (end - start) >> PAGE_SHIFT,
			     write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	/* Have to be a bit careful with return values */
	if (nr > 0) {
		if (ret < 0)
			ret = nr;
		else
			ret += nr;
	}
	return ret;
}
138
arch/mips/mm/highmem.c
Normal file
@ -0,0 +1,138 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
79
arch/mips/mm/hugetlbpage.c
Normal file
@ -0,0 +1,79 @@
/*
 * MIPS Huge TLB Page Support for Kernel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 * Copyright 2005, Embedded Alley Solutions, Inc.
 * Matt Porter <mporter@embeddedalley.com>
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
		      unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *)pmd_alloc(mm, pud, addr);

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

int pmd_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
}

int pud_huge(pud_t pud)
{
	return (pud_val(pud) & _PAGE_HUGE) != 0;
}
446
arch/mips/mm/init.c
Normal file
@ -0,0 +1,446 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void*) vaddr;
}
|
||||
|
||||
void *kmap_coherent(struct page *page, unsigned long addr)
|
||||
{
|
||||
return __kmap_pgprot(page, addr, PAGE_KERNEL);
|
||||
}
|
||||
|
||||
void *kmap_noncoherent(struct page *page, unsigned long addr)
|
||||
{
|
||||
return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
|
||||
}
|
||||
|
||||
void kunmap_coherent(void)
|
||||
{
|
||||
unsigned int wired;
|
||||
unsigned long flags, old_ctx;
|
||||
|
||||
local_irq_save(flags);
|
||||
old_ctx = read_c0_entryhi();
|
||||
wired = read_c0_wired() - 1;
|
||||
write_c0_wired(wired);
|
||||
write_c0_index(wired);
|
||||
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
|
||||
write_c0_entrylo0(0);
|
||||
write_c0_entrylo1(0);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
tlbw_use_hazard();
|
||||
write_c0_entryhi(old_ctx);
|
||||
local_irq_restore(flags);
|
||||
pagefault_enable();
|
||||
}
|
||||
|
||||
void copy_user_highpage(struct page *to, struct page *from,
|
||||
unsigned long vaddr, struct vm_area_struct *vma)
|
||||
{
|
||||
void *vfrom, *vto;
|
||||
|
||||
vto = kmap_atomic(to);
|
||||
if (cpu_has_dc_aliases &&
|
||||
page_mapped(from) && !Page_dcache_dirty(from)) {
|
||||
vfrom = kmap_coherent(from, vaddr);
|
||||
copy_page(vto, vfrom);
|
||||
kunmap_coherent();
|
||||
} else {
|
||||
vfrom = kmap_atomic(from);
|
||||
copy_page(vto, vfrom);
|
||||
kunmap_atomic(vfrom);
|
||||
}
|
||||
if ((!cpu_has_ic_fills_f_dc) ||
|
||||
pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
|
||||
flush_data_cache_page((unsigned long)vto);
|
||||
kunmap_atomic(vto);
|
||||
/* Make sure this page is cleared on other CPU's too before using it */
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
void copy_to_user_page(struct vm_area_struct *vma,
|
||||
struct page *page, unsigned long vaddr, void *dst, const void *src,
|
||||
unsigned long len)
|
||||
{
|
||||
if (cpu_has_dc_aliases &&
|
||||
page_mapped(page) && !Page_dcache_dirty(page)) {
|
||||
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
|
||||
memcpy(vto, src, len);
|
||||
kunmap_coherent();
|
||||
} else {
|
||||
memcpy(dst, src, len);
|
||||
if (cpu_has_dc_aliases)
|
||||
SetPageDcacheDirty(page);
|
||||
}
|
||||
if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
|
||||
flush_cache_page(vma, vaddr, page_to_pfn(page));
|
||||
}
|
||||
|
||||
void copy_from_user_page(struct vm_area_struct *vma,
|
||||
struct page *page, unsigned long vaddr, void *dst, const void *src,
|
||||
unsigned long len)
|
||||
{
|
||||
if (cpu_has_dc_aliases &&
|
||||
page_mapped(page) && !Page_dcache_dirty(page)) {
|
||||
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
|
||||
memcpy(dst, vfrom, len);
|
||||
kunmap_coherent();
|
||||
} else {
|
||||
memcpy(dst, src, len);
|
||||
if (cpu_has_dc_aliases)
|
||||
SetPageDcacheDirty(page);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(copy_from_user_page);
|
||||
|
||||
void __init fixrange_init(unsigned long start, unsigned long end,
|
||||
pgd_t *pgd_base)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
int i, j, k;
|
||||
unsigned long vaddr;
|
||||
|
||||
vaddr = start;
|
||||
i = __pgd_offset(vaddr);
|
||||
j = __pud_offset(vaddr);
|
||||
k = __pmd_offset(vaddr);
|
||||
pgd = pgd_base + i;
|
||||
|
||||
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
|
||||
pud = (pud_t *)pgd;
|
||||
for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
|
||||
pmd = (pmd_t *)pud;
|
||||
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
|
||||
if (pmd_none(*pmd)) {
|
||||
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
|
||||
set_pmd(pmd, __pmd((unsigned long)pte));
|
||||
BUG_ON(pte != pte_offset_kernel(pmd, 0));
|
||||
}
|
||||
vaddr += PMD_SIZE;
|
||||
}
|
||||
k = 0;
|
||||
}
|
||||
j = 0;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||
int page_is_ram(unsigned long pagenr)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < boot_mem_map.nr_map; i++) {
|
||||
unsigned long addr, end;
|
||||
|
||||
switch (boot_mem_map.map[i].type) {
|
||||
case BOOT_MEM_RAM:
|
||||
case BOOT_MEM_INIT_RAM:
|
||||
break;
|
||||
default:
|
||||
/* not usable memory */
|
||||
continue;
|
||||
}
|
||||
|
||||
addr = PFN_UP(boot_mem_map.map[i].addr);
|
||||
end = PFN_DOWN(boot_mem_map.map[i].addr +
|
||||
boot_mem_map.map[i].size);
|
||||
|
||||
if (pagenr >= addr && pagenr < end)
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __init paging_init(void)
|
||||
{
|
||||
unsigned long max_zone_pfns[MAX_NR_ZONES];
|
||||
unsigned long lastpfn __maybe_unused;
|
||||
|
||||
pagetable_init();
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
kmap_init();
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
|
||||
#endif
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
|
||||
#endif
|
||||
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
|
||||
lastpfn = max_low_pfn;
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
|
||||
lastpfn = highend_pfn;
|
||||
|
||||
if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
|
||||
printk(KERN_WARNING "This processor doesn't support highmem."
|
||||
" %ldk highmem ignored\n",
|
||||
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
|
||||
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
|
||||
lastpfn = max_low_pfn;
|
||||
}
|
||||
#endif
|
||||
|
||||
free_area_init_nodes(max_zone_pfns);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
static struct kcore_list kcore_kseg0;
|
||||
#endif
|
||||
|
||||
static inline void mem_init_free_highmem(void)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
unsigned long tmp;
|
||||
|
||||
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
|
||||
struct page *page = pfn_to_page(tmp);
|
||||
|
||||
if (!page_is_ram(tmp))
|
||||
SetPageReserved(page);
|
||||
else
|
||||
free_highmem_page(page);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
unsigned __weak platform_maar_init(unsigned num_maars)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void maar_init(void)
|
||||
{
|
||||
unsigned num_maars, used, i;
|
||||
|
||||
if (!cpu_has_maar)
|
||||
return;
|
||||
|
||||
/* Detect the number of MAARs */
|
||||
write_c0_maari(~0);
|
||||
back_to_back_c0_hazard();
|
||||
num_maars = read_c0_maari() + 1;
|
||||
|
||||
/* MAARs should be in pairs */
|
||||
WARN_ON(num_maars % 2);
|
||||
|
||||
/* Configure the required MAARs */
|
||||
used = platform_maar_init(num_maars / 2);
|
||||
|
||||
/* Disable any further MAARs */
|
||||
for (i = (used * 2); i < num_maars; i++) {
|
||||
write_c0_maari(i);
|
||||
back_to_back_c0_hazard();
|
||||
write_c0_maar(0);
|
||||
back_to_back_c0_hazard();
|
||||
}
|
||||
}
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
#ifdef CONFIG_DISCONTIGMEM
|
||||
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
|
||||
#endif
|
||||
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
|
||||
#else
|
||||
max_mapnr = max_low_pfn;
|
||||
#endif
|
||||
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
|
||||
|
||||
maar_init();
|
||||
free_all_bootmem();
|
||||
setup_zero_pages(); /* Setup zeroed pages. */
|
||||
mem_init_free_highmem();
|
||||
mem_init_print_info(NULL);
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
if ((unsigned long) &_text > (unsigned long) CKSEG0)
|
||||
/* The -4 is a hack so that user tools don't have to handle
|
||||
the overflow. */
|
||||
kclist_add(&kcore_kseg0, (void *) CKSEG0,
|
||||
0x80000000 - 4, KCORE_TEXT);
|
||||
#endif
|
||||
}
|
||||
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
|
||||
{
|
||||
unsigned long pfn;
|
||||
|
||||
for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
|
||||
struct page *page = pfn_to_page(pfn);
|
||||
void *addr = phys_to_virt(PFN_PHYS(pfn));
|
||||
|
||||
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
|
||||
free_reserved_page(page);
|
||||
}
|
||||
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
|
||||
"initrd");
|
||||
}
|
||||
#endif
|
||||
|
||||
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
|
||||
|
||||
void __init_refok free_initmem(void)
|
||||
{
|
||||
prom_free_prom_memory();
|
||||
/*
|
||||
* Let the platform define a specific function to free the
|
||||
* init section since EVA may have used any possible mapping
|
||||
* between virtual and physical addresses.
|
||||
*/
|
||||
if (free_init_pages_eva)
|
||||
free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
|
||||
else
|
||||
free_initmem_default(POISON_FREE_INITMEM);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
|
||||
unsigned long pgd_current[NR_CPUS];
|
||||
#endif
|
||||
|
||||
/*
|
||||
* gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
|
||||
* are constants. So we use the variants from asm-offset.h until that gcc
|
||||
* will officially be retired.
|
||||
*
|
||||
* Align swapper_pg_dir in to 64K, allows its address to be loaded
|
||||
* with a single LUI instruction in the TLB handlers. If we used
|
||||
* __aligned(64K), its size would get rounded up to the alignment
|
||||
* size, and waste space. So we place it in its own section and align
|
||||
* it in the linker script.
|
||||
*/
|
||||
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
|
||||
#endif
|
||||
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
|
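__kmap_pgprot() above selects one of FIX_N_COLOURS wired-TLB slots so the temporary kernel mapping lands on the same dcache colour as the user address, avoiding aliases. A stand-alone sketch of that colour selection; the FIX_N_COLOURS value is an assumption for illustration:

#include <stdio.h>

#define PAGE_SHIFT    12
#define FIX_N_COLOURS 8			/* assumed number of cache colours */

static unsigned int colour_of(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
}

int main(void)
{
	/* addresses one page apart land on neighbouring colours */
	printf("%u %u\n", colour_of(0x400000), colour_of(0x401000));	/* 0 1 */
	/* FIX_N_COLOURS pages apart: same colour, so no dcache alias */
	printf("%u %u\n", colour_of(0x400000), colour_of(0x408000));	/* 0 0 */
	return 0;
}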
192
arch/mips/mm/ioremap.c
Normal file
@@ -0,0 +1,192 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
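__ioremap() above avoids page tables entirely for uncached mappings in the low 512 MB by returning a KSEG1 address. A sketch of just that address arithmetic; the constants follow the MIPS32 segment map, and the ROM base in main() is only an example:

#include <stdio.h>

#define CKSEG1		0xa0000000UL
#define IS_LOW512(a)	(!((unsigned long long)(a) & ~0x1fffffffULL))
#define CKSEG1ADDR(pa)	(((pa) & 0x1fffffffUL) | CKSEG1)	/* uncached window */

int main(void)
{
	unsigned long pa = 0x1fc00000UL;	/* example: classic MIPS boot ROM base */

	if (IS_LOW512(pa))
		printf("phys %#lx -> uncached virt %#lx\n", pa, CKSEG1ADDR(pa));
	return 0;
}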
198
arch/mips/mm/mmap.c
Normal file
@@ -0,0 +1,198 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
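COLOUR_ALIGN() above rounds a candidate address up so that addr minus the file offset is a multiple of the aliasing granule; shared mappings then always land on the same cache colour as the page cache copy. A quick check of that arithmetic; the 64 KB mask is an assumed example, since shm_align_mask is set at boot from the cache geometry:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
static unsigned long shm_align_mask = 0xffff;	/* assumed 64 KB aliasing granule */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return (((addr + shm_align_mask) & ~shm_align_mask) +
		((pgoff << PAGE_SHIFT) & shm_align_mask));
}

int main(void)
{
	unsigned long addr = colour_align(0x12345678UL, 3);

	/* virtual address and file offset now share a cache colour */
	assert(((addr - (3UL << PAGE_SHIFT)) & shm_align_mask) == 0);
	printf("%#lx\n", addr);
	return 0;
}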
50
arch/mips/mm/page-funcs.S
Normal file
@@ -0,0 +1,50 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Micro-assembler generated clear_page/copy_page functions.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.
 * Copyright (C) 2012  Ralf Baechle <ralf@linux-mips.org>
 */
#include <asm/asm.h>
#include <asm/regdef.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#define cpu_clear_page_function_name	clear_page_cpu
#define cpu_copy_page_function_name	copy_page_cpu
#else
#define cpu_clear_page_function_name	clear_page
#define cpu_copy_page_function_name	copy_page
#endif

/*
 * Maximum sizes:
 *
 * R4000 128 bytes S-cache:		0x058 bytes
 * R4600 v1.7:				0x05c bytes
 * R4600 v2.0:				0x060 bytes
 * With prefetching, 16 word strides	0x120 bytes
 */
EXPORT(__clear_page_start)
LEAF(cpu_clear_page_function_name)
1:	j	1b		/* Dummy, will be replaced. */
	.space 288
END(cpu_clear_page_function_name)
EXPORT(__clear_page_end)

/*
 * Maximum sizes:
 *
 * R4000 128 bytes S-cache:		0x11c bytes
 * R4600 v1.7:				0x080 bytes
 * R4600 v2.0:				0x07c bytes
 * With prefetching, 16 word strides	0x540 bytes
 */
EXPORT(__copy_page_start)
LEAF(cpu_copy_page_function_name)
1:	j	1b		/* Dummy, will be replaced. */
	.space 1344
END(cpu_copy_page_function_name)
EXPORT(__copy_page_end)
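The `1: j 1b` bodies above are placeholders: build_clear_page() and build_copy_page() in page.c overwrite them at boot, and the .space directives only reserve room for the generated code. For scale, a trivial sketch of the capacity those directives buy, assuming the fixed 4-byte MIPS instruction size:

#include <stdio.h>

int main(void)
{
	/* every MIPS32/64 instruction is 4 bytes wide */
	printf("clear_page buffer: %d instructions\n", 288 / 4);	/* 72 */
	printf("copy_page buffer:  %d instructions\n", 1344 / 4);	/* 336 */
	return 0;
}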
660
arch/mips/mm/page.c
Normal file
@@ -0,0 +1,660 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}

static void set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The pref's used here are using "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner.  A page copy often
	 * ends up copying a lot more data than is commonly used, so this seems
	 * to make sense in terms of reducing cache pollution, but I've no real
	 * performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load. */
			pref_bias_copy_load = 256;
			break;

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the available space in
	 * clear_space_array / copy_page_array.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}

static void build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

static inline void build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

void build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

static void build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

void build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
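build_clear_page() and build_copy_page() above emit each loop body twice: the first half runs offsets 0..half-1, then A0 (and A1) are bumped by 2*off once, and the second half runs offsets -half..-1 so the closing branch can be scheduled one store before the end. A plain C model of that offset sequence, with an assumed half-loop size and word size:

#include <stdio.h>

int main(void)
{
	int half = 64, word = 8;	/* assumed half-loop size and word size */
	long a0 = 0;
	int off;

	for (off = 0; off < half; off += word)	/* first half: 0 .. half-word */
		printf("store at %ld\n", a0 + off);
	a0 += 2 * half;				/* the single pointer bump */
	for (off = -half; off < 0; off += word)	/* second half: negative offsets */
		printf("store at %ld\n", a0 + off);
	/* together the stores covered 0 .. 2*half-word with one A0 update */
	return 0;
}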
70
arch/mips/mm/pgtable-32.c
Normal file
@@ -0,0 +1,70 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 by Ralf Baechle
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

void pgd_init(unsigned long page)
{
	unsigned long *p = (unsigned long *) page;
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i+=8) {
		p[i + 0] = (unsigned long) invalid_pte_table;
		p[i + 1] = (unsigned long) invalid_pte_table;
		p[i + 2] = (unsigned long) invalid_pte_table;
		p[i + 3] = (unsigned long) invalid_pte_table;
		p[i + 4] = (unsigned long) invalid_pte_table;
		p[i + 5] = (unsigned long) invalid_pte_table;
		p[i + 6] = (unsigned long) invalid_pte_table;
		p[i + 7] = (unsigned long) invalid_pte_table;
	}
}

void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
#endif

	/* Initialize the entire pgd.  */
	pgd_init((unsigned long)swapper_pg_dir);
	pgd_init((unsigned long)swapper_pg_dir
		 + sizeof(pgd_t) * USER_PTRS_PER_PGD);

	pgd_base = swapper_pg_dir;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
#endif
}
111
arch/mips/mm/pgtable-64.c
Normal file
@@ -0,0 +1,111 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Silicon Graphics
 * Copyright (C) 2003 by Ralf Baechle
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

void pgd_init(unsigned long page)
{
	unsigned long *p, *end;
	unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
	entry = (unsigned long)invalid_pte_table;
#else
	entry = (unsigned long)invalid_pmd_table;
#endif

	p = (unsigned long *) page;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}

#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(unsigned long addr, unsigned long pagetable)
{
	unsigned long *p, *end;

	p = (unsigned long *) addr;
	end = p + PTRS_PER_PMD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

void pmdp_splitting_flush(struct vm_area_struct *vma,
			 unsigned long address,
			 pmd_t *pmdp)
{
	if (!pmd_trans_splitting(*pmdp)) {
		pmd_t pmd = pmd_mksplitting(*pmdp);
		set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	}
}

#endif

pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd;

	pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);

	return pmd;
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	flush_tlb_all();
}

void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;

	/* Initialize the entire pgd.  */
	pgd_init((unsigned long)swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
#endif
	pgd_base = swapper_pg_dir;
	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
}
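pgd_init() above fills eight entries per iteration, five before the pointer bump and three after it via negative offsets, so the generated loads and stores all use small immediate offsets around the updated base register. The same fill pattern in a self-contained check:

#include <assert.h>

int main(void)
{
	unsigned long tbl[32], entry = 0x12345678UL;	/* 32 stands in for PTRS_PER_PGD */
	unsigned long *p = tbl, *end = tbl + 32;

	do {
		p[0] = entry; p[1] = entry; p[2] = entry;
		p[3] = entry; p[4] = entry;
		p += 8;
		p[-3] = entry; p[-2] = entry; p[-1] = entry;
	} while (p != end);

	for (int i = 0; i < 32; i++)
		assert(tbl[i] == entry);	/* every slot written exactly once */
	return 0;
}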
176
arch/mips/mm/sc-ip22.c
Normal file
176
arch/mips/mm/sc-ip22.c
Normal file
|
@ -0,0 +1,176 @@
|
|||
/*
|
||||
* sc-ip22.c: Indy cache management functions.
|
||||
*
|
||||
* Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
|
||||
* derived from r4xx0.c by David S. Miller (davem@davemloft.net).
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/bcache.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/bootinfo.h>
|
||||
#include <asm/sgi/ip22.h>
|
||||
#include <asm/sgi/mc.h>
|
||||
|
||||
/* Secondary cache size in bytes, if present. */
|
||||
static unsigned long scache_size;
|
||||
|
||||
#undef DEBUG_CACHE
|
||||
|
||||
#define SC_SIZE 0x00080000
|
||||
#define SC_LINE 32
|
||||
#define CI_MASK (SC_SIZE - SC_LINE)
|
||||
#define SC_INDEX(n) ((n) & CI_MASK)
|
||||
|
||||
static inline void indy_sc_wipe(unsigned long first, unsigned long last)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
||||
__asm__ __volatile__(
|
||||
".set\tpush\t\t\t# indy_sc_wipe\n\t"
|
||||
".set\tnoreorder\n\t"
|
||||
".set\tmips3\n\t"
|
||||
".set\tnoat\n\t"
|
||||
"mfc0\t%2, $12\n\t"
|
||||
"li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
|
||||
"mtc0\t$1, $12\n\t"
|
||||
|
||||
"dli\t$1, 0x9000000080000000\n\t"
|
||||
"or\t%0, $1\t\t\t# first line to flush\n\t"
|
||||
"or\t%1, $1\t\t\t# last line to flush\n\t"
|
||||
".set\tat\n\t"
|
||||
|
||||
"1:\tsw\t$0, 0(%0)\n\t"
|
||||
"bne\t%0, %1, 1b\n\t"
|
||||
" daddu\t%0, 32\n\t"
|
||||
|
||||
"mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
|
||||
"nop; nop; nop; nop;\n\t"
|
||||
".set\tpop"
|
||||
: "=r" (first), "=r" (last), "=&r" (tmp)
|
||||
: "0" (first), "1" (last));
|
||||
}
|
||||
|
||||
static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
{
	unsigned long first_line, last_line;
	unsigned long flags;

#ifdef DEBUG_CACHE
	printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
#endif

	/* Catch bad driver code */
	BUG_ON(size == 0);

	/* Which lines to flush?  */
	first_line = SC_INDEX(addr);
	last_line = SC_INDEX(addr + size - 1);

	local_irq_save(flags);
	if (first_line <= last_line) {
		indy_sc_wipe(first_line, last_line);
		goto out;
	}

	indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
	indy_sc_wipe(0, last_line);
out:
	local_irq_restore(flags);
}

static void indy_sc_enable(void)
{
	unsigned long addr, tmp1, tmp2;

	/* This is really cool... */
#ifdef DEBUG_CACHE
	printk("Enabling R4600 SCACHE\n");
#endif
	__asm__ __volatile__(
	".set\tpush\n\t"
	".set\tnoreorder\n\t"
	".set\tmips3\n\t"
	"mfc0\t%2, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	"li\t%1, 0x80\n\t"
	"mtc0\t%1, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	"li\t%0, 0x1\n\t"
	"dsll\t%0, 31\n\t"
	"lui\t%1, 0x9000\n\t"
	"dsll32\t%1, 0\n\t"
	"or\t%0, %1, %0\n\t"
	"sb\t$0, 0(%0)\n\t"
	"mtc0\t$0, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	"mtc0\t%2, $12\n\t"
	"nop; nop; nop; nop;\n\t"
	".set\tpop"
	: "=r" (tmp1), "=r" (tmp2), "=r" (addr));
}

static void indy_sc_disable(void)
{
	unsigned long tmp1, tmp2, tmp3;

#ifdef DEBUG_CACHE
	printk("Disabling R4600 SCACHE\n");
#endif
	__asm__ __volatile__(
	".set\tpush\n\t"
	".set\tnoreorder\n\t"
	".set\tmips3\n\t"
	"li\t%0, 0x1\n\t"
	"dsll\t%0, 31\n\t"
	"lui\t%1, 0x9000\n\t"
	"dsll32\t%1, 0\n\t"
	"or\t%0, %1, %0\n\t"
	"mfc0\t%2, $12\n\t"
	"nop; nop; nop; nop\n\t"
	"li\t%1, 0x80\n\t"
	"mtc0\t%1, $12\n\t"
	"nop; nop; nop; nop\n\t"
	"sh\t$0, 0(%0)\n\t"
	"mtc0\t$0, $12\n\t"
	"nop; nop; nop; nop\n\t"
	"mtc0\t%2, $12\n\t"
	"nop; nop; nop; nop\n\t"
	".set\tpop"
	: "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
}

static inline int __init indy_sc_probe(void)
{
	unsigned int size = ip22_eeprom_read(&sgimc->eeprom, 17);
	if (size == 0)
		return 0;

	size <<= PAGE_SHIFT;
	printk(KERN_INFO "R4600/R5000 SCACHE size %dK, linesize 32 bytes.\n",
	       size >> 10);
	scache_size = size;

	return 1;
}

/* XXX Check with wje if the Indy caches can differentiate between
   writeback + invalidate and just invalidate.  */
static struct bcache_ops indy_sc_ops = {
	.bc_enable = indy_sc_enable,
	.bc_disable = indy_sc_disable,
	.bc_wback_inv = indy_sc_wback_invalidate,
	.bc_inv = indy_sc_wback_invalidate
};

void indy_sc_init(void)
{
	if (indy_sc_probe()) {
		indy_sc_enable();
		bcops = &indy_sc_ops;
	}
}
148
arch/mips/mm/sc-mips.c
Normal file

@ -0,0 +1,148 @@
/*
 * Copyright (C) 2006 Chris Dearman (chris@mips.com),
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu-type.h>
#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>

/*
 * MIPS32/MIPS64 L2 cache handling
 */

/*
 * Writeback and invalidate the secondary cache before DMA.
 */
static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
{
	blast_scache_range(addr, addr + size);
}

/*
 * Invalidate the secondary cache before DMA.
 */
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long lsize = cpu_scache_line_size();
	unsigned long almask = ~(lsize - 1);

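	/*
	 * Write back the lines at the two ends of the range first:
	 * they may be shared with data outside [addr, addr + size),
	 * which would otherwise be lost by the invalidate below.
	 */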
	cache_op(Hit_Writeback_Inv_SD, addr & almask);
	cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
	blast_inv_scache_range(addr, addr + size);
}

static void mips_sc_enable(void)
{
	/* L2 cache is permanently enabled */
}

static void mips_sc_disable(void)
{
	/* L2 cache is permanently enabled */
}

static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
	.bc_inv = mips_sc_inv
};

/*
 * Check if the L2 cache controller is activated on a particular platform.
 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
 * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
 * cache being disabled. However there is no guarantee for this to be
 * true on all platforms. In an act of stupidity the spec defined bits
 * 12..15 as implementation defined so below function will eventually have
 * to be replaced by a platform specific probe.
 */
static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
{
	unsigned int config2 = read_c0_config2();
	unsigned int tmp;

	/* Check the bypass bit (L2B) */
	switch (current_cpu_type()) {
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_BMIPS5000:
		if (config2 & (1 << 12))
			return 0;
	}

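	/*
	 * Config2 SL field (bits 7:4): a value n in 1..7 means an L2
	 * line size of 2 << n bytes; zero means no L2 is attached.
	 */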
	tmp = (config2 >> 4) & 0x0f;
	if (0 < tmp && tmp <= 7)
		c->scache.linesz = 2 << tmp;
	else
		return 0;
	return 1;
}

static inline int __init mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config1, config2;
	unsigned int tmp;

	/* Mark as not present until probe completed */
	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;

	/* Ignore anything but MIPSxx processors */
	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
			      MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
		return 0;

	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
	config1 = read_c0_config1();
	if (!(config1 & MIPS_CONF_M))
		return 0;

	config2 = read_c0_config2();

	if (!mips_sc_is_activated(c))
		return 0;

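	/*
	 * Config2 geometry: SS (bits 11:8) gives sets per way as
	 * 64 << SS, and SA (bits 3:0) gives associativity as SA + 1.
	 */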
	tmp = (config2 >> 8) & 0x0f;
	if (0 <= tmp && tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	tmp = (config2 >> 0) & 0x0f;
	if (0 <= tmp && tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;

	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);

	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;

	return 1;
}

int mips_sc_init(void)
{
	int found = mips_sc_probe();
	if (found) {
		mips_sc_enable();
		bcops = &mips_sc_ops;
	}
	return found;
}
107
arch/mips/mm/sc-r5k.c
Normal file

@ -0,0 +1,107 @@
/*
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org),
 * derived from r4xx0.c by David S. Miller (davem@davemloft.net).
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>

/* Secondary cache size in bytes, if present.  */
static unsigned long scache_size;

#define SC_LINE 32
#define SC_PAGE (128*SC_LINE)

static inline void blast_r5000_scache(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + scache_size;

	while(start < end) {
		cache_op(R5K_Page_Invalidate_S, start);
		start += SC_PAGE;
	}
}

static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (size >= scache_size) {
		blast_r5000_scache();
		return;
	}

	/* On the R5000 secondary cache we cannot
	 * invalidate less than a page at a time.
	 * The secondary cache is physically indexed, write-through.
	 */
	a = addr & ~(SC_PAGE - 1);
	end = (addr + size - 1) & ~(SC_PAGE - 1);
	while (a <= end) {
		cache_op(R5K_Page_Invalidate_S, a);
		a += SC_PAGE;
	}
}

static void r5k_sc_enable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	set_c0_config(R5K_CONF_SE);
	blast_r5000_scache();
	local_irq_restore(flags);
}

static void r5k_sc_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_r5000_scache();
	clear_c0_config(R5K_CONF_SE);
	local_irq_restore(flags);
}

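/*
 * c0_config describes the R5000 S-cache directly: CONF_SC set means
 * no secondary cache is fitted, and the R5K_CONF_SS field scales the
 * size upwards in powers of two from 512 KiB.
 */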
static inline int __init r5k_sc_probe(void)
{
	unsigned long config = read_c0_config();

	if (config & CONF_SC)
		return 0;

	scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);

	printk("R5000 SCACHE size %ldkB, linesize 32 bytes.\n",
	       scache_size >> 10);

	return 1;
}

static struct bcache_ops r5k_sc_ops = {
	.bc_enable = r5k_sc_enable,
	.bc_disable = r5k_sc_disable,
	.bc_wback_inv = r5k_dma_cache_inv_sc,
	.bc_inv = r5k_dma_cache_inv_sc
};

void r5k_sc_init(void)
{
	if (r5k_sc_probe()) {
		r5k_sc_enable();
		bcops = &r5k_sc_ops;
	}
}
269
arch/mips/mm/sc-rm7k.c
Normal file

@ -0,0 +1,269 @@
/*
 * sc-rm7k.c: RM7000 cache management functions.
 *
 * Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/addrspace.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/* Primary cache parameters. */
#define sc_lsize	32
#define tc_pagesize	(32*128)

/* Secondary cache parameters. */
#define scache_size	(256*1024)	/* Fixed to 256KiB on RM7000 */

/* Tertiary cache parameters */
#define tc_lsize	32

extern unsigned long icache_way_size, dcache_way_size;
static unsigned long tcache_size;

#include <asm/r4kcache.h>

static int rm7k_tcache_init;

/*
 * Writeback and invalidate the primary cache dcache before DMA.
 * (XXX These need to be fixed ...)
 */
static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);

	/* Catch bad driver code */
	BUG_ON(size == 0);

	blast_scache_range(addr, addr + size);

	if (!rm7k_tcache_init)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size - 1) & ~(tc_pagesize - 1);
	while(1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}

static void rm7k_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);

	/* Catch bad driver code */
	BUG_ON(size == 0);

	blast_inv_scache_range(addr, addr + size);

	if (!rm7k_tcache_init)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size - 1) & ~(tc_pagesize - 1);
	while(1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}

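/*
 * Invalidate the entire tertiary cache, one tc_pagesize chunk at a
 * time, using the Page_Invalidate_T cache op.
 */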
static void blast_rm7k_tcache(void)
{
	unsigned long start = CKSEG0ADDR(0);
	unsigned long end = start + tcache_size;

	write_c0_taglo(0);

	while (start < end) {
		cache_op(Page_Invalidate_T, start);
		start += tc_pagesize;
	}
}

/*
 * This function is executed in uncached address space.
 */
static void __rm7k_tc_enable(void)
{
	int i;

	set_c0_config(RM7K_CONF_TE);

	write_c0_taglo(0);
	write_c0_taghi(0);

	for (i = 0; i < tcache_size; i += tc_lsize)
		cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}

static void rm7k_tc_enable(void)
{
	if (read_c0_config() & RM7K_CONF_TE)
		return;

	BUG_ON(tcache_size == 0);

	run_uncached(__rm7k_tc_enable);
}

/*
 * This function is executed in uncached address space.
 */
static void __rm7k_sc_enable(void)
{
	int i;

	set_c0_config(RM7K_CONF_SE);

	write_c0_taglo(0);
	write_c0_taghi(0);

	for (i = 0; i < scache_size; i += sc_lsize)
		cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}

static void rm7k_sc_enable(void)
{
	if (read_c0_config() & RM7K_CONF_SE)
		return;

	pr_info("Enabling secondary cache...\n");
	run_uncached(__rm7k_sc_enable);

	if (rm7k_tcache_init)
		rm7k_tc_enable();
}

static void rm7k_tc_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_rm7k_tcache();
	clear_c0_config(RM7K_CONF_TE);
	local_irq_restore(flags);
}

static void rm7k_sc_disable(void)
{
	clear_c0_config(RM7K_CONF_SE);

	if (rm7k_tcache_init)
		rm7k_tc_disable();
}

static struct bcache_ops rm7k_sc_ops = {
	.bc_enable = rm7k_sc_enable,
	.bc_disable = rm7k_sc_disable,
	.bc_wback_inv = rm7k_sc_wback_inv,
	.bc_inv = rm7k_sc_inv
};

/*
 * This is a probing function like the one found in c-r4k.c, we look for the
 * wrap around point with different addresses.
 */
static void __probe_tcache(void)
{
	unsigned long flags, addr, begin, end, pow2;

	begin = (unsigned long) &_stext;
	begin &= ~((8 * 1024 * 1024) - 1);
	end = begin + (8 * 1024 * 1024);

	local_irq_save(flags);

	set_c0_config(RM7K_CONF_TE);

	/* Fill size-multiple lines with a valid tag */
	pow2 = (256 * 1024);
	for (addr = begin; addr <= end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p));
		pow2 <<= 1;
	}

	/* Load first line with a 0 tag, to check after */
	write_c0_taglo(0);
	write_c0_taghi(0);
	cache_op(Index_Store_Tag_T, begin);

	/* Look for the wrap-around */
	pow2 = (512 * 1024);
	for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_T, addr);
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}

	addr -= begin;
	tcache_size = addr;

	clear_c0_config(RM7K_CONF_TE);

	local_irq_restore(flags);
}

void rm7k_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();

	if ((config & RM7K_CONF_SC))
		return;

	c->scache.linesz = sc_lsize;
	c->scache.ways = 4;
	c->scache.waybit = __ffs(scache_size / c->scache.ways);
	c->scache.waysize = scache_size / c->scache.ways;
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
	       (scache_size >> 10), sc_lsize);

	if (!(config & RM7K_CONF_SE))
		rm7k_sc_enable();

	bcops = &rm7k_sc_ops;

	/*
	 * While we're at it let's deal with the tertiary cache.
	 */

	rm7k_tcache_init = 0;
	tcache_size = 0;

	if (config & RM7K_CONF_TC)
		return;

	/*
	 * No efficient way to ask the hardware for the size of the tcache,
	 * so must probe for it.
	 */
	run_uncached(__probe_tcache);
	rm7k_tc_enable();
	rm7k_tcache_init = 1;
	c->tcache.linesz = tc_lsize;
	c->tcache.ways = 1;
	pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}
39
arch/mips/mm/tlb-funcs.S
Normal file

@ -0,0 +1,39 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Micro-assembler generated tlb handler functions.
 *
 * Copyright (C) 2013  Broadcom Corporation.
 *
 * Based on mm/page-funcs.c
 * Copyright (C) 2012  MIPS Technologies, Inc.
 * Copyright (C) 2012  Ralf Baechle <ralf@linux-mips.org>
 */
#include <asm/asm.h>
#include <asm/regdef.h>

#define FASTPATH_SIZE	128

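/*
 * The bodies below only reserve space; at boot the micro-assembler
 * in tlbex.c writes the real, CPU-specific handlers over these
 * placeholders.
 */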
EXPORT(tlbmiss_handler_setup_pgd_start)
LEAF(tlbmiss_handler_setup_pgd)
1:	j	1b		/* Dummy, will be replaced. */
	.space	64
END(tlbmiss_handler_setup_pgd)
EXPORT(tlbmiss_handler_setup_pgd_end)

LEAF(handle_tlbm)
	.space		FASTPATH_SIZE * 4
END(handle_tlbm)
EXPORT(handle_tlbm_end)

LEAF(handle_tlbs)
	.space		FASTPATH_SIZE * 4
END(handle_tlbs)
EXPORT(handle_tlbs_end)

LEAF(handle_tlbl)
	.space		FASTPATH_SIZE * 4
END(handle_tlbl)
EXPORT(handle_tlbl_end)
283
arch/mips/mm/tlb-r3k.c
Normal file

@ -0,0 +1,283 @@
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002  Ralf Baechle
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

#undef DEBUG_TLB

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER				\
	__asm__ __volatile__(		\
	".set	push\n\t"		\
	".set	noreorder\n\t"		\
	"nop\n\t"			\
	".set	pop\n\t")

int r3k_have_wired_reg;		/* should be in cpu_data? */

/* TLB operations. */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
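	/*
	 * Skip the wired slots (the first 8 entries on a plain R3000)
	 * and rewrite every other slot with an unmapped KSEG address;
	 * the R3000 keeps its TLB index in bits 13:8, hence the << 8.
	 */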
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
	for (; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry << 8);
		write_c0_entryhi((entry | 0x80000) << 12);
		BARRIER;
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
		printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif
		drop_mmu_context(mm, cpu);
	}
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

#ifdef DEBUG_TLB
	printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int pid = read_c0_entryhi();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += PAGE_SIZE;	/* BARRIER */
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entryhi(KSEG0);
			if (idx < 0)		/* BARRIER */
				continue;
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & ASID_MASK;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entryhi(KSEG0);
		if (idx < 0)			/* BARRIER */
			goto finish;
		tlb_write_indexed();

finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK)) ||
	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       (cpu_context(smp_processor_id(), vma->vm_mm)), pid);
	}
#endif

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_entryhi(address | pid);
	BARRIER;
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(pte_val(pte));
	write_c0_entryhi(address | pid);
	if (idx < 0) {					/* BARRIER */
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long old_ctx;
	static unsigned long wired = 0;

	if (r3k_have_wired_reg) {	/* TX39XX */
		unsigned long old_pagemask;
		unsigned long w;

#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n",
		       entrylo0, entryhi, pagemask);
#endif

		local_irq_save(flags);
		/* Save old context and create impossible VPN2 value */
		old_ctx = read_c0_entryhi() & ASID_MASK;
		old_pagemask = read_c0_pagemask();
		w = read_c0_wired();
		write_c0_wired(w + 1);
		write_c0_index(w << 8);
		write_c0_pagemask(pagemask);
		write_c0_entryhi(entryhi);
		write_c0_entrylo0(entrylo0);
		BARRIER;
		tlb_write_indexed();

		write_c0_entryhi(old_ctx);
		write_c0_pagemask(old_pagemask);
		local_flush_tlb_all();
		local_irq_restore(flags);

	} else if (wired < 8) {
#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n",
		       entrylo0, entryhi);
#endif

		local_irq_save(flags);
		old_ctx = read_c0_entryhi() & ASID_MASK;
		write_c0_entrylo0(entrylo0);
		write_c0_entryhi(entryhi);
		write_c0_index(wired);
		wired++;			/* BARRIER */
		tlb_write_indexed();
		write_c0_entryhi(old_ctx);
		local_flush_tlb_all();
		local_irq_restore(flags);
	}
}

void tlb_init(void)
{
	local_flush_tlb_all();

	build_tlb_refill_handler();
}
543
arch/mips/mm/tlb-r4k.c
Normal file

@ -0,0 +1,543 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
		write_c0_diag(4);
		break;
	default:
		break;
	}
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
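		/*
		 * Flush page pairs individually only for small ranges:
		 * the cutoff is an eighth of the TLB when an FTLB is
		 * present, half otherwise; larger ranges are handled
		 * by dropping the whole context below.
		 */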
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		htw_stop();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

finish:
		write_c0_entryhi(oldpid);
		htw_start();
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_itlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
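		/*
		 * EntryLo1 covers the second half of the huge page:
		 * half the page size in EntryLo PFN units (physical
		 * address >> 12, field shifted left by 6) comes out
		 * as HPAGE_SIZE >> 7.
		 */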
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
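	/*
	 * Temporary entries are handed out downwards from the top of
	 * the TLB; colliding with the wired region means no free slot
	 * is left.
	 */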
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		if (cpu_has_rixiex)
			pg |= PG_IEC;
		write_c0_pagegrain(pg);
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);
247
arch/mips/mm/tlb-r8k.c
Normal file

@ -0,0 +1,247 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

extern void build_tlb_refill_handler(void);

#define TFP_TLB_SIZE		384
#define TFP_TLB_SET_SHIFT	7

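/*
 * The TFP (R8000) TLB holds 384 entries arranged as 3 sets of 128;
 * a flat entry number is split into a set (entry >> 7, written to
 * c0_tlbset) and a virtual index within the set.
 */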
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo(0);

	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
		write_c0_vaddr(entry << PAGE_SHIFT);
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write();
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	if (!cpu_context(cpu, mm))
		return;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);

out_restore:
	local_irq_restore(flags);
}

/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size > TFP_TLB_SIZE / 2) {
		local_flush_tlb_all();
		return;
	}

	local_irq_save(flags);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}

	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();

	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

static void probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
}

void tlb_init(void)
{
	unsigned int config = read_c0_config();
	unsigned long status;

	probe_tlb(config);

	status = read_c0_status();
	status &= ~(ST0_UPS | ST0_KPS);
#ifdef CONFIG_PAGE_SIZE_4KB
	status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
#elif defined(CONFIG_PAGE_SIZE_8KB)
	status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
#elif defined(CONFIG_PAGE_SIZE_16KB)
	status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
#elif defined(CONFIG_PAGE_SIZE_64KB)
	status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
#endif
	write_c0_status(status);

	write_c0_wired(0);

	local_flush_tlb_all();

	build_tlb_refill_handler();
}
27
arch/mips/mm/tlbex-fault.S
Normal file

@ -0,0 +1,27 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

	.macro tlb_do_page_fault, write
	NESTED(tlb_do_page_fault_\write, PT_SIZE, sp)
	SAVE_ALL
	MFC0	a2, CP0_BADVADDR
	KMODE
	move	a0, sp
	REG_S	a2, PT_BVADDR(sp)
	li	a1, \write
	PTR_LA	ra, ret_from_exception
	j	do_page_fault
	END(tlb_do_page_fault_\write)
	.endm

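/* Instantiate the read (0) and write (1) page fault trampolines. */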
	tlb_do_page_fault 0
	tlb_do_page_fault 1
2368
arch/mips/mm/tlbex.c
Normal file
File diff suppressed because it is too large

235
arch/mips/mm/uasm-micromips.c
Normal file

@ -0,0 +1,235 @@
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* A small micro-assembler. It is intentionally kept simple, does only
|
||||
* support a subset of instructions, and does not try to hide pipeline
|
||||
* effects like branch delay slots.
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
|
||||
* Copyright (C) 2005, 2007 Maciej W. Rozycki
|
||||
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
|
||||
* Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/inst.h>
|
||||
#include <asm/elf.h>
|
||||
#include <asm/bugs.h>
|
||||
#define UASM_ISA _UASM_ISA_MICROMIPS
|
||||
#include <asm/uasm.h>
|
||||
|
||||
#define RS_MASK 0x1f
|
||||
#define RS_SH 16
|
||||
#define RT_MASK 0x1f
|
||||
#define RT_SH 21
|
||||
#define SCIMM_MASK 0x3ff
|
||||
#define SCIMM_SH 16
|
||||
|
||||
/* This macro sets the non-variable bits of an instruction. */
|
||||
#define M(a, b, c, d, e, f) \
|
||||
((a) << OP_SH \
|
||||
| (b) << RT_SH \
|
||||
| (c) << RS_SH \
|
||||
| (d) << RD_SH \
|
||||
| (e) << RE_SH \
|
||||
| (f) << FUNC_SH)
|
||||
|
||||
/* Define these when we are not the ISA the kernel is being compiled with. */
|
||||
#ifndef CONFIG_CPU_MICROMIPS
|
||||
#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
|
||||
#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
|
||||
#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
|
||||
#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
|
||||
#endif
|
||||
|
||||
#include "uasm.c"
|
||||
|
||||
static struct insn insn_table_MM[] = {
|
||||
{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
|
||||
{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
|
||||
{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
|
||||
{ insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
|
||||
{ insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
|
||||
{ insn_beql, 0, 0 },
|
||||
{ insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
|
||||
{ insn_bgezl, 0, 0 },
|
||||
{ insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
|
||||
{ insn_bltzl, 0, 0 },
|
||||
{ insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
|
||||
{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
|
||||
{ insn_daddu, 0, 0 },
|
||||
{ insn_daddiu, 0, 0 },
|
||||
{ insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
|
||||
{ insn_dmfc0, 0, 0 },
|
||||
{ insn_dmtc0, 0, 0 },
|
||||
{ insn_dsll, 0, 0 },
|
||||
{ insn_dsll32, 0, 0 },
|
||||
{ insn_dsra, 0, 0 },
|
||||
{ insn_dsrl, 0, 0 },
|
||||
{ insn_dsrl32, 0, 0 },
|
||||
	{ insn_drotr, 0, 0 },
	{ insn_drotr32, 0, 0 },
	{ insn_dsubu, 0, 0 },
	{ insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
	{ insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
	{ insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
	{ insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS },
	{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
	{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_ld, 0, 0 },
	{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
	{ insn_lld, 0, 0 },
	{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
	{ insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
	{ insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
	{ insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
	{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
	{ insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
	{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
	{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
	{ insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
	{ insn_rfe, 0, 0 },
	{ insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
	{ insn_scd, 0, 0 },
	{ insn_sd, 0, 0 },
	{ insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
	{ insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
	{ insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
	{ insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
	{ insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
	{ insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
	{ insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD },
	{ insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
	{ insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
	{ insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
	{ insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS },
	{ insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
	{ insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
	{ insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
	{ insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
	{ insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM },
	{ insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS },
	{ insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
	{ insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
	{ insn_dins, 0, 0 },
	{ insn_dinsm, 0, 0 },
	{ insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
	{ insn_bbit0, 0, 0 },
	{ insn_bbit1, 0, 0 },
	{ insn_lwx, 0, 0 },
	{ insn_ldx, 0, 0 },
	{ insn_invalid, 0, 0 }
};

#undef M

static inline u32 build_bimm(s32 arg)
{
	WARN(arg > 0xffff || arg < -0x10000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}

static inline u32 build_jimm(u32 arg)
{

	WARN(arg & ~((JIMM_MASK << 2) | 1),
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 1) & JIMM_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
		if (insn_table_MM[i].opcode == opc) {
			ip = &insn_table_MM[i];
			break;
		}

	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
		panic("Unsupported Micro-assembler instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) {
		if (opc == insn_mfc0 || opc == insn_mtc0)
			op |= build_rt(va_arg(ap, u32));
		else
			op |= build_rs(va_arg(ap, u32));
	}
	if (ip->fields & RT) {
		if (opc == insn_mfc0 || opc == insn_mtc0)
			op |= build_rs(va_arg(ap, u32));
		else
			op |= build_rt(va_arg(ap, u32));
	}
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM)
		op |= build_scimm(va_arg(ap, u32));
	va_end(ap);

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	**buf = ((op & 0xffff) << 16) | (op >> 16);
#else
	**buf = op;
#endif
	(*buf)++;
}

static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		*rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
#else
		*rel->addr |= build_bimm(laddr - (raddr + 4));
#endif
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}
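
The build_bimm() above differs from the classic-MIPS version later in this commit: microMIPS branch offsets are counted in 16-bit halfwords (shift by 1), while classic offsets are counted in 32-bit words (shift by 2). A standalone sketch of the two packings; the function names and the main() harness are illustrative, not kernel code:

#include <assert.h>
#include <stdint.h>

/* microMIPS: offset in halfwords, sign bit in bit 15 */
static uint32_t bimm_micromips(int32_t arg)
{
	return ((arg < 0) ? (1u << 15) : 0) | ((arg >> 1) & 0x7fff);
}

/* classic MIPS: offset in words, same field layout */
static uint32_t bimm_classic(int32_t arg)
{
	return ((arg < 0) ? (1u << 15) : 0) | ((arg >> 2) & 0x7fff);
}

int main(void)
{
	/* the same +12 byte branch distance encodes differently */
	assert(bimm_classic(12) == 3);		/* 3 words */
	assert(bimm_micromips(12) == 6);	/* 6 halfwords */
	return 0;
}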
220
arch/mips/mm/uasm-mips.c
Normal file
@ -0,0 +1,220 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * A small micro-assembler. It is intentionally kept simple, does only
 * support a subset of instructions, and does not try to hide pipeline
 * effects like branch delay slots.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#define UASM_ISA _UASM_ISA_CLASSIC
#include <asm/uasm.h>

#define RS_MASK 0x1f
#define RS_SH 21
#define RT_MASK 0x1f
#define RT_SH 16
#define SCIMM_MASK 0xfffff
#define SCIMM_SH 6

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)		\
	((a) << OP_SH			\
	 | (b) << RS_SH			\
	 | (c) << RT_SH			\
	 | (d) << RD_SH			\
	 | (e) << RE_SH			\
	 | (f) << FUNC_SH)

/* Define these when we are not the ISA the kernel is being compiled with. */
#ifdef CONFIG_CPU_MICROMIPS
#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
#endif

#include "uasm.c"

static struct insn insn_table[] = {
	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
	{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
	{ insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
	{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
	{ insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
	{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
	{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD },
	{ insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
	{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
	{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
	{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
	{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
	{ insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
	{ insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD },
	{ insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_srlv, M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD },
	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE },
	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
	{ insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
	{ insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM },
	{ insn_wsbh, M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD },
	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
	{ insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
	{ insn_invalid, 0, 0 }
};

#undef M

static inline u32 build_bimm(s32 arg)
{
	WARN(arg > 0x1ffff || arg < -0x20000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

static inline u32 build_jimm(u32 arg)
{
	WARN(arg & ~(JIMM_MASK << 2),
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
		panic("Unsupported Micro-assembler instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS)
		op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT)
		op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM)
		op |= build_scimm(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}

static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}
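
For reference, a standalone sketch of what build_insn() produces from this table for one concrete case. encode_addiu() and the main() harness are hypothetical, but the shifts and the resulting word follow the M(addiu_op, ...) match word (addiu_op is opcode 9) plus the RS, RT and SIMM field builders above:

#include <assert.h>
#include <stdint.h>

#define OP_SH 26
#define RS_SH 21
#define RT_SH 16

/* pack ADDIU rt, rs, imm the same way build_insn() does */
static uint32_t encode_addiu(unsigned rt, unsigned rs, int16_t imm)
{
	return (9u << OP_SH)		/* match word from M(addiu_op, ...) */
	     | ((rs & 0x1f) << RS_SH)	/* build_rs() */
	     | ((rt & 0x1f) << RT_SH)	/* build_rt() */
	     | (uint16_t)imm;		/* build_simm(): low 16 bits, two's complement */
}

int main(void)
{
	/* "addiu $sp, $sp, -32" assembles to 0x27bdffe0 ($sp is register 29) */
	assert(encode_addiu(29, 29, -32) == 0x27bdffe0);
	return 0;
}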
581
arch/mips/mm/uasm.c
Normal file
@ -0,0 +1,581 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * A small micro-assembler. It is intentionally kept simple, does only
 * support a subset of instructions, and does not try to hide pipeline
 * effects like branch delay slots.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
 */

enum fields {
	RS = 0x001,
	RT = 0x002,
	RD = 0x004,
	RE = 0x008,
	SIMM = 0x010,
	UIMM = 0x020,
	BIMM = 0x040,
	JIMM = 0x080,
	FUNC = 0x100,
	SET = 0x200,
	SCIMM = 0x400
};

#define OP_MASK 0x3f
#define OP_SH 26
#define RD_MASK 0x1f
#define RD_SH 11
#define RE_MASK 0x1f
#define RE_SH 6
#define IMM_MASK 0xffff
#define IMM_SH 0
#define JIMM_MASK 0x3ffffff
#define JIMM_SH 0
#define FUNC_MASK 0x3f
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0

enum opcode {
	insn_invalid,
	insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
	insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
	insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
	insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
	insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
	insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
	insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
	insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
	insn_xor, insn_xori, insn_yield,
};

struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};

static inline u32 build_rs(u32 arg)
{
	WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static inline u32 build_rt(u32 arg)
{
	WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

static inline u32 build_rd(u32 arg)
{
	WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static inline u32 build_re(u32 arg)
{
	WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

static inline u32 build_simm(s32 arg)
{
	WARN(arg > 0x7fff || arg < -0x8000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	return arg & 0xffff;
}

static inline u32 build_uimm(u32 arg)
{
	WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return arg & IMM_MASK;
}

static inline u32 build_scimm(u32 arg)
{
	WARN(arg & ~SCIMM_MASK,
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & SCIMM_MASK) << SCIMM_SH;
}

static inline u32 build_func(u32 arg)
{
	WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return arg & FUNC_MASK;
}

static inline u32 build_set(u32 arg)
{
	WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return arg & SET_MASK;
}
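
Note how build_simm() above relies on "arg & 0xffff" yielding the 16-bit two's-complement pattern of a small negative immediate. A minimal standalone sketch of that round trip (illustrative only; the sign-extending cast back assumes the usual two's-complement int16_t conversion):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int32_t arg = -4;
	assert((arg & 0xffff) == 0xfffc);	/* high sign bits masked away */
	assert((int16_t)(arg & 0xffff) == -4);	/* a decoder sign-extends it back */
	return 0;
}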

static void build_insn(u32 **buf, enum opcode opc, ...);

#define I_u1u2u3(op)					\
Ip_u1u2u3(op)						\
{							\
	build_insn(buf, insn##op, a, b, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_s3s1s2(op)					\
Ip_s3s1s2(op)						\
{							\
	build_insn(buf, insn##op, b, c, a);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1u3(op)					\
Ip_u2u1u3(op)						\
{							\
	build_insn(buf, insn##op, b, a, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u3u2u1(op)					\
Ip_u3u2u1(op)						\
{							\
	build_insn(buf, insn##op, c, b, a);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u3u1u2(op)					\
Ip_u3u1u2(op)						\
{							\
	build_insn(buf, insn##op, b, c, a);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1u2s3(op)					\
Ip_u1u2s3(op)						\
{							\
	build_insn(buf, insn##op, a, b, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2s3u1(op)					\
Ip_u2s3u1(op)						\
{							\
	build_insn(buf, insn##op, c, a, b);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1s3(op)					\
Ip_u2u1s3(op)						\
{							\
	build_insn(buf, insn##op, b, a, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1msbu3(op)					\
Ip_u2u1msbu3(op)					\
{							\
	build_insn(buf, insn##op, b, a, c+d-1, c);	\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1msb32u3(op)				\
Ip_u2u1msbu3(op)					\
{							\
	build_insn(buf, insn##op, b, a, c+d-33, c);	\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1msbdu3(op)				\
Ip_u2u1msbu3(op)					\
{							\
	build_insn(buf, insn##op, b, a, d-1, c);	\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1u2(op)					\
Ip_u1u2(op)						\
{							\
	build_insn(buf, insn##op, a, b);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1(op)					\
Ip_u1u2(op)						\
{							\
	build_insn(buf, insn##op, b, a);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1s2(op)					\
Ip_u1s2(op)						\
{							\
	build_insn(buf, insn##op, a, b);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1(op)					\
Ip_u1(op)						\
{							\
	build_insn(buf, insn##op, a);			\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_0(op)						\
Ip_0(op)						\
{							\
	build_insn(buf, insn##op);			\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);
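
Each instantiation below turns one of these I_* macros into an exported emitter function; the letter suffix encodes how the caller's argument order is permuted into build_insn()'s RS-first order. A rough expansion sketch for I_u2u1s3(_addiu), assuming the Ip_u2u1s3() prototype macro from asm/uasm.h and ignoring the ISAFUNC() name wrapping:

void uasm_i_addiu(u32 **buf, unsigned int a, unsigned int b, signed int c)
{
	/* caller passes (rt, rs, imm); build_insn() consumes RS first,
	 * so the u2u1s3 permutation swaps the first two arguments */
	build_insn(buf, insn_addiu, b, a, c);
}
UASM_EXPORT_SYMBOL(uasm_i_addiu);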

I_u2u1s3(_addiu)
I_u3u1u2(_addu)
I_u2u1u3(_andi)
I_u3u1u2(_and)
I_u1u2s3(_beq)
I_u1u2s3(_beql)
I_u1s2(_bgez)
I_u1s2(_bgezl)
I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u2s3u1(_cache)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
I_u1u2(_divu)
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
I_u2u1u3(_dsra)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
I_u2u1u3(_drotr)
I_u2u1u3(_drotr32)
I_u3u1u2(_dsubu)
I_0(_eret)
I_u2u1msbdu3(_ext)
I_u2u1msbu3(_ins)
I_u1(_j)
I_u1(_jal)
I_u2u1(_jalr)
I_u1(_jr)
I_u2s3u1(_lb)
I_u2s3u1(_ld)
I_u2s3u1(_lh)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
I_u2s3u1(_lw)
I_u1u2u3(_mfc0)
I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
I_u3u1u2(_mul)
I_u2u1u3(_ori)
I_u3u1u2(_or)
I_0(_rfe)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
I_u2s3u1(_sd)
I_u2u1u3(_sll)
I_u3u2u1(_sllv)
I_s3s1s2(_slt)
I_u2u1s3(_sltiu)
I_u3u1u2(_sltu)
I_u2u1u3(_sra)
I_u2u1u3(_srl)
I_u3u2u1(_srlv)
I_u2u1u3(_rotr)
I_u3u1u2(_subu)
I_u2s3u1(_sw)
I_u1(_sync)
I_0(_tlbp)
I_0(_tlbr)
I_0(_tlbwi)
I_0(_tlbwr)
I_u1(_wait);
I_u2u1(_wsbh)
I_u3u1u2(_xor)
I_u2u1u3(_xori)
I_u2u1(_yield)
I_u2u1msbu3(_dins);
I_u2u1msb32u3(_dinsm);
I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
I_u3u1u2(_lwx)
I_u3u1u2(_ldx)

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
			  unsigned int c)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
		/*
		 * As per erratum Core-14449, replace prefetches 0-4,
		 * 6-24 with 'pref 28'.
		 */
		build_insn(buf, insn_pref, c, 28, b);
	else
		build_insn(buf, insn_pref, c, a, b);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
#else
I_u2s3u1(_pref)
#endif
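
The hint rewrite above only triggers on OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) parts. A standalone sketch of just the hint mapping; the helper name and harness are hypothetical:

#include <assert.h>

/* Core-14449 workaround: prefetch hints 0-4 and 6-24 become hint 28 */
static unsigned int octeon_pref_hint(unsigned int a)
{
	return (a <= 24 && a != 5) ? 28 : a;
}

int main(void)
{
	assert(octeon_pref_hint(0) == 28);	/* rewritten */
	assert(octeon_pref_hint(5) == 5);	/* hint 5 is exempt */
	assert(octeon_pref_hint(30) == 30);	/* hints above 24 pass through */
	return 0;
}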

/* Handle labels. */
void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
{
	(*lab)->addr = addr;
	(*lab)->lab = lid;
	(*lab)++;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));

int ISAFUNC(uasm_in_compat_space_p)(long addr)
{
	/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
#else
	return 1;
#endif
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));

static int uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#else
	return 0;
#endif
}

static int uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#else
	return 0;
#endif
}

int ISAFUNC(uasm_rel_hi)(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));

int ISAFUNC(uasm_rel_lo)(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
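
Because the low half is later added back with a sign-extending addiu/daddiu, uasm_rel_hi() pre-biases the value by 0x8000 so that hi and lo recombine exactly. A standalone sketch checking that identity; rel_hi()/rel_lo() mirror the functions above under hypothetical names:

#include <assert.h>

static int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	long val = 0x1234ffffL;
	/* here lo comes out as -1 and hi as 0x1235, not 0x1234 */
	assert(rel_lo(val) == -1 && rel_hi(val) == 0x1235);
	/* "lui reg, hi; addiu reg, reg, lo" reproduces val exactly */
	assert(((long)rel_hi(val) << 16) + rel_lo(val) == val);
	return 0;
}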

void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
{
	if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
		ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
		if (uasm_rel_higher(addr))
			ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
		if (ISAFUNC(uasm_rel_hi(addr))) {
			ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
			ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
					ISAFUNC(uasm_rel_hi)(addr));
			ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
		} else
			ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
	} else
		ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
}
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));

void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
{
	ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
	if (ISAFUNC(uasm_rel_lo(addr))) {
		if (!ISAFUNC(uasm_in_compat_space_p)(addr))
			ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
					ISAFUNC(uasm_rel_lo(addr)));
		else
			ISAFUNC(uasm_i_addiu)(buf, rs, rs,
					ISAFUNC(uasm_rel_lo(addr)));
	}
}
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
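
For a 64-bit address outside compat space, the lui/dsll/daddiu sequence above reassembles the value as (highest << 48) + (higher << 32) + (hi << 16) + lo. A standalone sketch verifying that decomposition for one sample value; rel_piece() and the harness are hypothetical, and the code assumes arithmetic right shift of negative values, as the kernel does:

#include <assert.h>
#include <stdint.h>

/* generic form of the rel_highest/rel_higher/rel_hi/rel_lo helpers */
static int64_t rel_piece(int64_t val, int shift, int64_t round)
{
	return ((((val + round) >> shift) & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	int64_t val = (int64_t)0xffffffff80001234;	/* a typical CKSEG0-style address */
	int64_t highest = rel_piece(val, 48, 0x800080008000LL);
	int64_t higher  = rel_piece(val, 32, 0x80008000LL);
	int64_t hi      = rel_piece(val, 16, 0x8000LL);
	int64_t lo      = rel_piece(val,  0, 0);

	/* the emitted lui/daddiu/dsll sequence computes exactly this sum */
	assert((highest << 48) + (higher << 32) + (hi << 16) + lo == val);
	return 0;
}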

/* Handle relocations. */
void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = lid;
	(*rel)++;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));

static inline void __resolve_relocs(struct uasm_reloc *rel,
				    struct uasm_label *lab);

void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
				  struct uasm_label *lab)
{
	struct uasm_label *l;

	for (; rel->lab != UASM_LABEL_INVALID; rel++)
		for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));

void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
			       long off)
{
	for (; rel->lab != UASM_LABEL_INVALID; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));

void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
			       long off)
{
	for (; lab->lab != UASM_LABEL_INVALID; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));

void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
				u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	ISAFUNC(uasm_move_relocs(rel, first, end, off));
	ISAFUNC(uasm_move_labels(lab, first, end, off));
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));

int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
{
	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));

/* Convenience functions for labeled branches. */
void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			   int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bltz)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));

void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_b)(p, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));

void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
			  unsigned int r2, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));

void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			   int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_beqz)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));

void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			    int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_beqzl)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));

void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
			  unsigned int reg2, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));

void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			   int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bnez)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));

void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			    int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bgezl)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));

void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			   int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bgez)(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));

void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			    unsigned int bit, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));

void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
			    unsigned int bit, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));