Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
Synced 2025-09-07 16:58:04 -04:00

Commit f6dfaef42e: "Fixed MTP to work with TWRP"
50820 changed files with 20846062 additions and 0 deletions
arch/sh/mm/Kconfig (new file, 272 lines)
@@ -0,0 +1,272 @@
menu "Memory management options"

config QUICKLIST
    def_bool y

config MMU
    bool "Support for memory management hardware"
    depends on !CPU_SH2
    default y
    help
      Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to
      boot on these systems, this option must not be set.

      On other systems (such as the SH-3 and 4) where an MMU exists,
      turning this off will boot the kernel on these machines with the
      MMU implicitly switched off.

config PAGE_OFFSET
    hex
    default "0x80000000" if MMU && SUPERH32
    default "0x20000000" if MMU && SUPERH64
    default "0x00000000"

config FORCE_MAX_ZONEORDER
    int "Maximum zone order"
    range 9 64 if PAGE_SIZE_16KB
    default "9" if PAGE_SIZE_16KB
    range 7 64 if PAGE_SIZE_64KB
    default "7" if PAGE_SIZE_64KB
    range 11 64
    default "14" if !MMU
    default "11"
    help
      The kernel memory allocator divides physically contiguous memory
      blocks into "zones", where each zone is a power of two number of
      pages. This option selects the largest power of two that the kernel
      keeps in the memory allocator. If you need to allocate very large
      blocks of physically contiguous memory, then you may need to
      increase this value.

      This config option is actually maximum order plus one. For example,
      a value of 11 means that the largest free memory block is 2^10 pages.

      The page size is not necessarily 4KB. Keep this in mind when
      choosing a value for this option.

config MEMORY_START
    hex "Physical memory start address"
    default "0x08000000"
    ---help---
      Computers built with Hitachi SuperH processors always
      map the ROM starting at address zero. But the processor
      does not specify the range that RAM takes.

      The physical memory (RAM) start address will be automatically
      set to 08000000. Other platforms, such as the Solution Engine
      boards, typically map RAM at 0C000000.

      Tweak this only when porting to a new machine which does not
      already have a defconfig. Changing it from the known correct
      value on any of the known systems will only lead to disaster.

config MEMORY_SIZE
    hex "Physical memory size"
    default "0x04000000"
    help
      This sets the default memory size assumed by your SH kernel. It can
      be overridden as normal by the 'mem=' argument on the kernel command
      line. If unsure, consult your board specifications or just leave it
      as 0x04000000 which was the default value before this became
      configurable.

# Physical addressing modes

config 29BIT
    def_bool !32BIT
    depends on SUPERH32
    select UNCACHED_MAPPING

config 32BIT
    bool
    default y if CPU_SH5 || !MMU

config PMB
    bool "Support 32-bit physical addressing through PMB"
    depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
    select 32BIT
    select UNCACHED_MAPPING
    help
      If you say Y here, physical addressing will be extended to
      32-bits through the SH-4A PMB. If this is not set, legacy
      29-bit physical addressing will be used.

config X2TLB
    def_bool y
    depends on (CPU_SHX2 || CPU_SHX3) && MMU

config VSYSCALL
    bool "Support vsyscall page"
    depends on MMU && (CPU_SH3 || CPU_SH4)
    default y
    help
      This will enable support for the kernel mapping a vDSO page
      in process space, and subsequently handing down the entry point
      to the libc through the ELF auxiliary vector.

      From the kernel side this is used for the signal trampoline.
      For systems with an MMU that can afford to give up a page,
      (the default value) say Y.

config NUMA
    bool "Non Uniform Memory Access (NUMA) Support"
    depends on MMU && SYS_SUPPORTS_NUMA
    select ARCH_WANT_NUMA_VARIABLE_LOCALITY
    default n
    help
      Some SH systems have many various memories scattered around
      the address space, each with varying latencies. This enables
      support for these blocks by binding them to nodes and allowing
      memory policies to be used for prioritizing and controlling
      allocation behaviour.

config NODES_SHIFT
    int
    default "3" if CPU_SUBTYPE_SHX3
    default "1"
    depends on NEED_MULTIPLE_NODES

config ARCH_FLATMEM_ENABLE
    def_bool y
    depends on !NUMA

config ARCH_SPARSEMEM_ENABLE
    def_bool y
    select SPARSEMEM_STATIC

config ARCH_SPARSEMEM_DEFAULT
    def_bool y

config ARCH_SELECT_MEMORY_MODEL
    def_bool y

config ARCH_ENABLE_MEMORY_HOTPLUG
    def_bool y
    depends on SPARSEMEM && MMU

config ARCH_ENABLE_MEMORY_HOTREMOVE
    def_bool y
    depends on SPARSEMEM && MMU

config ARCH_MEMORY_PROBE
    def_bool y
    depends on MEMORY_HOTPLUG

config IOREMAP_FIXED
    def_bool y
    depends on X2TLB || SUPERH64

config UNCACHED_MAPPING
    bool

config HAVE_SRAM_POOL
    bool
    select GENERIC_ALLOCATOR

choice
    prompt "Kernel page size"
    default PAGE_SIZE_4KB

config PAGE_SIZE_4KB
    bool "4kB"
    help
      This is the default page size used by all SuperH CPUs.

config PAGE_SIZE_8KB
    bool "8kB"
    depends on !MMU || X2TLB
    help
      This enables 8kB pages as supported by SH-X2 and later MMUs.

config PAGE_SIZE_16KB
    bool "16kB"
    depends on !MMU
    help
      This enables 16kB pages on MMU-less SH systems.

config PAGE_SIZE_64KB
    bool "64kB"
    depends on !MMU || CPU_SH4 || CPU_SH5
    help
      This enables support for 64kB pages, possible on all SH-4
      CPUs and later.

endchoice

choice
    prompt "HugeTLB page size"
    depends on HUGETLB_PAGE
    default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB
    default HUGETLB_PAGE_SIZE_64K

config HUGETLB_PAGE_SIZE_64K
    bool "64kB"
    depends on !PAGE_SIZE_64KB

config HUGETLB_PAGE_SIZE_256K
    bool "256kB"
    depends on X2TLB

config HUGETLB_PAGE_SIZE_1MB
    bool "1MB"

config HUGETLB_PAGE_SIZE_4MB
    bool "4MB"
    depends on X2TLB

config HUGETLB_PAGE_SIZE_64MB
    bool "64MB"
    depends on X2TLB

config HUGETLB_PAGE_SIZE_512MB
    bool "512MB"
    depends on CPU_SH5

endchoice

source "mm/Kconfig"

config SCHED_MC
    bool "Multi-core scheduler support"
    depends on SMP
    default y
    help
      Multi-core scheduler support improves the CPU scheduler's decision
      making when dealing with multi-core CPU chips at a cost of slightly
      increased overhead in some places. If unsure say N here.

endmenu

menu "Cache configuration"

config SH7705_CACHE_32KB
    bool "Enable 32KB cache size for SH7705"
    depends on CPU_SUBTYPE_SH7705
    default y

choice
    prompt "Cache mode"
    default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
    default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)

config CACHE_WRITEBACK
    bool "Write-back"

config CACHE_WRITETHROUGH
    bool "Write-through"
    help
      Selecting this option will configure the caches in write-through
      mode, as opposed to the default write-back configuration.

      Since there are still some aliasing issues on SH-4, this option will
      unfortunately still require the majority of flushing functions to
      be implemented to deal with aliasing.

      If unsure, say N.

config CACHE_OFF
    bool "Off"

endchoice

endmenu
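The FORCE_MAX_ZONEORDER help text above notes that the configured value is the maximum order plus one, so the largest block the allocator keeps is 2^(value-1) pages, and the page size differs between the options in this file. A minimal userspace sketch of that arithmetic (the page-size/order pairs below are the defaults quoted above, not values read from a running kernel):

#include <stdio.h>

/* Largest contiguous allocation implied by FORCE_MAX_ZONEORDER:
 * the Kconfig value is "maximum order plus one", so the biggest
 * free block is 2^(value - 1) pages of page_size bytes each. */
static unsigned long max_block_bytes(unsigned long page_size, int zoneorder)
{
    return page_size << (zoneorder - 1);
}

int main(void)
{
    /* Illustrative pairs matching the defaults above: 4kB/11, 16kB/9, 64kB/7 */
    printf("%lu\n", max_block_bytes(4096, 11));
    printf("%lu\n", max_block_bytes(16384, 9));
    printf("%lu\n", max_block_bytes(65536, 7));
    return 0;
}

With the defaults shown, each page-size/order pairing works out to the same 4 MiB maximum block.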
arch/sh/mm/Makefile (new file, 72 lines)
@@ -0,0 +1,72 @@
#
# Makefile for the Linux SuperH-specific parts of the memory manager.
#

obj-y := alignment.o cache.o init.o consistent.o mmap.o

cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o
cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o

obj-y += $(cacheops-y)

mmu-y := nommu.o extable_32.o
mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \
                     pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o

obj-y += $(mmu-y)

debugfs-y := asids-debugfs.o
ifndef CONFIG_CACHE_OFF
debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
endif

ifdef CONFIG_MMU
debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif

obj-$(CONFIG_DEBUG_FS) += $(debugfs-y)
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o

GCOV_PROFILE_pmb.o := n

# Special flags for tlbex_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
# This is required because the code is called from a context in entry.S where
# very few registers have been saved in the exception handler (for speed
# reasons).
# The caller save registers that have been saved and which can be used are
# r2,r3,r4,r5 : argument passing
# r15, r18 : SP and LINK
# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
#         use of them, so it's probably beneficial to performance to save them
#         and have them available for it.
#
# The resources not listed below are callee save, i.e. the compiler is free to
# use any of them and will spill them to the stack itself.

CFLAGS_tlbex_64.o += -ffixed-r7 \
    -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
    -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
    -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
    -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
    -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
    -ffixed-r41 -ffixed-r42 -ffixed-r43 \
    -ffixed-r60 -ffixed-r61 -ffixed-r62 \
    -fomit-frame-pointer

ccflags-y := -Werror
arch/sh/mm/alignment.c (new file, 190 lines)
@@ -0,0 +1,190 @@
/*
 * Alignment access counters and corresponding user-space interfaces.
 *
 * Copyright (C) 2009 ST Microelectronics
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <asm/alignment.h>
#include <asm/processor.h>

static unsigned long se_user;
static unsigned long se_sys;
static unsigned long se_half;
static unsigned long se_word;
static unsigned long se_dword;
static unsigned long se_multi;
/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
   valid! */
static int se_usermode = UM_WARN | UM_FIXUP;
/* 0: no warning 1: print a warning message, disabled by default */
static int se_kernmode_warn;

core_param(alignment, se_usermode, int, 0600);

void inc_unaligned_byte_access(void)
{
    se_half++;
}

void inc_unaligned_word_access(void)
{
    se_word++;
}

void inc_unaligned_dword_access(void)
{
    se_dword++;
}

void inc_unaligned_multi_access(void)
{
    se_multi++;
}

void inc_unaligned_user_access(void)
{
    se_user++;
}

void inc_unaligned_kernel_access(void)
{
    se_sys++;
}

/*
 * This defaults to the global policy which can be set from the command
 * line, while processes can overload their preferences via prctl().
 */
unsigned int unaligned_user_action(void)
{
    unsigned int action = se_usermode;

    if (current->thread.flags & SH_THREAD_UAC_SIGBUS) {
        action &= ~UM_FIXUP;
        action |= UM_SIGNAL;
    }

    if (current->thread.flags & SH_THREAD_UAC_NOPRINT)
        action &= ~UM_WARN;

    return action;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long addr)
{
    return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
                    (unsigned int __user *)addr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
    tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
                        (val & SH_THREAD_UAC_MASK);
    return 0;
}

void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
                             struct pt_regs *regs)
{
    if (user_mode(regs) && (se_usermode & UM_WARN))
        pr_notice_ratelimited("Fixing up unaligned userspace access "
                              "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                              tsk->comm, task_pid_nr(tsk),
                              (void *)instruction_pointer(regs), insn);
    else if (se_kernmode_warn)
        pr_notice_ratelimited("Fixing up unaligned kernel access "
                              "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                              tsk->comm, task_pid_nr(tsk),
                              (void *)instruction_pointer(regs), insn);
}

static const char *se_usermode_action[] = {
    "ignored",
    "warn",
    "fixup",
    "fixup+warn",
    "signal",
    "signal+warn"
};

static int alignment_proc_show(struct seq_file *m, void *v)
{
    seq_printf(m, "User:\t\t%lu\n", se_user);
    seq_printf(m, "System:\t\t%lu\n", se_sys);
    seq_printf(m, "Half:\t\t%lu\n", se_half);
    seq_printf(m, "Word:\t\t%lu\n", se_word);
    seq_printf(m, "DWord:\t\t%lu\n", se_dword);
    seq_printf(m, "Multi:\t\t%lu\n", se_multi);
    seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
               se_usermode_action[se_usermode]);
    seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
               se_kernmode_warn ? "+warn" : "");
    return 0;
}

static int alignment_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, alignment_proc_show, NULL);
}

static ssize_t alignment_proc_write(struct file *file,
                                    const char __user *buffer, size_t count, loff_t *pos)
{
    int *data = PDE_DATA(file_inode(file));
    char mode;

    if (count > 0) {
        if (get_user(mode, buffer))
            return -EFAULT;
        if (mode >= '0' && mode <= '5')
            *data = mode - '0';
    }
    return count;
}

static const struct file_operations alignment_proc_fops = {
    .owner = THIS_MODULE,
    .open = alignment_proc_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
    .write = alignment_proc_write,
};

/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten. Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 * We now locate it in /proc/cpu/alignment instead.
 */
static int __init alignment_init(void)
{
    struct proc_dir_entry *dir, *res;

    dir = proc_mkdir("cpu", NULL);
    if (!dir)
        return -ENOMEM;

    res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
                           &alignment_proc_fops, &se_usermode);
    if (!res)
        return -ENOMEM;

    res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
                           &alignment_proc_fops, &se_kernmode_warn);
    if (!res)
        return -ENOMEM;

    return 0;
}
fs_initcall(alignment_init);
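alignment_init() above exposes the counters through /proc/cpu/alignment and /proc/cpu/kernel_alignment; alignment_proc_show() prints the counters on read, and alignment_proc_write() accepts a single digit '0'..'5' to select the user-mode policy. A hedged userspace sketch of that interaction (assumes the file exists and the caller has write permission; error handling is minimal):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[256];
    ssize_t n;

    /* Dump the unaligned-access counters printed by alignment_proc_show(). */
    int fd = open("/proc/cpu/alignment", O_RDWR);
    if (fd < 0)
        return 1;

    n = read(fd, buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        fputs(buf, stdout);
    }

    /* alignment_proc_write() accepts one digit '0'..'5'; '3' corresponds to
     * the "fixup+warn" entry in se_usermode_action[]. */
    if (write(fd, "3", 1) != 1)
        perror("write");

    close(fd);
    return 0;
}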
arch/sh/mm/asids-debugfs.c (new file, 75 lines)
@@ -0,0 +1,75 @@
/*
 * debugfs ops for process ASIDs
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 - 2008 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 * Provides a debugfs file that lists out the ASIDs currently associated
 * with the processes.
 *
 * In the SH-5 case, if the DM.PC register is examined through the debug
 * link, this shows ASID + PC. To make use of this, the PID->ASID
 * relationship needs to be known. This is primarily for debugging.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>

static int asids_seq_show(struct seq_file *file, void *iter)
{
    struct task_struct *p;

    read_lock(&tasklist_lock);

    for_each_process(p) {
        int pid = p->pid;

        if (unlikely(!pid))
            continue;

        if (p->mm)
            seq_printf(file, "%5d : %04lx\n", pid,
                       cpu_asid(smp_processor_id(), p->mm));
    }

    read_unlock(&tasklist_lock);

    return 0;
}

static int asids_debugfs_open(struct inode *inode, struct file *file)
{
    return single_open(file, asids_seq_show, inode->i_private);
}

static const struct file_operations asids_debugfs_fops = {
    .owner = THIS_MODULE,
    .open = asids_debugfs_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int __init asids_debugfs_init(void)
{
    struct dentry *asids_dentry;

    asids_dentry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
                                       NULL, &asids_debugfs_fops);
    if (!asids_dentry)
        return -ENOMEM;

    return PTR_ERR_OR_ZERO(asids_dentry);
}
module_init(asids_debugfs_init);

MODULE_LICENSE("GPL v2");
arch/sh/mm/cache-debugfs.c (new file, 132 lines)
@@ -0,0 +1,132 @@
/*
 * debugfs ops for the L1 cache
 *
 * Copyright (C) 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/io.h>

enum cache_type {
    CACHE_TYPE_ICACHE,
    CACHE_TYPE_DCACHE,
    CACHE_TYPE_UNIFIED,
};

static int cache_seq_show(struct seq_file *file, void *iter)
{
    unsigned int cache_type = (unsigned int)file->private;
    struct cache_info *cache;
    unsigned int waysize, way;
    unsigned long ccr;
    unsigned long addrstart = 0;

    /*
     * Go uncached immediately so we don't skew the results any
     * more than we already are..
     */
    jump_to_uncached();

    ccr = __raw_readl(SH_CCR);
    if ((ccr & CCR_CACHE_ENABLE) == 0) {
        back_to_cached();

        seq_printf(file, "disabled\n");
        return 0;
    }

    if (cache_type == CACHE_TYPE_DCACHE) {
        addrstart = CACHE_OC_ADDRESS_ARRAY;
        cache = &current_cpu_data.dcache;
    } else {
        addrstart = CACHE_IC_ADDRESS_ARRAY;
        cache = &current_cpu_data.icache;
    }

    waysize = cache->sets;

    /*
     * If the OC is already in RAM mode, we only have
     * half of the entries to consider..
     */
    if ((ccr & CCR_CACHE_ORA) && cache_type == CACHE_TYPE_DCACHE)
        waysize >>= 1;

    waysize <<= cache->entry_shift;

    for (way = 0; way < cache->ways; way++) {
        unsigned long addr;
        unsigned int line;

        seq_printf(file, "-----------------------------------------\n");
        seq_printf(file, "Way %d\n", way);
        seq_printf(file, "-----------------------------------------\n");

        for (addr = addrstart, line = 0;
             addr < addrstart + waysize;
             addr += cache->linesz, line++) {
            unsigned long data = __raw_readl(addr);

            /* Check the V bit, ignore invalid cachelines */
            if ((data & 1) == 0)
                continue;

            /* U: Dirty, cache tag is 10 bits up */
            seq_printf(file, "%3d: %c 0x%lx\n",
                       line, data & 2 ? 'U' : ' ',
                       data & 0x1ffffc00);
        }

        addrstart += cache->way_incr;
    }

    back_to_cached();

    return 0;
}

static int cache_debugfs_open(struct inode *inode, struct file *file)
{
    return single_open(file, cache_seq_show, inode->i_private);
}

static const struct file_operations cache_debugfs_fops = {
    .owner = THIS_MODULE,
    .open = cache_debugfs_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static int __init cache_debugfs_init(void)
{
    struct dentry *dcache_dentry, *icache_dentry;

    dcache_dentry = debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
                                        (unsigned int *)CACHE_TYPE_DCACHE,
                                        &cache_debugfs_fops);
    if (!dcache_dentry)
        return -ENOMEM;

    icache_dentry = debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
                                        (unsigned int *)CACHE_TYPE_ICACHE,
                                        &cache_debugfs_fops);
    if (!icache_dentry) {
        debugfs_remove(dcache_dentry);
        return -ENOMEM;
    }

    return 0;
}
module_init(cache_debugfs_init);

MODULE_LICENSE("GPL v2");
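cache_seq_show() above walks the memory-mapped address array and prints one line per valid entry: bit 0 is the valid bit, bit 1 the dirty (U) bit, and the tag sits under the 0x1ffffc00 mask. A small standalone sketch of just that decoding step, using a made-up raw word:

#include <stdio.h>

/* Decode one cache address-array word the way cache_seq_show() does:
 * bit 0 = valid, bit 1 = dirty (U), tag = bits masked by 0x1ffffc00.
 * The sample value below is hypothetical, not read from real hardware. */
int main(void)
{
    unsigned long data = 0x0badc403;

    if ((data & 1) == 0) {
        puts("invalid line");
        return 0;
    }
    printf("%c 0x%lx\n", (data & 2) ? 'U' : ' ', data & 0x1ffffc00);
    return 0;
}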
arch/sh/mm/cache-sh2.c (new file, 91 lines)
@@ -0,0 +1,91 @@
/*
 * arch/sh/mm/cache-sh2.c
 *
 * Copyright (C) 2002 Paul Mundt
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

static void sh2__flush_wback_region(void *start, int size)
{
    unsigned long v;
    unsigned long begin, end;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);
    for (v = begin; v < end; v += L1_CACHE_BYTES) {
        unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
        int way;
        for (way = 0; way < 4; way++) {
            unsigned long data = __raw_readl(addr | (way << 12));
            if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
                data &= ~SH_CACHE_UPDATED;
                __raw_writel(data, addr | (way << 12));
            }
        }
    }
}

static void sh2__flush_purge_region(void *start, int size)
{
    unsigned long v;
    unsigned long begin, end;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);

    for (v = begin; v < end; v += L1_CACHE_BYTES)
        __raw_writel((v & CACHE_PHYSADDR_MASK),
                     CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
}

static void sh2__flush_invalidate_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
    /*
     * SH-2 does not support individual line invalidation, only a
     * global invalidate.
     */
    unsigned long ccr;
    unsigned long flags;
    local_irq_save(flags);
    jump_to_uncached();

    ccr = __raw_readl(SH_CCR);
    ccr |= CCR_CACHE_INVALIDATE;
    __raw_writel(ccr, SH_CCR);

    back_to_cached();
    local_irq_restore(flags);
#else
    unsigned long v;
    unsigned long begin, end;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);

    for (v = begin; v < end; v += L1_CACHE_BYTES)
        __raw_writel((v & CACHE_PHYSADDR_MASK),
                     CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
#endif
}

void __init sh2_cache_init(void)
{
    __flush_wback_region = sh2__flush_wback_region;
    __flush_purge_region = sh2__flush_purge_region;
    __flush_invalidate_region = sh2__flush_invalidate_region;
}
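All of the flush routines in these cache files begin with the same mask arithmetic: round the start of the region down and the end up to whole cache lines before walking it. A self-contained sketch of just that alignment step (L1_CACHE_BYTES is fixed at 32 here purely for illustration; the real value depends on the CPU):

#include <stdio.h>

#define L1_CACHE_BYTES 32UL  /* illustrative value, not taken from hardware */

/* Round an arbitrary [start, start+size) region out to whole cache lines,
 * mirroring the begin/end computation in sh2__flush_wback_region(). */
static void cache_align(unsigned long start, unsigned long size,
                        unsigned long *begin, unsigned long *end)
{
    *begin = start & ~(L1_CACHE_BYTES - 1);
    *end = (start + size + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
}

int main(void)
{
    unsigned long b, e;

    cache_align(0x1005, 0x30, &b, &e);   /* region straddles two cache lines */
    printf("begin=0x%lx end=0x%lx lines=%lu\n",
           b, e, (e - b) / L1_CACHE_BYTES);
    return 0;
}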
arch/sh/mm/cache-sh2a.c (new file, 189 lines)
@@ -0,0 +1,189 @@
/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_OCACHE_PAGES 32
#define MAX_ICACHE_PAGES 32

#ifdef CONFIG_CACHE_WRITEBACK
static void sh2a_flush_oc_line(unsigned long v, int way)
{
    unsigned long addr = (v & 0x000007f0) | (way << 11);
    unsigned long data;

    data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
    if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
        data &= ~SH_CACHE_UPDATED;
        __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
    }
}
#endif

static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
{
    /* Set associative bit to hit all ways */
    unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
    __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
}

/*
 * Write back the dirty D-caches, but not invalidate them.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
    unsigned long v;
    unsigned long begin, end;
    unsigned long flags;
    int nr_ways;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);
    nr_ways = current_cpu_data.dcache.ways;

    local_irq_save(flags);
    jump_to_uncached();

    /* If there are too many pages then flush the entire cache */
    if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
        begin = CACHE_OC_ADDRESS_ARRAY;
        end = begin + (nr_ways * current_cpu_data.dcache.way_size);

        for (v = begin; v < end; v += L1_CACHE_BYTES) {
            unsigned long data = __raw_readl(v);
            if (data & SH_CACHE_UPDATED)
                __raw_writel(data & ~SH_CACHE_UPDATED, v);
        }
    } else {
        int way;
        for (way = 0; way < nr_ways; way++) {
            for (v = begin; v < end; v += L1_CACHE_BYTES)
                sh2a_flush_oc_line(v, way);
        }
    }

    back_to_cached();
    local_irq_restore(flags);
#endif
}

/*
 * Write back the dirty D-caches and invalidate them.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
    unsigned long v;
    unsigned long begin, end;
    unsigned long flags;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);

    local_irq_save(flags);
    jump_to_uncached();

    for (v = begin; v < end; v += L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
        int way;
        int nr_ways = current_cpu_data.dcache.ways;
        for (way = 0; way < nr_ways; way++)
            sh2a_flush_oc_line(v, way);
#endif
        sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
    }

    back_to_cached();
    local_irq_restore(flags);
}

/*
 * Invalidate the D-caches, but no write back please
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
    unsigned long v;
    unsigned long begin, end;
    unsigned long flags;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);

    local_irq_save(flags);
    jump_to_uncached();

    /* If there are too many pages then just blow the cache */
    if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
        __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
                     SH_CCR);
    } else {
        for (v = begin; v < end; v += L1_CACHE_BYTES)
            sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
    }

    back_to_cached();
    local_irq_restore(flags);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 */
static void sh2a_flush_icache_range(void *args)
{
    struct flusher_data *data = args;
    unsigned long start, end;
    unsigned long v;
    unsigned long flags;

    start = data->addr1 & ~(L1_CACHE_BYTES-1);
    end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

#ifdef CONFIG_CACHE_WRITEBACK
    sh2a__flush_wback_region((void *)start, end-start);
#endif

    local_irq_save(flags);
    jump_to_uncached();

    /* I-Cache invalidate */
    /* If there are too many pages then just blow the cache */
    if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
        __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
                     SH_CCR);
    } else {
        for (v = start; v < end; v += L1_CACHE_BYTES)
            sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
    }

    back_to_cached();
    local_irq_restore(flags);
}

void __init sh2a_cache_init(void)
{
    local_flush_icache_range = sh2a_flush_icache_range;

    __flush_wback_region = sh2a__flush_wback_region;
    __flush_purge_region = sh2a__flush_purge_region;
    __flush_invalidate_region = sh2a__flush_invalidate_region;
}
arch/sh/mm/cache-sh3.c (new file, 105 lines)
@@ -0,0 +1,105 @@
/*
 * arch/sh/mm/cache-sh3.c
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka
 * Copyright (C) 2002 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * Is this really worth it, or should we just alias this routine
 * to __flush_purge_region too?
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */

static void sh3__flush_wback_region(void *start, int size)
{
    unsigned long v, j;
    unsigned long begin, end;
    unsigned long flags;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);

    for (v = begin; v < end; v += L1_CACHE_BYTES) {
        unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
        for (j = 0; j < current_cpu_data.dcache.ways; j++) {
            unsigned long data, addr, p;

            p = __pa(v);
            addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
            local_irq_save(flags);
            data = __raw_readl(addr);

            if ((data & CACHE_PHYSADDR_MASK) ==
                (p & CACHE_PHYSADDR_MASK)) {
                data &= ~SH_CACHE_UPDATED;
                __raw_writel(data, addr);
                local_irq_restore(flags);
                break;
            }
            local_irq_restore(flags);
            addrstart += current_cpu_data.dcache.way_incr;
        }
    }
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh3__flush_purge_region(void *start, int size)
{
    unsigned long v;
    unsigned long begin, end;

    begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
    end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
          & ~(L1_CACHE_BYTES-1);

    for (v = begin; v < end; v += L1_CACHE_BYTES) {
        unsigned long data, addr;

        data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
        addr = CACHE_OC_ADDRESS_ARRAY |
               (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
        __raw_writel(data, addr);
    }
}

void __init sh3_cache_init(void)
{
    __flush_wback_region = sh3__flush_wback_region;
    __flush_purge_region = sh3__flush_purge_region;

    /*
     * No write back please
     *
     * Except I don't think there's any way to avoid the writeback.
     * So we just alias it to sh3__flush_purge_region(). dwmw2.
     */
    __flush_invalidate_region = sh3__flush_purge_region;
}
arch/sh/mm/cache-sh4.c (new file, 394 lines)
@@ -0,0 +1,394 @@
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2009 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES 32

static void __flush_cache_one(unsigned long addr, unsigned long phys,
                              unsigned long exec_offset);

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format,
 * signal handler code and kprobes code
 */
static void sh4_flush_icache_range(void *args)
{
    struct flusher_data *data = args;
    unsigned long start, end;
    unsigned long flags, v;
    int i;

    start = data->addr1;
    end = data->addr2;

    /* If there are too many pages then just blow away the caches */
    if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
        local_flush_cache_all(NULL);
        return;
    }

    /*
     * Selectively flush d-cache then invalidate the i-cache.
     * This is inefficient, so only use this for small ranges.
     */
    start &= ~(L1_CACHE_BYTES-1);
    end += L1_CACHE_BYTES-1;
    end &= ~(L1_CACHE_BYTES-1);

    local_irq_save(flags);
    jump_to_uncached();

    for (v = start; v < end; v += L1_CACHE_BYTES) {
        unsigned long icacheaddr;
        int j, n;

        __ocbwb(v);

        icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
                     cpu_data->icache.entry_mask);

        /* Clear i-cache line valid-bit */
        n = boot_cpu_data.icache.n_aliases;
        for (i = 0; i < cpu_data->icache.ways; i++) {
            for (j = 0; j < n; j++)
                __raw_writel(0, icacheaddr + (j * PAGE_SIZE));
            icacheaddr += cpu_data->icache.way_incr;
        }
    }

    back_to_cached();
    local_irq_restore(flags);
}

static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
    unsigned long flags, exec_offset = 0;

    /*
     * All types of SH-4 require PC to be uncached to operate on the I-cache.
     * Some types of SH-4 require PC to be uncached to operate on the D-cache.
     */
    if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
        (start < CACHE_OC_ADDRESS_ARRAY))
        exec_offset = cached_to_uncached;

    local_irq_save(flags);
    __flush_cache_one(start, phys, exec_offset);
    local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
    struct page *page = arg;
    unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
    struct address_space *mapping = page_mapping(page);

    if (mapping && !mapping_mapped(mapping))
        clear_bit(PG_dcache_clean, &page->flags);
    else
#endif
        flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
                        (addr & shm_align_mask), page_to_phys(page));

    wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
    unsigned long flags, ccr;

    local_irq_save(flags);
    jump_to_uncached();

    /* Flush I-cache */
    ccr = __raw_readl(SH_CCR);
    ccr |= CCR_CACHE_ICI;
    __raw_writel(ccr, SH_CCR);

    /*
     * back_to_cached() will take care of the barrier for us, don't add
     * another one!
     */

    back_to_cached();
    local_irq_restore(flags);
}

static void flush_dcache_all(void)
{
    unsigned long addr, end_addr, entry_offset;

    end_addr = CACHE_OC_ADDRESS_ARRAY +
               (current_cpu_data.dcache.sets <<
                current_cpu_data.dcache.entry_shift) *
               current_cpu_data.dcache.ways;

    entry_offset = 1 << current_cpu_data.dcache.entry_shift;

    for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
        __raw_writel(0, addr); addr += entry_offset;
    }
}

static void sh4_flush_cache_all(void *unused)
{
    flush_dcache_all();
    flush_icache_all();
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
    struct mm_struct *mm = arg;

    if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
        return;

    flush_dcache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
    struct flusher_data *data = args;
    struct vm_area_struct *vma;
    struct page *page;
    unsigned long address, pfn, phys;
    int map_coherent = 0;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    void *vaddr;

    vma = data->vma;
    address = data->addr1 & PAGE_MASK;
    pfn = data->addr2;
    phys = pfn << PAGE_SHIFT;
    page = pfn_to_page(pfn);

    if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
        return;

    pgd = pgd_offset(vma->vm_mm, address);
    pud = pud_offset(pgd, address);
    pmd = pmd_offset(pud, address);
    pte = pte_offset_kernel(pmd, address);

    /* If the page isn't present, there is nothing to do here. */
    if (!(pte_val(*pte) & _PAGE_PRESENT))
        return;

    if ((vma->vm_mm == current->active_mm))
        vaddr = NULL;
    else {
        /*
         * Use kmap_coherent or kmap_atomic to do flushes for
         * another ASID than the current one.
         */
        map_coherent = (current_cpu_data.dcache.n_aliases &&
                        test_bit(PG_dcache_clean, &page->flags) &&
                        page_mapped(page));
        if (map_coherent)
            vaddr = kmap_coherent(page, address);
        else
            vaddr = kmap_atomic(page);

        address = (unsigned long)vaddr;
    }

    flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
                    (address & shm_align_mask), phys);

    if (vma->vm_flags & VM_EXEC)
        flush_icache_all();

    if (vaddr) {
        if (map_coherent)
            kunmap_coherent(vaddr);
        else
            kunmap_atomic(vaddr);
    }
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
    struct flusher_data *data = args;
    struct vm_area_struct *vma;
    unsigned long start, end;

    vma = data->vma;
    start = data->addr1;
    end = data->addr2;

    if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
        return;

    /*
     * If cache is only 4k-per-way, there are never any 'aliases'. Since
     * the cache is physically tagged, the data can just be left in there.
     */
    if (boot_cpu_data.dcache.n_aliases == 0)
        return;

    flush_dcache_all();

    if (vma->vm_flags & VM_EXEC)
        flush_icache_all();
}

/**
 * __flush_cache_one
 *
 * @addr: address in memory mapped cache array
 * @phys: P1 address to flush (has to match tags if addr has 'A' bit
 *        set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
                              unsigned long exec_offset)
{
    int way_count;
    unsigned long base_addr = addr;
    struct cache_info *dcache;
    unsigned long way_incr;
    unsigned long a, ea, p;
    unsigned long temp_pc;

    dcache = &boot_cpu_data.dcache;
    /* Write this way for better assembly. */
    way_count = dcache->ways;
    way_incr = dcache->way_incr;

    /*
     * Apply exec_offset (i.e. branch to P2 if required.).
     *
     * FIXME:
     *
     * If I write "=r" for the (temp_pc), it puts this in r6 hence
     * trashing exec_offset before it's been added on - why? Hence
     * "=&r" as a 'workaround'
     */
    asm volatile("mov.l 1f, %0\n\t"
                 "add   %1, %0\n\t"
                 "jmp   @%0\n\t"
                 "nop\n\t"
                 ".balign 4\n\t"
                 "1:  .long 2f\n\t"
                 "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

    /*
     * We know there will be >=1 iteration, so write as do-while to avoid
     * pointless head-of-loop check for 0 iterations.
     */
    do {
        ea = base_addr + PAGE_SIZE;
        a = base_addr;
        p = phys;

        do {
            *(volatile unsigned long *)a = p;
            /*
             * Next line: intentionally not p+32, saves an add, p
             * will do since only the cache tag bits need to
             * match.
             */
            *(volatile unsigned long *)(a+32) = p;
            a += 64;
            p += 64;
        } while (a < ea);

        base_addr += way_incr;
    } while (--way_count != 0);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
    printk("PVR=%08x CVR=%08x PRR=%08x\n",
           __raw_readl(CCN_PVR),
           __raw_readl(CCN_CVR),
           __raw_readl(CCN_PRR));

    local_flush_icache_range = sh4_flush_icache_range;
    local_flush_dcache_page = sh4_flush_dcache_page;
    local_flush_cache_all = sh4_flush_cache_all;
    local_flush_cache_mm = sh4_flush_cache_mm;
    local_flush_cache_dup_mm = sh4_flush_cache_mm;
    local_flush_cache_page = sh4_flush_cache_page;
    local_flush_cache_range = sh4_flush_cache_range;

    sh4__flush_region_init();
}
arch/sh/mm/cache-sh5.c (new file, 621 lines; listing truncated below)
@@ -0,0 +1,621 @@
|
|||
/*
|
||||
* arch/sh/mm/cache-sh5.c
|
||||
*
|
||||
* Copyright (C) 2000, 2001 Paolo Alberelli
|
||||
* Copyright (C) 2002 Benedict Gaster
|
||||
* Copyright (C) 2003 Richard Curnow
|
||||
* Copyright (C) 2003 - 2008 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
extern void __weak sh4__flush_region_init(void);
|
||||
|
||||
/* Wired TLB entry for the D-cache */
|
||||
static unsigned long long dtlb_cache_slot;
|
||||
|
||||
/*
|
||||
* The following group of functions deal with mapping and unmapping a
|
||||
* temporary page into a DTLB slot that has been set aside for exclusive
|
||||
* use.
|
||||
*/
|
||||
static inline void
|
||||
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
|
||||
unsigned long paddr)
|
||||
{
|
||||
local_irq_disable();
|
||||
sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
|
||||
}
|
||||
|
||||
static inline void sh64_teardown_dtlb_cache_slot(void)
|
||||
{
|
||||
sh64_teardown_tlb_slot(dtlb_cache_slot);
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
static inline void sh64_icache_inv_all(void)
|
||||
{
|
||||
unsigned long long addr, flag, data;
|
||||
unsigned long flags;
|
||||
|
||||
addr = ICCR0;
|
||||
flag = ICCR0_ICI;
|
||||
data = 0;
|
||||
|
||||
/* Make this a critical section for safety (probably not strictly necessary.) */
|
||||
local_irq_save(flags);
|
||||
|
||||
/* Without %1 it gets unexplicably wrong */
|
||||
__asm__ __volatile__ (
|
||||
"getcfg %3, 0, %0\n\t"
|
||||
"or %0, %2, %0\n\t"
|
||||
"putcfg %3, 0, %0\n\t"
|
||||
"synci"
|
||||
: "=&r" (data)
|
||||
: "0" (data), "r" (flag), "r" (addr));
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
/* Invalidate range of addresses [start,end] from the I-cache, where
|
||||
* the addresses lie in the kernel superpage. */
|
||||
|
||||
unsigned long long ullend, addr, aligned_start;
|
||||
aligned_start = (unsigned long long)(signed long long)(signed long) start;
|
||||
addr = L1_CACHE_ALIGN(aligned_start);
|
||||
ullend = (unsigned long long) (signed long long) (signed long) end;
|
||||
|
||||
while (addr <= ullend) {
|
||||
__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
|
||||
addr += L1_CACHE_BYTES;
|
||||
}
|
||||
}
|
||||
|
||||
static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
|
||||
{
|
||||
/* If we get called, we know that vma->vm_flags contains VM_EXEC.
|
||||
Also, eaddr is page-aligned. */
|
||||
unsigned int cpu = smp_processor_id();
|
||||
unsigned long long addr, end_addr;
|
||||
unsigned long flags = 0;
|
||||
unsigned long running_asid, vma_asid;
|
||||
addr = eaddr;
|
||||
end_addr = addr + PAGE_SIZE;
|
||||
|
||||
/* Check whether we can use the current ASID for the I-cache
|
||||
invalidation. For example, if we're called via
|
||||
access_process_vm->flush_cache_page->here, (e.g. when reading from
|
||||
/proc), 'running_asid' will be that of the reader, not of the
|
||||
victim.
|
||||
|
||||
Also, note the risk that we might get pre-empted between the ASID
|
||||
compare and blocking IRQs, and before we regain control, the
|
||||
pid->ASID mapping changes. However, the whole cache will get
|
||||
invalidated when the mapping is renewed, so the worst that can
|
||||
happen is that the loop below ends up invalidating somebody else's
|
||||
cache entries.
|
||||
*/
|
||||
|
||||
running_asid = get_asid();
|
||||
vma_asid = cpu_asid(cpu, vma->vm_mm);
|
||||
if (running_asid != vma_asid) {
|
||||
local_irq_save(flags);
|
||||
switch_and_save_asid(vma_asid);
|
||||
}
|
||||
while (addr < end_addr) {
|
||||
/* Worth unrolling a little */
|
||||
__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
|
||||
__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
|
||||
__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
|
||||
__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
|
||||
addr += 128;
|
||||
}
|
||||
if (running_asid != vma_asid) {
|
||||
switch_and_save_asid(running_asid);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
}
|
||||
|
||||
static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
/* Used for invalidating big chunks of I-cache, i.e. assume the range
|
||||
is whole pages. If 'start' or 'end' is not page aligned, the code
|
||||
is conservative and invalidates to the ends of the enclosing pages.
|
||||
This is functionally OK, just a performance loss. */
|
||||
|
||||
/* See the comments below in sh64_dcache_purge_user_range() regarding
|
||||
the choice of algorithm. However, for the I-cache option (2) isn't
|
||||
available because there are no physical tags so aliases can't be
|
||||
resolved. The icbi instruction has to be used through the user
|
||||
mapping. Because icbi is cheaper than ocbp on a cache hit, it
|
||||
would be cheaper to use the selective code for a large range than is
|
||||
possible with the D-cache. Just assume 64 for now as a working
|
||||
figure.
|
||||
*/
|
||||
int n_pages;
|
||||
|
||||
if (!mm)
|
||||
return;
|
||||
|
||||
n_pages = ((end - start) >> PAGE_SHIFT);
|
||||
if (n_pages >= 64) {
|
||||
sh64_icache_inv_all();
|
||||
} else {
|
||||
unsigned long aligned_start;
|
||||
unsigned long eaddr;
|
||||
unsigned long after_last_page_start;
|
||||
unsigned long mm_asid, current_asid;
|
||||
unsigned long flags = 0;
|
||||
|
||||
mm_asid = cpu_asid(smp_processor_id(), mm);
|
||||
current_asid = get_asid();
|
||||
|
||||
if (mm_asid != current_asid) {
|
||||
/* Switch ASID and run the invalidate loop under cli */
|
||||
local_irq_save(flags);
|
||||
switch_and_save_asid(mm_asid);
|
||||
}
|
||||
|
||||
aligned_start = start & PAGE_MASK;
|
||||
after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
|
||||
|
||||
while (aligned_start < after_last_page_start) {
|
||||
struct vm_area_struct *vma;
|
||||
unsigned long vma_end;
|
||||
vma = find_vma(mm, aligned_start);
|
||||
if (!vma || (aligned_start <= vma->vm_end)) {
|
||||
/* Avoid getting stuck in an error condition */
|
||||
aligned_start += PAGE_SIZE;
|
||||
continue;
|
||||
}
|
||||
vma_end = vma->vm_end;
|
||||
if (vma->vm_flags & VM_EXEC) {
|
||||
/* Executable */
|
||||
eaddr = aligned_start;
|
||||
while (eaddr < vma_end) {
|
||||
sh64_icache_inv_user_page(vma, eaddr);
|
||||
eaddr += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
aligned_start = vma->vm_end; /* Skip to start of next region */
|
||||
}
|
||||
|
||||
if (mm_asid != current_asid) {
|
||||
switch_and_save_asid(current_asid);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
/* The icbi instruction never raises ITLBMISS. i.e. if there's not a
|
||||
cache hit on the virtual tag the instruction ends there, without a
|
||||
TLB lookup. */
|
||||
|
||||
unsigned long long aligned_start;
|
||||
unsigned long long ull_end;
|
||||
unsigned long long addr;
|
||||
|
||||
ull_end = end;
|
||||
|
||||
/* Just invalidate over the range using the natural addresses. TLB
|
||||
miss handling will be OK (TBC). Since it's for the current process,
|
||||
either we're already in the right ASID context, or the ASIDs have
|
||||
been recycled since we were last active in which case we might just
|
||||
invalidate another processes I-cache entries : no worries, just a
|
||||
performance drop for him. */
|
||||
aligned_start = L1_CACHE_ALIGN(start);
|
||||
addr = aligned_start;
|
||||
while (addr < ull_end) {
|
||||
__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
|
||||
__asm__ __volatile__ ("nop");
|
||||
__asm__ __volatile__ ("nop");
|
||||
addr += L1_CACHE_BYTES;
|
||||
}
|
||||
}
|
||||
|
||||
/* Buffer used as the target of alloco instructions to purge data from cache
|
||||
sets by natural eviction. -- RPC */
|
||||
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
|
||||
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
|
||||
|
||||
static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
|
||||
{
|
||||
/* Purge all ways in a particular block of sets, specified by the base
|
||||
set number and number of sets. Can handle wrap-around, if that's
|
||||
needed. */
|
||||
|
||||
int dummy_buffer_base_set;
|
||||
unsigned long long eaddr, eaddr0, eaddr1;
|
||||
int j;
|
||||
int set_offset;
|
||||
|
||||
dummy_buffer_base_set = ((int)&dummy_alloco_area &
|
||||
cpu_data->dcache.entry_mask) >>
|
||||
cpu_data->dcache.entry_shift;
|
||||
set_offset = sets_to_purge_base - dummy_buffer_base_set;
|
||||
|
||||
for (j = 0; j < n_sets; j++, set_offset++) {
|
||||
set_offset &= (cpu_data->dcache.sets - 1);
|
||||
eaddr0 = (unsigned long long)dummy_alloco_area +
|
||||
(set_offset << cpu_data->dcache.entry_shift);
|
||||
|
||||
/*
|
||||
* Do one alloco which hits the required set per cache
|
||||
* way. For write-back mode, this will purge the #ways
|
||||
* resident lines. There's little point unrolling this
|
||||
* loop because the allocos stall more if they're too
|
||||
* close together.
|
||||
*/
|
||||
eaddr1 = eaddr0 + cpu_data->dcache.way_size *
|
||||
cpu_data->dcache.ways;
|
||||
|
||||
for (eaddr = eaddr0; eaddr < eaddr1;
|
||||
eaddr += cpu_data->dcache.way_size) {
|
||||
__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
|
||||
__asm__ __volatile__ ("synco"); /* TAKum03020 */
|
||||
}
|
||||
|
||||
eaddr1 = eaddr0 + cpu_data->dcache.way_size *
|
||||
cpu_data->dcache.ways;
|
||||
|
||||
for (eaddr = eaddr0; eaddr < eaddr1;
|
||||
eaddr += cpu_data->dcache.way_size) {
|
||||
/*
|
||||
* Load from each address. Required because
|
||||
* alloco is a NOP if the cache is write-through.
|
||||
*/
|
||||
if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
|
||||
__raw_readb((unsigned long)eaddr);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Don't use OCBI to invalidate the lines. That costs cycles
|
||||
* directly. If the dummy block is just left resident, it will
|
||||
* naturally get evicted as required.
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
* Purge the entire contents of the dcache. The most efficient way to
|
||||
* achieve this is to use alloco instructions on a region of unused
|
||||
* memory equal in size to the cache, thereby causing the current
|
||||
* contents to be discarded by natural eviction. The alternative, namely
|
||||
* reading every tag, setting up a mapping for the corresponding page and
|
||||
* doing an OCBP for the line, would be much more expensive.
|
||||
*/
|
||||
static void sh64_dcache_purge_all(void)
|
||||
{
|
||||
|
||||
sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
|
||||
}
|
||||
|
||||
|
||||
/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
|
||||
anything else in the kernel */
|
||||
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
|
||||
|
||||
/* Purge the physical page 'paddr' from the cache. It's known that any
|
||||
* cache lines requiring attention have the same page colour as the
|
||||
* address 'eaddr'.
|
||||
*
|
||||
* This relies on the fact that the D-cache matches on physical tags when
|
||||
* no virtual tag matches. So we create an alias for the original page
|
||||
* and purge through that. (Alternatively, we could have done this by
|
||||
* switching ASID to match the original mapping and purged through that,
|
||||
* but that involves ASID switching cost + probably a TLBMISS + refill
|
||||
* anyway.)
|
||||
*/
|
||||
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
|
||||
unsigned long eaddr)
|
||||
{
|
||||
unsigned long long magic_page_start;
|
||||
unsigned long long magic_eaddr, magic_eaddr_end;
|
||||
|
||||
magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
|
||||
|
||||
/* As long as the kernel is not pre-emptible, this doesn't need to be
|
||||
under cli/sti. */
|
||||
sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
|
||||
|
||||
magic_eaddr = magic_page_start;
|
||||
magic_eaddr_end = magic_eaddr + PAGE_SIZE;
|
||||
|
||||
while (magic_eaddr < magic_eaddr_end) {
|
||||
/* Little point in unrolling this loop - the OCBPs are blocking
|
||||
and won't go any quicker (i.e. the loop overhead is parallel
|
||||
to part of the OCBP execution.) */
|
||||
__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
|
||||
magic_eaddr += L1_CACHE_BYTES;
|
||||
}
|
||||
|
||||
sh64_teardown_dtlb_cache_slot();
|
||||
}
|
||||
|
||||
/*
|
||||
* Purge a page given its physical start address, by creating a temporary
|
||||
* 1 page mapping and purging across that. Even if we know the virtual
|
||||
* address (& vma or mm) of the page, the method here is more elegant
|
||||
* because it avoids issues of coping with page faults on the purge
|
||||
* instructions (i.e. no special-case code required in the critical path
|
||||
* in the TLB miss handling).
|
||||
*/
|
||||
static void sh64_dcache_purge_phy_page(unsigned long paddr)
|
||||
{
|
||||
unsigned long long eaddr_start, eaddr, eaddr_end;
|
||||
int i;
|
||||
|
||||
/* As long as the kernel is not pre-emptible, this doesn't need to be
|
||||
under cli/sti. */
|
||||
eaddr_start = MAGIC_PAGE0_START;
|
||||
for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
|
||||
sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
|
||||
|
||||
eaddr = eaddr_start;
|
||||
eaddr_end = eaddr + PAGE_SIZE;
|
||||
while (eaddr < eaddr_end) {
|
||||
__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
|
||||
eaddr += L1_CACHE_BYTES;
|
||||
}
|
||||
|
||||
sh64_teardown_dtlb_cache_slot();
|
||||
eaddr_start += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long end)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
pte_t entry;
|
||||
spinlock_t *ptl;
|
||||
unsigned long paddr;
|
||||
|
||||
if (!mm)
|
||||
return; /* No way to find physical address of page */
|
||||
|
||||
pgd = pgd_offset(mm, addr);
|
||||
if (pgd_bad(*pgd))
|
||||
return;
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (pud_none(*pud) || pud_bad(*pud))
|
||||
return;
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd) || pmd_bad(*pmd))
|
||||
return;
|
||||
|
||||
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
|
||||
do {
|
||||
entry = *pte;
|
||||
if (pte_none(entry) || !pte_present(entry))
|
||||
continue;
|
||||
paddr = pte_val(entry) & PAGE_MASK;
|
||||
sh64_dcache_purge_coloured_phy_page(paddr, addr);
|
||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||
pte_unmap_unlock(pte - 1, ptl);
|
||||
}
|
||||
|
||||
/*
|
||||
* There are at least 5 choices for the implementation of this, with
|
||||
* pros (+), cons(-), comments(*):
|
||||
*
|
||||
* 1. ocbp each line in the range through the original user's ASID
|
||||
* + no lines spuriously evicted
|
||||
* - tlbmiss handling (must either handle faults on demand => extra
|
||||
* special-case code in tlbmiss critical path), or map the page in
|
||||
* advance (=> flush_tlb_range in advance to avoid multiple hits)
|
||||
* - ASID switching
|
||||
* - expensive for large ranges
|
||||
*
|
||||
* 2. temporarily map each page in the range to a special effective
|
||||
* address and ocbp through the temporary mapping; relies on the
|
||||
* fact that SH-5 OCB* always do TLB lookup and match on ptags (they
|
||||
* never look at the etags)
|
||||
* + no spurious evictions
|
||||
* - expensive for large ranges
|
||||
* * surely cheaper than (1)
|
||||
*
|
||||
* 3. walk all the lines in the cache, check the tags, if a match
|
||||
* occurs create a page mapping to ocbp the line through
|
||||
* + no spurious evictions
|
||||
* - tag inspection overhead
|
||||
* - (especially for small ranges)
|
||||
* - potential cost of setting up/tearing down page mapping for
|
||||
* every line that matches the range
|
||||
* * cost partly independent of range size
|
||||
*
|
||||
* 4. walk all the lines in the cache, check the tags, if a match
|
||||
* occurs use 4 * alloco to purge the line (+3 other probably
|
||||
* innocent victims) by natural eviction
|
||||
* + no tlb mapping overheads
|
||||
* - spurious evictions
|
||||
* - tag inspection overhead
|
||||
*
|
||||
* 5. implement like flush_cache_all
|
||||
* + no tag inspection overhead
|
||||
* - spurious evictions
|
||||
* - bad for small ranges
|
||||
*
|
||||
* (1) can be ruled out as more expensive than (2). (2) appears best
|
||||
* for small ranges. The choice between (3), (4) and (5) for large
|
||||
* ranges and the range size for the large/small boundary need
|
||||
* benchmarking to determine.
|
||||
*
|
||||
* For now use approach (2) for small ranges and (5) for large ones.
|
||||
*/
|
||||
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
int n_pages = ((end - start) >> PAGE_SHIFT);
|
||||
|
||||
if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
|
||||
sh64_dcache_purge_all();
|
||||
} else {
|
||||
/* Small range, covered by a single page table page */
|
||||
start &= PAGE_MASK; /* should already be so */
|
||||
end = PAGE_ALIGN(end); /* should already be so */
|
||||
sh64_dcache_purge_user_pages(mm, start, end);
|
||||
}
|
||||
}
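/*
 * Worked example (illustrative numbers only, not from this tree): a 16-page
 * flush whose start and end lie under the same page-table page takes the
 * selective sh64_dcache_purge_user_pages() path above, while a 64-page flush,
 * or any range whose start and end-1 have different bits under PMD_MASK
 * (i.e. are not covered by a single page-table page), falls back to
 * sh64_dcache_purge_all().
 */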
|
||||
|
||||
/*
|
||||
* Invalidate the entire contents of both caches, after writing back to
|
||||
* memory any dirty data from the D-cache.
|
||||
*/
|
||||
static void sh5_flush_cache_all(void *unused)
|
||||
{
|
||||
sh64_dcache_purge_all();
|
||||
sh64_icache_inv_all();
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate an entire user-address space from both caches, after
|
||||
* writing back dirty data (e.g. for shared mmap etc).
|
||||
*
|
||||
* This could be coded selectively by inspecting all the tags then
|
||||
* doing 4*alloco on any set containing a match (as for
|
||||
* flush_cache_range), but fork/exit/execve (where this is called from)
|
||||
* are expensive anyway.
|
||||
*
|
||||
* Have to do a purge here, despite the comments re I-cache below.
|
||||
* There could be odd-coloured dirty data associated with the mm still
|
||||
* in the cache - if this gets written out through natural eviction
|
||||
* after the kernel has reused the page there will be chaos.
|
||||
*
|
||||
* The mm being torn down won't ever be active again, so any Icache
|
||||
* lines tagged with its ASID won't be visible for the rest of the
|
||||
* lifetime of this ASID cycle. Before the ASID gets reused, there
|
||||
* will be a flush_cache_all. Hence we don't need to touch the
|
||||
* I-cache. This is similar to the lack of action needed in
|
||||
* flush_tlb_mm - see fault.c.
|
||||
*/
|
||||
static void sh5_flush_cache_mm(void *unused)
|
||||
{
|
||||
sh64_dcache_purge_all();
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate (from both caches) the range [start,end) of virtual
|
||||
* addresses from the user address space specified by mm, after writing
|
||||
* back any dirty data.
|
||||
*
|
||||
* Note, 'end' is 1 byte beyond the end of the range to flush.
|
||||
*/
|
||||
static void sh5_flush_cache_range(void *args)
|
||||
{
|
||||
struct flusher_data *data = args;
|
||||
struct vm_area_struct *vma;
|
||||
unsigned long start, end;
|
||||
|
||||
vma = data->vma;
|
||||
start = data->addr1;
|
||||
end = data->addr2;
|
||||
|
||||
sh64_dcache_purge_user_range(vma->vm_mm, start, end);
|
||||
sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate any entries in either cache for the vma within the user
|
||||
* address space vma->vm_mm for the page starting at virtual address
|
||||
* 'eaddr'. This seems to be used primarily in breaking COW. Note,
|
||||
* the I-cache must be searched too in case the page in question is
|
||||
* both writable and being executed from (e.g. stack trampolines.)
|
||||
*
|
||||
* Note, this is called with pte lock held.
|
||||
*/
|
||||
static void sh5_flush_cache_page(void *args)
|
||||
{
|
||||
struct flusher_data *data = args;
|
||||
struct vm_area_struct *vma;
|
||||
unsigned long eaddr, pfn;
|
||||
|
||||
vma = data->vma;
|
||||
eaddr = data->addr1;
|
||||
pfn = data->addr2;
|
||||
|
||||
sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
|
||||
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
sh64_icache_inv_user_page(vma, eaddr);
|
||||
}
|
||||
|
||||
static void sh5_flush_dcache_page(void *page)
|
||||
{
|
||||
sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
|
||||
wmb();
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the range [start,end] of kernel virtual address space from
|
||||
* the I-cache. The corresponding range must be purged from the
|
||||
* D-cache also because the SH-5 doesn't have cache snooping between
|
||||
* the caches. The addresses will be visible through the superpage
|
||||
* mapping, therefore it's guaranteed that there are no cache entries for
|
||||
* the range in cache sets of the wrong colour.
|
||||
*/
|
||||
static void sh5_flush_icache_range(void *args)
|
||||
{
|
||||
struct flusher_data *data = args;
|
||||
unsigned long start, end;
|
||||
|
||||
start = data->addr1;
|
||||
end = data->addr2;
|
||||
|
||||
__flush_purge_region((void *)start, end);
|
||||
wmb();
|
||||
sh64_icache_inv_kernel_range(start, end);
|
||||
}
|
||||
|
||||
/*
|
||||
* For the address range [start,end), write back the data from the
|
||||
* D-cache and invalidate the corresponding region of the I-cache for the
|
||||
* current process. Used to flush signal trampolines on the stack to
|
||||
* make them executable.
|
||||
*/
|
||||
static void sh5_flush_cache_sigtramp(void *vaddr)
|
||||
{
|
||||
unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
|
||||
|
||||
__flush_wback_region(vaddr, L1_CACHE_BYTES);
|
||||
wmb();
|
||||
sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
|
||||
}
|
||||
|
||||
void __init sh5_cache_init(void)
|
||||
{
|
||||
local_flush_cache_all = sh5_flush_cache_all;
|
||||
local_flush_cache_mm = sh5_flush_cache_mm;
|
||||
local_flush_cache_dup_mm = sh5_flush_cache_mm;
|
||||
local_flush_cache_page = sh5_flush_cache_page;
|
||||
local_flush_cache_range = sh5_flush_cache_range;
|
||||
local_flush_dcache_page = sh5_flush_dcache_page;
|
||||
local_flush_icache_range = sh5_flush_icache_range;
|
||||
local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
|
||||
|
||||
/* Reserve a slot for dcache colouring in the DTLB */
|
||||
dtlb_cache_slot = sh64_get_wired_dtlb_entry();
|
||||
|
||||
sh4__flush_region_init();
|
||||
}
|
195
arch/sh/mm/cache-sh7705.c
Normal file
|
@ -0,0 +1,195 @@
|
|||
/*
|
||||
* arch/sh/mm/cache-sh7705.c
|
||||
*
|
||||
* Copyright (C) 1999, 2000 Niibe Yutaka
|
||||
* Copyright (C) 2004 Alex Song
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/threads.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
/*
|
||||
* The 32KB cache on the SH7705 suffers from the same synonym problem
|
||||
* as SH4 CPUs
|
||||
*/
|
||||
static inline void cache_wback_all(void)
|
||||
{
|
||||
unsigned long ways, waysize, addrstart;
|
||||
|
||||
ways = current_cpu_data.dcache.ways;
|
||||
waysize = current_cpu_data.dcache.sets;
|
||||
waysize <<= current_cpu_data.dcache.entry_shift;
|
||||
|
||||
addrstart = CACHE_OC_ADDRESS_ARRAY;
|
||||
|
||||
do {
|
||||
unsigned long addr;
|
||||
|
||||
for (addr = addrstart;
|
||||
addr < addrstart + waysize;
|
||||
addr += current_cpu_data.dcache.linesz) {
|
||||
unsigned long data;
|
||||
int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
|
||||
|
||||
data = __raw_readl(addr);
|
||||
|
||||
if ((data & v) == v)
|
||||
__raw_writel(data & ~v, addr);
|
||||
|
||||
}
|
||||
|
||||
addrstart += current_cpu_data.dcache.way_incr;
|
||||
} while (--ways);
|
||||
}
|
||||
|
||||
/*
|
||||
* Write back the range of D-cache, and purge the I-cache.
|
||||
*
|
||||
* Called from kernel/module.c:sys_init_module and routine for a.out format.
|
||||
*/
|
||||
static void sh7705_flush_icache_range(void *args)
|
||||
{
|
||||
struct flusher_data *data = args;
|
||||
unsigned long start, end;
|
||||
|
||||
start = data->addr1;
|
||||
end = data->addr2;
|
||||
|
||||
__flush_wback_region((void *)start, end - start);
|
||||
}
|
||||
|
||||
/*
|
||||
* Writeback&Invalidate the D-cache of the page
|
||||
*/
|
||||
static void __flush_dcache_page(unsigned long phys)
|
||||
{
|
||||
unsigned long ways, waysize, addrstart;
|
||||
unsigned long flags;
|
||||
|
||||
phys |= SH_CACHE_VALID;
|
||||
|
||||
/*
|
||||
* Here, phys is the physical address of the page. We check all the
|
||||
* tags in the cache for those with the same page number as this page
|
||||
* (by masking off the lowest 2 bits of the 19-bit tag; these bits are
|
||||
* derived from the offset within the 4k page). Matching valid
|
||||
* entries are invalidated.
|
||||
*
|
||||
* Since 2 bits of the cache index are derived from the virtual page
|
||||
* number, knowing this would reduce the number of cache entries to be
|
||||
* searched by a factor of 4. However this function exists to deal with
|
||||
* potential cache aliasing, therefore the optimisation is probably not
|
||||
* possible.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
jump_to_uncached();
|
||||
|
||||
ways = current_cpu_data.dcache.ways;
|
||||
waysize = current_cpu_data.dcache.sets;
|
||||
waysize <<= current_cpu_data.dcache.entry_shift;
|
||||
|
||||
addrstart = CACHE_OC_ADDRESS_ARRAY;
|
||||
|
||||
do {
|
||||
unsigned long addr;
|
||||
|
||||
for (addr = addrstart;
|
||||
addr < addrstart + waysize;
|
||||
addr += current_cpu_data.dcache.linesz) {
|
||||
unsigned long data;
|
||||
|
||||
data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
|
||||
if (data == phys) {
|
||||
data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
|
||||
__raw_writel(data, addr);
|
||||
}
|
||||
}
|
||||
|
||||
addrstart += current_cpu_data.dcache.way_incr;
|
||||
} while (--ways);
|
||||
|
||||
back_to_cached();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Write back & invalidate the D-cache of the page.
|
||||
* (To avoid "alias" issues)
|
||||
*/
|
||||
static void sh7705_flush_dcache_page(void *arg)
|
||||
{
|
||||
struct page *page = arg;
|
||||
struct address_space *mapping = page_mapping(page);
|
||||
|
||||
if (mapping && !mapping_mapped(mapping))
|
||||
clear_bit(PG_dcache_clean, &page->flags);
|
||||
else
|
||||
__flush_dcache_page(__pa(page_address(page)));
|
||||
}
|
||||
|
||||
static void sh7705_flush_cache_all(void *args)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
jump_to_uncached();
|
||||
|
||||
cache_wback_all();
|
||||
back_to_cached();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Write back and invalidate I/D-caches for the page.
|
||||
*
|
||||
* ADDRESS: Virtual Address (U0 address)
|
||||
*/
|
||||
static void sh7705_flush_cache_page(void *args)
|
||||
{
|
||||
struct flusher_data *data = args;
|
||||
unsigned long pfn = data->addr2;
|
||||
|
||||
__flush_dcache_page(pfn << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called when a page-cache page is about to be mapped into a
|
||||
* user process' address space. It offers an opportunity for a
|
||||
* port to ensure d-cache/i-cache coherency if necessary.
|
||||
*
|
||||
* Not entirely sure why this is necessary on SH3 with 32K cache but
|
||||
* without it we get occasional "Memory fault" when loading a program.
|
||||
*/
|
||||
static void sh7705_flush_icache_page(void *page)
|
||||
{
|
||||
__flush_purge_region(page_address(page), PAGE_SIZE);
|
||||
}
|
||||
|
||||
void __init sh7705_cache_init(void)
|
||||
{
|
||||
local_flush_icache_range = sh7705_flush_icache_range;
|
||||
local_flush_dcache_page = sh7705_flush_dcache_page;
|
||||
local_flush_cache_all = sh7705_flush_cache_all;
|
||||
local_flush_cache_mm = sh7705_flush_cache_all;
|
||||
local_flush_cache_dup_mm = sh7705_flush_cache_all;
|
||||
local_flush_cache_range = sh7705_flush_cache_all;
|
||||
local_flush_cache_page = sh7705_flush_cache_page;
|
||||
local_flush_icache_page = sh7705_flush_icache_page;
|
||||
}
|
44
arch/sh/mm/cache-shx3.c
Normal file
|
@ -0,0 +1,44 @@
|
|||
/*
 * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cache.h>

#define CCR_CACHE_SNM 0x40000 /* Hardware-assisted synonym avoidance */
#define CCR_CACHE_IBE 0x1000000 /* ICBI broadcast */

void __init shx3_cache_init(void)
{
        unsigned int ccr;

        ccr = __raw_readl(SH_CCR);

        /*
         * If we've got cache aliases, resolve them in hardware.
         */
        if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
                ccr |= CCR_CACHE_SNM;

                boot_cpu_data.icache.n_aliases = 0;
                boot_cpu_data.dcache.n_aliases = 0;

                pr_info("Enabling hardware synonym avoidance\n");
        }

#ifdef CONFIG_SMP
        /*
         * Broadcast I-cache block invalidations by default.
         */
        ccr |= CCR_CACHE_IBE;
#endif

        writel_uncached(ccr, SH_CCR);
}
|
356
arch/sh/mm/cache.c
Normal file
|
@ -0,0 +1,356 @@
|
|||
/*
|
||||
* arch/sh/mm/cache.c
|
||||
*
|
||||
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
|
||||
* Copyright (C) 2002 - 2010 Paul Mundt
|
||||
*
|
||||
* Released under the terms of the GNU GPL v2.0.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
void (*local_flush_cache_all)(void *args) = cache_noop;
|
||||
void (*local_flush_cache_mm)(void *args) = cache_noop;
|
||||
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
|
||||
void (*local_flush_cache_page)(void *args) = cache_noop;
|
||||
void (*local_flush_cache_range)(void *args) = cache_noop;
|
||||
void (*local_flush_dcache_page)(void *args) = cache_noop;
|
||||
void (*local_flush_icache_range)(void *args) = cache_noop;
|
||||
void (*local_flush_icache_page)(void *args) = cache_noop;
|
||||
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
|
||||
|
||||
void (*__flush_wback_region)(void *start, int size);
|
||||
EXPORT_SYMBOL(__flush_wback_region);
|
||||
void (*__flush_purge_region)(void *start, int size);
|
||||
EXPORT_SYMBOL(__flush_purge_region);
|
||||
void (*__flush_invalidate_region)(void *start, int size);
|
||||
EXPORT_SYMBOL(__flush_invalidate_region);
|
||||
|
||||
static inline void noop__flush_region(void *start, int size)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
|
||||
int wait)
|
||||
{
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* It's possible that this gets called early on when IRQs are
|
||||
* still disabled due to ioremapping by the boot CPU, so don't
|
||||
* even attempt IPIs unless there are other CPUs online.
|
||||
*/
|
||||
if (num_online_cpus() > 1)
|
||||
smp_call_function(func, info, wait);
|
||||
|
||||
func(info);
|
||||
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long vaddr, void *dst, const void *src,
|
||||
unsigned long len)
|
||||
{
|
||||
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
|
||||
test_bit(PG_dcache_clean, &page->flags)) {
|
||||
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
|
||||
memcpy(vto, src, len);
|
||||
kunmap_coherent(vto);
|
||||
} else {
|
||||
memcpy(dst, src, len);
|
||||
if (boot_cpu_data.dcache.n_aliases)
|
||||
clear_bit(PG_dcache_clean, &page->flags);
|
||||
}
|
||||
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
flush_cache_page(vma, vaddr, page_to_pfn(page));
|
||||
}
|
||||
|
||||
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long vaddr, void *dst, const void *src,
|
||||
unsigned long len)
|
||||
{
|
||||
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
|
||||
test_bit(PG_dcache_clean, &page->flags)) {
|
||||
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
|
||||
memcpy(dst, vfrom, len);
|
||||
kunmap_coherent(vfrom);
|
||||
} else {
|
||||
memcpy(dst, src, len);
|
||||
if (boot_cpu_data.dcache.n_aliases)
|
||||
clear_bit(PG_dcache_clean, &page->flags);
|
||||
}
|
||||
}
|
||||
|
||||
void copy_user_highpage(struct page *to, struct page *from,
|
||||
unsigned long vaddr, struct vm_area_struct *vma)
|
||||
{
|
||||
void *vfrom, *vto;
|
||||
|
||||
vto = kmap_atomic(to);
|
||||
|
||||
if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
|
||||
test_bit(PG_dcache_clean, &from->flags)) {
|
||||
vfrom = kmap_coherent(from, vaddr);
|
||||
copy_page(vto, vfrom);
|
||||
kunmap_coherent(vfrom);
|
||||
} else {
|
||||
vfrom = kmap_atomic(from);
|
||||
copy_page(vto, vfrom);
|
||||
kunmap_atomic(vfrom);
|
||||
}
|
||||
|
||||
if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
|
||||
(vma->vm_flags & VM_EXEC))
|
||||
__flush_purge_region(vto, PAGE_SIZE);
|
||||
|
||||
kunmap_atomic(vto);
|
||||
/* Make sure this page is cleared on other CPU's too before using it */
|
||||
smp_wmb();
|
||||
}
|
||||
EXPORT_SYMBOL(copy_user_highpage);
|
||||
|
||||
void clear_user_highpage(struct page *page, unsigned long vaddr)
|
||||
{
|
||||
void *kaddr = kmap_atomic(page);
|
||||
|
||||
clear_page(kaddr);
|
||||
|
||||
if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
|
||||
__flush_purge_region(kaddr, PAGE_SIZE);
|
||||
|
||||
kunmap_atomic(kaddr);
|
||||
}
|
||||
EXPORT_SYMBOL(clear_user_highpage);
|
||||
|
||||
void __update_cache(struct vm_area_struct *vma,
|
||||
unsigned long address, pte_t pte)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned long pfn = pte_pfn(pte);
|
||||
|
||||
if (!boot_cpu_data.dcache.n_aliases)
|
||||
return;
|
||||
|
||||
page = pfn_to_page(pfn);
|
||||
if (pfn_valid(pfn)) {
|
||||
int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
|
||||
if (dirty)
|
||||
__flush_purge_region(page_address(page), PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
void __flush_anon_page(struct page *page, unsigned long vmaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) page_address(page);
|
||||
|
||||
if (pages_do_alias(addr, vmaddr)) {
|
||||
if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
|
||||
test_bit(PG_dcache_clean, &page->flags)) {
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_coherent(page, vmaddr);
|
||||
/* XXX.. For now kunmap_coherent() does a purge */
|
||||
/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
|
||||
kunmap_coherent(kaddr);
|
||||
} else
|
||||
__flush_purge_region((void *)addr, PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
void flush_cache_all(void)
|
||||
{
|
||||
cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_cache_all);
|
||||
|
||||
void flush_cache_mm(struct mm_struct *mm)
|
||||
{
|
||||
if (boot_cpu_data.dcache.n_aliases == 0)
|
||||
return;
|
||||
|
||||
cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
|
||||
}
|
||||
|
||||
void flush_cache_dup_mm(struct mm_struct *mm)
|
||||
{
|
||||
if (boot_cpu_data.dcache.n_aliases == 0)
|
||||
return;
|
||||
|
||||
cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
|
||||
}
|
||||
|
||||
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
|
||||
unsigned long pfn)
|
||||
{
|
||||
struct flusher_data data;
|
||||
|
||||
data.vma = vma;
|
||||
data.addr1 = addr;
|
||||
data.addr2 = pfn;
|
||||
|
||||
cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
|
||||
}
|
||||
|
||||
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct flusher_data data;
|
||||
|
||||
data.vma = vma;
|
||||
data.addr1 = start;
|
||||
data.addr2 = end;
|
||||
|
||||
cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_cache_range);
|
||||
|
||||
void flush_dcache_page(struct page *page)
|
||||
{
|
||||
cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_dcache_page);
|
||||
|
||||
void flush_icache_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
struct flusher_data data;
|
||||
|
||||
data.vma = NULL;
|
||||
data.addr1 = start;
|
||||
data.addr2 = end;
|
||||
|
||||
cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
|
||||
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
|
||||
{
|
||||
/* Nothing uses the VMA, so just pass the struct page along */
|
||||
cacheop_on_each_cpu(local_flush_icache_page, page, 1);
|
||||
}
|
||||
|
||||
void flush_cache_sigtramp(unsigned long address)
|
||||
{
|
||||
cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
|
||||
}
|
||||
|
||||
static void compute_alias(struct cache_info *c)
|
||||
{
|
||||
c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
|
||||
c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
|
||||
}
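/*
 * Worked example for compute_alias() (hypothetical geometry, not a value
 * probed from this tree): a direct-mapped 16KB D-cache with 512 sets,
 * 32-byte lines (entry_shift = 5) and 4KB pages gives
 *   alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *   n_aliases  = (0x3000 >> 12) + 1            = 4
 * i.e. four page colours, so aliasing flushes are needed. When the way size
 * (sets * line size) does not exceed the page size, alias_mask is 0 and
 * n_aliases is 0, so no colouring is required.
 */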
|
||||
|
||||
static void __init emit_cache_params(void)
|
||||
{
|
||||
printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
|
||||
boot_cpu_data.icache.ways,
|
||||
boot_cpu_data.icache.sets,
|
||||
boot_cpu_data.icache.way_incr);
|
||||
printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
|
||||
boot_cpu_data.icache.entry_mask,
|
||||
boot_cpu_data.icache.alias_mask,
|
||||
boot_cpu_data.icache.n_aliases);
|
||||
printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
|
||||
boot_cpu_data.dcache.ways,
|
||||
boot_cpu_data.dcache.sets,
|
||||
boot_cpu_data.dcache.way_incr);
|
||||
printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
|
||||
boot_cpu_data.dcache.entry_mask,
|
||||
boot_cpu_data.dcache.alias_mask,
|
||||
boot_cpu_data.dcache.n_aliases);
|
||||
|
||||
/*
|
||||
* Emit Secondary Cache parameters if the CPU has a probed L2.
|
||||
*/
|
||||
if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
|
||||
printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
|
||||
boot_cpu_data.scache.ways,
|
||||
boot_cpu_data.scache.sets,
|
||||
boot_cpu_data.scache.way_incr);
|
||||
printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
|
||||
boot_cpu_data.scache.entry_mask,
|
||||
boot_cpu_data.scache.alias_mask,
|
||||
boot_cpu_data.scache.n_aliases);
|
||||
}
|
||||
}
|
||||
|
||||
void __init cpu_cache_init(void)
|
||||
{
|
||||
unsigned int cache_disabled = 0;
|
||||
|
||||
#ifdef SH_CCR
|
||||
cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
|
||||
#endif
|
||||
|
||||
compute_alias(&boot_cpu_data.icache);
|
||||
compute_alias(&boot_cpu_data.dcache);
|
||||
compute_alias(&boot_cpu_data.scache);
|
||||
|
||||
__flush_wback_region = noop__flush_region;
|
||||
__flush_purge_region = noop__flush_region;
|
||||
__flush_invalidate_region = noop__flush_region;
|
||||
|
||||
/*
|
||||
* No flushing is necessary in the disabled cache case so we can
|
||||
* just keep the noop functions in local_flush_..() and __flush_..()
|
||||
*/
|
||||
if (unlikely(cache_disabled))
|
||||
goto skip;
|
||||
|
||||
if (boot_cpu_data.family == CPU_FAMILY_SH2) {
|
||||
extern void __weak sh2_cache_init(void);
|
||||
|
||||
sh2_cache_init();
|
||||
}
|
||||
|
||||
if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
|
||||
extern void __weak sh2a_cache_init(void);
|
||||
|
||||
sh2a_cache_init();
|
||||
}
|
||||
|
||||
if (boot_cpu_data.family == CPU_FAMILY_SH3) {
|
||||
extern void __weak sh3_cache_init(void);
|
||||
|
||||
sh3_cache_init();
|
||||
|
||||
if ((boot_cpu_data.type == CPU_SH7705) &&
|
||||
(boot_cpu_data.dcache.sets == 512)) {
|
||||
extern void __weak sh7705_cache_init(void);
|
||||
|
||||
sh7705_cache_init();
|
||||
}
|
||||
}
|
||||
|
||||
if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
|
||||
(boot_cpu_data.family == CPU_FAMILY_SH4A) ||
|
||||
(boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
|
||||
extern void __weak sh4_cache_init(void);
|
||||
|
||||
sh4_cache_init();
|
||||
|
||||
if ((boot_cpu_data.type == CPU_SH7786) ||
|
||||
(boot_cpu_data.type == CPU_SHX3)) {
|
||||
extern void __weak shx3_cache_init(void);
|
||||
|
||||
shx3_cache_init();
|
||||
}
|
||||
}
|
||||
|
||||
if (boot_cpu_data.family == CPU_FAMILY_SH5) {
|
||||
extern void __weak sh5_cache_init(void);
|
||||
|
||||
sh5_cache_init();
|
||||
}
|
||||
|
||||
skip:
|
||||
emit_cache_params();
|
||||
}
|
159
arch/sh/mm/consistent.c
Normal file
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* arch/sh/mm/consistent.c
|
||||
*
|
||||
* Copyright (C) 2004 - 2007 Paul Mundt
|
||||
*
|
||||
* Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-debug.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/addrspace.h>
|
||||
|
||||
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
|
||||
|
||||
struct dma_map_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
static int __init dma_init(void)
|
||||
{
|
||||
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
|
||||
return 0;
|
||||
}
|
||||
fs_initcall(dma_init);
|
||||
|
||||
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
void *ret, *ret_nocache;
|
||||
int order = get_order(size);
|
||||
|
||||
gfp |= __GFP_ZERO;
|
||||
|
||||
ret = (void *)__get_free_pages(gfp, order);
|
||||
if (!ret)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Pages from the page allocator may have data present in
|
||||
* cache. So flush the cache before using uncached memory.
|
||||
*/
|
||||
dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
|
||||
|
||||
ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
|
||||
if (!ret_nocache) {
|
||||
free_pages((unsigned long)ret, order);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
|
||||
|
||||
*dma_handle = virt_to_phys(ret);
|
||||
|
||||
return ret_nocache;
|
||||
}
|
||||
|
||||
void dma_generic_free_coherent(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
int order = get_order(size);
|
||||
unsigned long pfn = dma_handle >> PAGE_SHIFT;
|
||||
int k;
|
||||
|
||||
for (k = 0; k < (1 << order); k++)
|
||||
__free_pages(pfn_to_page(pfn + k), 0);
|
||||
|
||||
iounmap(vaddr);
|
||||
}
|
||||
|
||||
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
void *addr;
|
||||
|
||||
addr = __in_29bit_mode() ?
|
||||
(void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_FROM_DEVICE: /* invalidate only */
|
||||
__flush_invalidate_region(addr, size);
|
||||
break;
|
||||
case DMA_TO_DEVICE: /* writeback only */
|
||||
__flush_wback_region(addr, size);
|
||||
break;
|
||||
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
|
||||
__flush_purge_region(addr, size);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(dma_cache_sync);
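/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that DMAs out of a kmalloc'd buffer writes dirty lines back before the
 * transfer and invalidates stale lines before reading what the device wrote,
 * matching the DMA_TO_DEVICE/DMA_FROM_DEVICE cases handled above.
 */
static inline void example_dma_to_device(struct device *dev, void *buf,
                                         size_t len)
{
        /* CPU filled buf; push it out so the device sees current data. */
        dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}

static inline void example_dma_from_device(struct device *dev, void *buf,
                                           size_t len)
{
        /* Device filled buf; drop stale cache lines before the CPU reads. */
        dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}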
|
||||
|
||||
static int __init memchunk_setup(char *str)
|
||||
{
|
||||
return 1; /* accept anything that begins with "memchunk." */
|
||||
}
|
||||
__setup("memchunk.", memchunk_setup);
|
||||
|
||||
static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
|
||||
{
|
||||
char *p = boot_command_line;
|
||||
int k = strlen(name);
|
||||
|
||||
while ((p = strstr(p, "memchunk."))) {
|
||||
p += 9; /* strlen("memchunk.") */
|
||||
if (!strncmp(name, p, k) && p[k] == '=') {
|
||||
p += k + 1;
|
||||
*sizep = memparse(p, NULL);
|
||||
pr_info("%s: forcing memory chunk size to 0x%08lx\n",
|
||||
name, *sizep);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int __init platform_resource_setup_memory(struct platform_device *pdev,
|
||||
char *name, unsigned long memsize)
|
||||
{
|
||||
struct resource *r;
|
||||
dma_addr_t dma_handle;
|
||||
void *buf;
|
||||
|
||||
r = pdev->resource + pdev->num_resources - 1;
|
||||
if (r->flags) {
|
||||
pr_warning("%s: unable to find empty space for resource\n",
|
||||
name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memchunk_cmdline_override(name, &memsize);
|
||||
if (!memsize)
|
||||
return 0;
|
||||
|
||||
buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
pr_warning("%s: unable to allocate memory\n", name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(buf, 0, memsize);
|
||||
|
||||
r->flags = IORESOURCE_MEM;
|
||||
r->start = dma_handle;
|
||||
r->end = r->start + memsize - 1;
|
||||
r->name = name;
|
||||
return 0;
|
||||
}
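/*
 * Example (hypothetical chunk name): booting with "memchunk.vpu=2m" on the
 * kernel command line makes memchunk_cmdline_override() force the "vpu"
 * chunk requested via platform_resource_setup_memory() to 2MB, while
 * "memchunk.vpu=0" leaves memsize at zero so the function returns before
 * calling dma_alloc_coherent() at all.
 */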
|
21
arch/sh/mm/extable_32.c
Normal file
|
@ -0,0 +1,21 @@
|
|||
/*
 * linux/arch/sh/mm/extable.c
 * Taken from:
 *  linux/arch/i386/mm/extable.c
 */

#include <linux/module.h>
#include <asm/uaccess.h>

int fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return 1;
        }

        return 0;
}
|
82
arch/sh/mm/extable_64.c
Normal file
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
* arch/sh/mm/extable_64.c
|
||||
*
|
||||
* Copyright (C) 2003 Richard Curnow
|
||||
* Copyright (C) 2003, 2004 Paul Mundt
|
||||
*
|
||||
* Cloned from the 2.5 SH version..
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
|
||||
extern void __copy_user_fixup(void);
|
||||
|
||||
static const struct exception_table_entry __copy_user_fixup_ex = {
|
||||
.fixup = (unsigned long)&__copy_user_fixup,
|
||||
};
|
||||
|
||||
/*
|
||||
* Some functions that may trap due to a bad user-mode address have too
|
||||
* many loads and stores in them to make it at all practical to label
|
||||
* each one and put them all in the main exception table.
|
||||
*
|
||||
* In particular, the fast memcpy routine is like this. Its fix-up is
|
||||
* just to fall back to a slow byte-at-a-time copy, which is handled the
|
||||
* conventional way. So it's functionally OK to just handle any trap
|
||||
* occurring in the fast memcpy with that fixup.
|
||||
*/
|
||||
static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
|
||||
{
|
||||
if ((addr >= (unsigned long)&copy_user_memcpy) &&
|
||||
(addr <= (unsigned long)&copy_user_memcpy_end))
|
||||
return &__copy_user_fixup_ex;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Simple binary search */
|
||||
const struct exception_table_entry *
|
||||
search_extable(const struct exception_table_entry *first,
|
||||
const struct exception_table_entry *last,
|
||||
unsigned long value)
|
||||
{
|
||||
const struct exception_table_entry *mid;
|
||||
|
||||
mid = check_exception_ranges(value);
|
||||
if (mid)
|
||||
return mid;
|
||||
|
||||
while (first <= last) {
|
||||
long diff;
|
||||
|
||||
mid = (last - first) / 2 + first;
|
||||
diff = mid->insn - value;
|
||||
if (diff == 0)
|
||||
return mid;
|
||||
else if (diff < 0)
|
||||
first = mid+1;
|
||||
else
|
||||
last = mid-1;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int fixup_exception(struct pt_regs *regs)
|
||||
{
|
||||
const struct exception_table_entry *fixup;
|
||||
|
||||
fixup = search_exception_tables(regs->pc);
|
||||
if (fixup) {
|
||||
regs->pc = fixup->fixup;
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
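/*
 * Illustration (generic kernel behaviour, not specific to this file): when a
 * __get_user() style access faults on a bad user pointer, do_page_fault()
 * ends up in fixup_exception(), which rewrites regs->pc to the fixup stub
 * recorded in the exception table; the stub then arranges an -EFAULT return
 * instead of letting the kernel oops.
 */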
|
519
arch/sh/mm/fault.c
Normal file
|
@ -0,0 +1,519 @@
|
|||
/*
|
||||
* Page fault handler for SH with an MMU.
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka
|
||||
* Copyright (C) 2003 - 2012 Paul Mundt
|
||||
*
|
||||
* Based on linux/arch/i386/mm/fault.c:
|
||||
* Copyright (C) 1995 Linus Torvalds
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <asm/io_trapped.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
static inline int notify_page_fault(struct pt_regs *regs, int trap)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (kprobes_built_in() && !user_mode(regs)) {
|
||||
preempt_disable();
|
||||
if (kprobe_running() && kprobe_fault_handler(regs, trap))
|
||||
ret = 1;
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
siginfo_t info;
|
||||
|
||||
info.si_signo = si_signo;
|
||||
info.si_errno = 0;
|
||||
info.si_code = si_code;
|
||||
info.si_addr = (void __user *)address;
|
||||
|
||||
force_sig_info(si_signo, &info, tsk);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is useful to dump out the page tables associated with
|
||||
* 'addr' in mm 'mm'.
|
||||
*/
|
||||
static void show_pte(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
|
||||
if (mm) {
|
||||
pgd = mm->pgd;
|
||||
} else {
|
||||
pgd = get_TTB();
|
||||
|
||||
if (unlikely(!pgd))
|
||||
pgd = swapper_pg_dir;
|
||||
}
|
||||
|
||||
printk(KERN_ALERT "pgd = %p\n", pgd);
|
||||
pgd += pgd_index(addr);
|
||||
printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
|
||||
(u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
|
||||
|
||||
do {
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if (pgd_none(*pgd))
|
||||
break;
|
||||
|
||||
if (pgd_bad(*pgd)) {
|
||||
printk("(bad)");
|
||||
break;
|
||||
}
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (PTRS_PER_PUD != 1)
|
||||
printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
|
||||
(u64)pud_val(*pud));
|
||||
|
||||
if (pud_none(*pud))
|
||||
break;
|
||||
|
||||
if (pud_bad(*pud)) {
|
||||
printk("(bad)");
|
||||
break;
|
||||
}
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (PTRS_PER_PMD != 1)
|
||||
printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
|
||||
(u64)pmd_val(*pmd));
|
||||
|
||||
if (pmd_none(*pmd))
|
||||
break;
|
||||
|
||||
if (pmd_bad(*pmd)) {
|
||||
printk("(bad)");
|
||||
break;
|
||||
}
|
||||
|
||||
/* We must not map this if we have highmem enabled */
|
||||
if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
|
||||
break;
|
||||
|
||||
pte = pte_offset_kernel(pmd, addr);
|
||||
printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
|
||||
(u64)pte_val(*pte));
|
||||
} while (0);
|
||||
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
|
||||
{
|
||||
unsigned index = pgd_index(address);
|
||||
pgd_t *pgd_k;
|
||||
pud_t *pud, *pud_k;
|
||||
pmd_t *pmd, *pmd_k;
|
||||
|
||||
pgd += index;
|
||||
pgd_k = init_mm.pgd + index;
|
||||
|
||||
if (!pgd_present(*pgd_k))
|
||||
return NULL;
|
||||
|
||||
pud = pud_offset(pgd, address);
|
||||
pud_k = pud_offset(pgd_k, address);
|
||||
if (!pud_present(*pud_k))
|
||||
return NULL;
|
||||
|
||||
if (!pud_present(*pud))
|
||||
set_pud(pud, *pud_k);
|
||||
|
||||
pmd = pmd_offset(pud, address);
|
||||
pmd_k = pmd_offset(pud_k, address);
|
||||
if (!pmd_present(*pmd_k))
|
||||
return NULL;
|
||||
|
||||
if (!pmd_present(*pmd))
|
||||
set_pmd(pmd, *pmd_k);
|
||||
else {
|
||||
/*
|
||||
* The page tables are fully synchronised so there must
|
||||
* be another reason for the fault. Return NULL here to
|
||||
* signal that we have not taken care of the fault.
|
||||
*/
|
||||
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return pmd_k;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SH_STORE_QUEUES
|
||||
#define __FAULT_ADDR_LIMIT P3_ADDR_MAX
|
||||
#else
|
||||
#define __FAULT_ADDR_LIMIT VMALLOC_END
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Handle a fault on the vmalloc or module mapping area
|
||||
*/
|
||||
static noinline int vmalloc_fault(unsigned long address)
|
||||
{
|
||||
pgd_t *pgd_k;
|
||||
pmd_t *pmd_k;
|
||||
pte_t *pte_k;
|
||||
|
||||
/* Make sure we are in vmalloc/module/P3 area: */
|
||||
if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Synchronize this task's top level page-table
|
||||
* with the 'reference' page table.
|
||||
*
|
||||
* Do _not_ use "current" here. We might be inside
|
||||
* an interrupt in the middle of a task switch..
|
||||
*/
|
||||
pgd_k = get_TTB();
|
||||
pmd_k = vmalloc_sync_one(pgd_k, address);
|
||||
if (!pmd_k)
|
||||
return -1;
|
||||
|
||||
pte_k = pte_offset_kernel(pmd_k, address);
|
||||
if (!pte_present(*pte_k))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
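/*
 * Example scenario: a driver vmalloc()s a buffer, which populates only
 * init_mm.pgd; the first access from a task whose pgd has not yet seen that
 * entry faults, lands here, and vmalloc_sync_one() copies the pud/pmd from
 * the reference page table without ever taking mmap_sem.
 */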
|
||||
|
||||
static void
|
||||
show_fault_oops(struct pt_regs *regs, unsigned long address)
|
||||
{
|
||||
if (!oops_may_print())
|
||||
return;
|
||||
|
||||
printk(KERN_ALERT "BUG: unable to handle kernel ");
|
||||
if (address < PAGE_SIZE)
|
||||
printk(KERN_CONT "NULL pointer dereference");
|
||||
else
|
||||
printk(KERN_CONT "paging request");
|
||||
|
||||
printk(KERN_CONT " at %08lx\n", address);
|
||||
printk(KERN_ALERT "PC:");
|
||||
printk_address(regs->pc, 1);
|
||||
|
||||
show_pte(NULL, address);
|
||||
}
|
||||
|
||||
static noinline void
|
||||
no_context(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address)
|
||||
{
|
||||
/* Are we prepared to handle this kernel fault? */
|
||||
if (fixup_exception(regs))
|
||||
return;
|
||||
|
||||
if (handle_trapped_io(regs, address))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Oops. The kernel tried to access some bad page. We'll have to
|
||||
* terminate things with extreme prejudice.
|
||||
*/
|
||||
bust_spinlocks(1);
|
||||
|
||||
show_fault_oops(regs, address);
|
||||
|
||||
die("Oops", regs, error_code);
|
||||
bust_spinlocks(0);
|
||||
do_exit(SIGKILL);
|
||||
}
|
||||
|
||||
static void
|
||||
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address, int si_code)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
/* User mode accesses just cause a SIGSEGV */
|
||||
if (user_mode(regs)) {
|
||||
/*
|
||||
* It's possible to have interrupts off here:
|
||||
*/
|
||||
local_irq_enable();
|
||||
|
||||
force_sig_info_fault(SIGSEGV, si_code, address, tsk);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
no_context(regs, error_code, address);
|
||||
}
|
||||
|
||||
static noinline void
|
||||
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address)
|
||||
{
|
||||
__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
|
||||
}
|
||||
|
||||
static void
|
||||
__bad_area(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address, int si_code)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
|
||||
/*
|
||||
* Something tried to access memory that isn't in our memory map..
|
||||
* Fix it, but check if it's kernel or user first..
|
||||
*/
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
__bad_area_nosemaphore(regs, error_code, address, si_code);
|
||||
}
|
||||
|
||||
static noinline void
|
||||
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
|
||||
{
|
||||
__bad_area(regs, error_code, address, SEGV_MAPERR);
|
||||
}
|
||||
|
||||
static noinline void
|
||||
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address)
|
||||
{
|
||||
__bad_area(regs, error_code, address, SEGV_ACCERR);
|
||||
}
|
||||
|
||||
static void
|
||||
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->mm;
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/* Kernel mode? Handle exceptions or die: */
|
||||
if (!user_mode(regs))
|
||||
no_context(regs, error_code, address);
|
||||
|
||||
force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
|
||||
}
|
||||
|
||||
static noinline int
|
||||
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address, unsigned int fault)
|
||||
{
|
||||
/*
|
||||
* Pagefault was interrupted by SIGKILL. We have no reason to
|
||||
* continue pagefault.
|
||||
*/
|
||||
if (fatal_signal_pending(current)) {
|
||||
if (!(fault & VM_FAULT_RETRY))
|
||||
up_read(&current->mm->mmap_sem);
|
||||
if (!user_mode(regs))
|
||||
no_context(regs, error_code, address);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!(fault & VM_FAULT_ERROR))
|
||||
return 0;
|
||||
|
||||
if (fault & VM_FAULT_OOM) {
|
||||
/* Kernel mode? Handle exceptions or die: */
|
||||
if (!user_mode(regs)) {
|
||||
up_read(&current->mm->mmap_sem);
|
||||
no_context(regs, error_code, address);
|
||||
return 1;
|
||||
}
|
||||
up_read(&current->mm->mmap_sem);
|
||||
|
||||
/*
|
||||
* We ran out of memory, call the OOM killer, and return to
|
||||
* userspace (which will retry the fault, or kill us if we got
|
||||
* oom-killed):
|
||||
*/
|
||||
pagefault_out_of_memory();
|
||||
} else {
|
||||
if (fault & VM_FAULT_SIGBUS)
|
||||
do_sigbus(regs, error_code, address);
|
||||
else if (fault & VM_FAULT_SIGSEGV)
|
||||
bad_area(regs, error_code, address);
|
||||
else
|
||||
BUG();
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int access_error(int error_code, struct vm_area_struct *vma)
|
||||
{
|
||||
if (error_code & FAULT_CODE_WRITE) {
|
||||
/* write, present and write, not present: */
|
||||
if (unlikely(!(vma->vm_flags & VM_WRITE)))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ITLB miss on NX page */
|
||||
if (unlikely((error_code & FAULT_CODE_ITLB) &&
|
||||
!(vma->vm_flags & VM_EXEC)))
|
||||
return 1;
|
||||
|
||||
/* read, not present: */
|
||||
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
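/*
 * For instance, a store to a PROT_READ mapping fails the VM_WRITE check
 * above and is reported by bad_area_access_error() as SIGSEGV/SEGV_ACCERR,
 * whereas a store to an address with no vma at all never reaches this
 * function and is reported as SEGV_MAPERR instead.
 */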
|
||||
|
||||
static int fault_in_kernel_space(unsigned long address)
|
||||
{
|
||||
return address >= TASK_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine handles page faults. It determines the address,
|
||||
* and the problem, and then passes it off to one of the appropriate
|
||||
* routines.
|
||||
*/
|
||||
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
|
||||
unsigned long error_code,
|
||||
unsigned long address)
|
||||
{
|
||||
unsigned long vec;
|
||||
struct task_struct *tsk;
|
||||
struct mm_struct *mm;
|
||||
struct vm_area_struct * vma;
|
||||
int fault;
|
||||
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
|
||||
tsk = current;
|
||||
mm = tsk->mm;
|
||||
vec = lookup_exception_vector();
|
||||
|
||||
/*
|
||||
* We fault-in kernel-space virtual memory on-demand. The
|
||||
* 'reference' page table is init_mm.pgd.
|
||||
*
|
||||
* NOTE! We MUST NOT take any locks for this case. We may
|
||||
* be in an interrupt or a critical region, and should
|
||||
* only copy the information from the master page table,
|
||||
* nothing more.
|
||||
*/
|
||||
if (unlikely(fault_in_kernel_space(address))) {
|
||||
if (vmalloc_fault(address) >= 0)
|
||||
return;
|
||||
if (notify_page_fault(regs, vec))
|
||||
return;
|
||||
|
||||
bad_area_nosemaphore(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(notify_page_fault(regs, vec)))
|
||||
return;
|
||||
|
||||
/* Only enable interrupts if they were on before the fault */
|
||||
if ((regs->sr & SR_IMASK) != SR_IMASK)
|
||||
local_irq_enable();
|
||||
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
|
||||
/*
|
||||
* If we're in an interrupt, have no user context or are running
|
||||
* in an atomic region then we must not take the fault:
|
||||
*/
|
||||
if (unlikely(in_atomic() || !mm)) {
|
||||
bad_area_nosemaphore(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
|
||||
retry:
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
vma = find_vma(mm, address);
|
||||
if (unlikely(!vma)) {
|
||||
bad_area(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
if (likely(vma->vm_start <= address))
|
||||
goto good_area;
|
||||
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
|
||||
bad_area(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
if (unlikely(expand_stack(vma, address))) {
|
||||
bad_area(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ok, we have a good vm_area for this memory access, so
|
||||
* we can handle it..
|
||||
*/
|
||||
good_area:
|
||||
if (unlikely(access_error(error_code, vma))) {
|
||||
bad_area_access_error(regs, error_code, address);
|
||||
return;
|
||||
}
|
||||
|
||||
set_thread_fault_code(error_code);
|
||||
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
if (error_code & FAULT_CODE_WRITE)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
|
||||
/*
|
||||
* If for any reason at all we couldn't handle the fault,
|
||||
* make sure we exit gracefully rather than endlessly redo
|
||||
* the fault.
|
||||
*/
|
||||
fault = handle_mm_fault(mm, vma, address, flags);
|
||||
|
||||
if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
|
||||
if (mm_fault_error(regs, error_code, address, fault))
|
||||
return;
|
||||
|
||||
if (flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
if (fault & VM_FAULT_MAJOR) {
|
||||
tsk->maj_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
|
||||
regs, address);
|
||||
} else {
|
||||
tsk->min_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
|
||||
regs, address);
|
||||
}
|
||||
if (fault & VM_FAULT_RETRY) {
|
||||
flags &= ~FAULT_FLAG_ALLOW_RETRY;
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
|
||||
/*
|
||||
* No need to up_read(&mm->mmap_sem) as we would
|
||||
* have already released it in __lock_page_or_retry
|
||||
* in mm/filemap.c.
|
||||
*/
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
}
|
110
arch/sh/mm/flush-sh4.c
Normal file
|
@ -0,0 +1,110 @@
|
|||
#include <linux/mm.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cache_insns.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
/*
|
||||
* Write back the dirty D-caches, but not invalidate them.
|
||||
*
|
||||
* START: Virtual Address (U0, P1, or P3)
|
||||
* SIZE: Size of the region.
|
||||
*/
|
||||
static void sh4__flush_wback_region(void *start, int size)
|
||||
{
|
||||
reg_size_t aligned_start, v, cnt, end;
|
||||
|
||||
aligned_start = register_align(start);
|
||||
v = aligned_start & ~(L1_CACHE_BYTES-1);
|
||||
end = (aligned_start + size + L1_CACHE_BYTES-1)
|
||||
& ~(L1_CACHE_BYTES-1);
|
||||
cnt = (end - v) / L1_CACHE_BYTES;
|
||||
|
||||
while (cnt >= 8) {
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
cnt -= 8;
|
||||
}
|
||||
|
||||
while (cnt) {
|
||||
__ocbwb(v); v += L1_CACHE_BYTES;
|
||||
cnt--;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Write back the dirty D-caches and invalidate them.
|
||||
*
|
||||
* START: Virtual Address (U0, P1, or P3)
|
||||
* SIZE: Size of the region.
|
||||
*/
|
||||
static void sh4__flush_purge_region(void *start, int size)
|
||||
{
|
||||
reg_size_t aligned_start, v, cnt, end;
|
||||
|
||||
aligned_start = register_align(start);
|
||||
v = aligned_start & ~(L1_CACHE_BYTES-1);
|
||||
end = (aligned_start + size + L1_CACHE_BYTES-1)
|
||||
& ~(L1_CACHE_BYTES-1);
|
||||
cnt = (end - v) / L1_CACHE_BYTES;
|
||||
|
||||
while (cnt >= 8) {
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
cnt -= 8;
|
||||
}
|
||||
while (cnt) {
|
||||
__ocbp(v); v += L1_CACHE_BYTES;
|
||||
cnt--;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* No write back please
|
||||
*/
|
||||
static void sh4__flush_invalidate_region(void *start, int size)
|
||||
{
|
||||
reg_size_t aligned_start, v, cnt, end;
|
||||
|
||||
aligned_start = register_align(start);
|
||||
v = aligned_start & ~(L1_CACHE_BYTES-1);
|
||||
end = (aligned_start + size + L1_CACHE_BYTES-1)
|
||||
& ~(L1_CACHE_BYTES-1);
|
||||
cnt = (end - v) / L1_CACHE_BYTES;
|
||||
|
||||
while (cnt >= 8) {
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
cnt -= 8;
|
||||
}
|
||||
|
||||
while (cnt) {
|
||||
__ocbi(v); v += L1_CACHE_BYTES;
|
||||
cnt--;
|
||||
}
|
||||
}
|
||||
|
||||
void __init sh4__flush_region_init(void)
|
||||
{
|
||||
__flush_wback_region = sh4__flush_wback_region;
|
||||
__flush_invalidate_region = sh4__flush_invalidate_region;
|
||||
__flush_purge_region = sh4__flush_purge_region;
|
||||
}
|
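A standalone sketch of the cache-line rounding shared by the sh4__flush_*_region() helpers above: round the start down and the end up to a cache-line boundary, then count lines. L1_CACHE_BYTES is assumed to be 32 here purely for illustration.

#include <stdio.h>

#define L1_CACHE_BYTES 32UL	/* assumed line size, for illustration only */

int main(void)
{
	unsigned long start = 0x1005;	/* arbitrary unaligned address */
	unsigned long size  = 100;

	unsigned long v   = start & ~(L1_CACHE_BYTES - 1);
	unsigned long end = (start + size + L1_CACHE_BYTES - 1)
			    & ~(L1_CACHE_BYTES - 1);
	unsigned long cnt = (end - v) / L1_CACHE_BYTES;

	/* [0x1005, 0x1069) rounds out to [0x1000, 0x1080): 4 cache lines. */
	printf("v=0x%lx end=0x%lx cnt=%lu\n", v, end, cnt);
	return 0;
}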
275
arch/sh/mm/gup.c
Normal file
|
@@ -0,0 +1,275 @@
|
|||
/*
|
||||
* Lockless get_user_pages_fast for SuperH
|
||||
*
|
||||
* Copyright (C) 2009 - 2010 Paul Mundt
|
||||
*
|
||||
* Cloned from the x86 and PowerPC versions, by:
|
||||
*
|
||||
* Copyright (C) 2008 Nick Piggin
|
||||
* Copyright (C) 2008 Novell Inc.
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/vmstat.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
static inline pte_t gup_get_pte(pte_t *ptep)
|
||||
{
|
||||
#ifndef CONFIG_X2TLB
|
||||
return ACCESS_ONCE(*ptep);
|
||||
#else
|
||||
/*
|
||||
* With get_user_pages_fast, we walk down the pagetables without
|
||||
* taking any locks. For this we would like to load the pointers
|
||||
* atomically, but that is not possible with 64-bit PTEs. What
|
||||
* we do have is the guarantee that a pte will only either go
|
||||
* from not present to present, or present to not present or both
|
||||
* -- it will not switch to a completely different present page
|
||||
* without a TLB flush in between; something that we are blocking
|
||||
* by holding interrupts off.
|
||||
*
|
||||
* Setting ptes from not present to present goes:
|
||||
* ptep->pte_high = h;
|
||||
* smp_wmb();
|
||||
* ptep->pte_low = l;
|
||||
*
|
||||
* And present to not present goes:
|
||||
* ptep->pte_low = 0;
|
||||
* smp_wmb();
|
||||
* ptep->pte_high = 0;
|
||||
*
|
||||
* We must ensure here that the load of pte_low sees l iff pte_high
|
||||
* sees h. We load pte_high *after* loading pte_low, which ensures we
|
||||
* don't see an older value of pte_high. *Then* we recheck pte_low,
|
||||
* which ensures that we haven't picked up a changed pte high. We might
|
||||
* have got rubbish values from pte_low and pte_high, but we are
|
||||
* guaranteed that pte_low will not have the present bit set *unless*
|
||||
* it is 'l'. And get_user_pages_fast only operates on present ptes, so
|
||||
* we're safe.
|
||||
*
|
||||
* gup_get_pte should not be used or copied outside gup.c without being
|
||||
* very careful -- it does not atomically load the pte or anything that
|
||||
* is likely to be useful for you.
|
||||
*/
|
||||
pte_t pte;
|
||||
|
||||
retry:
|
||||
pte.pte_low = ptep->pte_low;
|
||||
smp_rmb();
|
||||
pte.pte_high = ptep->pte_high;
|
||||
smp_rmb();
|
||||
if (unlikely(pte.pte_low != ptep->pte_low))
|
||||
goto retry;
|
||||
|
||||
return pte;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* The performance critical leaf functions are made noinline otherwise gcc
|
||||
* inlines everything into a single function which results in too much
|
||||
* register pressure.
|
||||
*/
|
||||
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
u64 mask, result;
|
||||
pte_t *ptep;
|
||||
|
||||
#ifdef CONFIG_X2TLB
|
||||
result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
|
||||
if (write)
|
||||
result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
|
||||
#elif defined(CONFIG_SUPERH64)
|
||||
result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
|
||||
if (write)
|
||||
result |= _PAGE_WRITE;
|
||||
#else
|
||||
result = _PAGE_PRESENT | _PAGE_USER;
|
||||
if (write)
|
||||
result |= _PAGE_RW;
|
||||
#endif
|
||||
|
||||
mask = result | _PAGE_SPECIAL;
|
||||
|
||||
ptep = pte_offset_map(&pmd, addr);
|
||||
do {
|
||||
pte_t pte = gup_get_pte(ptep);
|
||||
struct page *page;
|
||||
|
||||
if ((pte_val(pte) & mask) != result) {
|
||||
pte_unmap(ptep);
|
||||
return 0;
|
||||
}
|
||||
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
|
||||
page = pte_page(pte);
|
||||
get_page(page);
|
||||
__flush_anon_page(page, addr);
|
||||
flush_dcache_page(page);
|
||||
pages[*nr] = page;
|
||||
(*nr)++;
|
||||
|
||||
} while (ptep++, addr += PAGE_SIZE, addr != end);
|
||||
pte_unmap(ptep - 1);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
|
||||
int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long next;
|
||||
pmd_t *pmdp;
|
||||
|
||||
pmdp = pmd_offset(&pud, addr);
|
||||
do {
|
||||
pmd_t pmd = *pmdp;
|
||||
|
||||
next = pmd_addr_end(addr, end);
|
||||
if (pmd_none(pmd))
|
||||
return 0;
|
||||
if (!gup_pte_range(pmd, addr, next, write, pages, nr))
|
||||
return 0;
|
||||
} while (pmdp++, addr = next, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
|
||||
int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long next;
|
||||
pud_t *pudp;
|
||||
|
||||
pudp = pud_offset(&pgd, addr);
|
||||
do {
|
||||
pud_t pud = *pudp;
|
||||
|
||||
next = pud_addr_end(addr, end);
|
||||
if (pud_none(pud))
|
||||
return 0;
|
||||
if (!gup_pmd_range(pud, addr, next, write, pages, nr))
|
||||
return 0;
|
||||
} while (pudp++, addr = next, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
|
||||
* back to the regular GUP.
|
||||
*/
|
||||
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
struct page **pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr, len, end;
|
||||
unsigned long next;
|
||||
unsigned long flags;
|
||||
pgd_t *pgdp;
|
||||
int nr = 0;
|
||||
|
||||
start &= PAGE_MASK;
|
||||
addr = start;
|
||||
len = (unsigned long) nr_pages << PAGE_SHIFT;
|
||||
end = start + len;
|
||||
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
|
||||
(void __user *)start, len)))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* This doesn't prevent pagetable teardown, but does prevent
|
||||
* the pagetables and pages from being freed.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
do {
|
||||
pgd_t pgd = *pgdp;
|
||||
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none(pgd))
|
||||
break;
|
||||
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
|
||||
break;
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return nr;
|
||||
}
|
||||
|
||||
/**
|
||||
* get_user_pages_fast() - pin user pages in memory
|
||||
* @start: starting user address
|
||||
* @nr_pages: number of pages from start to pin
|
||||
* @write: whether pages will be written to
|
||||
* @pages: array that receives pointers to the pages pinned.
|
||||
* Should be at least nr_pages long.
|
||||
*
|
||||
* Attempt to pin user pages in memory without taking mm->mmap_sem.
|
||||
* If not successful, it will fall back to taking the lock and
|
||||
* calling get_user_pages().
|
||||
*
|
||||
* Returns number of pages pinned. This may be fewer than the number
|
||||
* requested. If nr_pages is 0 or negative, returns 0. If no pages
|
||||
* were pinned, returns -errno.
|
||||
*/
|
||||
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
struct page **pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr, len, end;
|
||||
unsigned long next;
|
||||
pgd_t *pgdp;
|
||||
int nr = 0;
|
||||
|
||||
start &= PAGE_MASK;
|
||||
addr = start;
|
||||
len = (unsigned long) nr_pages << PAGE_SHIFT;
|
||||
|
||||
end = start + len;
|
||||
if (end < start)
|
||||
goto slow_irqon;
|
||||
|
||||
local_irq_disable();
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
do {
|
||||
pgd_t pgd = *pgdp;
|
||||
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none(pgd))
|
||||
goto slow;
|
||||
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
|
||||
goto slow;
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
local_irq_enable();
|
||||
|
||||
VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
|
||||
return nr;
|
||||
|
||||
{
|
||||
int ret;
|
||||
|
||||
slow:
|
||||
local_irq_enable();
|
||||
slow_irqon:
|
||||
/* Try to get the remaining pages with get_user_pages */
|
||||
start += nr << PAGE_SHIFT;
|
||||
pages += nr;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
ret = get_user_pages(current, mm, start,
|
||||
(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/* Have to be a bit careful with return values */
|
||||
if (nr > 0) {
|
||||
if (ret < 0)
|
||||
ret = nr;
|
||||
else
|
||||
ret += nr;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
}
|
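A userspace analogue of the split-load retry described in the gup_get_pte() comment above: read the low half, then the high half, then re-check the low half and retry if it changed. The split_pte type and the C11 acquire loads below are illustrative stand-ins for the kernel's pte_low/pte_high fields and smp_rmb().

#include <stdatomic.h>
#include <stdio.h>

struct split_pte {
	_Atomic unsigned int low;
	_Atomic unsigned int high;
};

static void read_pte(struct split_pte *p, unsigned int *lo, unsigned int *hi)
{
	unsigned int l, h;

	do {
		l = atomic_load_explicit(&p->low, memory_order_acquire);
		h = atomic_load_explicit(&p->high, memory_order_acquire);
		/* If low changed under us, the pair may be torn: retry. */
	} while (l != atomic_load_explicit(&p->low, memory_order_acquire));

	*lo = l;
	*hi = h;
}

int main(void)
{
	struct split_pte pte;
	unsigned int lo, hi;

	atomic_init(&pte.low, 0x1234);
	atomic_init(&pte.high, 0xabcd);

	read_pte(&pte, &lo, &hi);
	printf("low=0x%x high=0x%x\n", lo, hi);
	return 0;
}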
78
arch/sh/mm/hugetlbpage.c
Normal file
|
@@ -0,0 +1,78 @@
|
|||
/*
|
||||
* arch/sh/mm/hugetlbpage.c
|
||||
*
|
||||
* SuperH HugeTLB page support.
|
||||
*
|
||||
* Cloned from sparc64 by Paul Mundt.
|
||||
*
|
||||
* Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/sysctl.h>
|
||||
|
||||
#include <asm/mman.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
pte_t *huge_pte_alloc(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long sz)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte = NULL;
|
||||
|
||||
pgd = pgd_offset(mm, addr);
|
||||
if (pgd) {
|
||||
pud = pud_alloc(mm, pgd, addr);
|
||||
if (pud) {
|
||||
pmd = pmd_alloc(mm, pud, addr);
|
||||
if (pmd)
|
||||
pte = pte_alloc_map(mm, NULL, pmd, addr);
|
||||
}
|
||||
}
|
||||
|
||||
return pte;
|
||||
}
|
||||
|
||||
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte = NULL;
|
||||
|
||||
pgd = pgd_offset(mm, addr);
|
||||
if (pgd) {
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (pud) {
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd)
|
||||
pte = pte_offset_map(pmd, addr);
|
||||
}
|
||||
}
|
||||
|
||||
return pte;
|
||||
}
|
||||
|
||||
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pmd_huge(pmd_t pmd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pud_huge(pud_t pud)
|
||||
{
|
||||
return 0;
|
||||
}
|
534
arch/sh/mm/init.c
Normal file
|
@@ -0,0 +1,534 @@
|
|||
/*
|
||||
* linux/arch/sh/mm/init.c
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka
|
||||
* Copyright (C) 2002 - 2011 Paul Mundt
|
||||
*
|
||||
* Based on linux/arch/i386/mm/init.c:
|
||||
* Copyright (C) 1995 Linus Torvalds
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/mmzone.h>
|
||||
#include <asm/kexec.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/sizes.h>
|
||||
|
||||
pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
||||
|
||||
void __init generic_mem_init(void)
|
||||
{
|
||||
memblock_add(__MEMORY_START, __MEMORY_SIZE);
|
||||
}
|
||||
|
||||
void __init __weak plat_mem_setup(void)
|
||||
{
|
||||
/* Nothing to see here, move along. */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
static pte_t *__get_pte_phys(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
if (pgd_none(*pgd)) {
|
||||
pgd_ERROR(*pgd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pud = pud_alloc(NULL, pgd, addr);
|
||||
if (unlikely(!pud)) {
|
||||
pud_ERROR(*pud);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pmd = pmd_alloc(NULL, pud, addr);
|
||||
if (unlikely(!pmd)) {
|
||||
pmd_ERROR(*pmd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return pte_offset_kernel(pmd, addr);
|
||||
}
|
||||
|
||||
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
|
||||
{
|
||||
pte_t *pte;
|
||||
|
||||
pte = __get_pte_phys(addr);
|
||||
if (!pte_none(*pte)) {
|
||||
pte_ERROR(*pte);
|
||||
return;
|
||||
}
|
||||
|
||||
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
|
||||
local_flush_tlb_one(get_asid(), addr);
|
||||
|
||||
if (pgprot_val(prot) & _PAGE_WIRED)
|
||||
tlb_wire_entry(NULL, addr, *pte);
|
||||
}
|
||||
|
||||
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
|
||||
{
|
||||
pte_t *pte;
|
||||
|
||||
pte = __get_pte_phys(addr);
|
||||
|
||||
if (pgprot_val(prot) & _PAGE_WIRED)
|
||||
tlb_unwire_entry();
|
||||
|
||||
set_pte(pte, pfn_pte(0, __pgprot(0)));
|
||||
local_flush_tlb_one(get_asid(), addr);
|
||||
}
|
||||
|
||||
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
|
||||
{
|
||||
unsigned long address = __fix_to_virt(idx);
|
||||
|
||||
if (idx >= __end_of_fixed_addresses) {
|
||||
BUG();
|
||||
return;
|
||||
}
|
||||
|
||||
set_pte_phys(address, phys, prot);
|
||||
}
|
||||
|
||||
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
|
||||
{
|
||||
unsigned long address = __fix_to_virt(idx);
|
||||
|
||||
if (idx >= __end_of_fixed_addresses) {
|
||||
BUG();
|
||||
return;
|
||||
}
|
||||
|
||||
clear_pte_phys(address, prot);
|
||||
}
|
||||
|
||||
static pmd_t * __init one_md_table_init(pud_t *pud)
|
||||
{
|
||||
if (pud_none(*pud)) {
|
||||
pmd_t *pmd;
|
||||
|
||||
pmd = alloc_bootmem_pages(PAGE_SIZE);
|
||||
pud_populate(&init_mm, pud, pmd);
|
||||
BUG_ON(pmd != pmd_offset(pud, 0));
|
||||
}
|
||||
|
||||
return pmd_offset(pud, 0);
|
||||
}
|
||||
|
||||
static pte_t * __init one_page_table_init(pmd_t *pmd)
|
||||
{
|
||||
if (pmd_none(*pmd)) {
|
||||
pte_t *pte;
|
||||
|
||||
pte = alloc_bootmem_pages(PAGE_SIZE);
|
||||
pmd_populate_kernel(&init_mm, pmd, pte);
|
||||
BUG_ON(pte != pte_offset_kernel(pmd, 0));
|
||||
}
|
||||
|
||||
return pte_offset_kernel(pmd, 0);
|
||||
}
|
||||
|
||||
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
|
||||
unsigned long vaddr, pte_t *lastpte)
|
||||
{
|
||||
return pte;
|
||||
}
|
||||
|
||||
void __init page_table_range_init(unsigned long start, unsigned long end,
|
||||
pgd_t *pgd_base)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte = NULL;
|
||||
int i, j, k;
|
||||
unsigned long vaddr;
|
||||
|
||||
vaddr = start;
|
||||
i = __pgd_offset(vaddr);
|
||||
j = __pud_offset(vaddr);
|
||||
k = __pmd_offset(vaddr);
|
||||
pgd = pgd_base + i;
|
||||
|
||||
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
|
||||
pud = (pud_t *)pgd;
|
||||
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
|
||||
pmd = one_md_table_init(pud);
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
pmd += k;
|
||||
#endif
|
||||
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
|
||||
pte = page_table_kmap_check(one_page_table_init(pmd),
|
||||
pmd, vaddr, pte);
|
||||
vaddr += PMD_SIZE;
|
||||
}
|
||||
k = 0;
|
||||
}
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
void __init allocate_pgdat(unsigned int nid)
|
||||
{
|
||||
unsigned long start_pfn, end_pfn;
|
||||
#ifdef CONFIG_NEED_MULTIPLE_NODES
|
||||
unsigned long phys;
|
||||
#endif
|
||||
|
||||
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
|
||||
|
||||
#ifdef CONFIG_NEED_MULTIPLE_NODES
|
||||
phys = __memblock_alloc_base(sizeof(struct pglist_data),
|
||||
SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
|
||||
/* Retry with all of system memory */
|
||||
if (!phys)
|
||||
phys = __memblock_alloc_base(sizeof(struct pglist_data),
|
||||
SMP_CACHE_BYTES, memblock_end_of_DRAM());
|
||||
if (!phys)
|
||||
panic("Can't allocate pgdat for node %d\n", nid);
|
||||
|
||||
NODE_DATA(nid) = __va(phys);
|
||||
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
|
||||
|
||||
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
|
||||
#endif
|
||||
|
||||
NODE_DATA(nid)->node_start_pfn = start_pfn;
|
||||
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
|
||||
}
|
||||
|
||||
static void __init bootmem_init_one_node(unsigned int nid)
|
||||
{
|
||||
unsigned long total_pages, paddr;
|
||||
unsigned long end_pfn;
|
||||
struct pglist_data *p;
|
||||
|
||||
p = NODE_DATA(nid);
|
||||
|
||||
/* Nothing to do.. */
|
||||
if (!p->node_spanned_pages)
|
||||
return;
|
||||
|
||||
end_pfn = pgdat_end_pfn(p);
|
||||
|
||||
total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
|
||||
|
||||
paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
|
||||
if (!paddr)
|
||||
panic("Can't allocate bootmap for nid[%d]\n", nid);
|
||||
|
||||
init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
|
||||
|
||||
free_bootmem_with_active_regions(nid, end_pfn);
|
||||
|
||||
/*
|
||||
* XXX Handle initial reservations for the system memory node
|
||||
* only for the moment, we'll refactor this later for handling
|
||||
* reservations in other nodes.
|
||||
*/
|
||||
if (nid == 0) {
|
||||
struct memblock_region *reg;
|
||||
|
||||
/* Reserve the sections we're already using. */
|
||||
for_each_memblock(reserved, reg) {
|
||||
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
|
||||
}
|
||||
}
|
||||
|
||||
sparse_memory_present_with_active_regions(nid);
|
||||
}
|
||||
|
||||
static void __init do_init_bootmem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
int i;
|
||||
|
||||
/* Add active regions with valid PFNs. */
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned long start_pfn, end_pfn;
|
||||
start_pfn = memblock_region_memory_base_pfn(reg);
|
||||
end_pfn = memblock_region_memory_end_pfn(reg);
|
||||
__add_active_range(0, start_pfn, end_pfn);
|
||||
}
|
||||
|
||||
/* All of system RAM sits in node 0 for the non-NUMA case */
|
||||
allocate_pgdat(0);
|
||||
node_set_online(0);
|
||||
|
||||
plat_mem_setup();
|
||||
|
||||
for_each_online_node(i)
|
||||
bootmem_init_one_node(i);
|
||||
|
||||
sparse_init();
|
||||
}
|
||||
|
||||
static void __init early_reserve_mem(void)
|
||||
{
|
||||
unsigned long start_pfn;
|
||||
u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
|
||||
u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
|
||||
|
||||
/*
|
||||
* Partially used pages are not usable - thus
|
||||
* we are rounding upwards:
|
||||
*/
|
||||
start_pfn = PFN_UP(__pa(_end));
|
||||
|
||||
/*
|
||||
* Reserve the kernel text and reserve the bootmem bitmap. We do
|
||||
* this in two steps (first step was init_bootmem()), because
|
||||
* this catches the (definitely buggy) case of us accidentally
|
||||
* initializing the bootmem allocator with an invalid RAM area.
|
||||
*/
|
||||
memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
|
||||
|
||||
/*
|
||||
* Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
|
||||
*/
|
||||
if (CONFIG_ZERO_PAGE_OFFSET != 0)
|
||||
memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
|
||||
|
||||
/*
|
||||
* Handle additional early reservations
|
||||
*/
|
||||
check_for_initrd();
|
||||
reserve_crashkernel();
|
||||
}
|
||||
|
||||
void __init paging_init(void)
|
||||
{
|
||||
unsigned long max_zone_pfns[MAX_NR_ZONES];
|
||||
unsigned long vaddr, end;
|
||||
int nid;
|
||||
|
||||
sh_mv.mv_mem_init();
|
||||
|
||||
early_reserve_mem();
|
||||
|
||||
/*
|
||||
* Once the early reservations are out of the way, give the
|
||||
* platforms a chance to kick out some memory.
|
||||
*/
|
||||
if (sh_mv.mv_mem_reserve)
|
||||
sh_mv.mv_mem_reserve();
|
||||
|
||||
memblock_enforce_memory_limit(memory_limit);
|
||||
memblock_allow_resize();
|
||||
|
||||
memblock_dump_all();
|
||||
|
||||
/*
|
||||
* Determine low and high memory ranges:
|
||||
*/
|
||||
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
|
||||
min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
|
||||
|
||||
nodes_clear(node_online_map);
|
||||
|
||||
memory_start = (unsigned long)__va(__MEMORY_START);
|
||||
memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
|
||||
|
||||
uncached_init();
|
||||
pmb_init();
|
||||
do_init_bootmem();
|
||||
ioremap_fixed_init();
|
||||
|
||||
/* We don't need to map the kernel through the TLB, as
|
||||
* it is permanently mapped using P1. So clear the
|
||||
* entire pgd. */
|
||||
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
|
||||
|
||||
/* Set an initial value for the MMU.TTB so we don't have to
|
||||
* check for a null value. */
|
||||
set_TTB(swapper_pg_dir);
|
||||
|
||||
/*
|
||||
* Populate the relevant portions of swapper_pg_dir so that
|
||||
* we can use the fixmap entries without calling kmalloc.
|
||||
* pte's will be filled in by __set_fixmap().
|
||||
*/
|
||||
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
|
||||
end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
|
||||
page_table_range_init(vaddr, end, swapper_pg_dir);
|
||||
|
||||
kmap_coherent_init();
|
||||
|
||||
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
|
||||
|
||||
for_each_online_node(nid) {
|
||||
pg_data_t *pgdat = NODE_DATA(nid);
|
||||
unsigned long low, start_pfn;
|
||||
|
||||
start_pfn = pgdat->bdata->node_min_pfn;
|
||||
low = pgdat->bdata->node_low_pfn;
|
||||
|
||||
if (max_zone_pfns[ZONE_NORMAL] < low)
|
||||
max_zone_pfns[ZONE_NORMAL] = low;
|
||||
|
||||
printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
|
||||
nid, start_pfn, low);
|
||||
}
|
||||
|
||||
free_area_init_nodes(max_zone_pfns);
|
||||
}
|
||||
|
||||
/*
|
||||
* Early initialization for any I/O MMUs we might have.
|
||||
*/
|
||||
static void __init iommu_init(void)
|
||||
{
|
||||
no_iommu_init();
|
||||
}
|
||||
|
||||
unsigned int mem_init_done = 0;
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
pg_data_t *pgdat;
|
||||
|
||||
iommu_init();
|
||||
|
||||
high_memory = NULL;
|
||||
for_each_online_pgdat(pgdat)
|
||||
high_memory = max_t(void *, high_memory,
|
||||
__va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
|
||||
|
||||
free_all_bootmem();
|
||||
|
||||
/* Set this up early, so we can take care of the zero page */
|
||||
cpu_cache_init();
|
||||
|
||||
/* clear the zero-page */
|
||||
memset(empty_zero_page, 0, PAGE_SIZE);
|
||||
__flush_wback_region(empty_zero_page, PAGE_SIZE);
|
||||
|
||||
vsyscall_init();
|
||||
|
||||
mem_init_print_info(NULL);
|
||||
pr_info("virtual kernel memory layout:\n"
|
||||
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
" pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
|
||||
#endif
|
||||
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
|
||||
" lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n"
|
||||
#ifdef CONFIG_UNCACHED_MAPPING
|
||||
" : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n"
|
||||
#endif
|
||||
" .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
|
||||
" .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
|
||||
" .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
|
||||
FIXADDR_START, FIXADDR_TOP,
|
||||
(FIXADDR_TOP - FIXADDR_START) >> 10,
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
|
||||
(LAST_PKMAP*PAGE_SIZE) >> 10,
|
||||
#endif
|
||||
|
||||
(unsigned long)VMALLOC_START, VMALLOC_END,
|
||||
(VMALLOC_END - VMALLOC_START) >> 20,
|
||||
|
||||
(unsigned long)memory_start, (unsigned long)high_memory,
|
||||
((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
|
||||
|
||||
#ifdef CONFIG_UNCACHED_MAPPING
|
||||
uncached_start, uncached_end, uncached_size >> 20,
|
||||
#endif
|
||||
|
||||
(unsigned long)&__init_begin, (unsigned long)&__init_end,
|
||||
((unsigned long)&__init_end -
|
||||
(unsigned long)&__init_begin) >> 10,
|
||||
|
||||
(unsigned long)&_etext, (unsigned long)&_edata,
|
||||
((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
|
||||
|
||||
(unsigned long)&_text, (unsigned long)&_etext,
|
||||
((unsigned long)&_etext - (unsigned long)&_text) >> 10);
|
||||
|
||||
mem_init_done = 1;
|
||||
}
|
||||
|
||||
void free_initmem(void)
|
||||
{
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
int arch_add_memory(int nid, u64 start, u64 size)
|
||||
{
|
||||
pg_data_t *pgdat;
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
int ret;
|
||||
|
||||
pgdat = NODE_DATA(nid);
|
||||
|
||||
/* We only have ZONE_NORMAL, so this is easy.. */
|
||||
ret = __add_pages(nid, pgdat->node_zones +
|
||||
zone_for_memory(nid, start, size, ZONE_NORMAL),
|
||||
start_pfn, nr_pages);
|
||||
if (unlikely(ret))
|
||||
printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(arch_add_memory);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
int memory_add_physaddr_to_nid(u64 addr)
|
||||
{
|
||||
/* Node 0 for now.. */
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTREMOVE
|
||||
int arch_remove_memory(u64 start, u64 size)
|
||||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct zone *zone;
|
||||
int ret;
|
||||
|
||||
zone = page_zone(pfn_to_page(start_pfn));
|
||||
ret = __remove_pages(zone, start_pfn, nr_pages);
|
||||
if (unlikely(ret))
|
||||
pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
|
||||
ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
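A standalone sketch of the PFN_UP()/PFN_PHYS() rounding that early_reserve_mem() above relies on when it reserves everything up to the first page boundary past _end; a 4 KB page is assumed for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KB pages assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
	unsigned long kernel_end = 0x88123456;	/* arbitrary _end address */
	unsigned long pfn = PFN_UP(kernel_end);

	/* A partially used final page is rounded up to the next full page. */
	printf("first free pfn = 0x%lx, phys = 0x%lx\n", pfn, PFN_PHYS(pfn));
	return 0;
}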
137
arch/sh/mm/ioremap.c
Normal file
|
@@ -0,0 +1,137 @@
|
|||
/*
|
||||
* arch/sh/mm/ioremap.c
|
||||
*
|
||||
* (C) Copyright 1995 1996 Linus Torvalds
|
||||
* (C) Copyright 2005 - 2010 Paul Mundt
|
||||
*
|
||||
* Re-map IO memory to kernel address space so that we can access it.
|
||||
* This is needed for high PCI addresses that aren't mapped in the
|
||||
* 640k-1MB IO memory area on PC's
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General
|
||||
* Public License. See the file "COPYING" in the main directory of this
|
||||
* archive for more details.
|
||||
*/
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/mmu.h>
|
||||
|
||||
/*
|
||||
* Remap an arbitrary physical address space into the kernel virtual
|
||||
* address space. Needed when the kernel wants to access high addresses
|
||||
* directly.
|
||||
*
|
||||
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
|
||||
* have to convert them into an offset in a page-aligned mapping, but the
|
||||
* caller shouldn't need to know that small detail.
|
||||
*/
|
||||
void __iomem * __init_refok
|
||||
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
|
||||
pgprot_t pgprot, void *caller)
|
||||
{
|
||||
struct vm_struct *area;
|
||||
unsigned long offset, last_addr, addr, orig_addr;
|
||||
void __iomem *mapped;
|
||||
|
||||
/* Don't allow wraparound or zero size */
|
||||
last_addr = phys_addr + size - 1;
|
||||
if (!size || last_addr < phys_addr)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* If we can't yet use the regular approach, go the fixmap route.
|
||||
*/
|
||||
if (!mem_init_done)
|
||||
return ioremap_fixed(phys_addr, size, pgprot);
|
||||
|
||||
/*
|
||||
* First try to remap through the PMB.
|
||||
* PMB entries are all pre-faulted.
|
||||
*/
|
||||
mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
|
||||
if (mapped && !IS_ERR(mapped))
|
||||
return mapped;
|
||||
|
||||
/*
|
||||
* Mappings have to be page-aligned
|
||||
*/
|
||||
offset = phys_addr & ~PAGE_MASK;
|
||||
phys_addr &= PAGE_MASK;
|
||||
size = PAGE_ALIGN(last_addr+1) - phys_addr;
|
||||
|
||||
/*
|
||||
* Ok, go for it..
|
||||
*/
|
||||
area = get_vm_area_caller(size, VM_IOREMAP, caller);
|
||||
if (!area)
|
||||
return NULL;
|
||||
area->phys_addr = phys_addr;
|
||||
orig_addr = addr = (unsigned long)area->addr;
|
||||
|
||||
if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
|
||||
vunmap((void *)orig_addr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (void __iomem *)(offset + (char *)orig_addr);
|
||||
}
|
||||
EXPORT_SYMBOL(__ioremap_caller);
|
||||
|
||||
/*
|
||||
* Simple checks for non-translatable mappings.
|
||||
*/
|
||||
static inline int iomapping_nontranslatable(unsigned long offset)
|
||||
{
|
||||
#ifdef CONFIG_29BIT
|
||||
/*
|
||||
* In 29-bit mode this includes the fixed P1/P2 areas, as well as
|
||||
* parts of P3.
|
||||
*/
|
||||
if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
|
||||
return 1;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __iounmap(void __iomem *addr)
|
||||
{
|
||||
unsigned long vaddr = (unsigned long __force)addr;
|
||||
struct vm_struct *p;
|
||||
|
||||
/*
|
||||
* Nothing to do if there is no translatable mapping.
|
||||
*/
|
||||
if (iomapping_nontranslatable(vaddr))
|
||||
return;
|
||||
|
||||
/*
|
||||
* There's no VMA if it's from an early fixed mapping.
|
||||
*/
|
||||
if (iounmap_fixed(addr) == 0)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If the PMB handled it, there's nothing else to do.
|
||||
*/
|
||||
if (pmb_unmap(addr) == 0)
|
||||
return;
|
||||
|
||||
p = remove_vm_area((void *)(vaddr & PAGE_MASK));
|
||||
if (!p) {
|
||||
printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
kfree(p);
|
||||
}
|
||||
EXPORT_SYMBOL(__iounmap);
|
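A standalone sketch of the page-alignment split performed by __ioremap_caller() above: the sub-page offset is kept aside, whole pages are mapped, and the offset is added back to the returned pointer. PAGE_SIZE is assumed to be 4 KB for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assumed, for illustration */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys = 0xfd000123;	/* arbitrary unaligned target */
	unsigned long size = 0x200;

	unsigned long last_addr = phys + size - 1;
	unsigned long offset = phys & ~PAGE_MASK;	/* 0x123 kept aside */
	phys &= PAGE_MASK;				/* map from 0xfd000000 */
	size = PAGE_ALIGN(last_addr + 1) - phys;	/* one full page here */

	printf("offset=0x%lx phys=0x%lx size=0x%lx\n", offset, phys, size);
	return 0;
}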
134
arch/sh/mm/ioremap_fixed.c
Normal file
|
@@ -0,0 +1,134 @@
|
|||
/*
|
||||
* Re-map IO memory to kernel address space so that we can access it.
|
||||
*
|
||||
* These functions should only be used when it is necessary to map a
|
||||
* physical address space into the kernel address space before ioremap()
|
||||
* can be used, e.g. early in boot before paging_init().
|
||||
*
|
||||
* Copyright (C) 2009 Matt Fleming
|
||||
*/
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
struct ioremap_map {
|
||||
void __iomem *addr;
|
||||
unsigned long size;
|
||||
unsigned long fixmap_addr;
|
||||
};
|
||||
|
||||
static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];
|
||||
|
||||
void __init ioremap_fixed_init(void)
|
||||
{
|
||||
struct ioremap_map *map;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < FIX_N_IOREMAPS; i++) {
|
||||
map = &ioremap_maps[i];
|
||||
map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
|
||||
}
|
||||
}
|
||||
|
||||
void __init __iomem *
|
||||
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
|
||||
{
|
||||
enum fixed_addresses idx0, idx;
|
||||
struct ioremap_map *map;
|
||||
unsigned int nrpages;
|
||||
unsigned long offset;
|
||||
int i, slot;
|
||||
|
||||
/*
|
||||
* Mappings have to be page-aligned
|
||||
*/
|
||||
offset = phys_addr & ~PAGE_MASK;
|
||||
phys_addr &= PAGE_MASK;
|
||||
size = PAGE_ALIGN(phys_addr + size) - phys_addr;
|
||||
|
||||
slot = -1;
|
||||
for (i = 0; i < FIX_N_IOREMAPS; i++) {
|
||||
map = &ioremap_maps[i];
|
||||
if (!map->addr) {
|
||||
map->size = size;
|
||||
slot = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (slot < 0)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Mappings have to fit in the FIX_IOREMAP area.
|
||||
*/
|
||||
nrpages = size >> PAGE_SHIFT;
|
||||
if (nrpages > FIX_N_IOREMAPS)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Ok, go for it..
|
||||
*/
|
||||
idx0 = FIX_IOREMAP_BEGIN + slot;
|
||||
idx = idx0;
|
||||
while (nrpages > 0) {
|
||||
pgprot_val(prot) |= _PAGE_WIRED;
|
||||
__set_fixmap(idx, phys_addr, prot);
|
||||
phys_addr += PAGE_SIZE;
|
||||
idx++;
|
||||
--nrpages;
|
||||
}
|
||||
|
||||
map->addr = (void __iomem *)(offset + map->fixmap_addr);
|
||||
return map->addr;
|
||||
}
|
||||
|
||||
int iounmap_fixed(void __iomem *addr)
|
||||
{
|
||||
enum fixed_addresses idx;
|
||||
struct ioremap_map *map;
|
||||
unsigned int nrpages;
|
||||
int i, slot;
|
||||
|
||||
slot = -1;
|
||||
for (i = 0; i < FIX_N_IOREMAPS; i++) {
|
||||
map = &ioremap_maps[i];
|
||||
if (map->addr == addr) {
|
||||
slot = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If we don't match, it's not for us.
|
||||
*/
|
||||
if (slot < 0)
|
||||
return -EINVAL;
|
||||
|
||||
nrpages = map->size >> PAGE_SHIFT;
|
||||
|
||||
idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1;
|
||||
while (nrpages > 0) {
|
||||
__clear_fixmap(idx, __pgprot(_PAGE_WIRED));
|
||||
--idx;
|
||||
--nrpages;
|
||||
}
|
||||
|
||||
map->size = 0;
|
||||
map->addr = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
67
arch/sh/mm/kmap.c
Normal file
|
@@ -0,0 +1,67 @@
|
|||
/*
|
||||
* arch/sh/mm/kmap.c
|
||||
*
|
||||
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
|
||||
* Copyright (C) 2002 - 2009 Paul Mundt
|
||||
*
|
||||
* Released under the terms of the GNU GPL v2.0.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#define kmap_get_fixmap_pte(vaddr) \
|
||||
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
|
||||
|
||||
static pte_t *kmap_coherent_pte;
|
||||
|
||||
void __init kmap_coherent_init(void)
|
||||
{
|
||||
unsigned long vaddr;
|
||||
|
||||
/* cache the first coherent kmap pte */
|
||||
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
|
||||
kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
|
||||
}
|
||||
|
||||
void *kmap_coherent(struct page *page, unsigned long addr)
|
||||
{
|
||||
enum fixed_addresses idx;
|
||||
unsigned long vaddr;
|
||||
|
||||
BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
|
||||
|
||||
pagefault_disable();
|
||||
|
||||
idx = FIX_CMAP_END -
|
||||
(((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
|
||||
(FIX_N_COLOURS * smp_processor_id()));
|
||||
|
||||
vaddr = __fix_to_virt(idx);
|
||||
|
||||
BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
|
||||
set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
|
||||
|
||||
return (void *)vaddr;
|
||||
}
|
||||
|
||||
void kunmap_coherent(void *kvaddr)
|
||||
{
|
||||
if (kvaddr >= (void *)FIXADDR_START) {
|
||||
unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
|
||||
enum fixed_addresses idx = __virt_to_fix(vaddr);
|
||||
|
||||
/* XXX.. Kill this later, here for sanity at the moment.. */
|
||||
__flush_purge_region((void *)vaddr, PAGE_SIZE);
|
||||
|
||||
pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
|
||||
local_flush_tlb_one(get_asid(), vaddr);
|
||||
}
|
||||
|
||||
pagefault_enable();
|
||||
}
|
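A standalone sketch of how kmap_coherent() above derives a fixmap slot from the page colour and the CPU number. FIX_CMAP_END and FIX_N_COLOURS below are assumed values, not the mirrored kernel's configuration.

#include <stdio.h>

#define PAGE_SHIFT    12
#define FIX_N_COLOURS 8
#define FIX_CMAP_END  63	/* highest colour-map fixmap index (assumed) */

static int cmap_index(unsigned long addr, int cpu)
{
	return FIX_CMAP_END -
	       (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
		(FIX_N_COLOURS * cpu));
}

int main(void)
{
	/* The same colour on different CPUs lands in a different slot. */
	printf("cpu0: %d, cpu1: %d\n",
	       cmap_index(0x3000, 0), cmap_index(0x3000, 1));
	return 0;
}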
163
arch/sh/mm/mmap.c
Normal file
|
@@ -0,0 +1,163 @@
|
|||
/*
|
||||
* arch/sh/mm/mmap.c
|
||||
*
|
||||
* Copyright (C) 2008 - 2009 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/io.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
|
||||
EXPORT_SYMBOL(shm_align_mask);
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
* To avoid cache aliases, we map the shared page with same color.
|
||||
*/
|
||||
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
|
||||
unsigned long pgoff)
|
||||
{
|
||||
unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
|
||||
unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
|
||||
|
||||
return base + off;
|
||||
}
|
||||
|
||||
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff, unsigned long flags)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
struct vm_area_struct *vma;
|
||||
int do_colour_align;
|
||||
struct vm_unmapped_area_info info;
|
||||
|
||||
if (flags & MAP_FIXED) {
|
||||
/* We do not accept a shared mapping if it would violate
|
||||
* cache aliasing constraints.
|
||||
*/
|
||||
if ((flags & MAP_SHARED) &&
|
||||
((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
|
||||
return -EINVAL;
|
||||
return addr;
|
||||
}
|
||||
|
||||
if (unlikely(len > TASK_SIZE))
|
||||
return -ENOMEM;
|
||||
|
||||
do_colour_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_colour_align = 1;
|
||||
|
||||
if (addr) {
|
||||
if (do_colour_align)
|
||||
addr = COLOUR_ALIGN(addr, pgoff);
|
||||
else
|
||||
addr = PAGE_ALIGN(addr);
|
||||
|
||||
vma = find_vma(mm, addr);
|
||||
if (TASK_SIZE - len >= addr &&
|
||||
(!vma || addr + len <= vma->vm_start))
|
||||
return addr;
|
||||
}
|
||||
|
||||
info.flags = 0;
|
||||
info.length = len;
|
||||
info.low_limit = TASK_UNMAPPED_BASE;
|
||||
info.high_limit = TASK_SIZE;
|
||||
info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
return vm_unmapped_area(&info);
|
||||
}
|
||||
|
||||
unsigned long
|
||||
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
||||
const unsigned long len, const unsigned long pgoff,
|
||||
const unsigned long flags)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr = addr0;
|
||||
int do_colour_align;
|
||||
struct vm_unmapped_area_info info;
|
||||
|
||||
if (flags & MAP_FIXED) {
|
||||
/* We do not accept a shared mapping if it would violate
|
||||
* cache aliasing constraints.
|
||||
*/
|
||||
if ((flags & MAP_SHARED) &&
|
||||
((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
|
||||
return -EINVAL;
|
||||
return addr;
|
||||
}
|
||||
|
||||
if (unlikely(len > TASK_SIZE))
|
||||
return -ENOMEM;
|
||||
|
||||
do_colour_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_colour_align = 1;
|
||||
|
||||
/* requesting a specific address */
|
||||
if (addr) {
|
||||
if (do_colour_align)
|
||||
addr = COLOUR_ALIGN(addr, pgoff);
|
||||
else
|
||||
addr = PAGE_ALIGN(addr);
|
||||
|
||||
vma = find_vma(mm, addr);
|
||||
if (TASK_SIZE - len >= addr &&
|
||||
(!vma || addr + len <= vma->vm_start))
|
||||
return addr;
|
||||
}
|
||||
|
||||
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||
info.length = len;
|
||||
info.low_limit = PAGE_SIZE;
|
||||
info.high_limit = mm->mmap_base;
|
||||
info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
addr = vm_unmapped_area(&info);
|
||||
|
||||
/*
|
||||
* A failed mmap() very likely causes application failure,
|
||||
* so fall back to the bottom-up function here. This scenario
|
||||
* can happen with large stack limits and large mmap()
|
||||
* allocations.
|
||||
*/
|
||||
if (addr & ~PAGE_MASK) {
|
||||
VM_BUG_ON(addr != -ENOMEM);
|
||||
info.flags = 0;
|
||||
info.low_limit = TASK_UNMAPPED_BASE;
|
||||
info.high_limit = TASK_SIZE;
|
||||
addr = vm_unmapped_area(&info);
|
||||
}
|
||||
|
||||
return addr;
|
||||
}
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
/*
|
||||
* You really shouldn't be using read() or write() on /dev/mem. This
|
||||
* might go away in the future.
|
||||
*/
|
||||
int valid_phys_addr_range(phys_addr_t addr, size_t count)
|
||||
{
|
||||
if (addr < __MEMORY_START)
|
||||
return 0;
|
||||
if (addr + count > __pa(high_memory))
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
|
||||
{
|
||||
return 1;
|
||||
}
|
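A standalone sketch of the colour alignment done by COLOUR_ALIGN() above: round the hint up to the aliasing boundary, then add back the colour bits of the file offset so the mapping and the page cache share a cache colour. The 16 KB alias mask and 4 KB pages are assumed values.

#include <stdio.h>

#define PAGE_SHIFT 12
static const unsigned long shm_align_mask = 0x3fff;	/* 16 KB aliasing, assumed */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

int main(void)
{
	/* hint 0x10001000, pgoff 3: 0x10007000 keeps the 0x3000 colour bits */
	printf("0x%lx\n", colour_align(0x10001000UL, 3));
	return 0;
}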
104
arch/sh/mm/nommu.c
Normal file
|
@@ -0,0 +1,104 @@
|
|||
/*
|
||||
* arch/sh/mm/nommu.c
|
||||
*
|
||||
* Various helper routines and stubs for MMUless SH.
|
||||
*
|
||||
* Copyright (C) 2002 - 2009 Paul Mundt
|
||||
*
|
||||
* Released under the terms of the GNU GPL v2.0.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/*
|
||||
* Nothing too terribly exciting here ..
|
||||
*/
|
||||
void copy_page(void *to, void *from)
|
||||
{
|
||||
memcpy(to, from, PAGE_SIZE);
|
||||
}
|
||||
|
||||
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
|
||||
{
|
||||
memcpy(to, from, n);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__kernel_size_t __clear_user(void *to, __kernel_size_t n)
|
||||
{
|
||||
memset(to, 0, n);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void local_flush_tlb_all(void)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void local_flush_tlb_mm(struct mm_struct *mm)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void local_flush_tlb_one(unsigned long asid, unsigned long page)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void __flush_tlb_global(void)
|
||||
{
|
||||
}
|
||||
|
||||
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
|
||||
{
|
||||
}
|
||||
|
||||
void __init kmap_coherent_init(void)
|
||||
{
|
||||
}
|
||||
|
||||
void *kmap_coherent(struct page *page, unsigned long addr)
|
||||
{
|
||||
BUG();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void kunmap_coherent(void *kvaddr)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
void __init page_table_range_init(unsigned long start, unsigned long end,
|
||||
pgd_t *pgd_base)
|
||||
{
|
||||
}
|
||||
|
||||
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
|
||||
{
|
||||
}
|
||||
|
||||
void pgtable_cache_init(void)
|
||||
{
|
||||
}
|
75
arch/sh/mm/numa.c
Normal file
|
@@ -0,0 +1,75 @@
|
|||
/*
|
||||
* arch/sh/mm/numa.c - Multiple node support for SH machines
|
||||
*
|
||||
* Copyright (C) 2007 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/numa.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
|
||||
EXPORT_SYMBOL_GPL(node_data);
|
||||
|
||||
/*
|
||||
* On SH machines the conventional approach is to stash system RAM
|
||||
* in node 0, and other memory blocks in to node 1 and up, ordered by
|
||||
* latency. Each node's pgdat is node-local at the beginning of the node,
|
||||
* immediately followed by the node mem map.
|
||||
*/
|
||||
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long bootmap_pages;
|
||||
unsigned long start_pfn, end_pfn;
|
||||
unsigned long bootmem_paddr;
|
||||
|
||||
/* Don't allow bogus node assignment */
|
||||
BUG_ON(nid > MAX_NUMNODES || nid <= 0);
|
||||
|
||||
start_pfn = start >> PAGE_SHIFT;
|
||||
end_pfn = end >> PAGE_SHIFT;
|
||||
|
||||
pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
|
||||
PAGE_KERNEL);
|
||||
|
||||
memblock_add(start, end - start);
|
||||
|
||||
__add_active_range(nid, start_pfn, end_pfn);
|
||||
|
||||
/* Node-local pgdat */
|
||||
NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data),
|
||||
SMP_CACHE_BYTES, end));
|
||||
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
|
||||
|
||||
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
|
||||
NODE_DATA(nid)->node_start_pfn = start_pfn;
|
||||
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
|
||||
|
||||
/* Node-local bootmap */
|
||||
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
|
||||
bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
|
||||
PAGE_SIZE, end);
|
||||
init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
|
||||
start_pfn, end_pfn);
|
||||
|
||||
free_bootmem_with_active_regions(nid, end_pfn);
|
||||
|
||||
/* Reserve the pgdat and bootmap space with the bootmem allocator */
|
||||
reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
|
||||
sizeof(struct pglist_data), BOOTMEM_DEFAULT);
|
||||
reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
|
||||
bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
|
||||
|
||||
/* It's up */
|
||||
node_set_online(nid);
|
||||
|
||||
/* Kick sparsemem */
|
||||
sparse_memory_present_with_active_regions(nid);
|
||||
}
|
57
arch/sh/mm/pgtable.c
Normal file
|
@@ -0,0 +1,57 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
|
||||
|
||||
static struct kmem_cache *pgd_cachep;
|
||||
#if PAGETABLE_LEVELS > 2
|
||||
static struct kmem_cache *pmd_cachep;
|
||||
#endif
|
||||
|
||||
void pgd_ctor(void *x)
|
||||
{
|
||||
pgd_t *pgd = x;
|
||||
|
||||
memcpy(pgd + USER_PTRS_PER_PGD,
|
||||
swapper_pg_dir + USER_PTRS_PER_PGD,
|
||||
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
|
||||
}
|
||||
|
||||
void pgtable_cache_init(void)
|
||||
{
|
||||
pgd_cachep = kmem_cache_create("pgd_cache",
|
||||
PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
|
||||
PAGE_SIZE, SLAB_PANIC, pgd_ctor);
|
||||
#if PAGETABLE_LEVELS > 2
|
||||
pmd_cachep = kmem_cache_create("pmd_cache",
|
||||
PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
|
||||
PAGE_SIZE, SLAB_PANIC, NULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
pgd_t *pgd_alloc(struct mm_struct *mm)
|
||||
{
|
||||
return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
|
||||
}
|
||||
|
||||
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
||||
{
|
||||
kmem_cache_free(pgd_cachep, pgd);
|
||||
}
|
||||
|
||||
#if PAGETABLE_LEVELS > 2
|
||||
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
||||
{
|
||||
set_pud(pud, __pud((unsigned long)pmd));
|
||||
}
|
||||
|
||||
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
|
||||
{
|
||||
return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
|
||||
}
|
||||
|
||||
void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
||||
{
|
||||
kmem_cache_free(pmd_cachep, pmd);
|
||||
}
|
||||
#endif /* PAGETABLE_LEVELS > 2 */
|
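A minimal standalone sketch of the constructor pattern used by pgd_ctor() above: each new pgd shares the kernel half of the address space by copying the upper entries from swapper_pg_dir. PTRS_PER_PGD and USER_PTRS_PER_PGD below are assumed values, not the mirrored configuration.

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD      2048	/* assumed sizes, for illustration */
#define USER_PTRS_PER_PGD 1536

typedef unsigned long pgd_t;

static pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Mirror of the pgd_ctor() idea above: share the kernel half of the pgd. */
static void pgd_ctor(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

int main(void)
{
	static pgd_t new_pgd[PTRS_PER_PGD];

	swapper_pg_dir[PTRS_PER_PGD - 1] = 0xdeadbeef;	/* fake kernel entry */
	pgd_ctor(new_pgd);

	printf("shared kernel entry: 0x%lx\n", new_pgd[PTRS_PER_PGD - 1]);
	return 0;
}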
903
arch/sh/mm/pmb.c
Normal file
|
@@ -0,0 +1,903 @@
|
|||
/*
|
||||
* arch/sh/mm/pmb.c
|
||||
*
|
||||
* Privileged Space Mapping Buffer (PMB) Support.
|
||||
*
|
||||
* Copyright (C) 2005 - 2011 Paul Mundt
|
||||
* Copyright (C) 2010 Matt Fleming
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/sizes.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
struct pmb_entry;
|
||||
|
||||
struct pmb_entry {
|
||||
unsigned long vpn;
|
||||
unsigned long ppn;
|
||||
unsigned long flags;
|
||||
unsigned long size;
|
||||
|
||||
raw_spinlock_t lock;
|
||||
|
||||
/*
|
||||
* 0 .. NR_PMB_ENTRIES for specific entry selection, or
|
||||
* PMB_NO_ENTRY to search for a free one
|
||||
*/
|
||||
int entry;
|
||||
|
||||
/* Adjacent entry link for contiguous multi-entry mappings */
|
||||
struct pmb_entry *link;
|
||||
};
|
||||
|
||||
static struct {
|
||||
unsigned long size;
|
||||
int flag;
|
||||
} pmb_sizes[] = {
|
||||
{ .size = SZ_512M, .flag = PMB_SZ_512M, },
|
||||
{ .size = SZ_128M, .flag = PMB_SZ_128M, },
|
||||
{ .size = SZ_64M, .flag = PMB_SZ_64M, },
|
||||
{ .size = SZ_16M, .flag = PMB_SZ_16M, },
|
||||
};
|
||||
|
||||
static void pmb_unmap_entry(struct pmb_entry *, int depth);
|
||||
|
||||
static DEFINE_RWLOCK(pmb_rwlock);
|
||||
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
|
||||
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
|
||||
|
||||
static unsigned int pmb_iomapping_enabled;
|
||||
|
||||
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
|
||||
{
|
||||
return (entry & PMB_E_MASK) << PMB_E_SHIFT;
|
||||
}
|
||||
|
||||
static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
|
||||
{
|
||||
return mk_pmb_entry(entry) | PMB_ADDR;
|
||||
}
|
||||
|
||||
static __always_inline unsigned long mk_pmb_data(unsigned int entry)
|
||||
{
|
||||
return mk_pmb_entry(entry) | PMB_DATA;
|
||||
}
|
||||
|
||||
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
|
||||
{
|
||||
return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that the PMB entries match our cache configuration.
|
||||
*
|
||||
* When we are in 32-bit address extended mode, CCR.CB becomes
|
||||
* invalid, so care must be taken to manually adjust cacheable
|
||||
* translations.
|
||||
*/
|
||||
static __always_inline unsigned long pmb_cache_flags(void)
|
||||
{
|
||||
unsigned long flags = 0;
|
||||
|
||||
#if defined(CONFIG_CACHE_OFF)
|
||||
flags |= PMB_WT | PMB_UB;
|
||||
#elif defined(CONFIG_CACHE_WRITETHROUGH)
|
||||
flags |= PMB_C | PMB_WT | PMB_UB;
|
||||
#elif defined(CONFIG_CACHE_WRITEBACK)
|
||||
flags |= PMB_C;
|
||||
#endif
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert typical pgprot value to the PMB equivalent
|
||||
*/
|
||||
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
|
||||
{
|
||||
unsigned long pmb_flags = 0;
|
||||
u64 flags = pgprot_val(prot);
|
||||
|
||||
if (flags & _PAGE_CACHABLE)
|
||||
pmb_flags |= PMB_C;
|
||||
if (flags & _PAGE_WT)
|
||||
pmb_flags |= PMB_WT | PMB_UB;
|
||||
|
||||
return pmb_flags;
|
||||
}
|
||||
|
||||
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
|
||||
{
|
||||
return (b->vpn == (a->vpn + a->size)) &&
|
||||
(b->ppn == (a->ppn + a->size)) &&
|
||||
(b->flags == a->flags);
|
||||
}
|
||||
|
||||
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
|
||||
unsigned long size)
|
||||
{
|
||||
int i;
|
||||
|
||||
read_lock(&pmb_rwlock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
|
||||
struct pmb_entry *pmbe, *iter;
|
||||
unsigned long span;
|
||||
|
||||
if (!test_bit(i, pmb_map))
|
||||
continue;
|
||||
|
||||
pmbe = &pmb_entry_list[i];
|
||||
|
||||
/*
|
||||
* See if VPN and PPN are bounded by an existing mapping.
|
||||
*/
|
||||
if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
|
||||
continue;
|
||||
if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Now see if we're in range of a simple mapping.
|
||||
*/
|
||||
if (size <= pmbe->size) {
|
||||
read_unlock(&pmb_rwlock);
|
||||
return true;
|
||||
}
|
||||
|
||||
span = pmbe->size;
|
||||
|
||||
/*
|
||||
* Finally for sizes that involve compound mappings, walk
|
||||
* the chain.
|
||||
*/
|
||||
for (iter = pmbe->link; iter; iter = iter->link)
|
||||
span += iter->size;
|
||||
|
||||
/*
|
||||
* Nothing else to do if the range requirements are met.
|
||||
*/
|
||||
if (size <= span) {
|
||||
read_unlock(&pmb_rwlock);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
read_unlock(&pmb_rwlock);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool pmb_size_valid(unsigned long size)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
|
||||
if (pmb_sizes[i].size == size)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
|
||||
{
|
||||
return (addr >= P1SEG && (addr + size - 1) < P3SEG);
|
||||
}
|
||||
|
||||
static inline bool pmb_prot_valid(pgprot_t prot)
|
||||
{
|
||||
return (pgprot_val(prot) & _PAGE_USER) == 0;
|
||||
}
|
||||
|
||||
static int pmb_size_to_flags(unsigned long size)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
|
||||
if (pmb_sizes[i].size == size)
|
||||
return pmb_sizes[i].flag;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pmb_alloc_entry(void)
|
||||
{
|
||||
int pos;
|
||||
|
||||
pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
|
||||
if (pos >= 0 && pos < NR_PMB_ENTRIES)
|
||||
__set_bit(pos, pmb_map);
|
||||
else
|
||||
pos = -ENOSPC;
|
||||
|
||||
return pos;
|
||||
}
|
||||
|
||||
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
|
||||
unsigned long flags, int entry)
|
||||
{
|
||||
struct pmb_entry *pmbe;
|
||||
unsigned long irqflags;
|
||||
void *ret = NULL;
|
||||
int pos;
|
||||
|
||||
write_lock_irqsave(&pmb_rwlock, irqflags);
|
||||
|
||||
if (entry == PMB_NO_ENTRY) {
|
||||
pos = pmb_alloc_entry();
|
||||
if (unlikely(pos < 0)) {
|
||||
ret = ERR_PTR(pos);
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
if (__test_and_set_bit(entry, pmb_map)) {
|
||||
ret = ERR_PTR(-ENOSPC);
|
||||
goto out;
|
||||
}
|
||||
|
||||
pos = entry;
|
||||
}
|
||||
|
||||
write_unlock_irqrestore(&pmb_rwlock, irqflags);
|
||||
|
||||
pmbe = &pmb_entry_list[pos];
|
||||
|
||||
memset(pmbe, 0, sizeof(struct pmb_entry));
|
||||
|
||||
raw_spin_lock_init(&pmbe->lock);
|
||||
|
||||
pmbe->vpn = vpn;
|
||||
pmbe->ppn = ppn;
|
||||
pmbe->flags = flags;
|
||||
pmbe->entry = pos;
|
||||
|
||||
return pmbe;
|
||||
|
||||
out:
|
||||
write_unlock_irqrestore(&pmb_rwlock, irqflags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pmb_free(struct pmb_entry *pmbe)
|
||||
{
|
||||
__clear_bit(pmbe->entry, pmb_map);
|
||||
|
||||
pmbe->entry = PMB_NO_ENTRY;
|
||||
pmbe->link = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Must be run uncached.
|
||||
*/
|
||||
static void __set_pmb_entry(struct pmb_entry *pmbe)
|
||||
{
|
||||
unsigned long addr, data;
|
||||
|
||||
addr = mk_pmb_addr(pmbe->entry);
|
||||
data = mk_pmb_data(pmbe->entry);
|
||||
|
||||
jump_to_uncached();
|
||||
|
||||
/* Set V-bit */
|
||||
__raw_writel(pmbe->vpn | PMB_V, addr);
|
||||
__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
|
||||
|
||||
back_to_cached();
|
||||
}
|
||||
|
||||
static void __clear_pmb_entry(struct pmb_entry *pmbe)
|
||||
{
|
||||
unsigned long addr, data;
|
||||
unsigned long addr_val, data_val;
|
||||
|
||||
addr = mk_pmb_addr(pmbe->entry);
|
||||
data = mk_pmb_data(pmbe->entry);
|
||||
|
||||
addr_val = __raw_readl(addr);
|
||||
data_val = __raw_readl(data);
|
||||
|
||||
/* Clear V-bit */
|
||||
writel_uncached(addr_val & ~PMB_V, addr);
|
||||
writel_uncached(data_val & ~PMB_V, data);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static void set_pmb_entry(struct pmb_entry *pmbe)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&pmbe->lock, flags);
|
||||
__set_pmb_entry(pmbe);
|
||||
raw_spin_unlock_irqrestore(&pmbe->lock, flags);
|
||||
}
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
|
||||
unsigned long size, pgprot_t prot)
|
||||
{
|
||||
struct pmb_entry *pmbp, *pmbe;
|
||||
unsigned long orig_addr, orig_size;
|
||||
unsigned long flags, pmb_flags;
|
||||
int i, mapped;
|
||||
|
||||
if (size < SZ_16M)
|
||||
return -EINVAL;
|
||||
if (!pmb_addr_valid(vaddr, size))
|
||||
return -EFAULT;
|
||||
if (pmb_mapping_exists(vaddr, phys, size))
|
||||
return 0;
|
||||
|
||||
orig_addr = vaddr;
|
||||
orig_size = size;
|
||||
|
||||
flush_tlb_kernel_range(vaddr, vaddr + size);
|
||||
|
||||
pmb_flags = pgprot_to_pmb_flags(prot);
|
||||
pmbp = NULL;
|
||||
|
||||
do {
|
||||
for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
|
||||
if (size < pmb_sizes[i].size)
|
||||
continue;
|
||||
|
||||
pmbe = pmb_alloc(vaddr, phys, pmb_flags |
|
||||
pmb_sizes[i].flag, PMB_NO_ENTRY);
|
||||
if (IS_ERR(pmbe)) {
|
||||
pmb_unmap_entry(pmbp, mapped);
|
||||
return PTR_ERR(pmbe);
|
||||
}
|
||||
|
||||
raw_spin_lock_irqsave(&pmbe->lock, flags);
|
||||
|
||||
pmbe->size = pmb_sizes[i].size;
|
||||
|
||||
__set_pmb_entry(pmbe);
|
||||
|
||||
phys += pmbe->size;
|
||||
vaddr += pmbe->size;
|
||||
size -= pmbe->size;
|
||||
|
||||
/*
|
||||
* Link adjacent entries that span multiple PMB
|
||||
* entries for easier tear-down.
|
||||
*/
|
||||
if (likely(pmbp)) {
|
||||
raw_spin_lock_nested(&pmbp->lock,
|
||||
SINGLE_DEPTH_NESTING);
|
||||
pmbp->link = pmbe;
|
||||
raw_spin_unlock(&pmbp->lock);
|
||||
}
|
||||
|
||||
pmbp = pmbe;
|
||||
|
||||
/*
|
||||
* Instead of trying smaller sizes on every
|
||||
* iteration (even if we succeed in allocating
|
||||
* space), try using pmb_sizes[i].size again.
|
||||
*/
|
||||
i--;
|
||||
mapped++;
|
||||
|
||||
raw_spin_unlock_irqrestore(&pmbe->lock, flags);
|
||||
}
|
||||
} while (size >= SZ_16M);
|
||||
|
||||
flush_cache_vmap(orig_addr, orig_addr + orig_size);
|
||||
|
||||
return 0;
|
||||
}
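pmb_bolt_mapping() above refuses anything smaller than 16MB or outside the P1SEG..P3SEG window, and returns 0 without touching the hardware if an equivalent mapping already exists. A hypothetical caller sketch follows; the addresses, the size and the initcall level are invented for illustration and assume <linux/init.h> and <linux/sizes.h> are already in scope:

/*
 * Hypothetical sketch only: bolt a 64MB cached window of on-board memory
 * into the fixed P1/P2 area. Expect -EINVAL for undersized requests and
 * -EFAULT for addresses outside P1SEG..P3SEG.
 */
static int __init example_bolt_window(void)
{
	return pmb_bolt_mapping(0xa8000000, 0x0c000000, SZ_64M, PAGE_KERNEL);
}
arch_initcall(example_bolt_window);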

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}
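Note that pmb_remap_caller() can hand back NULL (PMB iomapping disabled, or no vm area available) as well as an ERR_PTR(), so a caller has to check for both. Elsewhere in this tree the 32-bit ioremap path is the expected caller; the direct-call sketch below is hypothetical, with a made-up physical address and size:

/*
 * Hypothetical sketch only: map a large (>= 16MB) device window through
 * the PMB. __builtin_return_address(0) mirrors what the ioremap path
 * passes as "caller" for vmallocinfo attribution.
 */
static void __iomem *example_remap_window(void)
{
	void __iomem *p;

	p = pmb_remap_caller(0x10000000, SZ_64M, PAGE_KERNEL_NOCACHE,
			     __builtin_return_address(0));
	if (IS_ERR_OR_NULL(p))
		return NULL;

	return p;
}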
|
||||
|
||||
int pmb_unmap(void __iomem *addr)
|
||||
{
|
||||
struct pmb_entry *pmbe = NULL;
|
||||
unsigned long vaddr = (unsigned long __force)addr;
|
||||
int i, found = 0;
|
||||
|
||||
read_lock(&pmb_rwlock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
|
||||
if (test_bit(i, pmb_map)) {
|
||||
pmbe = &pmb_entry_list[i];
|
||||
if (pmbe->vpn == vaddr) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
read_unlock(&pmb_rwlock);
|
||||
|
||||
if (found) {
|
||||
pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
|
||||
{
|
||||
do {
|
||||
struct pmb_entry *pmblink = pmbe;
|
||||
|
||||
/*
|
||||
* We may be called before this pmb_entry has been
|
||||
* entered into the PMB table via set_pmb_entry(), but
|
||||
* that's OK because we've allocated a unique slot for
|
||||
* this entry in pmb_alloc() (even if we haven't filled
|
||||
* it yet).
|
||||
*
|
||||
* Therefore, calling __clear_pmb_entry() is safe as no
|
||||
* other mapping can be using that slot.
|
||||
*/
|
||||
__clear_pmb_entry(pmbe);
|
||||
|
||||
flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
|
||||
|
||||
pmbe = pmblink->link;
|
||||
|
||||
pmb_free(pmblink);
|
||||
} while (pmbe && --depth);
|
||||
}
|
||||
|
||||
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (unlikely(!pmbe))
|
||||
return;
|
||||
|
||||
write_lock_irqsave(&pmb_rwlock, flags);
|
||||
__pmb_unmap_entry(pmbe, depth);
|
||||
write_unlock_irqrestore(&pmb_rwlock, flags);
|
||||
}
|
||||
|
||||
static void __init pmb_notify(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
pr_info("PMB: boot mappings:\n");
|
||||
|
||||
read_lock(&pmb_rwlock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
|
||||
struct pmb_entry *pmbe;
|
||||
|
||||
if (!test_bit(i, pmb_map))
|
||||
continue;
|
||||
|
||||
pmbe = &pmb_entry_list[i];
|
||||
|
||||
pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
|
||||
pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
|
||||
pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
|
||||
}
|
||||
|
||||
read_unlock(&pmb_rwlock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sync our software copy of the PMB mappings with those in hardware. The
|
||||
* mappings in the hardware PMB were either set up by the bootloader or
|
||||
* very early on by the kernel.
|
||||
*/
|
||||
static void __init pmb_synchronize(void)
|
||||
{
|
||||
struct pmb_entry *pmbp = NULL;
|
||||
int i, j;
|
||||
|
||||
/*
|
||||
* Run through the initial boot mappings, log the established
|
||||
* ones, and blow away anything that falls outside of the valid
|
||||
* PPN range. Specifically, we only care about existing mappings
|
||||
* that impact the cached/uncached sections.
|
||||
*
|
||||
* Note that touching these can be a bit of a minefield; the boot
|
||||
* loader can establish multi-page mappings with the same caching
|
||||
* attributes, so we need to ensure that we aren't modifying a
|
||||
* mapping that we're presently executing from, or may execute
|
||||
* from in the case of straddling page boundaries.
|
||||
*
|
||||
* In the future we will have to tidy up after the boot loader by
|
||||
* jumping between the cached and uncached mappings and tearing
|
||||
* down alternating mappings while executing from the other.
|
||||
*/
|
||||
for (i = 0; i < NR_PMB_ENTRIES; i++) {
|
||||
unsigned long addr, data;
|
||||
unsigned long addr_val, data_val;
|
||||
unsigned long ppn, vpn, flags;
|
||||
unsigned long irqflags;
|
||||
unsigned int size;
|
||||
struct pmb_entry *pmbe;
|
||||
|
||||
addr = mk_pmb_addr(i);
|
||||
data = mk_pmb_data(i);
|
||||
|
||||
addr_val = __raw_readl(addr);
|
||||
data_val = __raw_readl(data);
|
||||
|
||||
/*
|
||||
* Skip over any bogus entries
|
||||
*/
|
||||
if (!(data_val & PMB_V) || !(addr_val & PMB_V))
|
||||
continue;
|
||||
|
||||
ppn = data_val & PMB_PFN_MASK;
|
||||
vpn = addr_val & PMB_PFN_MASK;
|
||||
|
||||
/*
|
||||
* Only preserve in-range mappings.
|
||||
*/
|
||||
if (!pmb_ppn_in_range(ppn)) {
|
||||
/*
|
||||
* Invalidate anything out of bounds.
|
||||
*/
|
||||
writel_uncached(addr_val & ~PMB_V, addr);
|
||||
writel_uncached(data_val & ~PMB_V, data);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the caching attributes if necessary
|
||||
*/
|
||||
if (data_val & PMB_C) {
|
||||
data_val &= ~PMB_CACHE_MASK;
|
||||
data_val |= pmb_cache_flags();
|
||||
|
||||
writel_uncached(data_val, data);
|
||||
}
|
||||
|
||||
size = data_val & PMB_SZ_MASK;
|
||||
flags = size | (data_val & PMB_CACHE_MASK);
|
||||
|
||||
pmbe = pmb_alloc(vpn, ppn, flags, i);
|
||||
if (IS_ERR(pmbe)) {
|
||||
WARN_ON_ONCE(1);
|
||||
continue;
|
||||
}
|
||||
|
||||
raw_spin_lock_irqsave(&pmbe->lock, irqflags);
|
||||
|
||||
for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
|
||||
if (pmb_sizes[j].flag == size)
|
||||
pmbe->size = pmb_sizes[j].size;
|
||||
|
||||
if (pmbp) {
|
||||
raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
|
||||
/*
|
||||
* Compare the previous entry against the current one to
|
||||
* see if the entries span a contiguous mapping. If so,
|
||||
* setup the entry links accordingly. Compound mappings
|
||||
* are later coalesced.
|
||||
*/
|
||||
if (pmb_can_merge(pmbp, pmbe))
|
||||
pmbp->link = pmbe;
|
||||
raw_spin_unlock(&pmbp->lock);
|
||||
}
|
||||
|
||||
pmbp = pmbe;
|
||||
|
||||
raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
|
||||
}
|
||||
}
|
||||
|
||||
static void __init pmb_merge(struct pmb_entry *head)
|
||||
{
|
||||
unsigned long span, newsize;
|
||||
struct pmb_entry *tail;
|
||||
int i = 1, depth = 0;
|
||||
|
||||
span = newsize = head->size;
|
||||
|
||||
tail = head->link;
|
||||
while (tail) {
|
||||
span += tail->size;
|
||||
|
||||
if (pmb_size_valid(span)) {
|
||||
newsize = span;
|
||||
depth = i;
|
||||
}
|
||||
|
||||
/* This is the end of the line.. */
|
||||
if (!tail->link)
|
||||
break;
|
||||
|
||||
tail = tail->link;
|
||||
i++;
|
||||
}
|
||||
|
||||
/*
|
||||
* The merged page size must be valid.
|
||||
*/
|
||||
if (!depth || !pmb_size_valid(newsize))
|
||||
return;
|
||||
|
||||
head->flags &= ~PMB_SZ_MASK;
|
||||
head->flags |= pmb_size_to_flags(newsize);
|
||||
|
||||
head->size = newsize;
|
||||
|
||||
__pmb_unmap_entry(head->link, depth);
|
||||
__set_pmb_entry(head);
|
||||
}
|
||||
|
||||
static void __init pmb_coalesce(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
write_lock_irqsave(&pmb_rwlock, flags);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
|
||||
struct pmb_entry *pmbe;
|
||||
|
||||
if (!test_bit(i, pmb_map))
|
||||
continue;
|
||||
|
||||
pmbe = &pmb_entry_list[i];
|
||||
|
||||
/*
|
||||
* We're only interested in compound mappings
|
||||
*/
|
||||
if (!pmbe->link)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Nothing to do if it already uses the largest possible
|
||||
* page size.
|
||||
*/
|
||||
if (pmbe->size == SZ_512M)
|
||||
continue;
|
||||
|
||||
pmb_merge(pmbe);
|
||||
}
|
||||
|
||||
write_unlock_irqrestore(&pmb_rwlock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_UNCACHED_MAPPING
|
||||
static void __init pmb_resize(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* If the uncached mapping was constructed by the kernel, it will
|
||||
* already be a reasonable size.
|
||||
*/
|
||||
if (uncached_size == SZ_16M)
|
||||
return;
|
||||
|
||||
read_lock(&pmb_rwlock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
|
||||
struct pmb_entry *pmbe;
|
||||
unsigned long flags;
|
||||
|
||||
if (!test_bit(i, pmb_map))
|
||||
continue;
|
||||
|
||||
pmbe = &pmb_entry_list[i];
|
||||
|
||||
if (pmbe->vpn != uncached_start)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Found it, now resize it.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&pmbe->lock, flags);
|
||||
|
||||
pmbe->size = SZ_16M;
|
||||
pmbe->flags &= ~PMB_SZ_MASK;
|
||||
pmbe->flags |= pmb_size_to_flags(pmbe->size);
|
||||
|
||||
uncached_resize(pmbe->size);
|
||||
|
||||
__set_pmb_entry(pmbe);
|
||||
|
||||
raw_spin_unlock_irqrestore(&pmbe->lock, flags);
|
||||
}
|
||||
|
||||
read_unlock(&pmb_rwlock);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);
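Given the strstr() check above, PMB-backed ioremap is opt-in from the kernel command line; appending, for example:

	pmb=iomap

sets pmb_iomapping_enabled, and any other value after "pmb=" is currently ignored.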
|
||||
|
||||
void __init pmb_init(void)
|
||||
{
|
||||
/* Synchronize software state */
|
||||
pmb_synchronize();
|
||||
|
||||
/* Attempt to combine compound mappings */
|
||||
pmb_coalesce();
|
||||
|
||||
#ifdef CONFIG_UNCACHED_MAPPING
|
||||
/* Resize initial mappings, if necessary */
|
||||
pmb_resize();
|
||||
#endif
|
||||
|
||||
/* Log them */
|
||||
pmb_notify();
|
||||
|
||||
writel_uncached(0, PMB_IRMCR);
|
||||
|
||||
/* Flush out the TLB */
|
||||
local_flush_tlb_all();
|
||||
ctrl_barrier();
|
||||
}
|
||||
|
||||
bool __in_29bit_mode(void)
|
||||
{
|
||||
return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
|
||||
}
|
||||
|
||||
static int pmb_seq_show(struct seq_file *file, void *iter)
|
||||
{
|
||||
int i;
|
||||
|
||||
seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
|
||||
"CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
|
||||
seq_printf(file, "ety vpn ppn size flags\n");
|
||||
|
||||
for (i = 0; i < NR_PMB_ENTRIES; i++) {
|
||||
unsigned long addr, data;
|
||||
unsigned int size;
|
||||
char *sz_str = NULL;
|
||||
|
||||
addr = __raw_readl(mk_pmb_addr(i));
|
||||
data = __raw_readl(mk_pmb_data(i));
|
||||
|
||||
size = data & PMB_SZ_MASK;
|
||||
sz_str = (size == PMB_SZ_16M) ? " 16MB":
|
||||
(size == PMB_SZ_64M) ? " 64MB":
|
||||
(size == PMB_SZ_128M) ? "128MB":
|
||||
"512MB";
|
||||
|
||||
/* 02: V 0x88 0x08 128MB C CB B */
|
||||
seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
|
||||
i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
|
||||
(addr >> 24) & 0xff, (data >> 24) & 0xff,
|
||||
sz_str, (data & PMB_C) ? 'C' : ' ',
|
||||
(data & PMB_WT) ? "WT" : "CB",
|
||||
(data & PMB_UB) ? "UB" : " B");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pmb_debugfs_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, pmb_seq_show, NULL);
|
||||
}
|
||||
|
||||
static const struct file_operations pmb_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = pmb_debugfs_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int __init pmb_debugfs_init(void)
|
||||
{
|
||||
struct dentry *dentry;
|
||||
|
||||
dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
|
||||
arch_debugfs_dir, NULL, &pmb_debugfs_fops);
|
||||
if (!dentry)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(pmb_debugfs_init);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static void pmb_syscore_resume(void)
|
||||
{
|
||||
struct pmb_entry *pmbe;
|
||||
int i;
|
||||
|
||||
read_lock(&pmb_rwlock);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
|
||||
if (test_bit(i, pmb_map)) {
|
||||
pmbe = &pmb_entry_list[i];
|
||||
set_pmb_entry(pmbe);
|
||||
}
|
||||
}
|
||||
|
||||
read_unlock(&pmb_rwlock);
|
||||
}
|
||||
|
||||
static struct syscore_ops pmb_syscore_ops = {
|
||||
.resume = pmb_syscore_resume,
|
||||
};
|
||||
|
||||
static int __init pmb_sysdev_init(void)
|
||||
{
|
||||
register_syscore_ops(&pmb_syscore_ops);
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(pmb_sysdev_init);
|
||||
#endif
|
35
arch/sh/mm/sram.c
Normal file
@ -0,0 +1,35 @@
/*
 * SRAM pool for tiny memories not otherwise managed.
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/sram.h>

/*
 * This provides a standard SRAM pool for tiny memories that can be
 * added either by the CPU or the platform code. Typical SRAM sizes
 * to be inserted in to the pool will generally be less than the page
 * size, with anything more reasonably sized handled as a NUMA memory
 * node.
 */
struct gen_pool *sram_pool;

static int __init sram_pool_init(void)
{
	/*
	 * This is a global pool, we don't care about node locality.
	 */
	sram_pool = gen_pool_create(1, -1);
	if (unlikely(!sram_pool))
		return -ENOMEM;

	return 0;
}
core_initcall(sram_pool_init);
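gen_pool_create(1, -1) above builds an empty, byte-granular, node-agnostic pool; platform or CPU code still has to donate SRAM to it before anything can be allocated. A hypothetical sketch using the stock genalloc API (the SRAM base address and sizes are invented, and <linux/genalloc.h> is assumed to be visible via asm/sram.h):

/*
 * Hypothetical sketch only: donate a small on-chip SRAM block to the
 * global pool and carve a scratch buffer out of it.
 */
static int __init example_use_sram_pool(void)
{
	unsigned long buf;

	if (gen_pool_add(sram_pool, 0xfd800000, 0x1000, -1))
		return -ENOMEM;

	buf = gen_pool_alloc(sram_pool, 256);	/* 256-byte scratch buffer */
	if (!buf)
		return -ENOMEM;

	gen_pool_free(sram_pool, buf, 256);	/* hand it back when done */
	return 0;
}
device_initcall(example_use_sram_pool);		/* runs after core_initcall above */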
172
arch/sh/mm/tlb-debugfs.c
Normal file
@ -0,0 +1,172 @@
/*
|
||||
* arch/sh/mm/tlb-debugfs.c
|
||||
*
|
||||
* debugfs ops for SH-4 ITLB/UTLBs.
|
||||
*
|
||||
* Copyright (C) 2010 Matt Fleming
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
enum tlb_type {
|
||||
TLB_TYPE_ITLB,
|
||||
TLB_TYPE_UTLB,
|
||||
};
|
||||
|
||||
static struct {
|
||||
int bits;
|
||||
const char *size;
|
||||
} tlb_sizes[] = {
|
||||
{ 0x0, " 1KB" },
|
||||
{ 0x1, " 4KB" },
|
||||
{ 0x2, " 8KB" },
|
||||
{ 0x4, " 64KB" },
|
||||
{ 0x5, "256KB" },
|
||||
{ 0x7, " 1MB" },
|
||||
{ 0x8, " 4MB" },
|
||||
{ 0xc, " 64MB" },
|
||||
};
|
||||
|
||||
static int tlb_seq_show(struct seq_file *file, void *iter)
|
||||
{
|
||||
unsigned int tlb_type = (unsigned int)file->private;
|
||||
unsigned long addr1, addr2, data1, data2;
|
||||
unsigned long flags;
|
||||
unsigned long mmucr;
|
||||
unsigned int nentries, entry;
|
||||
unsigned int urb;
|
||||
|
||||
mmucr = __raw_readl(MMUCR);
|
||||
if ((mmucr & 0x1) == 0) {
|
||||
seq_printf(file, "address translation disabled\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (tlb_type == TLB_TYPE_ITLB) {
|
||||
addr1 = MMU_ITLB_ADDRESS_ARRAY;
|
||||
addr2 = MMU_ITLB_ADDRESS_ARRAY2;
|
||||
data1 = MMU_ITLB_DATA_ARRAY;
|
||||
data2 = MMU_ITLB_DATA_ARRAY2;
|
||||
nentries = 4;
|
||||
} else {
|
||||
addr1 = MMU_UTLB_ADDRESS_ARRAY;
|
||||
addr2 = MMU_UTLB_ADDRESS_ARRAY2;
|
||||
data1 = MMU_UTLB_DATA_ARRAY;
|
||||
data2 = MMU_UTLB_DATA_ARRAY2;
|
||||
nentries = 64;
|
||||
}
|
||||
|
||||
local_irq_save(flags);
|
||||
jump_to_uncached();
|
||||
|
||||
urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
|
||||
|
||||
/* Make the "entry >= urb" test fail. */
|
||||
if (urb == 0)
|
||||
urb = MMUCR_URB_NENTRIES + 1;
|
||||
|
||||
if (tlb_type == TLB_TYPE_ITLB) {
|
||||
addr1 = MMU_ITLB_ADDRESS_ARRAY;
|
||||
addr2 = MMU_ITLB_ADDRESS_ARRAY2;
|
||||
data1 = MMU_ITLB_DATA_ARRAY;
|
||||
data2 = MMU_ITLB_DATA_ARRAY2;
|
||||
nentries = 4;
|
||||
} else {
|
||||
addr1 = MMU_UTLB_ADDRESS_ARRAY;
|
||||
addr2 = MMU_UTLB_ADDRESS_ARRAY2;
|
||||
data1 = MMU_UTLB_DATA_ARRAY;
|
||||
data2 = MMU_UTLB_DATA_ARRAY2;
|
||||
nentries = 64;
|
||||
}
|
||||
|
||||
seq_printf(file, "entry: vpn ppn asid size valid wired\n");
|
||||
|
||||
for (entry = 0; entry < nentries; entry++) {
|
||||
unsigned long vpn, ppn, asid, size;
|
||||
unsigned long valid;
|
||||
unsigned long val;
|
||||
const char *sz = " ?";
|
||||
int i;
|
||||
|
||||
val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
|
||||
ctrl_barrier();
|
||||
vpn = val & 0xfffffc00;
|
||||
valid = val & 0x100;
|
||||
|
||||
val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
|
||||
ctrl_barrier();
|
||||
asid = val & MMU_CONTEXT_ASID_MASK;
|
||||
|
||||
val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
|
||||
ctrl_barrier();
|
||||
ppn = (val & 0x0ffffc00) << 4;
|
||||
|
||||
val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
|
||||
ctrl_barrier();
|
||||
size = (val & 0xf0) >> 4;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
|
||||
if (tlb_sizes[i].bits == size)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i != ARRAY_SIZE(tlb_sizes))
|
||||
sz = tlb_sizes[i].size;
|
||||
|
||||
seq_printf(file, "%2d: 0x%08lx 0x%08lx %5lu %s %s %s\n",
|
||||
entry, vpn, ppn, asid,
|
||||
sz, valid ? "V" : "-",
|
||||
(urb <= entry) ? "W" : "-");
|
||||
}
|
||||
|
||||
back_to_cached();
|
||||
local_irq_restore(flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tlb_debugfs_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, tlb_seq_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations tlb_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = tlb_debugfs_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int __init tlb_debugfs_init(void)
|
||||
{
|
||||
struct dentry *itlb, *utlb;
|
||||
|
||||
itlb = debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
|
||||
(unsigned int *)TLB_TYPE_ITLB,
|
||||
&tlb_debugfs_fops);
|
||||
if (unlikely(!itlb))
|
||||
return -ENOMEM;
|
||||
|
||||
utlb = debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
|
||||
(unsigned int *)TLB_TYPE_UTLB,
|
||||
&tlb_debugfs_fops);
|
||||
if (unlikely(!utlb)) {
|
||||
debugfs_remove(itlb);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(tlb_debugfs_init);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
106
arch/sh/mm/tlb-pteaex.c
Normal file
@ -0,0 +1,106 @@
/*
|
||||
* arch/sh/mm/tlb-pteaex.c
|
||||
*
|
||||
* TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
|
||||
*
|
||||
* Copyright (C) 2009 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
|
||||
{
|
||||
unsigned long flags, pteval, vpn;
|
||||
|
||||
/*
|
||||
* Handle debugger faulting in for debugee.
|
||||
*/
|
||||
if (vma && current->active_mm != vma->vm_mm)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
/* Set PTEH register */
|
||||
vpn = address & MMU_VPN_MASK;
|
||||
__raw_writel(vpn, MMU_PTEH);
|
||||
|
||||
/* Set PTEAEX */
|
||||
__raw_writel(get_asid(), MMU_PTEAEX);
|
||||
|
||||
pteval = pte.pte_low;
|
||||
|
||||
/* Set PTEA register */
|
||||
#ifdef CONFIG_X2TLB
|
||||
/*
|
||||
* For the extended mode TLB this is trivial, only the ESZ and
|
||||
* EPR bits need to be written out to PTEA, with the remainder of
|
||||
* the protection bits (with the exception of the compat-mode SZ
|
||||
* and PR bits, which are cleared) being written out in PTEL.
|
||||
*/
|
||||
__raw_writel(pte.pte_high, MMU_PTEA);
|
||||
#endif
|
||||
|
||||
/* Set PTEL register */
|
||||
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
|
||||
#ifdef CONFIG_CACHE_WRITETHROUGH
|
||||
pteval |= _PAGE_WT;
|
||||
#endif
|
||||
/* conveniently, we want all the software flags to be 0 anyway */
|
||||
__raw_writel(pteval, MMU_PTEL);
|
||||
|
||||
/* Load the TLB */
|
||||
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
|
||||
* data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
|
||||
* address arrays. In compat mode the second array is inaccessible, while
|
||||
* in extended mode, the legacy 8-bit ASID field in address array 1 has
|
||||
* undefined behaviour.
|
||||
*/
|
||||
void local_flush_tlb_one(unsigned long asid, unsigned long page)
|
||||
{
|
||||
jump_to_uncached();
|
||||
__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
|
||||
__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
|
||||
__raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
|
||||
__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
|
||||
back_to_cached();
|
||||
}
|
||||
|
||||
void local_flush_tlb_all(void)
|
||||
{
|
||||
unsigned long flags, status;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Flush all the TLB.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
jump_to_uncached();
|
||||
|
||||
status = __raw_readl(MMUCR);
|
||||
status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
|
||||
|
||||
if (status == 0)
|
||||
status = MMUCR_URB_NENTRIES;
|
||||
|
||||
for (i = 0; i < status; i++)
|
||||
__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
|
||||
|
||||
back_to_cached();
|
||||
ctrl_barrier();
|
||||
local_irq_restore(flags);
|
||||
}
|
97
arch/sh/mm/tlb-sh3.c
Normal file
@ -0,0 +1,97 @@
/*
|
||||
* arch/sh/mm/tlb-sh3.c
|
||||
*
|
||||
* SH-3 specific TLB operations
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka
|
||||
* Copyright (C) 2002 Paul Mundt
|
||||
*
|
||||
* Released under the terms of the GNU GPL v2.0.
|
||||
*/
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
|
||||
{
|
||||
unsigned long flags, pteval, vpn;
|
||||
|
||||
/*
|
||||
* Handle debugger faulting in for debugee.
|
||||
*/
|
||||
if (vma && current->active_mm != vma->vm_mm)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
/* Set PTEH register */
|
||||
vpn = (address & MMU_VPN_MASK) | get_asid();
|
||||
__raw_writel(vpn, MMU_PTEH);
|
||||
|
||||
pteval = pte_val(pte);
|
||||
|
||||
/* Set PTEL register */
|
||||
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
|
||||
/* conveniently, we want all the software flags to be 0 anyway */
|
||||
__raw_writel(pteval, MMU_PTEL);
|
||||
|
||||
/* Load the TLB */
|
||||
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void local_flush_tlb_one(unsigned long asid, unsigned long page)
|
||||
{
|
||||
unsigned long addr, data;
|
||||
int i, ways = MMU_NTLB_WAYS;
|
||||
|
||||
/*
|
||||
* NOTE: PTEH.ASID should be set to this MM
|
||||
* _AND_ we need to write ASID to the array.
|
||||
*
|
||||
* It would be simple if we didn't need to set PTEH.ASID...
|
||||
*/
|
||||
addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
|
||||
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
|
||||
|
||||
if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
|
||||
addr |= MMU_PAGE_ASSOC_BIT;
|
||||
ways = 1; /* we already know the way .. */
|
||||
}
|
||||
|
||||
for (i = 0; i < ways; i++)
|
||||
__raw_writel(data, addr + (i << 8));
|
||||
}
|
||||
|
||||
void local_flush_tlb_all(void)
|
||||
{
|
||||
unsigned long flags, status;
|
||||
|
||||
/*
|
||||
* Flush all the TLB.
|
||||
*
|
||||
* Write to the MMU control register's bit:
|
||||
* TF-bit for SH-3, TI-bit for SH-4.
|
||||
* It's same position, bit #2.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
status = __raw_readl(MMUCR);
|
||||
status |= 0x04;
|
||||
__raw_writel(status, MMUCR);
|
||||
ctrl_barrier();
|
||||
local_irq_restore(flags);
|
||||
}
|
109
arch/sh/mm/tlb-sh4.c
Normal file
@ -0,0 +1,109 @@
/*
|
||||
* arch/sh/mm/tlb-sh4.c
|
||||
*
|
||||
* SH-4 specific TLB operations
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka
|
||||
* Copyright (C) 2002 - 2007 Paul Mundt
|
||||
*
|
||||
* Released under the terms of the GNU GPL v2.0.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
|
||||
{
|
||||
unsigned long flags, pteval, vpn;
|
||||
|
||||
/*
|
||||
* Handle debugger faulting in for debugee.
|
||||
*/
|
||||
if (vma && current->active_mm != vma->vm_mm)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
/* Set PTEH register */
|
||||
vpn = (address & MMU_VPN_MASK) | get_asid();
|
||||
__raw_writel(vpn, MMU_PTEH);
|
||||
|
||||
pteval = pte.pte_low;
|
||||
|
||||
/* Set PTEA register */
|
||||
#ifdef CONFIG_X2TLB
|
||||
/*
|
||||
* For the extended mode TLB this is trivial, only the ESZ and
|
||||
* EPR bits need to be written out to PTEA, with the remainder of
|
||||
* the protection bits (with the exception of the compat-mode SZ
|
||||
* and PR bits, which are cleared) being written out in PTEL.
|
||||
*/
|
||||
__raw_writel(pte.pte_high, MMU_PTEA);
|
||||
#else
|
||||
if (cpu_data->flags & CPU_HAS_PTEA) {
|
||||
/* The last 3 bits and the first one of pteval contains
|
||||
* the PTEA timing control and space attribute bits
|
||||
*/
|
||||
__raw_writel(copy_ptea_attributes(pteval), MMU_PTEA);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Set PTEL register */
|
||||
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
|
||||
#ifdef CONFIG_CACHE_WRITETHROUGH
|
||||
pteval |= _PAGE_WT;
|
||||
#endif
|
||||
/* conveniently, we want all the software flags to be 0 anyway */
|
||||
__raw_writel(pteval, MMU_PTEL);
|
||||
|
||||
/* Load the TLB */
|
||||
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void local_flush_tlb_one(unsigned long asid, unsigned long page)
|
||||
{
|
||||
unsigned long addr, data;
|
||||
|
||||
/*
|
||||
* NOTE: PTEH.ASID should be set to this MM
|
||||
* _AND_ we need to write ASID to the array.
|
||||
*
|
||||
* It would be simple if we didn't need to set PTEH.ASID...
|
||||
*/
|
||||
addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
|
||||
data = page | asid; /* VALID bit is off */
|
||||
jump_to_uncached();
|
||||
__raw_writel(data, addr);
|
||||
back_to_cached();
|
||||
}
|
||||
|
||||
void local_flush_tlb_all(void)
|
||||
{
|
||||
unsigned long flags, status;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Flush all the TLB.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
jump_to_uncached();
|
||||
|
||||
status = __raw_readl(MMUCR);
|
||||
status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
|
||||
|
||||
if (status == 0)
|
||||
status = MMUCR_URB_NENTRIES;
|
||||
|
||||
for (i = 0; i < status; i++)
|
||||
__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
|
||||
|
||||
back_to_cached();
|
||||
ctrl_barrier();
|
||||
local_irq_restore(flags);
|
||||
}
|
224
arch/sh/mm/tlb-sh5.c
Normal file
@ -0,0 +1,224 @@
/*
|
||||
* arch/sh/mm/tlb-sh5.c
|
||||
*
|
||||
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
|
||||
* Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com>
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
/**
|
||||
* sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
|
||||
*/
|
||||
int sh64_tlb_init(void)
|
||||
{
|
||||
/* Assign some sane DTLB defaults */
|
||||
cpu_data->dtlb.entries = 64;
|
||||
cpu_data->dtlb.step = 0x10;
|
||||
|
||||
cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step;
|
||||
cpu_data->dtlb.next = cpu_data->dtlb.first;
|
||||
|
||||
cpu_data->dtlb.last = DTLB_FIXED |
|
||||
((cpu_data->dtlb.entries - 1) *
|
||||
cpu_data->dtlb.step);
|
||||
|
||||
/* And again for the ITLB */
|
||||
cpu_data->itlb.entries = 64;
|
||||
cpu_data->itlb.step = 0x10;
|
||||
|
||||
cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step;
|
||||
cpu_data->itlb.next = cpu_data->itlb.first;
|
||||
cpu_data->itlb.last = ITLB_FIXED |
|
||||
((cpu_data->itlb.entries - 1) *
|
||||
cpu_data->itlb.step);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* sh64_next_free_dtlb_entry - Find the next available DTLB entry
|
||||
*/
|
||||
unsigned long long sh64_next_free_dtlb_entry(void)
|
||||
{
|
||||
return cpu_data->dtlb.next;
|
||||
}
|
||||
|
||||
/**
|
||||
* sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
|
||||
*/
|
||||
unsigned long long sh64_get_wired_dtlb_entry(void)
|
||||
{
|
||||
unsigned long long entry = sh64_next_free_dtlb_entry();
|
||||
|
||||
cpu_data->dtlb.first += cpu_data->dtlb.step;
|
||||
cpu_data->dtlb.next += cpu_data->dtlb.step;
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
/**
|
||||
* sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
|
||||
*
|
||||
* @entry: Address of TLB slot.
|
||||
*
|
||||
* Works like a stack, last one to allocate must be first one to free.
|
||||
*/
|
||||
int sh64_put_wired_dtlb_entry(unsigned long long entry)
|
||||
{
|
||||
__flush_tlb_slot(entry);
|
||||
|
||||
/*
|
||||
* We don't do any particularly useful tracking of wired entries,
|
||||
* so this approach works like a stack .. last one to be allocated
|
||||
* has to be the first one to be freed.
|
||||
*
|
||||
* We could potentially load wired entries into a list and work on
|
||||
* rebalancing the list periodically (which also entails moving the
|
||||
* contents of a TLB entry) .. though I have a feeling that this is
|
||||
* more trouble than it's worth.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Entry must be valid .. we don't want any ITLB addresses!
|
||||
*/
|
||||
if (entry <= DTLB_FIXED)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Next, check if we're within range to be freed. (ie, must be the
|
||||
* entry beneath the first 'free' entry!
|
||||
*/
|
||||
if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
|
||||
return -EINVAL;
|
||||
|
||||
/* If we are, then bring this entry back into the list */
|
||||
cpu_data->dtlb.first -= cpu_data->dtlb.step;
|
||||
cpu_data->dtlb.next = entry;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* sh64_setup_tlb_slot - Load up a translation in a wired slot.
|
||||
*
|
||||
* @config_addr: Address of TLB slot.
|
||||
* @eaddr: Virtual address.
|
||||
* @asid: Address Space Identifier.
|
||||
* @paddr: Physical address.
|
||||
*
|
||||
* Load up a virtual<->physical translation for @eaddr<->@paddr in the
|
||||
* pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
|
||||
*/
|
||||
void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
|
||||
unsigned long asid, unsigned long paddr)
|
||||
{
|
||||
unsigned long long pteh, ptel;
|
||||
|
||||
pteh = neff_sign_extend(eaddr);
|
||||
pteh &= PAGE_MASK;
|
||||
pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
|
||||
ptel = neff_sign_extend(paddr);
|
||||
ptel &= PAGE_MASK;
|
||||
ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
|
||||
|
||||
asm volatile("putcfg %0, 1, %1\n\t"
|
||||
"putcfg %0, 0, %2\n"
|
||||
: : "r" (config_addr), "r" (ptel), "r" (pteh));
|
||||
}
|
||||
|
||||
/**
|
||||
* sh64_teardown_tlb_slot - Teardown a translation.
|
||||
*
|
||||
* @config_addr: Address of TLB slot.
|
||||
*
|
||||
* Teardown any existing mapping in the TLB slot @config_addr.
|
||||
*/
|
||||
void sh64_teardown_tlb_slot(unsigned long long config_addr)
|
||||
__attribute__ ((alias("__flush_tlb_slot")));
|
||||
|
||||
static int dtlb_entry;
|
||||
static unsigned long long dtlb_entries[64];
|
||||
|
||||
void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
|
||||
{
|
||||
unsigned long long entry;
|
||||
unsigned long paddr, flags;
|
||||
|
||||
BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
entry = sh64_get_wired_dtlb_entry();
|
||||
dtlb_entries[dtlb_entry++] = entry;
|
||||
|
||||
paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
|
||||
paddr &= ~PAGE_MASK;
|
||||
|
||||
sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void tlb_unwire_entry(void)
|
||||
{
|
||||
unsigned long long entry;
|
||||
unsigned long flags;
|
||||
|
||||
BUG_ON(!dtlb_entry);
|
||||
|
||||
local_irq_save(flags);
|
||||
entry = dtlb_entries[dtlb_entry--];
|
||||
|
||||
sh64_teardown_tlb_slot(entry);
|
||||
sh64_put_wired_dtlb_entry(entry);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
|
||||
{
|
||||
unsigned long long ptel;
|
||||
unsigned long long pteh=0;
|
||||
struct tlb_info *tlbp;
|
||||
unsigned long long next;
|
||||
unsigned int fault_code = get_thread_fault_code();
|
||||
|
||||
/* Get PTEL first */
|
||||
ptel = pte.pte_low;
|
||||
|
||||
/*
|
||||
* Set PTEH register
|
||||
*/
|
||||
pteh = neff_sign_extend(address & MMU_VPN_MASK);
|
||||
|
||||
/* Set the ASID. */
|
||||
pteh |= get_asid() << PTEH_ASID_SHIFT;
|
||||
pteh |= PTEH_VALID;
|
||||
|
||||
/* Set PTEL register, set_pte has performed the sign extension */
|
||||
ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
|
||||
|
||||
if (fault_code & FAULT_CODE_ITLB)
|
||||
tlbp = &cpu_data->itlb;
|
||||
else
|
||||
tlbp = &cpu_data->dtlb;
|
||||
|
||||
next = tlbp->next;
|
||||
__flush_tlb_slot(next);
|
||||
asm volatile ("putcfg %0,1,%2\n\n\t"
|
||||
"putcfg %0,0,%1\n"
|
||||
: : "r" (next), "r" (pteh), "r" (ptel) );
|
||||
|
||||
next += TLB_STEP;
|
||||
if (next > tlbp->last)
|
||||
next = tlbp->first;
|
||||
tlbp->next = next;
|
||||
}
|
93
arch/sh/mm/tlb-urb.c
Normal file
@ -0,0 +1,93 @@
/*
 * arch/sh/mm/tlb-urb.c
 *
 * TLB entry wiring helpers for URB-equipped parts.
 *
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

/*
 * Load the entry for 'addr' into the TLB and wire the entry.
 */
void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long status, flags;
	int urb;

	local_irq_save(flags);

	status = __raw_readl(MMUCR);
	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
	status &= ~MMUCR_URC;

	/*
	 * Make sure we're not trying to wire the last TLB entry slot.
	 */
	BUG_ON(!--urb);

	urb = urb % MMUCR_URB_NENTRIES;

	/*
	 * Insert this entry into the highest non-wired TLB slot (via
	 * the URC field).
	 */
	status |= (urb << MMUCR_URC_SHIFT);
	__raw_writel(status, MMUCR);
	ctrl_barrier();

	/* Load the entry into the TLB */
	__update_tlb(vma, addr, pte);

	/* ... and wire it up. */
	status = __raw_readl(MMUCR);

	status &= ~MMUCR_URB;
	status |= (urb << MMUCR_URB_SHIFT);

	__raw_writel(status, MMUCR);
	ctrl_barrier();

	local_irq_restore(flags);
}

/*
 * Unwire the last wired TLB entry.
 *
 * It should also be noted that it is not possible to wire and unwire
 * TLB entries in an arbitrary order. If you wire TLB entry N, followed
 * by entry N+1, you must unwire entry N+1 first, then entry N. In this
 * respect, it works like a stack or LIFO queue.
 */
void tlb_unwire_entry(void)
{
	unsigned long status, flags;
	int urb;

	local_irq_save(flags);

	status = __raw_readl(MMUCR);
	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
	status &= ~MMUCR_URB;

	/*
	 * Make sure we're not trying to unwire a TLB entry when none
	 * have been wired.
	 */
	BUG_ON(urb++ == MMUCR_URB_NENTRIES);

	urb = urb % MMUCR_URB_NENTRIES;

	status |= (urb << MMUCR_URB_SHIFT);
	__raw_writel(status, MMUCR);
	ctrl_barrier();

	local_irq_restore(flags);
}
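Since wiring is LIFO, as the comment above stresses, a caller that wires two entries has to release them in reverse order. A purely illustrative pairing; the vma/pte arguments are placeholders for whatever the caller already holds:

/*
 * Hypothetical sketch only: keep two translations wired across a
 * critical section, then release them newest-first.
 */
static void example_wired_section(struct vm_area_struct *vma,
				  unsigned long addr1, pte_t pte1,
				  unsigned long addr2, pte_t pte2)
{
	tlb_wire_entry(vma, addr1, pte1);	/* wires slot N */
	tlb_wire_entry(vma, addr2, pte2);	/* wires slot N + 1 */

	/* ... code that must not take a TLB miss on addr1/addr2 ... */

	tlb_unwire_entry();			/* releases N + 1 */
	tlb_unwire_entry();			/* then N */
}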
78
arch/sh/mm/tlbex_32.c
Normal file
@ -0,0 +1,78 @@
/*
|
||||
* TLB miss handler for SH with an MMU.
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka
|
||||
* Copyright (C) 2003 - 2012 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
/*
|
||||
* Called with interrupts disabled.
|
||||
*/
|
||||
asmlinkage int __kprobes
|
||||
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
pte_t entry;
|
||||
|
||||
/*
|
||||
* We don't take page faults for P1, P2, and parts of P4, these
|
||||
* are always mapped, whether it be due to legacy behaviour in
|
||||
* 29-bit mode, or due to PMB configuration in 32-bit mode.
|
||||
*/
|
||||
if (address >= P3SEG && address < P3_ADDR_MAX) {
|
||||
pgd = pgd_offset_k(address);
|
||||
} else {
|
||||
if (unlikely(address >= TASK_SIZE || !current->mm))
|
||||
return 1;
|
||||
|
||||
pgd = pgd_offset(current->mm, address);
|
||||
}
|
||||
|
||||
pud = pud_offset(pgd, address);
|
||||
if (pud_none_or_clear_bad(pud))
|
||||
return 1;
|
||||
pmd = pmd_offset(pud, address);
|
||||
if (pmd_none_or_clear_bad(pmd))
|
||||
return 1;
|
||||
pte = pte_offset_kernel(pmd, address);
|
||||
entry = *pte;
|
||||
if (unlikely(pte_none(entry) || pte_not_present(entry)))
|
||||
return 1;
|
||||
if (unlikely(error_code && !pte_write(entry)))
|
||||
return 1;
|
||||
|
||||
if (error_code)
|
||||
entry = pte_mkdirty(entry);
|
||||
entry = pte_mkyoung(entry);
|
||||
|
||||
set_pte(pte, entry);
|
||||
|
||||
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
|
||||
/*
|
||||
* SH-4 does not set MMUCR.RC to the corresponding TLB entry in
|
||||
* the case of an initial page write exception, so we need to
|
||||
* flush it in order to avoid potential TLB entry duplication.
|
||||
*/
|
||||
if (error_code == FAULT_CODE_INITIAL)
|
||||
local_flush_tlb_one(get_asid(), address & PAGE_MASK);
|
||||
#endif
|
||||
|
||||
set_thread_fault_code(error_code);
|
||||
update_mmu_cache(NULL, address, pte);
|
||||
|
||||
return 0;
|
||||
}
|
166
arch/sh/mm/tlbex_64.c
Normal file
166
arch/sh/mm/tlbex_64.c
Normal file
|
@ -0,0 +1,166 @@
|
|||
/*
|
||||
* The SH64 TLB miss.
|
||||
*
|
||||
* Original code from fault.c
|
||||
* Copyright (C) 2000, 2001 Paolo Alberelli
|
||||
*
|
||||
* Fast PTE->TLB refill path
|
||||
* Copyright (C) 2003 Richard.Curnow@superh.com
|
||||
*
|
||||
* IMPORTANT NOTES :
|
||||
* The do_fast_page_fault function is called from a context in entry.S
|
||||
* where very few registers have been saved. In particular, the code in
|
||||
* this file must be compiled not to use ANY caller-save registers that
|
||||
* are not part of the restricted save set. Also, it means that code in
|
||||
* this file must not make calls to functions elsewhere in the kernel, or
|
||||
* else the excepting context will see corruption in its caller-save
|
||||
* registers. Plus, the entry.S save area is non-reentrant, so this code
|
||||
* has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
|
||||
* on any exception.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
static int handle_tlbmiss(unsigned long long protection_flags,
|
||||
unsigned long address)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
pte_t entry;
|
||||
|
||||
if (is_vmalloc_addr((void *)address)) {
|
||||
pgd = pgd_offset_k(address);
|
||||
} else {
|
||||
if (unlikely(address >= TASK_SIZE || !current->mm))
|
||||
return 1;
|
||||
|
||||
pgd = pgd_offset(current->mm, address);
|
||||
}
|
||||
|
||||
pud = pud_offset(pgd, address);
|
||||
if (pud_none(*pud) || !pud_present(*pud))
|
||||
return 1;
|
||||
|
||||
pmd = pmd_offset(pud, address);
|
||||
if (pmd_none(*pmd) || !pmd_present(*pmd))
|
||||
return 1;
|
||||
|
||||
pte = pte_offset_kernel(pmd, address);
|
||||
entry = *pte;
|
||||
if (pte_none(entry) || !pte_present(entry))
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* If the page doesn't have sufficient protection bits set to
|
||||
* service the kind of fault being handled, there's not much
|
||||
* point doing the TLB refill. Punt the fault to the general
|
||||
* handler.
|
||||
*/
|
||||
if ((pte_val(entry) & protection_flags) != protection_flags)
|
||||
return 1;
|
||||
|
||||
update_mmu_cache(NULL, address, pte);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Put all this information into one structure so that everything is just
|
||||
* arithmetic relative to a single base address. This reduces the number
|
||||
* of movi/shori pairs needed just to load addresses of static data.
|
||||
*/
|
||||
struct expevt_lookup {
|
||||
unsigned short protection_flags[8];
|
||||
unsigned char is_text_access[8];
|
||||
unsigned char is_write_access[8];
|
||||
};
|
||||
|
||||
#define PRU (1<<9)
|
||||
#define PRW (1<<8)
|
||||
#define PRX (1<<7)
|
||||
#define PRR (1<<6)
|
||||
|
||||
/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
|
||||
the fault happened in user mode or privileged mode. */
|
||||
static struct expevt_lookup expevt_lookup_table = {
|
||||
.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
|
||||
.is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
|
||||
};
|
||||
|
||||
static inline unsigned int
|
||||
expevt_to_fault_code(unsigned long expevt)
|
||||
{
|
||||
if (expevt == 0xa40)
|
||||
return FAULT_CODE_ITLB;
|
||||
else if (expevt == 0x060)
|
||||
return FAULT_CODE_WRITE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
This routine handles page faults that can be serviced just by refilling a
|
||||
TLB entry from an existing page table entry. (This case represents a very
|
||||
large majority of page faults.) Return 1 if the fault was successfully
|
||||
handled. Return 0 if the fault could not be handled. (This leads into the
|
||||
general fault handling in fault.c which deals with mapping file-backed
|
||||
pages, stack growth, segmentation faults, swapping etc etc)
|
||||
*/
|
||||
asmlinkage int __kprobes
|
||||
do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
|
||||
unsigned long address)
|
||||
{
|
||||
unsigned long long protection_flags;
|
||||
unsigned long long index;
|
||||
unsigned long long expevt4;
|
||||
unsigned int fault_code;
|
||||
|
||||
/* The next few lines implement a way of hashing EXPEVT into a
|
||||
* small array index which can be used to lookup parameters
|
||||
* specific to the type of TLBMISS being handled.
|
||||
*
|
||||
* Note:
|
||||
* ITLBMISS has EXPEVT==0xa40
|
||||
* RTLBMISS has EXPEVT==0x040
|
||||
* WTLBMISS has EXPEVT==0x060
|
||||
*/
|
||||
expevt4 = (expevt >> 4);
|
||||
/* TODO : xor ssr_md into this expression too. Then we can check
|
||||
* that PRU is set when it needs to be. */
|
||||
index = expevt4 ^ (expevt4 >> 5);
|
||||
index &= 7;
|
||||
|
||||
fault_code = expevt_to_fault_code(expevt);
|
||||
|
||||
protection_flags = expevt_lookup_table.protection_flags[index];
|
||||
|
||||
if (expevt_lookup_table.is_text_access[index])
|
||||
fault_code |= FAULT_CODE_ITLB;
|
||||
if (!ssr_md)
|
||||
fault_code |= FAULT_CODE_USER;
|
||||
|
||||
set_thread_fault_code(fault_code);
|
||||
|
||||
return handle_tlbmiss(protection_flags, address);
|
||||
}
|
137
arch/sh/mm/tlbflush_32.c
Normal file
137
arch/sh/mm/tlbflush_32.c
Normal file
|
@ -0,0 +1,137 @@
|
|||
/*
|
||||
* TLB flushing operations for SH with an MMU.
|
||||
*
|
||||
* Copyright (C) 1999 Niibe Yutaka
|
||||
* Copyright (C) 2003 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
|
||||
unsigned long flags;
|
||||
unsigned long asid;
|
||||
unsigned long saved_asid = MMU_NO_ASID;
|
||||
|
||||
asid = cpu_asid(cpu, vma->vm_mm);
|
||||
page &= PAGE_MASK;
|
||||
|
||||
local_irq_save(flags);
|
||||
if (vma->vm_mm != current->mm) {
|
||||
saved_asid = get_asid();
|
||||
set_asid(asid);
|
||||
}
|
||||
local_flush_tlb_one(asid, page);
|
||||
if (saved_asid != MMU_NO_ASID)
|
||||
set_asid(saved_asid);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
}
|
||||
|
||||
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
if (cpu_context(cpu, mm) != NO_CONTEXT) {
|
||||
unsigned long flags;
|
||||
int size;
|
||||
|
||||
local_irq_save(flags);
|
||||
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||
if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
|
||||
cpu_context(cpu, mm) = NO_CONTEXT;
|
||||
if (mm == current->mm)
|
||||
activate_context(mm, cpu);
|
||||
} else {
|
||||
unsigned long asid;
|
||||
unsigned long saved_asid = MMU_NO_ASID;
|
||||
|
||||
asid = cpu_asid(cpu, mm);
|
||||
start &= PAGE_MASK;
|
||||
end += (PAGE_SIZE - 1);
|
||||
end &= PAGE_MASK;
|
||||
if (mm != current->mm) {
|
||||
saved_asid = get_asid();
|
||||
set_asid(asid);
|
||||
}
|
||||
while (start < end) {
|
||||
local_flush_tlb_one(asid, start);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
if (saved_asid != MMU_NO_ASID)
|
||||
set_asid(saved_asid);
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
}
|
||||
|
||||
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
unsigned long flags;
|
||||
int size;
|
||||
|
||||
local_irq_save(flags);
|
||||
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||
if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
|
||||
local_flush_tlb_all();
|
||||
} else {
|
||||
unsigned long asid;
|
||||
unsigned long saved_asid = get_asid();
|
||||
|
||||
asid = cpu_asid(cpu, &init_mm);
|
||||
start &= PAGE_MASK;
|
||||
end += (PAGE_SIZE - 1);
|
||||
end &= PAGE_MASK;
|
||||
set_asid(asid);
|
||||
while (start < end) {
|
||||
local_flush_tlb_one(asid, start);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
set_asid(saved_asid);
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void local_flush_tlb_mm(struct mm_struct *mm)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
/* Invalidate all TLB of this process. */
|
||||
/* Instead of invalidating each TLB, we get new MMU context. */
|
||||
if (cpu_context(cpu, mm) != NO_CONTEXT) {
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
cpu_context(cpu, mm) = NO_CONTEXT;
|
||||
if (mm == current->mm)
|
||||
activate_context(mm, cpu);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
}
|
||||
|
||||
void __flush_tlb_global(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
/*
|
||||
* This is the most destructive of the TLB flushing options,
|
||||
* and will tear down all of the UTLB/ITLB mappings, including
|
||||
* wired entries.
|
||||
*/
|
||||
__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
172
arch/sh/mm/tlbflush_64.c
Normal file
172
arch/sh/mm/tlbflush_64.c
Normal file
|
@ -0,0 +1,172 @@
|
|||
/*
|
||||
* arch/sh/mm/tlb-flush_64.c
|
||||
*
|
||||
* Copyright (C) 2000, 2001 Paolo Alberelli
|
||||
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
|
||||
* Copyright (C) 2003 - 2012 Paul Mundt
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
        unsigned long long match, pteh=0, lpage;
        unsigned long tlb;

        /*
         * Sign-extend based on neff.
         */
        lpage = neff_sign_extend(page);
        match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
        match |= lpage;

        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }

        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned long flags;

        if (vma->vm_mm) {
                page &= PAGE_MASK;
                local_irq_save(flags);
                local_flush_tlb_one(get_asid(), page);
                local_irq_restore(flags);
        }
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        unsigned long flags;
        unsigned long long match, pteh=0, pteh_epn, pteh_low;
        unsigned long tlb;
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        start &= PAGE_MASK;
        end &= PAGE_MASK;

        match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

        /* Flush ITLB */
        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        /* Flush DTLB */
        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        cpu_context(cpu, mm) = NO_CONTEXT;
        if (mm == current->mm)
                activate_context(mm, cpu);

        local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
        /* Invalidate all, including shared pages, excluding fixed TLBs */
        unsigned long flags, tlb;

        local_irq_save(flags);

        /* Flush each ITLB entry */
        for_each_itlb_entry(tlb)
                __flush_tlb_slot(tlb);

        /* Flush each DTLB entry */
        for_each_dtlb_entry(tlb)
                __flush_tlb_slot(tlb);

        local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* FIXME: Optimize this later.. */
        flush_tlb_all();
}

void __flush_tlb_global(void)
{
        flush_tlb_all();
}
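
On this 64-bit variant each TLB slot's PTEH word is compared against a software-built key: the ASID shifted into position, the valid bit and, for single-page flushes, the sign-extended page address; the range flush instead splits PTEH into its EPN and low bits and tests them separately. A standalone sketch of that key construction follows; PTEH_ASID_SHIFT, PTEH_VALID and the page mask are made-up values rather than the real SH-5 layout, and the neff sign extension is omitted.

#include <stdio.h>

/* Assumed field layout, for illustration only. */
#define PTEH_VALID      0x1ULL
#define PTEH_ASID_SHIFT 2
#define PAGE_MASK       (~0xFFFULL)

/* Build the key the flush loops compare each slot's PTEH word against. */
static unsigned long long make_match(unsigned long long asid,
                                     unsigned long long page)
{
        return (asid << PTEH_ASID_SHIFT) | PTEH_VALID | (page & PAGE_MASK);
}

int main(void)
{
        unsigned long long slot_pteh = make_match(5, 0x123000ULL);
        unsigned long long match     = make_match(5, 0x123000ULL);

        /* Single-page flush: whole-word comparison. */
        printf("exact match: %s\n", slot_pteh == match ? "flush" : "keep");

        /* Range flush: check ASID/valid bits and the page number separately. */
        printf("epn=%#llx low=%#llx\n",
               slot_pteh & PAGE_MASK, slot_pteh & ~PAGE_MASK);
        return 0;
}
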
43
arch/sh/mm/uncached.c
Normal file
@ -0,0 +1,43 @@
#include <linux/init.h>
#include <linux/module.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <asm/addrspace.h>

/*
 * This is the offset of the uncached section from its cached alias.
 *
 * Legacy platforms handle trivial transitions between cached and
 * uncached segments by making use of the 1:1 mapping relationship in
 * 512MB lowmem, others via a special uncached mapping.
 *
 * The default value is only valid in 29-bit mode; in 32-bit mode it is
 * updated by the early PMB initialization code.
 */
unsigned long cached_to_uncached = SZ_512M;
unsigned long uncached_size = SZ_512M;
unsigned long uncached_start, uncached_end;
EXPORT_SYMBOL(uncached_start);
EXPORT_SYMBOL(uncached_end);

int virt_addr_uncached(unsigned long kaddr)
{
        return (kaddr >= uncached_start) && (kaddr < uncached_end);
}
EXPORT_SYMBOL(virt_addr_uncached);

void __init uncached_init(void)
{
#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
        uncached_start = P2SEG;
#else
        uncached_start = memory_end;
#endif
        uncached_end = uncached_start + uncached_size;
}

void __init uncached_resize(unsigned long size)
{
        uncached_size = size;
        uncached_end = uncached_start + uncached_size;
}
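
In 29-bit mode the uncached alias of a cached lowmem address is simply that address plus cached_to_uncached, thanks to the fixed 512MB offset between the identity-mapped cached and uncached segments; in 32-bit/PMB mode the offset is recomputed at boot instead. A small sketch of the 29-bit translation, with the segment bases written out as assumed constants:

#include <stdio.h>

/* Assumed 29-bit SH segment bases, for illustration. */
#define P1SEG              0x80000000UL    /* cached, identity mapped   */
#define P2SEG              0xa0000000UL    /* uncached, identity mapped */
#define CACHED_TO_UNCACHED (P2SEG - P1SEG) /* the 512MB offset          */

static unsigned long cached_to_uncached_addr(unsigned long cached)
{
        return cached + CACHED_TO_UNCACHED;
}

int main(void)
{
        unsigned long cached = P1SEG + 0x123450UL;

        printf("cached   %#lx\n", cached);
        printf("uncached %#lx\n", cached_to_uncached_addr(cached));
        return 0;
}
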