Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,147 @@
#
# MN10300 CPU cache options
#
choice
prompt "CPU Caching mode"
default MN10300_CACHE_WBACK
help
This option determines the caching mode for the kernel.
Write-Back caching mode involves all reads and writes causing
the affected cacheline to be read into the cache first before being
operated upon. Memory is not then updated by a write until the cache
is filled and a cacheline needs to be displaced from the cache to
make room. Only at that point is it written back.
Write-Through caching only fetches cachelines from memory on a
read. Writes always get written directly to memory. If the affected
cacheline is also in cache, it will be updated too.
The final option is to turn off caching entirely.
config MN10300_CACHE_WBACK
bool "Write-Back"
help
The dcache operates in delayed write-back mode. It must be manually
flushed if writes are made that subsequently need to be executed or
to be DMA'd by a device.
config MN10300_CACHE_WTHRU
bool "Write-Through"
help
The dcache operates in immediate write-through mode. Writes are
committed to RAM immediately in addition to being stored in the
cache. This means that the written data is immediately available for
execution or DMA.
This mode is not available for use with an SMP kernel unless cache
flushing and invalidation by the automatic purge registers is selected.
config MN10300_CACHE_DISABLED
bool "Disabled"
help
The icache and dcache are disabled.
endchoice
config MN10300_CACHE_ENABLED
def_bool y if !MN10300_CACHE_DISABLED
choice
prompt "CPU cache flush/invalidate method"
default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
default MN10300_CACHE_MANAGE_BY_REG if AM34_2
depends on MN10300_CACHE_ENABLED
help
This determines the method by which CPU cache flushing and
invalidation is performed.
config MN10300_CACHE_MANAGE_BY_TAG
bool "Use the cache tag registers directly"
depends on !(SMP && MN10300_CACHE_WTHRU)
config MN10300_CACHE_MANAGE_BY_REG
bool "Flush areas by way of automatic purge registers (AM34 only)"
depends on AM34_2
endchoice
config MN10300_CACHE_INV_BY_TAG
def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
config MN10300_CACHE_INV_BY_REG
def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
config MN10300_CACHE_FLUSH_BY_TAG
def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
config MN10300_CACHE_FLUSH_BY_REG
def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
config MN10300_HAS_CACHE_SNOOP
def_bool n
config MN10300_CACHE_SNOOP
bool "Use CPU Cache Snooping"
depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
default y
config MN10300_CACHE_FLUSH_ICACHE
def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
help
Set if we need the dcache to be flushed before the icache is invalidated.
config MN10300_CACHE_INV_ICACHE
def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
help
Set if we need the icache to be invalidated, even if the dcache is in
write-through mode and doesn't need flushing.
#
# The kernel debugger gets its own separate cache flushing functions
#
config MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WBACK && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_TAG
help
Set if the debugger needs to flush the dcache and invalidate the
icache using the cache tag registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_FLUSH_BY_REG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WBACK && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_REG
help
Set if the debugger needs to flush the dcache and invalidate the
icache using automatic purge registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_INV_BY_TAG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WTHRU && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_TAG
help
Set if the debugger needs to invalidate the icache using the cache
tag registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_INV_BY_REG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WTHRU && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_REG
help
Set if the debugger needs to invalidate the icache using automatic
purge registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_NO_FLUSH
def_bool y if KERNEL_DEBUGGER && \
(MN10300_CACHE_DISABLED || MN10300_CACHE_SNOOP)
help
Set if the debugger does not need to flush the dcache and/or
invalidate the icache to make breakpoints work.
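#
# As a worked example of the def_bool logic above (an illustration, not
# part of the original file): a write-back, tag-managed, non-debugger
# build resolves to MN10300_CACHE_ENABLED=y, MN10300_CACHE_INV_BY_TAG=y
# and MN10300_CACHE_FLUSH_BY_TAG=y, with all the BY_REG symbols unset.
#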

arch/mn10300/mm/Makefile (new file, 31 lines)

@@ -0,0 +1,31 @@
#
# Makefile for the MN10300-specific memory management code
#
cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
cacheflush-y := cache.o
cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG) += \
cache-dbg-flush-by-tag.o cache-dbg-inv-by-tag.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_REG) += \
cache-dbg-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG) += \
cache-dbg-inv-by-tag.o cache-dbg-inv.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_REG) += \
cache-dbg-inv-by-reg.o cache-dbg-inv.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
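# note: the ':=' above deliberately replaces the whole cacheflush-y list
# accumulated by the '+=' lines when the cache is disabled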
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o $(cacheflush-y)
obj-$(CONFIG_SMP) += tlb-smp.o


@@ -0,0 +1,160 @@
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
###############################################################################
#
# void debugger_local_cache_flushinv(void)
# Flush the entire data cache back to RAM and invalidate the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# firstly flush the dcache
#
movhu (CHCTR),d0
btst CHCTR_DCEN|CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
mov DCPGCR,a0
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
btst CHCTR_DCEN,d0
beq debugger_local_cache_flushinv_no_dcache
# wait for busy bit of area purge
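# (on the MN10300, SETLB marks the top of a hardware loop and Lcc re-runs
#  it while the condition holds, so this spins until the busy bit clears)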
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
# set mask
clr d0
mov d0,(DCPGMR)
# area purge
#
# DCPGCR = DCPGCR_DCP
#
mov DCPGCR_DCP,d0
mov d0,(a0)
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
debugger_local_cache_flushinv_no_dcache:
#
# secondly, invalidate the icache if it is enabled
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_done
invalidate_icache 0
debugger_local_cache_flushinv_done:
mov d1,epsw
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Flush one particular cacheline from the dcache and invalidate it in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
movhu (CHCTR),d1
btst CHCTR_DCEN|CHCTR_ICEN,d1
beq debugger_local_cache_flushinv_one_end
btst CHCTR_DCEN,d1
beq debugger_local_cache_flushinv_one_no_dcache
# round cacheline addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
mov d0,d1
# determine the dcache purge control reg address
mov DCACHE_PURGE(0,0),a0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0
# retain valid entries in the cache
or L1_CACHE_TAG_VALID,d1
# conditionally purge this line in all ways
mov d1,(L1_CACHE_WAYDISP*0,a0)
debugger_local_cache_flushinv_one_no_dcache:
#
# now try to flush the icache
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_one_end
LOCAL_CLI_SAVE(d1)
mov ICIVCR,a0
# wait for the invalidator to quiesce
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
# set the mask
mov L1_CACHE_TAG_MASK,d0
mov d0,(ICIVMR)
# invalidate the cache line at the given address
or ICIVCR_ICI,a1
mov a1,(a0)
# wait for the invalidator to quiesce again
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
debugger_local_cache_flushinv_one_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one


@@ -0,0 +1,114 @@
/* MN10300 CPU cache invalidation routines, using direct tag flushing
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
###############################################################################
#
# void debugger_local_cache_flushinv(void)
#
# Flush the entire data cache back to RAM and invalidate the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# firstly flush the dcache
#
movhu (CHCTR),d0
btst CHCTR_DCEN|CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
btst CHCTR_DCEN,d0
beq debugger_local_cache_flushinv_no_dcache
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,e0 # total number of entries
mn10300_local_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_MASK,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,e0
bne mn10300_local_dcache_flush_loop
debugger_local_cache_flushinv_no_dcache:
#
# secondly, invalidate the icache if it is enabled
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
invalidate_icache 1
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Flush one particular cacheline from the dcache and invalidate it in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
movhu (CHCTR),d1
btst CHCTR_DCEN|CHCTR_ICEN,d1
beq debugger_local_cache_flushinv_one_end
btst CHCTR_DCEN,d1
beq debugger_local_cache_flushinv_one_icache
# round cacheline addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
# determine the dcache purge control reg address
mov DCACHE_PURGE(0,0),a0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0
# retain valid entries in the cache
or L1_CACHE_TAG_VALID,a1
# conditionally purge this line in all ways
mov a1,(L1_CACHE_WAYDISP*0,a0)
# now go and do the icache
bra debugger_local_cache_flushinv_one_icache
debugger_local_cache_flushinv_one_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one


@@ -0,0 +1,69 @@
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv_one
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
mov d0,a1
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_range_reg_end
LOCAL_CLI_SAVE(d1)
mov ICIVCR,a0
# wait for the invalidator to quiesce
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
# set the mask
mov ~L1_CACHE_TAG_MASK,d0
mov d0,(ICIVMR)
# invalidate the cache line at the given address
and ~L1_CACHE_TAG_MASK,a1
or ICIVCR_ICI,a1
mov a1,(a0)
# wait for the invalidator to quiesce again
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
mn10300_local_icache_inv_range_reg_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one


@@ -0,0 +1,120 @@
/* MN10300 CPU cache invalidation routines, using direct tag flushing
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv_one_icache
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one_icache
.type debugger_local_cache_flushinv_one_icache,@function
debugger_local_cache_flushinv_one_icache:
movm [d3,a2],(sp)
mov d0,a1 # save the address argument; D0 is
and L1_CACHE_TAG_MASK,a1 # clobbered by the CHCTR read below
mov CHCTR,a2
movhu (a2),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_one_icache_end
# read the tags from the tag RAM, and if they indicate a matching valid
# cache line then we invalidate that line
mov ICACHE_TAG(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting icache tag RAM
# access address
and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
or L1_CACHE_TAG_VALID,a1
mov L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_VALID,d1
LOCAL_CLI_SAVE(d3)
# disable the icache
movhu (a2),d0
and ~CHCTR_ICEN,d0
movhu d0,(a2)
# and wait for it to calm down
setlb
movhu (a2),d0
btst CHCTR_ICBUSY,d0
lne
# check all the way tags for this cache entry
mov (a0),d0 # read the tag in the way 0 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 1 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 2 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 3 slot
xor a1,d0
and d1,d0
bne debugger_local_icache_finish # jump if not matched
debugger_local_icache_kill:
mov d0,(a0) # kill the tag (D0 is 0 at this point)
debugger_local_icache_finish:
# wait for the cache to finish what it's doing
setlb
movhu (a2),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
or CHCTR_ICEN,d0
movhu d0,(a2)
movhu (a2),d0
# re-enable interrupts
LOCAL_IRQ_RESTORE(d3)
debugger_local_cache_flushinv_one_icache_end:
ret [d3,a2],8
.size debugger_local_cache_flushinv_one_icache,.-debugger_local_cache_flushinv_one_icache
#ifdef CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one = debugger_local_cache_flushinv_one_icache
#endif


@@ -0,0 +1,47 @@
/* MN10300 CPU cache invalidation routines
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv(void)
#
# Invalidate the entire icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# we only need to invalidate the icache in this cache mode
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
invalidate_icache 1
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv


@@ -0,0 +1,21 @@
/* Handle the cache being disabled
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
{
if (end < start)
return -EINVAL;
return 0;
}
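/* A hypothetical userspace sketch (the syscall number name is assumed
 * here, not taken from this tree): after writing code into a buffer,
 * ask the kernel to make it safe to execute:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	syscall(__NR_cacheflush, (unsigned long) buf,
 *		(unsigned long) buf + len);
 *
 * With the cache disabled, the range check above is all that happens.
 */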


@@ -0,0 +1,308 @@
/* MN10300 CPU core caching routines, using indirect regs on cache controller
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
.am33_2
#ifndef CONFIG_SMP
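# on a UP kernel the global cache operations are plain aliases of the
# CPU-local ones; SMP kernels instead get wrappers (see cache-smp.o in
# the Makefile) that also broadcast the operation to the other CPUs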
.globl mn10300_dcache_flush
.globl mn10300_dcache_flush_page
.globl mn10300_dcache_flush_range
.globl mn10300_dcache_flush_range2
.globl mn10300_dcache_flush_inv
.globl mn10300_dcache_flush_inv_page
.globl mn10300_dcache_flush_inv_range
.globl mn10300_dcache_flush_inv_range2
mn10300_dcache_flush = mn10300_local_dcache_flush
mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
#endif /* !CONFIG_SMP */
###############################################################################
#
# void mn10300_local_dcache_flush(void)
# Flush the entire data cache back to RAM
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush
.type mn10300_local_dcache_flush,@function
mn10300_local_dcache_flush:
movhu (CHCTR),d0
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_flush_end
mov DCPGCR,a0
LOCAL_CLI_SAVE(d1)
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
# set mask
clr d0
mov d0,(DCPGMR)
# area purge
#
# DCPGCR = DCPGCR_DCP
#
mov DCPGCR_DCP,d0
mov d0,(a0)
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
mn10300_local_dcache_flush_end:
ret [],0
.size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
###############################################################################
#
# void mn10300_local_dcache_flush_page(unsigned long start)
# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
# Flush a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush_page
.globl mn10300_local_dcache_flush_range
.globl mn10300_local_dcache_flush_range2
.type mn10300_local_dcache_flush_page,@function
.type mn10300_local_dcache_flush_range,@function
.type mn10300_local_dcache_flush_range2,@function
mn10300_local_dcache_flush_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_dcache_flush_range2:
add d0,d1
mn10300_local_dcache_flush_range:
movm [d2,d3,a2],(sp)
movhu (CHCTR),d2
btst CHCTR_DCEN,d2
beq mn10300_local_dcache_flush_range_end
# calculate alignsize
#
# alignsize = L1_CACHE_BYTES;
# for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1)
# alignsize <<= 1;
# d2 = alignsize;
#
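# (e.g. assuming 16-byte cachelines, a 0x230-byte range yields an
#  alignsize of 0x400)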
mov L1_CACHE_BYTES,d2
sub d0,d1,d3
add -1,d3
lsr L1_CACHE_SHIFT,d3
beq 2f
1:
add d2,d2
lsr 1,d3
bne 1b
2:
mov d1,a1 # a1 = end
LOCAL_CLI_SAVE(d3)
mov DCPGCR,a0
# wait for busy bit of area purge
setlb
mov (a0),d1
btst DCPGCR_DCPGBSY,d1
lne
# determine the mask
mov d2,d1
add -1,d1
not d1 # d1 = mask = ~(alignsize-1)
mov d1,(DCPGMR)
and d1,d0,a2 # a2 = mask & start
dcpgloop:
# area purge
mov a2,d0
or DCPGCR_DCP,d0
mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP
# wait for busy bit of area purge
setlb
mov (a0),d1
btst DCPGCR_DCPGBSY,d1
lne
# check purge of end address
add d2,a2 # a2 += alignsize
cmp a1,a2 # if (a2 < end) goto dcpgloop
bns dcpgloop
LOCAL_IRQ_RESTORE(d3)
mn10300_local_dcache_flush_range_end:
ret [d2,d3,a2],12
.size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
.size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
.size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
###############################################################################
#
# void mn10300_local_dcache_flush_inv(void)
# Flush the entire data cache and invalidate all entries
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush_inv
.type mn10300_local_dcache_flush_inv,@function
mn10300_local_dcache_flush_inv:
movhu (CHCTR),d0
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_flush_inv_end
mov DCPGCR,a0
LOCAL_CLI_SAVE(d1)
# wait for busy bit of area purge & invalidate
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
# set the mask to cover everything
clr d0
mov d0,(DCPGMR)
# area purge & invalidate
mov DCPGCR_DCP|DCPGCR_DCI,d0
mov d0,(a0)
# wait for busy bit of area purge & invalidate
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
mn10300_local_dcache_flush_inv_end:
ret [],0
.size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
###############################################################################
#
# void mn10300_local_dcache_flush_inv_page(unsigned long start)
# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
# Flush and invalidate a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush_inv_page
.globl mn10300_local_dcache_flush_inv_range
.globl mn10300_local_dcache_flush_inv_range2
.type mn10300_local_dcache_flush_inv_page,@function
.type mn10300_local_dcache_flush_inv_range,@function
.type mn10300_local_dcache_flush_inv_range2,@function
mn10300_local_dcache_flush_inv_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_dcache_flush_inv_range2:
add d0,d1
mn10300_local_dcache_flush_inv_range:
movm [d2,d3,a2],(sp)
movhu (CHCTR),d2
btst CHCTR_DCEN,d2
beq mn10300_local_dcache_flush_inv_range_end
# calculate alignsize
#
# alignsize = L1_CACHE_BYTES;
# for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
# alignsize <<= 1;
# d2 = alignsize
#
mov L1_CACHE_BYTES,d2
sub d0,d1,d3
add -1,d3
lsr L1_CACHE_SHIFT,d3
beq 2f
1:
add d2,d2
lsr 1,d3
bne 1b
2:
mov d1,a1 # a1 = end
LOCAL_CLI_SAVE(d3)
mov DCPGCR,a0
# wait for busy bit of area purge & invalidate
setlb
mov (a0),d1
btst DCPGCR_DCPGBSY,d1
lne
# set the mask
mov d2,d1
add -1,d1
not d1 # d1 = mask = ~(alignsize-1)
mov d1,(DCPGMR)
and d1,d0,a2 # a2 = mask & start
dcpgivloop:
# area purge & invalidate
mov a2,d0
or DCPGCR_DCP|DCPGCR_DCI,d0
mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
# wait for busy bit of area purge & invalidate
setlb
mov (a0),d1
btst DCPGCR_DCPGBSY,d1
lne
# check purge & invalidate of end address
add d2,a2 # a2 += alignsize
cmp a1,a2 # if (a2 < end) goto dcpgivloop
bns dcpgivloop
LOCAL_IRQ_RESTORE(d3)
mn10300_local_dcache_flush_inv_range_end:
ret [d2,d3,a2],12
.size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
.size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
.size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2


@@ -0,0 +1,250 @@
/* MN10300 CPU core caching routines, using direct tag flushing
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
.am33_2
#ifndef CONFIG_SMP
.globl mn10300_dcache_flush
.globl mn10300_dcache_flush_page
.globl mn10300_dcache_flush_range
.globl mn10300_dcache_flush_range2
.globl mn10300_dcache_flush_inv
.globl mn10300_dcache_flush_inv_page
.globl mn10300_dcache_flush_inv_range
.globl mn10300_dcache_flush_inv_range2
mn10300_dcache_flush = mn10300_local_dcache_flush
mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
#endif /* !CONFIG_SMP */
###############################################################################
#
# void mn10300_local_dcache_flush(void)
# Flush the entire data cache back to RAM
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush
.type mn10300_local_dcache_flush,@function
mn10300_local_dcache_flush:
movhu (CHCTR),d0
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_flush_end
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
mn10300_local_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_MASK,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,d1
bne mn10300_local_dcache_flush_loop
mn10300_local_dcache_flush_end:
ret [],0
.size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
###############################################################################
#
# void mn10300_local_dcache_flush_page(unsigned long start)
# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
# Flush a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush_page
.globl mn10300_local_dcache_flush_range
.globl mn10300_local_dcache_flush_range2
.type mn10300_local_dcache_flush_page,@function
.type mn10300_local_dcache_flush_range,@function
.type mn10300_local_dcache_flush_range2,@function
mn10300_local_dcache_flush_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_dcache_flush_range2:
add d0,d1
mn10300_local_dcache_flush_range:
movm [d2],(sp)
movhu (CHCTR),d2
btst CHCTR_DCEN,d2
beq mn10300_local_dcache_flush_range_end
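# if the range is larger than the flush border, purging line by line
# costs more than simply flushing the entire dcache, so fall back to that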
sub d0,d1,a0
cmp MN10300_DCACHE_FLUSH_BORDER,a0
ble 1f
movm (sp),[d2]
bra mn10300_local_dcache_flush
1:
# round start addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1
# write a request to flush all instances of an address from the cache
mov DCACHE_PURGE(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting dcache purge control
# reg address
sub a1,d1
lsr L1_CACHE_SHIFT,d1 # total number of entries to
# examine
or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
# cache
mn10300_local_dcache_flush_range_loop:
mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
# all ways
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
add -1,d1
bne mn10300_local_dcache_flush_range_loop
mn10300_local_dcache_flush_range_end:
ret [d2],4
.size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
.size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
.size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
###############################################################################
#
# void mn10300_local_dcache_flush_inv(void)
# Flush the entire data cache and invalidate all entries
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush_inv
.type mn10300_local_dcache_flush_inv,@function
mn10300_local_dcache_flush_inv:
movhu (CHCTR),d0
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_flush_inv_end
mov L1_CACHE_NENTRIES,d1
clr a1
mn10300_local_dcache_flush_inv_loop:
mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
add L1_CACHE_BYTES,a1
add -1,d1
bne mn10300_local_dcache_flush_inv_loop
mn10300_local_dcache_flush_inv_end:
ret [],0
.size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
###############################################################################
#
# void mn10300_local_dcache_flush_inv_page(unsigned long start)
# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
# Flush and invalidate a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_flush_inv_page
.globl mn10300_local_dcache_flush_inv_range
.globl mn10300_local_dcache_flush_inv_range2
.type mn10300_local_dcache_flush_inv_page,@function
.type mn10300_local_dcache_flush_inv_range,@function
.type mn10300_local_dcache_flush_inv_range2,@function
mn10300_local_dcache_flush_inv_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_dcache_flush_inv_range2:
add d0,d1
mn10300_local_dcache_flush_inv_range:
movm [d2],(sp)
movhu (CHCTR),d2
btst CHCTR_DCEN,d2
beq mn10300_local_dcache_flush_inv_range_end
sub d0,d1,a0
cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0
ble 1f
movm (sp),[d2]
bra mn10300_local_dcache_flush_inv
1:
and L1_CACHE_TAG_MASK,d0 # round start addr down
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1
# write a request to flush and invalidate all instances of an address
# from the cache
mov DCACHE_PURGE(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting dcache purge control
# reg address
sub a1,d1
lsr L1_CACHE_SHIFT,d1 # total number of entries to
# examine
mn10300_local_dcache_flush_inv_range_loop:
mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
# in all ways
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
add -1,d1
bne mn10300_local_dcache_flush_inv_range_loop
mn10300_local_dcache_flush_inv_range_end:
ret [d2],4
.size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
.size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
.size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2


@@ -0,0 +1,155 @@
/* Flush dcache and invalidate icache when the dcache is in writeback mode
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include "cache-smp.h"
/**
* flush_icache_page - Flush a page from the dcache and invalidate the icache
* @vma: The VMA the page is part of.
* @page: The page to be flushed.
*
* Write a page back from the dcache and invalidate the icache so that we can
* run code from it that we've just written into it
*/
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
unsigned long start = page_to_phys(page);
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_page(start);
mn10300_local_icache_inv_page(start);
smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
EXPORT_SYMBOL(flush_icache_page);
/**
* flush_icache_page_range - Flush dcache and invalidate icache for part of a
* single page
* @start: The starting virtual address of the page part.
* @end: The ending virtual address of the page part.
*
* Flush the dcache and invalidate the icache for part of a single page, as
* determined by the virtual addresses given. The page must be in the paged
* area.
*/
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
unsigned long addr, size, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
/* work out how much of the page to flush */
off = start & ~PAGE_MASK;
size = end - start;
/* get the physical address the page is mapped to from the page
* tables */
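/* (note: the walk goes through current->mm, so only addresses mapped
 * into the currently running process can be resolved here) */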
pgd = pgd_offset(current->mm, start);
if (!pgd || !pgd_val(*pgd))
return;
pud = pud_offset(pgd, start);
if (!pud || !pud_val(*pud))
return;
pmd = pmd_offset(pud, start);
if (!pmd || !pmd_val(*pmd))
return;
ppte = pte_offset_map(pmd, start);
if (!ppte)
return;
pte = *ppte;
pte_unmap(ppte);
if (pte_none(pte))
return;
page = pte_page(pte);
if (!page)
return;
addr = page_to_phys(page);
/* flush the dcache and invalidate the icache coverage on that
* region */
mn10300_local_dcache_flush_range2(addr + off, size);
mn10300_local_icache_inv_range2(addr + off, size);
smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
/**
* flush_icache_range - Globally flush dcache and invalidate icache for region
* @start: The starting virtual address of the region.
* @end: The ending virtual address of the region.
*
* This is used by the kernel to globally flush some code it has just written
* from the dcache back to RAM and then to globally invalidate the icache over
* that region so that that code can be run on all CPUs in the system.
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long start_page, end_page;
unsigned long flags;
flags = smp_lock_cache();
if (end > 0x80000000UL) {
/* addresses above 0xa0000000 do not go through the cache */
if (end > 0xa0000000UL) {
end = 0xa0000000UL;
if (start >= end)
goto done;
}
/* kernel addresses between 0x80000000 and 0x9fffffff do not
* require page tables, so we just map such addresses
* directly */
start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
mn10300_local_dcache_flush_range(start_page, end);
mn10300_local_icache_inv_range(start_page, end);
smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
if (start_page == start)
goto done;
end = start_page;
}
start_page = start & PAGE_MASK;
end_page = (end - 1) & PAGE_MASK;
if (start_page == end_page) {
/* the first and last bytes are on the same page */
flush_icache_page_range(start, end);
} else if (start_page + 1 == end_page) {
/* split over two virtually contiguous pages */
flush_icache_page_range(start, end_page);
flush_icache_page_range(end_page, end);
} else {
/* more than 2 pages; just flush the entire cache */
mn10300_dcache_flush();
mn10300_icache_inv();
smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
}
done:
smp_unlock_cache(flags);
}
EXPORT_SYMBOL(flush_icache_range);


@@ -0,0 +1,350 @@
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
#if mn10300_local_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif
.am33_2
#ifndef CONFIG_SMP
.globl mn10300_icache_inv
.globl mn10300_icache_inv_page
.globl mn10300_icache_inv_range
.globl mn10300_icache_inv_range2
.globl mn10300_dcache_inv
.globl mn10300_dcache_inv_page
.globl mn10300_dcache_inv_range
.globl mn10300_dcache_inv_range2
mn10300_icache_inv = mn10300_local_icache_inv
mn10300_icache_inv_page = mn10300_local_icache_inv_page
mn10300_icache_inv_range = mn10300_local_icache_inv_range
mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
mn10300_dcache_inv = mn10300_local_dcache_inv
mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
#endif /* !CONFIG_SMP */
###############################################################################
#
# void mn10300_local_icache_inv(void)
# Invalidate the entire icache
#
###############################################################################
ALIGN
.globl mn10300_local_icache_inv
.type mn10300_local_icache_inv,@function
mn10300_local_icache_inv:
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_end
invalidate_icache 1
mn10300_local_icache_inv_end:
ret [],0
.size mn10300_local_icache_inv,.-mn10300_local_icache_inv
###############################################################################
#
# void mn10300_local_dcache_inv(void)
# Invalidate the entire dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_inv
.type mn10300_local_dcache_inv,@function
mn10300_local_dcache_inv:
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_inv_end
invalidate_dcache 1
mn10300_local_dcache_inv_end:
ret [],0
.size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
###############################################################################
#
# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_dcache_inv_page(unsigned long start)
# Invalidate a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_inv_page
.globl mn10300_local_dcache_inv_range
.globl mn10300_local_dcache_inv_range2
.type mn10300_local_dcache_inv_page,@function
.type mn10300_local_dcache_inv_range,@function
.type mn10300_local_dcache_inv_range2,@function
mn10300_local_dcache_inv_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_dcache_inv_range2:
add d0,d1
mn10300_local_dcache_inv_range:
# If we are in writeback mode we check the start and end alignments,
# and if they're not cacheline-aligned, we must flush any bits outside
# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
btst ~L1_CACHE_TAG_MASK,d0
bne 1f
btst ~L1_CACHE_TAG_MASK,d1
beq 2f
1:
bra mn10300_local_dcache_flush_inv_range
2:
#endif /* CONFIG_MN10300_CACHE_WBACK */
movm [d2,d3,a2],(sp)
mov CHCTR,a0
movhu (a0),d2
btst CHCTR_DCEN,d2
beq mn10300_local_dcache_inv_range_end
# round the addresses out to be full cachelines, unless we're in
# writeback mode, in which case we would be in flush and invalidate by
# now
#ifndef CONFIG_MN10300_CACHE_WBACK
and L1_CACHE_TAG_MASK,d0 # round start addr down
mov L1_CACHE_BYTES-1,d2
add d2,d1
and L1_CACHE_TAG_MASK,d1 # round end addr up
#endif /* !CONFIG_MN10300_CACHE_WBACK */
sub d0,d1,d2 # calculate the total size
mov d0,a2 # A2 = start address
mov d1,a1 # A1 = end address
LOCAL_CLI_SAVE(d3)
mov DCPGCR,a0 # make sure the purger isn't busy
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
# skip initial address alignment calculation if address is zero
mov d2,d1
cmp 0,a2
beq 1f
dcivloop:
/* calculate alignsize
*
* alignsize = L1_CACHE_BYTES;
* while (!(start & alignsize)) {
* alignsize <<=1;
* }
* d1 = alignsize;
*/
mov L1_CACHE_BYTES,d1
lsr 1,d1
setlb
add d1,d1
mov d1,d0
and a2,d0
leq
1:
/* calculate invsize
*
* if (totalsize > alignsize) {
* invsize = alignsize;
* } else {
* invsize = totalsize;
* tmp = 0x80000000;
* while (!(invsize & tmp)) {
* tmp >>= 1;
* }
* invsize = tmp;
* }
* d1 = invsize
*/
cmp d2,d1
bns 2f
mov d2,d1
mov 0x80000000,d0 # start from 31bit=1
setlb
lsr 1,d0
mov d0,e0
and d1,e0
leq
mov d0,d1
2:
/* set mask
*
* mask = ~(invsize-1);
* DCPGMR = mask;
*/
mov d1,d0
add -1,d0
not d0
mov d0,(DCPGMR)
# invalidate area
mov a2,d0
or DCPGCR_DCI,d0
mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
setlb # wait for the purge to complete
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
sub d1,d2 # decrease size remaining
add d1,a2 # increase next start address
/* check invalidating of end address
*
* a2 = a2 + invsize
* if (a2 < end) {
* goto dcivloop;
* } */
cmp a1,a2
bns dcivloop
LOCAL_IRQ_RESTORE(d3)
mn10300_local_dcache_inv_range_end:
ret [d2,d3,a2],12
.size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
.size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
.size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
###############################################################################
#
# void mn10300_local_icache_inv_page(unsigned long start)
# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
# Invalidate a range of addresses on a page in the icache
#
###############################################################################
ALIGN
.globl mn10300_local_icache_inv_page
.globl mn10300_local_icache_inv_range
.globl mn10300_local_icache_inv_range2
.type mn10300_local_icache_inv_page,@function
.type mn10300_local_icache_inv_range,@function
.type mn10300_local_icache_inv_range2,@function
mn10300_local_icache_inv_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_icache_inv_range2:
add d0,d1
mn10300_local_icache_inv_range:
movm [d2,d3,a2],(sp)
mov CHCTR,a0
movhu (a0),d2
btst CHCTR_ICEN,d2
beq mn10300_local_icache_inv_range_reg_end
/* calculate alignsize
*
* alignsize = L1_CACHE_BYTES;
* for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
* alignsize <<= 1;
* }
* d2 = alignsize;
*/
mov L1_CACHE_BYTES,d2
sub d0,d1,d3
add -1,d3
lsr L1_CACHE_SHIFT,d3
beq 2f
1:
add d2,d2
lsr 1,d3
bne 1b
2:
/* a1 = end */
mov d1,a1
LOCAL_CLI_SAVE(d3)
mov ICIVCR,a0
/* wait for busy bit of area invalidation */
setlb
mov (a0),d1
btst ICIVCR_ICIVBSY,d1
lne
/* set mask
*
* mask = ~(alignsize-1);
* ICIVMR = mask;
*/
mov d2,d1
add -1,d1
not d1
mov d1,(ICIVMR)
/* a2 = mask & start */
and d1,d0,a2
icivloop:
/* area invalidate
*
* ICIVCR = (mask & start) | ICIVCR_ICI
*/
mov a2,d0
or ICIVCR_ICI,d0
mov d0,(a0)
/* wait for busy bit of area invalidation */
setlb
mov (a0),d1
btst ICIVCR_ICIVBSY,d1
lne
/* check invalidating of end address
*
* a2 = a2 + alignsize
* if (a2 < end) {
* goto icivloop;
* } */
add d2,a2
cmp a1,a2
bns icivloop
LOCAL_IRQ_RESTORE(d3)
mn10300_local_icache_inv_range_reg_end:
ret [d2,d3,a2],12
.size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
.size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
.size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2


@@ -0,0 +1,276 @@
/* MN10300 CPU core caching routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
#if mn10300_local_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif
.am33_2
.globl mn10300_local_icache_inv_page
.globl mn10300_local_icache_inv_range
.globl mn10300_local_icache_inv_range2
mn10300_local_icache_inv_page = mn10300_local_icache_inv
mn10300_local_icache_inv_range = mn10300_local_icache_inv
mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
#ifndef CONFIG_SMP
.globl mn10300_icache_inv
.globl mn10300_icache_inv_page
.globl mn10300_icache_inv_range
.globl mn10300_icache_inv_range2
.globl mn10300_dcache_inv
.globl mn10300_dcache_inv_page
.globl mn10300_dcache_inv_range
.globl mn10300_dcache_inv_range2
mn10300_icache_inv = mn10300_local_icache_inv
mn10300_icache_inv_page = mn10300_local_icache_inv_page
mn10300_icache_inv_range = mn10300_local_icache_inv_range
mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
mn10300_dcache_inv = mn10300_local_dcache_inv
mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
#endif /* !CONFIG_SMP */
###############################################################################
#
# void mn10300_local_icache_inv(void)
# Invalidate the entire icache
#
###############################################################################
ALIGN
.globl mn10300_local_icache_inv
.type mn10300_local_icache_inv,@function
mn10300_local_icache_inv:
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_end
invalidate_icache 1
mn10300_local_icache_inv_end:
ret [],0
.size mn10300_local_icache_inv,.-mn10300_local_icache_inv
###############################################################################
#
# void mn10300_local_dcache_inv(void)
# Invalidate the entire dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_inv
.type mn10300_local_dcache_inv,@function
mn10300_local_dcache_inv:
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_inv_end
invalidate_dcache 1
mn10300_local_dcache_inv_end:
ret [],0
.size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
###############################################################################
#
# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_dcache_inv_page(unsigned long start)
# Invalidate a range of addresses on a page in the dcache
#
###############################################################################
ALIGN
.globl mn10300_local_dcache_inv_page
.globl mn10300_local_dcache_inv_range
.globl mn10300_local_dcache_inv_range2
.type mn10300_local_dcache_inv_page,@function
.type mn10300_local_dcache_inv_range,@function
.type mn10300_local_dcache_inv_range2,@function
mn10300_local_dcache_inv_page:
and ~(PAGE_SIZE-1),d0
mov PAGE_SIZE,d1
mn10300_local_dcache_inv_range2:
add d0,d1
mn10300_local_dcache_inv_range:
# If we are in writeback mode we check the start and end alignments,
# and if they're not cacheline-aligned, we must flush any bits outside
# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
btst ~L1_CACHE_TAG_MASK,d0
bne 1f
btst ~L1_CACHE_TAG_MASK,d1
beq 2f
1:
bra mn10300_local_dcache_flush_inv_range
2:
#endif /* CONFIG_MN10300_CACHE_WBACK */
movm [d2,d3,a2],(sp)
mov CHCTR,a2
movhu (a2),d2
btst CHCTR_DCEN,d2
beq mn10300_local_dcache_inv_range_end
#ifndef CONFIG_MN10300_CACHE_WBACK
and L1_CACHE_TAG_MASK,d0 # round start addr down
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1
#endif /* !CONFIG_MN10300_CACHE_WBACK */
mov d0,a1
clr d2 # we're going to clear tag RAM
# entries
# read the tags from the tag RAM, and if they indicate a valid dirty
# cache line then invalidate that line
mov DCACHE_TAG(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting dcache tag RAM
# access address
sub a1,d1
lsr L1_CACHE_SHIFT,d1 # total number of entries to
# examine
and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
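# (each tag below is compared by XORing against this base and shifting
#  the low 12 bits away, so only the tag's address bits are tested)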
mn10300_local_dcache_inv_range_outer_loop:
LOCAL_CLI_SAVE(d3)
# disable the dcache
movhu (a2),d0
and ~CHCTR_DCEN,d0
movhu d0,(a2)
# and wait for it to calm down
setlb
movhu (a2),d0
btst CHCTR_DCBUSY,d0
lne
mn10300_local_dcache_inv_range_loop:
# process the way 0 slot
mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline
# is not valid
xor a1,d0
lsr 12,d0
bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_0:
# process the way 1 slot
mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline
# is not valid
xor a1,d0
lsr 12,d0
bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_1:
# process the way 2 slot
mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline
# is not valid
xor a1,d0
lsr 12,d0
bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_2:
# process the way 3 slot
mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
btst L1_CACHE_TAG_VALID,d0
beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline
# is not valid
xor a1,d0
lsr 12,d0
bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
mn10300_local_dcache_inv_range_skip_3:
# approx every N steps we re-enable the cache and see if there are any
# interrupts to be processed
# we also break out if we've reached the end of the loop
# (the bottom nibble of the count is zero in both cases)
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
and ~L1_CACHE_WAYDISP,a0
add -1,d1
btst mn10300_local_dcache_inv_range_intr_interval,d1
bne mn10300_local_dcache_inv_range_loop
# wait for the cache to finish what it's doing
setlb
movhu (a2),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
or CHCTR_DCEN,d0
movhu d0,(a2)
movhu (a2),d0
# re-enable interrupts
# - we don't bother with delay NOPs as we'll have enough instructions
# before we disable interrupts again to give the interrupts a chance
# to happen
LOCAL_IRQ_RESTORE(d3)
# go around again if the counter hasn't yet reached zero
add 0,d1
bne mn10300_local_dcache_inv_range_outer_loop
mn10300_local_dcache_inv_range_end:
ret [d2,d3,a2],12
.size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
.size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
.size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2


@@ -0,0 +1,129 @@
/* Invalidate icache when dcache doesn't need invalidation as it's in
* write-through mode
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include "cache-smp.h"
/**
* flush_icache_page_range - Invalidate the icache for part of a single page
* @start: The starting virtual address of the page part.
* @end: The ending virtual address of the page part.
*
* Invalidate the icache for part of a single page, as determined by the
* virtual addresses given. The page must be in the paged area. The dcache is
* not flushed as the cache must be in write-through mode to get here.
*/
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
unsigned long addr, size, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
/* work out how much of the page to flush */
off = start & ~PAGE_MASK;
size = end - start;
/* get the physical address the page is mapped to from the page
* tables */
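/* (as in cache-flush-icache.c: the walk uses current->mm, so only
 * addresses mapped in the current process can be resolved) */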
pgd = pgd_offset(current->mm, start);
if (!pgd || !pgd_val(*pgd))
return;
pud = pud_offset(pgd, start);
if (!pud || !pud_val(*pud))
return;
pmd = pmd_offset(pud, start);
if (!pmd || !pmd_val(*pmd))
return;
ppte = pte_offset_map(pmd, start);
if (!ppte)
return;
pte = *ppte;
pte_unmap(ppte);
if (pte_none(pte))
return;
page = pte_page(pte);
if (!page)
return;
addr = page_to_phys(page);
/* invalidate the icache coverage on that region */
mn10300_local_icache_inv_range2(addr + off, size);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
}
/**
* flush_icache_range - Globally invalidate the icache for a region
* @start: The starting virtual address of the region.
* @end: The ending virtual address of the region.
*
* This is used by the kernel to globally invalidate the icache over a region
* of code it has just written, so that that code can be run on all CPUs in
* the system. The dcache is in write-through mode, so it needs no flushing.
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long start_page, end_page;
unsigned long flags;
flags = smp_lock_cache();
if (end > 0x80000000UL) {
/* addresses above 0xa0000000 do not go through the cache */
if (end > 0xa0000000UL) {
end = 0xa0000000UL;
if (start >= end)
goto done;
}
/* kernel addresses between 0x80000000 and 0x9fffffff do not
* require page tables, so we just map such addresses
* directly */
start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
mn10300_icache_inv_range(start_page, end);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
if (start_page == start)
goto done;
end = start_page;
}
start_page = start & PAGE_MASK;
end_page = (end - 1) & PAGE_MASK;
if (start_page == end_page) {
/* the first and last bytes are on the same page */
flush_icache_page_range(start, end);
} else if (start_page + 1 == end_page) {
/* split over two virtually contiguous pages */
flush_icache_page_range(start, end_page);
flush_icache_page_range(end_page, end);
} else {
/* more than 2 pages; just flush the entire cache */
mn10300_local_icache_inv();
smp_cache_call(SMP_ICACHE_INV, 0, 0);
}
done:
smp_unlock_cache(flags);
}
EXPORT_SYMBOL(flush_icache_range);
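/* Usage sketch (not part of the original file): a typical caller of
 * flush_icache_range().  Code that writes instructions at runtime - a module
 * loader or breakpoint planter, say - must make the new bytes visible to the
 * instruction fetch path before jumping to them.  The helper name and
 * parameters here are hypothetical.
 */
static inline void commit_new_code_sketch(void *buf, const void *insns,
					  size_t len)
{
	memcpy(buf, insns, len);	/* store the new instructions */
	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + len);
}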


@ -0,0 +1,156 @@
/* Functions for global dcache flush when writeback caching in SMP
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "cache-smp.h"
/**
* mn10300_dcache_flush - Globally flush data cache
*
* Flush the data cache on all CPUs.
*/
void mn10300_dcache_flush(void)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush();
smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_page - Globally flush a page of data cache
* @start: The address of the page of memory to be flushed.
*
* Flush a range of addresses in the data cache on all CPUs covering
* the page that includes the given address.
*/
void mn10300_dcache_flush_page(unsigned long start)
{
unsigned long flags;
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_dcache_flush_page(start);
smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_range - Globally flush range of data cache
* @start: The start address of the region to be flushed.
* @end: The end address of the region to be flushed.
*
* Flush a range of addresses in the data cache on all CPUs, between start and
* end-1 inclusive.
*/
void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_range(start, end);
smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_range2 - Globally flush range of data cache
* @start: The start address of the region to be flushed.
* @size: The size of the region to be flushed.
*
* Flush a range of addresses in the data cache on all CPUs, between start and
* start+size-1 inclusive.
*/
void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_range2(start, size);
smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv - Globally flush and invalidate data cache
*
* Flush and invalidate the data cache on all CPUs.
*/
void mn10300_dcache_flush_inv(void)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv();
smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
* cache
* @start: The address of the page of memory to be flushed and invalidated.
*
* Flush and invalidate a range of addresses in the data cache on all CPUs
* covering the page that includes the given address.
*/
void mn10300_dcache_flush_inv_page(unsigned long start)
{
unsigned long flags;
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv_page(start);
smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
* cache
* @start: The start address of the region to be flushed and invalidated.
* @end: The end address of the region to be flushed and invalidated.
*
* Flush and invalidate a range of addresses in the data cache on all CPUs,
* between start and end-1 inclusive.
*/
void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv_range(start, end);
smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
* cache
* @start: The start address of the region to be flushed and invalidated.
* @size: The size of the region to be flushed and invalidated.
*
* Flush and invalidate a range of addresses in the data cache on all CPUs,
* between start and start+size-1 inclusive.
*/
void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv_range2(start, size);
smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
smp_unlock_cache(flags);
}


@ -0,0 +1,153 @@
/* Functions for global i/dcache invalidation when caching in SMP
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "cache-smp.h"
/**
* mn10300_icache_inv - Globally invalidate instruction cache
*
* Invalidate the instruction cache on all CPUs.
*/
void mn10300_icache_inv(void)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_icache_inv();
smp_cache_call(SMP_ICACHE_INV, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_icache_inv_page - Globally invalidate a page of instruction cache
* @start: The address of the page of memory to be invalidated.
*
* Invalidate a range of addresses in the instruction cache on all CPUs
* covering the page that includes the given address.
*/
void mn10300_icache_inv_page(unsigned long start)
{
unsigned long flags;
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_icache_inv_page(start);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_icache_inv_range - Globally invalidate range of instruction cache
* @start: The start address of the region to be invalidated.
* @end: The end address of the region to be invalidated.
*
* Invalidate a range of addresses in the instruction cache on all CPUs,
* between start and end-1 inclusive.
*/
void mn10300_icache_inv_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_icache_inv_range(start, end);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
* @start: The start address of the region to be invalidated.
* @size: The size of the region to be invalidated.
*
* Invalidate a range of addresses in the instruction cache on all CPUs,
* between start and start+size-1 inclusive.
*/
void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_icache_inv_range2(start, size);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_inv - Globally invalidate data cache
*
* Invalidate the data cache on all CPUs.
*/
void mn10300_dcache_inv(void)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_inv();
smp_cache_call(SMP_DCACHE_INV, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_inv_page - Globally invalidate a page of data cache
* @start: The address of the page of memory to be invalidated.
*
* Invalidate a range of addresses in the data cache on all CPUs covering the
* page that includes the given address.
*/
void mn10300_dcache_inv_page(unsigned long start)
{
unsigned long flags;
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_dcache_inv_page(start);
smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_inv_range - Globally invalidate range of data cache
* @start: The start address of the region to be invalidated.
* @end: The end address of the region to be invalidated.
*
* Invalidate a range of addresses in the data cache on all CPUs, between start
* and end-1 inclusive.
*/
void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_inv_range(start, end);
smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_inv_range2 - Globally invalidate range of data cache
* @start: The start address of the region to be invalidated.
* @size: The size of the region to be invalidated.
*
* Invalidate a range of addresses in the data cache on all CPUs, between start
* and start+size-1 inclusive.
*/
void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_inv_range2(start, size);
smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
smp_unlock_cache(flags);
}

arch/mn10300/mm/cache-smp.c

@ -0,0 +1,105 @@
/* SMP global caching code
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include "cache-smp.h"
DEFINE_SPINLOCK(smp_cache_lock);
static unsigned long smp_cache_mask;
static unsigned long smp_cache_start;
static unsigned long smp_cache_end;
static cpumask_t smp_cache_ipi_map; /* Bitmask of cache IPI done CPUs */
/**
* smp_cache_interrupt - Handle IPI request to flush caches.
*
* Handle a request delivered by IPI to flush the current CPU's
* caches. The parameters are stored in smp_cache_*.
*/
void smp_cache_interrupt(void)
{
unsigned long opr_mask = smp_cache_mask;
switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
case SMP_DCACHE_NOP:
break;
case SMP_DCACHE_INV:
mn10300_local_dcache_inv();
break;
case SMP_DCACHE_INV_RANGE:
mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
break;
case SMP_DCACHE_FLUSH:
mn10300_local_dcache_flush();
break;
case SMP_DCACHE_FLUSH_RANGE:
mn10300_local_dcache_flush_range(smp_cache_start,
smp_cache_end);
break;
case SMP_DCACHE_FLUSH_INV:
mn10300_local_dcache_flush_inv();
break;
case SMP_DCACHE_FLUSH_INV_RANGE:
mn10300_local_dcache_flush_inv_range(smp_cache_start,
smp_cache_end);
break;
}
switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
case SMP_ICACHE_NOP:
break;
case SMP_ICACHE_INV:
mn10300_local_icache_inv();
break;
case SMP_ICACHE_INV_RANGE:
mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
break;
}
cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
}
/**
* smp_cache_call - Issue an IPI to request the other CPUs flush caches
* @opr_mask: Cache operation flags
* @start: Start address of request
* @end: End address of request
*
* Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
* above on those other CPUs and then waits for them to finish.
*
* The caller must hold smp_cache_lock.
*/
void smp_cache_call(unsigned long opr_mask,
unsigned long start, unsigned long end)
{
smp_cache_mask = opr_mask;
smp_cache_start = start;
smp_cache_end = end;
cpumask_copy(&smp_cache_ipi_map, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
send_IPI_allbutself(FLUSH_CACHE_IPI);
while (!cpumask_empty(&smp_cache_ipi_map))
/* nothing. lockup detection does not belong here */
mb();
}


@ -0,0 +1,69 @@
/* SMP caching definitions
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
/*
* Operation requests for smp_cache_call().
*
* One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
*/
enum smp_icache_ops {
SMP_ICACHE_NOP = 0x0000,
SMP_ICACHE_INV = 0x0001,
SMP_ICACHE_INV_RANGE = 0x0002,
};
#define SMP_ICACHE_OP_MASK 0x0003
enum smp_dcache_ops {
SMP_DCACHE_NOP = 0x0000,
SMP_DCACHE_INV = 0x0004,
SMP_DCACHE_INV_RANGE = 0x0008,
SMP_DCACHE_FLUSH = 0x000c,
SMP_DCACHE_FLUSH_RANGE = 0x0010,
SMP_DCACHE_FLUSH_INV = 0x0014,
SMP_DCACHE_FLUSH_INV_RANGE = 0x0018,
};
#define SMP_DCACHE_OP_MASK 0x001c
#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
/*
* cache-smp.c
*/
#ifdef CONFIG_SMP
extern spinlock_t smp_cache_lock;
extern void smp_cache_call(unsigned long opr_mask,
unsigned long addr, unsigned long end);
static inline unsigned long smp_lock_cache(void)
__acquires(&smp_cache_lock)
{
unsigned long flags;
spin_lock_irqsave(&smp_cache_lock, flags);
return flags;
}
static inline void smp_unlock_cache(unsigned long flags)
__releases(&smp_cache_lock)
{
spin_unlock_irqrestore(&smp_cache_lock, flags);
}
#else
static inline unsigned long smp_lock_cache(void) { return 0; }
static inline void smp_unlock_cache(unsigned long flags) {}
static inline void smp_cache_call(unsigned long opr_mask,
unsigned long addr, unsigned long end)
{
}
#endif /* CONFIG_SMP */
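/* Combined-operation sketch (an assumption, not in the original header): one
 * icache op and one dcache op may be OR'd into a single IPI, so a global
 * "flush dcache then invalidate icache" over a range needs only one round
 * trip.  The helper name is hypothetical, the mn10300_local_*() routines are
 * assumed to come from <asm/cacheflush.h>, and the lock/call pattern follows
 * the wrappers in cache-smp-flush.c and cache-smp-inv.c.
 */
static inline void idcache_flush_inv_range_sketch(unsigned long start,
						  unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_flush_range(start, end);
	mn10300_local_icache_inv_range(start, end);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
	smp_unlock_cache(flags);
}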

arch/mn10300/mm/cache.c

@ -0,0 +1,54 @@
/* MN10300 Cache flushing routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include "cache-smp.h"
EXPORT_SYMBOL(mn10300_icache_inv);
EXPORT_SYMBOL(mn10300_icache_inv_range);
EXPORT_SYMBOL(mn10300_icache_inv_range2);
EXPORT_SYMBOL(mn10300_icache_inv_page);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_inv_page);
#ifdef CONFIG_MN10300_CACHE_WBACK
EXPORT_SYMBOL(mn10300_dcache_flush);
EXPORT_SYMBOL(mn10300_dcache_flush_inv);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_page);
EXPORT_SYMBOL(mn10300_dcache_flush_range);
EXPORT_SYMBOL(mn10300_dcache_flush_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
{
if (end < start)
return -EINVAL;
flush_icache_range(start, end);
return 0;
}
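/* Userspace view (illustrative only, not part of the kernel source): a JIT or
 * other self-modifying program would invoke the syscall above over the region
 * it just wrote.  __NR_cacheflush stands in for the arch-specific syscall
 * number and is an assumption here.
 */
#if 0	/* userspace example */
#include <unistd.h>
#include <sys/syscall.h>

static int cacheflush(unsigned long start, unsigned long end)
{
	return syscall(__NR_cacheflush, start, end);
}
#endif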

arch/mn10300/mm/cache.inc

@ -0,0 +1,133 @@
/* MN10300 CPU core caching macros -*- asm -*-
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
###############################################################################
#
# Invalidate the instruction cache.
# A0: Should hold CHCTR
# D0: Should have been read from CHCTR
# D1: Will be clobbered
#
# On some cores it is necessary to disable the icache whilst we do this.
#
###############################################################################
.macro invalidate_icache,disable_irq
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
.if \disable_irq
# don't want an interrupt routine seeing a disabled cache
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
nop
.endif
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
or CHCTR_ICEN,d0
movhu d0,(a0)
movhu (a0),d0
.if \disable_irq
LOCAL_IRQ_RESTORE(d1)
.endif
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
.endm
###############################################################################
#
# Invalidate the data cache.
# A0: Should hold CHCTR
# D0: Should have been read from CHCTR
# D1: Will be clobbered
#
# On some cores it is necessary to disable the dcache whilst we do this.
#
###############################################################################
.macro invalidate_dcache,disable_irq
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
.if \disable_irq
# don't want an interrupt routine seeing a disabled cache
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
nop
.endif
# disable the dcache
and ~CHCTR_DCEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
# wait for the cache to finish
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
or CHCTR_DCEN,d0
movhu d0,(a0)
movhu (a0),d0
.if \disable_irq
LOCAL_IRQ_RESTORE(d1)
.endif
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
.endm


@ -0,0 +1,76 @@
/* MN10300 Dynamic DMA mapping support
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* Derived from: arch/i386/kernel/pci-dma.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <asm/io.h>
static unsigned long pci_sram_allocated = 0xbc000000;
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
{
unsigned long addr;
void *ret;
pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
dev ? dev_name(dev) : "?", size, gfp);
if (0xbe000000 - pci_sram_allocated >= size) {
size = (size + 255) & ~255;
addr = pci_sram_allocated;
pci_sram_allocated += size;
ret = (void *) addr;
goto done;
}
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
gfp |= GFP_DMA;
addr = __get_free_pages(gfp, get_order(size));
if (!addr)
return NULL;
/* map the coherent memory through the uncached memory window */
ret = (void *) (addr | 0x20000000);
/* fill the memory with obvious rubbish */
memset((void *) addr, 0xfb, size);
/* write back and evict all cache lines covering this region */
mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);
done:
*dma_handle = virt_to_bus((void *) addr);
printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
if (addr >= 0x9c000000)
return;
free_pages(addr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
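/* Driver-side sketch (hypothetical, not part of the original file): a device
 * ring buffer allocated through the routine above.  Stores to the returned
 * pointer are visible to the device at *handle without explicit flushing,
 * since the mapping is uncached (or lives in PCI SRAM).
 */
static inline void *alloc_ring_sketch(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
}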

arch/mn10300/mm/extable.c

@ -0,0 +1,26 @@
/* MN10300 In-kernel exception handling
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return 1;
}
return 0;
}

arch/mn10300/mm/fault.c

@ -0,0 +1,414 @@
/* MN10300 MMU Fault handler
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/cpu-regs.h>
#include <asm/debugger.h>
#include <asm/gdb-stub.h>
/*
* Unlock any spinlocks which will prevent us from getting the
* message out
*/
void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
unblank_screen();
#endif
oops_in_progress = 0;
/*
* OK, the message is on the console. Now we call printk()
* without oops_in_progress set so that printk will give klogd
* a poke. Hold onto your hats...
*/
console_loglevel = 15; /* NMI oopser may have shut the console
* up */
printk(" ");
console_loglevel = loglevel_save;
}
}
void do_BUG(const char *file, int line)
{
bust_spinlocks(1);
printk(KERN_EMERG "------------[ cut here ]------------\n");
printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
}
#if 0
static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pgd = pgdir + __pgd_offset(address);
printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
pgd, (long long) pgd_val(*pgd));
if (!pgd_present(*pgd)) {
printk(KERN_DEBUG "... pgd not present!\n");
return;
}
pmd = pmd_offset(pgd, address);
printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
pmd, (long long)pmd_val(*pmd));
if (!pmd_present(*pmd)) {
printk(KERN_DEBUG "... pmd not present!\n");
return;
}
pte = pte_offset(pmd, address);
printk(KERN_DEBUG "pte entry %p: %016Lx\n",
pte, (long long) pte_val(*pte));
if (!pte_present(*pte))
printk(KERN_DEBUG "... pte not present!\n");
}
#endif
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* fault_code:
* - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
* - MSW: 0 if data access, 1 if instruction access
* - bit 0: TLB miss flag
* - bit 1: initial write
* - bit 2: page invalid
* - bit 3: protection violation
* - bit 4: accessor (0=user 1=kernel)
* - bit 5: 0=read 1=write
* - bit 6-8: page protection spec
* - bit 9: illegal address
* - bit 16: 0=data 1=ins
*
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long page;
siginfo_t info;
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
#ifdef CONFIG_GDBSTUB
/* handle GDB stub causing a fault */
if (gdbstub_busy) {
gdbstub_exception(regs, TBR & TBR_INT_CODE);
return;
}
#endif
#if 0
printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
regs,
fault_code & 0x10000 ? "ins" : "data",
fault_code & 0xffff, address);
#endif
tsk = current;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* and that the fault was a page not present (invalid) error
*/
if (address >= VMALLOC_START && address < VMALLOC_END &&
(fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
(fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
)
goto vmalloc_fault;
mm = tsk->mm;
info.si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
flags |= FAULT_FLAG_USER;
retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
/* accessing the stack below the stack pointer is always a
* bug */
if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
#if 0
printk(KERN_WARNING
"[%d] ### Access below stack @%lx (sp=%lx)\n",
current->pid, address, regs->sp);
printk(KERN_WARNING
"vma [%08x - %08x]\n",
vma->vm_start, vma->vm_end);
show_registers(regs);
printk(KERN_WARNING
"[%d] ### Code: [%08lx]"
" %02x %02x %02x %02x %02x %02x %02x %02x\n",
current->pid,
regs->pc,
((u8 *) regs->pc)[0],
((u8 *) regs->pc)[1],
((u8 *) regs->pc)[2],
((u8 *) regs->pc)[3],
((u8 *) regs->pc)[4],
((u8 *) regs->pc)[5],
((u8 *) regs->pc)[6],
((u8 *) regs->pc)[7]
);
#endif
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
default: /* 3: write, present */
case MMUFCR_xFC_TYPE_WRITE:
#ifdef TEST_VERIFY_AREA
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
#endif
/* write to absent page */
case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
break;
/* read from protected page */
case MMUFCR_xFC_TYPE_READ:
goto bad_area;
/* read from absent page */
case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
break;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
/* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
/* User mode accesses just cause a SIGSEGV */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void *)address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
if (address < PAGE_SIZE)
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT
"Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(" printing pc:\n");
printk(KERN_ALERT "%08lx\n", regs->pc);
debugger_intercept(fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR,
SIGSEGV, SEGV_ACCERR, regs);
page = PTBR;
page = ((unsigned long *) __va(page))[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & 1) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
die("Oops", regs, fault_code);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
pagefault_out_of_memory();
return;
}
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
goto no_context;
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
goto no_context;
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
pgd = (pgd_t *) PTBR + index;
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
}
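/* Decoding sketch (an assumption, not in the original file): helpers reading
 * the fault_code bits documented above do_page_fault(), equivalent to the
 * tests made inline in the handler.
 */
static inline int fault_was_write_sketch(unsigned long fault_code)
{
	/* bit 5 of the MMUFCR value: 0=read 1=write */
	return (fault_code & MMUFCR_xFC_TYPE) == MMUFCR_xFC_TYPE_WRITE;
}

static inline int fault_from_userspace_sketch(unsigned long fault_code)
{
	/* bit 4 of the MMUFCR value: 0=user 1=kernel */
	return (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR;
}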

arch/mn10300/mm/init.c

@ -0,0 +1,136 @@
/* MN10300 Memory management initialisation
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/tlb.h>
#include <asm/sections.h>
unsigned long highstart_pfn, highend_pfn;
#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
static struct vm_struct user_iomap_vm;
#endif
/*
* set up paging
*/
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0,};
pte_t *ppte;
int loop;
/* main kernel space -> RAM mapping is handled as 1:1 transparent by
* the MMU */
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));
/* load the VMALLOC area PTE table addresses into the kernel PGD */
ppte = kernel_vmalloc_ptes;
for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
loop++
) {
set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
ppte += PAGE_SIZE / sizeof(pte_t);
}
/* declare the sizes of the RAM zones (only use the normal zone) */
zones_size[ZONE_NORMAL] =
contig_page_data.bdata->node_low_pfn -
contig_page_data.bdata->node_min_pfn;
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
/* The Atomic Operation Unit registers need to be mapped to userspace
* for all processes. The following uses vm_area_register_early() to
* reserve the first page of the vmalloc area and sets the pte for that
* page.
*
* glibc hardcodes this virtual mapping, so we're pretty much stuck with
* it from now on.
*/
user_iomap_vm.flags = VM_USERMAP;
user_iomap_vm.size = 1 << PAGE_SHIFT;
vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
ppte = kernel_vmalloc_ptes;
set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
PAGE_USERIO));
#endif
local_flush_tlb_all();
}
/*
* transfer all the memory from the bootmem allocator to the runtime allocator
*/
void __init mem_init(void)
{
BUG_ON(!mem_map);
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
max_mapnr = MAX_LOW_PFN - START_PFN;
high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
free_all_bootmem();
mem_init_print_info(NULL);
}
/*
* recycle memory containing stuff only required for initialisation
*/
void free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
}
/*
* dispose of the memory on which the initial ramdisk resided
*/
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif


@ -0,0 +1,966 @@
/* MN10300 Misalignment fixup handler
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/cpu-regs.h>
#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/gdb-stub.h>
#include <asm/asm-offsets.h>
#if 0
#define kdebug(FMT, ...) printk(KERN_DEBUG "MISALIGN: "FMT"\n", ##__VA_ARGS__)
#else
#define kdebug(FMT, ...) do {} while (0)
#endif
static int misalignment_addr(unsigned long *registers, unsigned long sp,
unsigned params, unsigned opcode,
unsigned long disp,
void **_address, unsigned long **_postinc,
unsigned long *_inc);
static int misalignment_reg(unsigned long *registers, unsigned params,
unsigned opcode, unsigned long disp,
unsigned long **_register);
static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode);
static const unsigned Dreg_index[] = {
REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};
static const unsigned Areg_index[] = {
REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2
};
static const unsigned Rreg_index[] = {
REG_E0 >> 2, REG_E1 >> 2, REG_E2 >> 2, REG_E3 >> 2,
REG_E4 >> 2, REG_E5 >> 2, REG_E6 >> 2, REG_E7 >> 2,
REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2,
REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};
enum format_id {
FMT_S0,
FMT_S1,
FMT_S2,
FMT_S4,
FMT_D0,
FMT_D1,
FMT_D2,
FMT_D4,
FMT_D6,
FMT_D7,
FMT_D8,
FMT_D9,
FMT_D10,
};
static const struct {
u_int8_t opsz, dispsz;
} format_tbl[16] = {
[FMT_S0] = { 8, 0 },
[FMT_S1] = { 8, 8 },
[FMT_S2] = { 8, 16 },
[FMT_S4] = { 8, 32 },
[FMT_D0] = { 16, 0 },
[FMT_D1] = { 16, 8 },
[FMT_D2] = { 16, 16 },
[FMT_D4] = { 16, 32 },
[FMT_D6] = { 24, 0 },
[FMT_D7] = { 24, 8 },
[FMT_D8] = { 24, 24 },
[FMT_D9] = { 24, 32 },
[FMT_D10] = { 32, 0 },
};
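/* Worked example (a sketch, not in the original file): the byte length of an
 * instruction in a given format is (opsz + dispsz) >> 3, which is exactly how
 * the handler advances regs->pc further down; e.g. FMT_D4 decodes to
 * (16 + 32) >> 3 == 6 bytes.
 */
static inline unsigned insn_length_sketch(enum format_id fmt)
{
	return (format_tbl[fmt].opsz + format_tbl[fmt].dispsz) >> 3;
}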
enum value_id {
DM0, /* data reg in opcode in bits 0-1 */
DM1, /* data reg in opcode in bits 2-3 */
DM2, /* data reg in opcode in bits 4-5 */
AM0, /* addr reg in opcode in bits 0-1 */
AM1, /* addr reg in opcode in bits 2-3 */
AM2, /* addr reg in opcode in bits 4-5 */
RM0, /* reg in opcode in bits 0-3 */
RM1, /* reg in opcode in bits 2-5 */
RM2, /* reg in opcode in bits 4-7 */
RM4, /* reg in opcode in bits 8-11 */
RM6, /* reg in opcode in bits 12-15 */
RD0, /* reg in displacement in bits 0-3 */
RD2, /* reg in displacement in bits 4-7 */
SP, /* stack pointer */
SD8, /* 8-bit signed displacement */
SD16, /* 16-bit signed displacement */
SD24, /* 24-bit signed displacement */
SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */
SIMM8, /* 8-bit signed immediate */
IMM8, /* 8-bit unsigned immediate */
IMM16, /* 16-bit unsigned immediate */
IMM24, /* 24-bit unsigned immediate */
IMM32, /* 32-bit unsigned immediate */
IMM32_HIGH8, /* 32-bit unsigned immediate, LSB in opcode */
IMM32_MEM, /* 32-bit unsigned displacement */
IMM32_HIGH8_MEM, /* 32-bit unsigned displacement, LSB in opcode */
DN0 = DM0,
DN1 = DM1,
DN2 = DM2,
AN0 = AM0,
AN1 = AM1,
AN2 = AM2,
RN0 = RM0,
RN1 = RM1,
RN2 = RM2,
RN4 = RM4,
RN6 = RM6,
DI = DM1,
RI = RM2,
};
struct mn10300_opcode {
const char name[8];
u_int32_t opcode;
u_int32_t opmask;
unsigned exclusion;
enum format_id format;
unsigned cpu_mask;
#define AM33 330
unsigned params[2];
#define MEM(ADDR) (0x80000000 | (ADDR))
#define MEM2(ADDR1, ADDR2) (0x80000000 | (ADDR1) << 8 | (ADDR2))
#define MEMINC(ADDR) (0x81000000 | (ADDR))
#define MEMINC2(ADDR, INC) (0x81000000 | (ADDR) << 8 | (INC))
};
/* LIBOPCODES EXCERPT
Assemble Matsushita MN10300 instructions.
Copyright 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public Licence as published by
the Free Software Foundation; either version 2 of the Licence, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public Licence for more details.
You should have received a copy of the GNU General Public Licence
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
static const struct mn10300_opcode mn10300_opcodes[] = {
{ "mov", 0x4200, 0xf300, 0, FMT_S1, 0, {DM1, MEM2(IMM8, SP)}},
{ "mov", 0x4300, 0xf300, 0, FMT_S1, 0, {AM1, MEM2(IMM8, SP)}},
{ "mov", 0x5800, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), DN0}},
{ "mov", 0x5c00, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), AN0}},
{ "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}},
{ "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}},
{ "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}},
{ "mov", 0xf010, 0xfff0, 0, FMT_D0, 0, {AM1, MEM(AN0)}},
{ "mov", 0xf300, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}},
{ "mov", 0xf340, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}},
{ "mov", 0xf380, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), AN2}},
{ "mov", 0xf3c0, 0xffc0, 0, FMT_D0, 0, {AM2, MEM2(DI, AN0)}},
{ "mov", 0xf80000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}},
{ "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}},
{ "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}},
{ "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}},
{ "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}},
{ "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}},
{ "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
{ "mov", 0xf97a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
{ "mov", 0xfa000000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}},
{ "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}},
{ "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}},
{ "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}},
{ "mov", 0xfa900000, 0xfff30000, 0, FMT_D2, 0, {AM1, MEM2(IMM16, SP)}},
{ "mov", 0xfa910000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}},
{ "mov", 0xfab00000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), AN0}},
{ "mov", 0xfab40000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}},
{ "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
{ "mov", 0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
{ "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
{ "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
{ "mov", 0xfb8a0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}},
{ "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
{ "mov", 0xfb9a0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}},
{ "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
{ "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}},
{ "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}},
{ "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}},
{ "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}},
{ "mov", 0xfc800000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM(IMM32_MEM)}},
{ "mov", 0xfc810000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}},
{ "mov", 0xfc900000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM2(IMM32, SP)}},
{ "mov", 0xfc910000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}},
{ "mov", 0xfca00000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), AN0}},
{ "mov", 0xfca40000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}},
{ "mov", 0xfcb00000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), AN0}},
{ "mov", 0xfcb40000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}},
{ "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
{ "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
{ "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
{ "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
{ "mov", 0xfd8a0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}},
{ "mov", 0xfd9a0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}},
{ "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov", 0xfe0e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}},
{ "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov", 0xfe1e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}},
{ "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
{ "mov", 0xfe8a0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}},
{ "mov", 0xfe9a0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}},
{ "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}},
{ "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}},
{ "movhu", 0xf480, 0xffc0, 0, FMT_D0, 0, {MEM2(DI, AM0), DN2}},
{ "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}},
{ "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}},
{ "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}},
{ "movhu", 0xf89300, 0xfff300, 0, FMT_D1, 0, {DM1, MEM2(IMM8, SP)}},
{ "movhu", 0xf8bc00, 0xfffc00, 0, FMT_D1, 0, {MEM2(IMM8, SP), DN0}},
{ "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}},
{ "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}},
{ "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}},
{ "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}},
{ "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}},
{ "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}},
{ "movhu", 0xfa930000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}},
{ "movhu", 0xfabc0000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}},
{ "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}},
{ "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}},
{ "movhu", 0xfbca0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}},
{ "movhu", 0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}},
{ "movhu", 0xfbda0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}},
{ "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}},
{ "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}},
{ "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}},
{ "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}},
{ "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}},
{ "movhu", 0xfc830000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}},
{ "movhu", 0xfc930000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}},
{ "movhu", 0xfcac0000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}},
{ "movhu", 0xfcbc0000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}},
{ "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}},
{ "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}},
{ "movhu", 0xfdca0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}},
{ "movhu", 0xfdda0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}},
{ "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}},
{ "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}},
{ "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}},
{ "movhu", 0xfe4e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}},
{ "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "movhu", 0xfe5e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}},
{ "movhu", 0xfeca0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}},
{ "movhu", 0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}},
{ "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
{ "mov_llt", 0xf7e00000, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lgt", 0xf7e00001, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lge", 0xf7e00002, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lle", 0xf7e00003, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lcs", 0xf7e00004, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lhi", 0xf7e00005, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lcc", 0xf7e00006, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lls", 0xf7e00007, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_leq", 0xf7e00008, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}},
{ "", 0, 0, 0, 0, 0, {0}},
};
/*
* fix up misalignment problems where possible
*/
asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
{
const struct exception_table_entry *fixup;
const struct mn10300_opcode *pop;
unsigned long *registers = (unsigned long *) regs;
unsigned long data, *store, *postinc, disp, inc, sp;
mm_segment_t seg;
siginfo_t info;
uint32_t opcode, noc, xo, xm;
uint8_t *pc, byte, datasz;
void *address;
unsigned tmp, npop, dispsz, loop;
/* we don't fix up userspace misalignment faults */
if (user_mode(regs))
goto bus_error;
sp = (unsigned long) regs + sizeof(*regs);
kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp);
if (regs->epsw & EPSW_IE)
asm volatile("or %0,epsw" : : "i"(EPSW_IE));
seg = get_fs();
set_fs(KERNEL_DS);
fixup = search_exception_tables(regs->pc);
/* first thing to do is to match the opcode */
pc = (u_int8_t *) regs->pc;
if (__get_user(byte, pc) != 0)
goto fetch_error;
opcode = byte;
noc = 8;
for (pop = mn10300_opcodes; pop->name[0]; pop++) {
npop = ilog2(pop->opcode | pop->opmask);
if (npop <= 0 || npop > 31)
continue;
npop = (npop + 8) & ~7;
got_more_bits:
if (npop == noc) {
if ((opcode & pop->opmask) == pop->opcode)
goto found_opcode;
} else if (npop > noc) {
xo = pop->opcode >> (npop - noc);
xm = pop->opmask >> (npop - noc);
if ((opcode & xm) != xo)
continue;
/* we've got a partial match (an exact match on the
* first N bytes), so we need to get some more data */
pc++;
if (__get_user(byte, pc) != 0)
goto fetch_error;
opcode = opcode << 8 | byte;
noc += 8;
goto got_more_bits;
} else {
/* there's already been a partial match as long as the
* complete match we're now considering, so this one
* shouldn't match */
continue;
}
}
/* didn't manage to find a fixup */
printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n",
regs->pc, opcode);
failed:
set_fs(seg);
if (die_if_no_fixup("misalignment error", regs, code))
return;
bus_error:
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGBUS, &info, current);
return;
/* error reading opcodes */
fetch_error:
printk(KERN_CRIT
"MISALIGN: %p: fault whilst reading instruction data\n",
pc);
goto failed;
bad_addr_mode:
printk(KERN_CRIT
"MISALIGN: %lx: unsupported addressing mode %x\n",
regs->pc, opcode);
goto failed;
bad_reg_mode:
printk(KERN_CRIT
"MISALIGN: %lx: unsupported register mode %x\n",
regs->pc, opcode);
goto failed;
unsupported_instruction:
printk(KERN_CRIT
"MISALIGN: %lx: unsupported instruction %x (%s)\n",
regs->pc, opcode, pop->name);
goto failed;
transfer_failed:
set_fs(seg);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
if (die_if_no_fixup("misalignment fixup", regs, code))
return;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = 0;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGSEGV, &info, current);
return;
/* we matched the opcode */
found_opcode:
kdebug("%lx: %x==%x { %x, %x }",
regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
tmp = format_tbl[pop->format].opsz;
BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
if (tmp < noc) {
tmp = noc - tmp;
opcode >>= tmp;
pc -= tmp >> 3;
}
/* grab the extra displacement (note it's LSB first) */
disp = 0;
dispsz = format_tbl[pop->format].dispsz;
for (loop = 0; loop < dispsz; loop += 8) {
pc++;
if (__get_user(byte, pc) != 0)
goto fetch_error;
disp |= byte << loop;
kdebug("{%p} disp[%02x]=%02x", pc, loop, byte);
}
kdebug("disp=%lx", disp);
set_fs(KERNEL_XDS);
if (fixup)
set_fs(seg);
tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000;
if (!tmp) {
printk(KERN_CRIT
"MISALIGN: %lx: insn not move to/from memory %x\n",
regs->pc, opcode);
goto failed;
}
/* determine the data transfer size of the move */
if (pop->name[3] == 0 || /* "mov" */
pop->name[4] == 'l') /* mov_lcc */
inc = datasz = 4;
else if (pop->name[3] == 'h') /* movhu */
inc = datasz = 2;
else
goto unsupported_instruction;
if (pop->params[0] & 0x80000000) {
/* move memory to register */
if (!misalignment_addr(registers, sp,
pop->params[0], opcode, disp,
&address, &postinc, &inc))
goto bad_addr_mode;
if (!misalignment_reg(registers, pop->params[1], opcode, disp,
&store))
goto bad_reg_mode;
kdebug("mov%u (%p),DARn", datasz, address);
if (copy_from_user(&data, (void *) address, datasz) != 0)
goto transfer_failed;
if (pop->params[0] & 0x1000000) {
kdebug("inc=%lx", inc);
*postinc += inc;
}
*store = data;
kdebug("loaded %lx", data);
} else {
/* move register to memory */
if (!misalignment_reg(registers, pop->params[0], opcode, disp,
&store))
goto bad_reg_mode;
if (!misalignment_addr(registers, sp,
pop->params[1], opcode, disp,
&address, &postinc, &inc))
goto bad_addr_mode;
data = *store;
kdebug("mov%u %lx,(%p)", datasz, data, address);
if (copy_to_user((void *) address, &data, datasz) != 0)
goto transfer_failed;
if (pop->params[1] & 0x1000000)
*postinc += inc;
}
tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz;
regs->pc += tmp >> 3;
/* handle MOV_Lcc, which are currently the only FMT_D10 insns that
* access memory */
if (pop->format == FMT_D10)
misalignment_MOV_Lcc(regs, opcode);
set_fs(seg);
}
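/* Standalone sketch (an assumption, not kernel code) of the variable-length
 * match loop above: widen the fetched opcode one byte at a time until it is
 * as wide as the candidate pattern, rejecting as soon as a prefix disagrees
 * under the shifted mask.  Unlike the real handler this reads directly
 * through the pointer rather than via __get_user().
 */
static int match_opcode_sketch(const u8 *pc, u32 pattern, u32 mask)
{
	u32 opcode = *pc++;
	unsigned noc = 8, npop;

	if (!(pattern | mask))
		return 0;	/* table terminator, nothing to match */
	npop = ((unsigned) ilog2(pattern | mask) + 8) & ~7;

	while (noc < npop) {
		if ((opcode & (mask >> (npop - noc))) !=
		    (pattern >> (npop - noc)))
			return 0;	/* prefix mismatch */
		opcode = opcode << 8 | *pc++;	/* fetch another byte */
		noc += 8;
	}
	return (opcode & mask) == pattern;
}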
/*
* determine the address that was being accessed
*/
static int misalignment_addr(unsigned long *registers, unsigned long sp,
unsigned params, unsigned opcode,
unsigned long disp,
void **_address, unsigned long **_postinc,
unsigned long *_inc)
{
unsigned long *postinc = NULL, address = 0, tmp;
if (!(params & 0x1000000)) {
kdebug("noinc");
*_inc = 0;
_inc = NULL;
}
params &= 0x00ffffff;
do {
switch (params & 0xff) {
case DM0:
postinc = &registers[Dreg_index[opcode & 0x03]];
address += *postinc;
break;
case DM1:
postinc = &registers[Dreg_index[opcode >> 2 & 0x03]];
address += *postinc;
break;
case DM2:
postinc = &registers[Dreg_index[opcode >> 4 & 0x03]];
address += *postinc;
break;
case AM0:
postinc = &registers[Areg_index[opcode & 0x03]];
address += *postinc;
break;
case AM1:
postinc = &registers[Areg_index[opcode >> 2 & 0x03]];
address += *postinc;
break;
case AM2:
postinc = &registers[Areg_index[opcode >> 4 & 0x03]];
address += *postinc;
break;
case RM0:
postinc = &registers[Rreg_index[opcode & 0x0f]];
address += *postinc;
break;
case RM1:
postinc = &registers[Rreg_index[opcode >> 2 & 0x0f]];
address += *postinc;
break;
case RM2:
postinc = &registers[Rreg_index[opcode >> 4 & 0x0f]];
address += *postinc;
break;
case RM4:
postinc = &registers[Rreg_index[opcode >> 8 & 0x0f]];
address += *postinc;
break;
case RM6:
postinc = &registers[Rreg_index[opcode >> 12 & 0x0f]];
address += *postinc;
break;
case RD0:
postinc = &registers[Rreg_index[disp & 0x0f]];
address += *postinc;
break;
case RD2:
postinc = &registers[Rreg_index[disp >> 4 & 0x0f]];
address += *postinc;
break;
case SP:
address += sp;
break;
/* displacements are either to be added to the address
* before use, or, in the case of post-inc addressing,
* to be added into the base register after use */
case SD8:
case SIMM8:
disp = (long) (int8_t) (disp & 0xff);
goto displace_or_inc;
case SD16:
disp = (long) (int16_t) (disp & 0xffff);
goto displace_or_inc;
case SD24:
tmp = disp << 8;
asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc");
disp = (long) tmp;
goto displace_or_inc;
case SIMM4_2:
tmp = opcode >> 4 & 0x0f;
tmp <<= 28;
asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc");
disp = (long) tmp;
goto displace_or_inc;
case IMM8:
disp &= 0x000000ff;
goto displace_or_inc;
case IMM16:
disp &= 0x0000ffff;
goto displace_or_inc;
case IMM24:
disp &= 0x00ffffff;
goto displace_or_inc;
case IMM32:
case IMM32_MEM:
case IMM32_HIGH8:
case IMM32_HIGH8_MEM:
displace_or_inc:
kdebug("%s %lx", _inc ? "incr" : "disp", disp);
if (!_inc)
address += disp;
else
*_inc = disp;
break;
default:
BUG();
return 0;
}
} while ((params >>= 8));
*_address = (void *) address;
*_postinc = postinc;
return 1;
}
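
Each byte of the params word names one component of the effective address; the do/while loop above consumes them least-significant byte first, adding registers in directly and routing displacements through displace_or_inc. The following standalone sketch mimics that decode for a register-plus-signed-8-bit-displacement operand; the specifier values are hypothetical stand-ins for DM0 and SD8, not the kernel's actual constants:

#include <stdint.h>
#include <stdio.h>

#define XDM0 0x01	/* stand-in for DM0: data reg chosen by opcode[1:0] */
#define XSD8 0x02	/* stand-in for SD8: signed 8-bit displacement */

int main(void)
{
	unsigned long registers[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	unsigned params = (XSD8 << 8) | XDM0;	/* a (d8,Dm) style operand */
	unsigned opcode = 0x02;			/* selects registers[2] */
	unsigned long disp = 0xfc;		/* -4 once sign-extended */
	unsigned long address = 0;

	do {
		switch (params & 0xff) {
		case XDM0:
			address += registers[opcode & 0x03];
			break;
		case XSD8:
			address += (long)(int8_t)(disp & 0xff);
			break;
		}
	} while ((params >>= 8));

	printf("effective address = %#lx\n", address);	/* 0x2ffc */
	return 0;
}
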
/*
* determine the register that is acting as source/dest
*/
static int misalignment_reg(unsigned long *registers, unsigned params,
unsigned opcode, unsigned long disp,
unsigned long **_register)
{
params &= 0x7fffffff;
if (params & 0xffffff00)
return 0;
switch (params & 0xff) {
case DM0:
*_register = &registers[Dreg_index[opcode & 0x03]];
break;
case DM1:
*_register = &registers[Dreg_index[opcode >> 2 & 0x03]];
break;
case DM2:
*_register = &registers[Dreg_index[opcode >> 4 & 0x03]];
break;
case AM0:
*_register = &registers[Areg_index[opcode & 0x03]];
break;
case AM1:
*_register = &registers[Areg_index[opcode >> 2 & 0x03]];
break;
case AM2:
*_register = &registers[Areg_index[opcode >> 4 & 0x03]];
break;
case RM0:
*_register = &registers[Rreg_index[opcode & 0x0f]];
break;
case RM1:
*_register = &registers[Rreg_index[opcode >> 2 & 0x0f]];
break;
case RM2:
*_register = &registers[Rreg_index[opcode >> 4 & 0x0f]];
break;
case RM4:
*_register = &registers[Rreg_index[opcode >> 8 & 0x0f]];
break;
case RM6:
*_register = &registers[Rreg_index[opcode >> 12 & 0x0f]];
break;
case RD0:
*_register = &registers[Rreg_index[disp & 0x0f]];
break;
case RD2:
*_register = &registers[Rreg_index[disp >> 4 & 0x0f]];
break;
case SP:
*_register = &registers[REG_SP >> 2];
break;
default:
BUG();
return 0;
}
return 1;
}
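
In the SP case above, REG_SP appears to be a byte offset into the saved-register frame, so the >> 2 converts it into an index into an array of 32-bit register slots; that reading is an assumption, not something stated here. A standalone model of the conversion with a made-up offset:

#include <assert.h>

int main(void)
{
	unsigned long regs[16];		/* stands in for the register frame */
	unsigned reg_sp = 40;		/* hypothetical byte offset of SP */

	/* byte offset >> 2 == index into 32-bit slots */
	assert(&regs[reg_sp >> 2] == &regs[10]);
	return 0;
}
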
/*
* handle the conditional loop part of the move-and-loop instructions
*/
static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode)
{
unsigned long epsw = regs->epsw;
unsigned long NxorV;
kdebug("MOV_Lcc %x [flags=%lx]", opcode, epsw & 0xf);
/* calculate N^V and shift onto the same bit position as Z */
NxorV = ((epsw >> 3) ^ (epsw >> 1)) & 1;
switch (opcode & 0xf) {
case 0x0: /* MOV_LLT: N^V */
if (NxorV)
goto take_the_loop;
return;
case 0x1: /* MOV_LGT: ~(Z or (N^V))*/
if (!((epsw & EPSW_FLAG_Z) | NxorV))
goto take_the_loop;
return;
case 0x2: /* MOV_LGE: ~(N^V) */
if (!NxorV)
goto take_the_loop;
return;
case 0x3: /* MOV_LLE: Z or (N^V) */
if ((epsw & EPSW_FLAG_Z) | NxorV)
goto take_the_loop;
return;
case 0x4: /* MOV_LCS: C */
if (epsw & EPSW_FLAG_C)
goto take_the_loop;
return;
case 0x5: /* MOV_LHI: ~(C or Z) */
if (!(epsw & (EPSW_FLAG_C | EPSW_FLAG_Z)))
goto take_the_loop;
return;
case 0x6: /* MOV_LCC: ~C */
if (!(epsw & EPSW_FLAG_C))
goto take_the_loop;
return;
case 0x7: /* MOV_LLS: C or Z */
if (epsw & (EPSW_FLAG_C | EPSW_FLAG_Z))
goto take_the_loop;
return;
case 0x8: /* MOV_LEQ: Z */
if (epsw & EPSW_FLAG_Z)
goto take_the_loop;
return;
case 0x9: /* MOV_LNE: ~Z */
if (!(epsw & EPSW_FLAG_Z))
goto take_the_loop;
return;
case 0xa: /* MOV_LRA: always */
goto take_the_loop;
default:
BUG();
}
take_the_loop:
/* wind the PC back to just after the SETLB insn */
kdebug("loop LAR=%lx", regs->lar);
regs->pc = regs->lar - 4;
}
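
The shift counts in the N^V computation imply the EPSW flag layout Z at bit 0, N at bit 1 and V at bit 3; on that assumption, the standalone check below exercises the signed-less-than predicate the same way the MOV_LLT case does:

#include <assert.h>

/* shift V (bit 3) and N (bit 1) onto bit 0 and XOR them */
static int signed_less_than(unsigned long epsw)
{
	return ((epsw >> 3) ^ (epsw >> 1)) & 1;
}

int main(void)
{
	assert(signed_less_than(0x2) == 1);	/* N set, V clear -> LT */
	assert(signed_less_than(0x8) == 1);	/* V set, N clear -> LT */
	assert(signed_less_than(0xa) == 0);	/* N and V set   -> GE */
	assert(signed_less_than(0x0) == 0);	/* neither set   -> GE */
	return 0;
}
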
/*
* misalignment handler tests
*/
#ifdef CONFIG_TEST_MISALIGNMENT_HANDLER
static u8 __initdata testbuf[512] __attribute__((aligned(16))) = {
[257] = 0x11,
[258] = 0x22,
[259] = 0x33,
[260] = 0x44,
};
#define ASSERTCMP(X, OP, Y) \
do { \
if (unlikely(!((X) OP (Y)))) { \
printk(KERN_ERR "\n"); \
printk(KERN_ERR "MISALIGN: Assertion failed at line %u\n", \
__LINE__); \
printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
(unsigned long)(X), (unsigned long)(Y)); \
BUG(); \
} \
} while(0)
static int __init test_misalignment(void)
{
register void *r asm("e0");
register u32 y asm("e1");
void *p = testbuf, *q;
u32 tmp, tmp2, x;
printk(KERN_NOTICE "==>test_misalignment() [testbuf=%p]\n", p);
p++;
printk(KERN_NOTICE "___ MOV (Am),Dn ___\n");
q = p + 256;
asm volatile("mov (%0),%1" : "+a"(q), "=d"(x));
ASSERTCMP(q, ==, p + 256);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (256,Am),Dn ___\n");
q = p;
asm volatile("mov (256,%0),%1" : "+a"(q), "=d"(x));
ASSERTCMP(q, ==, p);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (Di,Am),Dn ___\n");
tmp = 256;
q = p;
asm volatile("mov (%2,%0),%1" : "+a"(q), "=d"(x), "+d"(tmp));
ASSERTCMP(q, ==, p);
ASSERTCMP(x, ==, 0x44332211);
ASSERTCMP(tmp, ==, 256);
printk(KERN_NOTICE "___ MOV (256,Rm),Rn ___\n");
r = p;
asm volatile("mov (256,%0),%1" : "+r"(r), "=r"(y));
ASSERTCMP(r, ==, p);
ASSERTCMP(y, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (Rm+),Rn ___\n");
r = p + 256;
asm volatile("mov (%0+),%1" : "+r"(r), "=r"(y));
ASSERTCMP(r, ==, p + 256 + 4);
ASSERTCMP(y, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (Rm+,8),Rn ___\n");
r = p + 256;
asm volatile("mov (%0+,8),%1" : "+r"(r), "=r"(y));
ASSERTCMP(r, ==, p + 256 + 8);
ASSERTCMP(y, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (7,SP),Rn ___\n");
asm volatile(
"add -16,sp \n"
"mov +0x11,%0 \n"
"movbu %0,(7,sp) \n"
"mov +0x22,%0 \n"
"movbu %0,(8,sp) \n"
"mov +0x33,%0 \n"
"movbu %0,(9,sp) \n"
"mov +0x44,%0 \n"
"movbu %0,(10,sp) \n"
"mov (7,sp),%1 \n"
"add +16,sp \n"
: "+a"(q), "=d"(x));
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV (259,SP),Rn ___\n");
asm volatile(
"add -264,sp \n"
"mov +0x11,%0 \n"
"movbu %0,(259,sp) \n"
"mov +0x22,%0 \n"
"movbu %0,(260,sp) \n"
"mov +0x33,%0 \n"
"movbu %0,(261,sp) \n"
"mov +0x55,%0 \n"
"movbu %0,(262,sp) \n"
"mov (259,sp),%1 \n"
"add +264,sp \n"
: "+d"(tmp), "=d"(x));
ASSERTCMP(x, ==, 0x55332211);
printk(KERN_NOTICE "___ MOV (260,SP),Rn ___\n");
asm volatile(
"add -264,sp \n"
"mov +0x11,%0 \n"
"movbu %0,(260,sp) \n"
"mov +0x22,%0 \n"
"movbu %0,(261,sp) \n"
"mov +0x33,%0 \n"
"movbu %0,(262,sp) \n"
"mov +0x55,%0 \n"
"movbu %0,(263,sp) \n"
"mov (260,sp),%1 \n"
"add +264,sp \n"
: "+d"(tmp), "=d"(x));
ASSERTCMP(x, ==, 0x55332211);
printk(KERN_NOTICE "___ MOV_LNE ___\n");
tmp = 1;
tmp2 = 2;
q = p + 256;
asm volatile(
"setlb \n"
"mov %2,%3 \n"
"mov %1,%2 \n"
"cmp +0,%1 \n"
"mov_lne (%0+,4),%1"
: "+r"(q), "+d"(tmp), "+d"(tmp2), "=d"(x)
:
: "cc");
ASSERTCMP(q, ==, p + 256 + 12);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "___ MOV in SETLB ___\n");
tmp = 1;
tmp2 = 2;
q = p + 256;
asm volatile(
"setlb \n"
"mov %1,%3 \n"
"mov (%0+),%1 \n"
"cmp +0,%1 \n"
"lne "
: "+a"(q), "+d"(tmp), "+d"(tmp2), "=d"(x)
:
: "cc");
ASSERTCMP(q, ==, p + 256 + 8);
ASSERTCMP(x, ==, 0x44332211);
printk(KERN_NOTICE "<==test_misalignment()\n");
return 0;
}
arch_initcall(test_misalignment);
#endif /* CONFIG_TEST_MISALIGNMENT_HANDLER */

62
arch/mn10300/mm/mmu-context.c Normal file
View file

@ -0,0 +1,62 @@
/* MN10300 MMU context allocation and management
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MN10300_TLB_USE_PIDR
/*
* list of the MMU contexts last allocated on each CPU
*/
unsigned long mmu_context_cache[NR_CPUS] = {
[0 ... NR_CPUS - 1] =
MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
};
#endif /* CONFIG_MN10300_TLB_USE_PIDR */
/*
* preemptively set a TLB entry
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
unsigned long pteu, ptel, cnx, flags;
pte_t pte = *ptep;
addr &= PAGE_MASK;
ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
/* make sure the context doesn't migrate and defend against
* interference from vmalloc'd regions */
local_irq_save(flags);
cnx = ~MMU_NO_CONTEXT;
#ifdef CONFIG_MN10300_TLB_USE_PIDR
cnx = mm_context(vma->vm_mm);
#endif
if (cnx != MMU_NO_CONTEXT) {
pteu = addr;
#ifdef CONFIG_MN10300_TLB_USE_PIDR
pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
#endif
if (!(pte_val(pte) & _PAGE_NX)) {
IPTEU = pteu;
if (IPTEL & xPTEL_V)
IPTEL = ptel;
}
DPTEU = pteu;
if (DPTEL & xPTEL_V)
DPTEL = ptel;
}
local_irq_restore(flags);
}
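
The pteu value written to the xPTEU registers above is just the page-masked virtual address with the context's TLB PID merged into the low bits. A standalone sketch of that composition; the mask values are assumptions for a 4 KiB page and an 8-bit TLB PID, not taken from the real headers:

#include <stdio.h>

#define DEMO_PAGE_MASK		0xfffff000UL	/* assumed 4 KiB pages */
#define DEMO_TLBPID_MASK	0x000000ffUL	/* assumed 8-bit PID */

int main(void)
{
	unsigned long addr = 0x08049123UL;	/* faulting address */
	unsigned long cnx = 0x00000142UL;	/* context: version | PID */
	unsigned long pteu;

	pteu = (addr & DEMO_PAGE_MASK) | (cnx & DEMO_TLBPID_MASK);
	printf("pteu = %#lx\n", pteu);		/* 0x8049042 */
	return 0;
}
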

174
arch/mn10300/mm/pgtable.c Normal file
View file

@ -0,0 +1,174 @@
/* MN10300 Page table management
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
* Associate a large virtual page frame with a given physical page frame
* and protection flags for that frame. pfn is for the base of the page,
* vaddr is what the page gets mapped to - both must be properly aligned.
* The pmd must already be instantiated. Assumes PAE mode.
*/
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
return; /* BUG(); */
}
if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
return; /* BUG(); */
}
pgd = swapper_pg_dir + pgd_index(vaddr);
if (pgd_none(*pgd)) {
printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
return; /* BUG(); */
}
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
set_pmd(pmd, pfn_pmd(pfn, flags));
/*
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
local_flush_tlb_one(vaddr);
}
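
A hypothetical caller would look like the sketch below; the virtual address and pfn are invented values chosen to pass the two alignment checks above, and PAGE_KERNEL is just a plausible protection choice:

static void __init demo_map_window(void)
{
	unsigned long vaddr = 0xd0000000UL;	/* assumed PMD-aligned */
	unsigned long pfn = 0x40000UL;		/* assumed PTRS_PER_PTE-aligned */

	/* install one large-page mapping; the old TLB entry is flushed */
	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL);
}
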
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte)
clear_page(pte);
return pte;
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
#ifdef CONFIG_HIGHPTE
pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
if (!pte)
return NULL;
clear_highpage(pte);
if (!pgtable_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
return pte;
}
/*
* List of all pgd's needed for non-PAE so it can invalidate entries
* in both cached and uncached pgd's; not needed for PAE since the
* kernel pmd is shared. If PAE were not to share the pmd a similar
* tactic would be needed. This is essentially codepath-based locking
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
* If the locking proves to be non-performant, a ticketing scheme with
* checks at dup_mmap(), exec(), and other mmlist addition points
* could be used. The locking scheme was chosen on the basis of
* manfred's recommendations and having no core impact whatsoever.
* -- nyc
*/
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
static inline void pgd_list_add(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
page->index = (unsigned long) pgd_list;
if (pgd_list)
set_page_private(pgd_list, (unsigned long) &page->index);
pgd_list = page;
set_page_private(page, (unsigned long) &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
struct page *next, **pprev, *page = virt_to_page(pgd);
next = (struct page *) page->index;
pprev = (struct page **) page_private(page);
*pprev = next;
if (next)
set_page_private(next, (unsigned long) pprev);
}
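
pgd_list_add() and pgd_list_del() together form the classic "pprev" intrusive list: each node's private word holds the address of whatever pointer currently points at it (the head, or the previous node's index field), so deletion needs no special case for the head. A standalone model of the same shape, using plain structs instead of struct page:

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* plays the role of page->index */
	struct node **pprev;	/* plays the role of page_private() */
};

static struct node *head;

static void demo_add(struct node *n)
{
	n->next = head;
	if (head)
		head->pprev = &n->next;
	head = n;
	n->pprev = &head;
}

static void demo_del(struct node *n)
{
	*n->pprev = n->next;	/* works for head and interior alike */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a = { NULL, NULL }, b = { NULL, NULL };

	demo_add(&a);
	demo_add(&b);		/* head -> b -> a */
	demo_del(&b);		/* unlink the head without testing for it */
	assert(head == &a);
	demo_del(&a);
	assert(head == NULL);
	return 0;
}
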
void pgd_ctor(void *pgd)
{
unsigned long flags;
if (PTRS_PER_PMD == 1)
spin_lock_irqsave(&pgd_lock, flags);
memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
if (PTRS_PER_PMD > 1)
return;
pgd_list_add(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
unsigned long flags; /* can be called from interrupt context */
spin_lock_irqsave(&pgd_lock, flags);
pgd_list_del(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, pgd_dtor, pgd);
}
void __init pgtable_cache_init(void)
{
}
void check_pgt_cache(void)
{
quicklist_trim(0, pgd_dtor, 25, 16);
}

220
arch/mn10300/mm/tlb-mn10300.S Normal file
View file

@ -0,0 +1,220 @@
###############################################################################
#
# TLB loading functions
#
# Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
# Modified by David Howells (dhowells@redhat.com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence
# as published by the Free Software Foundation; either version
# 2 of the Licence, or (at your option) any later version.
#
###############################################################################
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/intctl-regs.h>
#include <asm/frame.inc>
#include <asm/page.h>
#include <asm/pgtable.h>
###############################################################################
#
# Instruction TLB Miss handler entry point
#
###############################################################################
.type itlb_miss,@function
ENTRY(itlb_miss)
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
# register bank
nop
nop
nop
#endif
#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
mov (MMUCTR),d2
mov d2,(MMUCTR)
#endif
and ~EPSW_NMID,epsw
mov (IPTEU),d3
mov (PTBR),a2
mov d3,d2
and 0xffc00000,d2
lsr 20,d2
mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
btst _PAGE_VALID,a2
beq itlb_miss_fault # jump if doesn't point anywhere
and ~(PAGE_SIZE-1),a2
mov d3,d2
and 0x003ff000,d2
lsr 10,d2
add d2,a2
mov (a2),d2 # get pte from PTD[addr 21..12]
btst _PAGE_VALID,d2
beq itlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
bset +(_PAGE_ACCESSED >> 8),(1,a2)
#else
#error "_PAGE_ACCESSED value is out of range"
#endif
and ~xPTEL2_UNUSED1,d2
itlb_miss_set:
mov d2,(IPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
rti
itlb_miss_fault:
mov _PAGE_VALID,d2 # force address error handler to be
# invoked
bra itlb_miss_set
.size itlb_miss, . - itlb_miss
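
The shift counts in the walk above deliberately under-shift by two: lsr 20 (not 22) and lsr 10 (not 12) leave each index pre-multiplied by four, i.e. already a byte offset into a table of 32-bit entries. A standalone check of that trick:

#include <assert.h>

int main(void)
{
	unsigned long addr = 0x12345678UL;
	unsigned long pgd_off = (addr & 0xffc00000UL) >> 20;
	unsigned long pte_off = (addr & 0x003ff000UL) >> 10;

	/* same as extracting the index and multiplying by 4 */
	assert(pgd_off == (addr >> 22) * 4);
	assert(pte_off == ((addr >> 12) & 0x3ff) * 4);
	return 0;
}
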
###############################################################################
#
# Data TLB Miss handler entry point
#
###############################################################################
.type dtlb_miss,@function
ENTRY(dtlb_miss)
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
# register bank
nop
nop
nop
#endif
#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
mov (MMUCTR),d2
mov d2,(MMUCTR)
#endif
and ~EPSW_NMID,epsw
mov (DPTEU),d3
mov (PTBR),a2
mov d3,d2
and 0xffc00000,d2
lsr 20,d2
mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
btst _PAGE_VALID,a2
beq dtlb_miss_fault # jump if doesn't point anywhere
and ~(PAGE_SIZE-1),a2
mov d3,d2
and 0x003ff000,d2
lsr 10,d2
add d2,a2
mov (a2),d2 # get pte from PTD[addr 21..12]
btst _PAGE_VALID,d2
beq dtlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
bset +(_PAGE_ACCESSED >> 8),(1,a2)
#else
#error "_PAGE_ACCESSED value is out of range"
#endif
and ~xPTEL2_UNUSED1,d2
dtlb_miss_set:
mov d2,(DPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
rti
dtlb_miss_fault:
mov _PAGE_VALID,d2 # force address error handler to be
# invoked
bra dtlb_miss_set
.size dtlb_miss, . - dtlb_miss
###############################################################################
#
# Instruction TLB Address Error handler entry point
#
###############################################################################
.type itlb_aerror,@function
ENTRY(itlb_aerror)
add -4,sp
SAVE_ALL
#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
mov (MMUCTR),d1
mov d1,(MMUCTR)
#endif
and ~EPSW_NMID,epsw
add -4,sp # need to pass three params
# calculate the fault code
movhu (MMUFCR_IFC),d1
or 0x00010000,d1 # it's an instruction fetch
# determine the page address
mov (IPTEU),d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
mov d0,(IPTEL2)
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs, code, addr)
jmp ret_from_exception
.size itlb_aerror, . - itlb_aerror
###############################################################################
#
# Data TLB Address Error handler entry point
#
###############################################################################
.type dtlb_aerror,@function
ENTRY(dtlb_aerror)
add -4,sp
SAVE_ALL
#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
mov (MMUCTR),d1
mov d1,(MMUCTR)
#endif
add -4,sp # need to pass three params
and ~EPSW_NMID,epsw
# calculate the fault code
movhu (MMUFCR_DFC),d1
# determine the page address
mov (DPTEU),a2
mov a2,d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
mov d0,(DPTEL2)
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs, code, addr)
jmp ret_from_exception
.size dtlb_aerror, . - dtlb_aerror

213
arch/mn10300/mm/tlb-smp.c Normal file
View file

@ -0,0 +1,213 @@
/* SMP TLB support routines.
*
* Copyright (C) 2006-2008 Panasonic Corporation
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
/*
* Sentinel address indicating a flush of the entire TLB
*/
#define FLUSH_ALL 0xffffffff
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
&init_mm, 0
};
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
unsigned long va);
static void do_flush_tlb_all(void *info);
/**
* smp_flush_tlb - Callback to invalidate the TLB.
* @unused: Callback context (ignored).
*/
void smp_flush_tlb(void *unused)
{
unsigned long cpu_id;
cpu_id = get_cpu();
if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
/* This was a BUG() but until someone can quote me the line
* from the Intel manual that guarantees an IPI to multiple
* CPUs is retried _only_ on the erroring CPUs, it's staying as a
* return
*
* BUG();
*/
goto out;
if (flush_va == FLUSH_ALL)
local_flush_tlb();
else
local_flush_tlb_page(flush_mm, flush_va);
smp_mb__before_atomic();
cpumask_clear_cpu(cpu_id, &flush_cpumask);
smp_mb__after_atomic();
out:
put_cpu();
}
/**
* flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
* @cpumask: The list of CPUs to target.
* @mm: The VM context to flush from (if va!=FLUSH_ALL).
* @va: Virtual address to flush or FLUSH_ALL to flush everything.
*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
unsigned long va)
{
cpumask_t tmp;
/* A couple of sanity checks (to be removed):
* - mask must not be empty
* - current CPU must not be in mask
* - we do not send IPIs to as-yet unbooted CPUs.
*/
BUG_ON(!mm);
BUG_ON(cpumask_empty(&cpumask));
BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
cpumask_and(&tmp, &cpumask, cpu_online_mask);
BUG_ON(!cpumask_equal(&cpumask, &tmp));
/* I'm not happy about this global shared spinlock in the MM hot path,
* but we'll see how contended it is.
*
* Temporarily this turns IRQs off, so that lockups are detected by the
* NMI watchdog.
*/
spin_lock(&tlbstate_lock);
flush_mm = mm;
flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif
/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
smp_call_function(smp_flush_tlb, NULL, 1);
while (!cpumask_empty(&flush_cpumask))
/* Lockup detection does not belong here */
smp_mb();
flush_mm = NULL;
flush_va = 0;
spin_unlock(&tlbstate_lock);
}
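
The spin at the end of flush_tlb_others() is one half of a simple handshake: the initiator publishes a mask of pending CPUs, each responder does its local flush and clears its own bit, and the initiator waits for the mask to empty. A standalone model of that handshake using C11 atomics and pthreads (not kernel code; the CPU numbers and mask value are invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_ulong flush_mask;

static void *responder(void *arg)
{
	unsigned long bit = 1UL << (uintptr_t)arg;

	/* ... the local TLB flush would happen here ... */
	atomic_fetch_and(&flush_mask, ~bit);	/* report completion */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	atomic_store(&flush_mask, 0x6UL);	/* CPUs 1 and 2 pending */
	pthread_create(&t1, NULL, responder, (void *)1);
	pthread_create(&t2, NULL, responder, (void *)2);

	while (atomic_load(&flush_mask))	/* initiator side of the wait */
		;

	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("all responders done\n");
	return 0;
}
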
/**
* flush_tlb_mm - Invalidate TLB of specified VM context
* @mm: The VM context to invalidate.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
cpumask_t cpu_mask;
preempt_disable();
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
local_flush_tlb();
if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
/**
* flush_tlb_current_task - Invalidate TLB of current task
*/
void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
cpumask_t cpu_mask;
preempt_disable();
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
local_flush_tlb();
if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
/**
* flush_tlb_page - Invalidate TLB of page
* @vma: The VM context to invalidate the page for.
* @va: The virtual address of the page to invalidate.
*/
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
struct mm_struct *mm = vma->vm_mm;
cpumask_t cpu_mask;
preempt_disable();
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
local_flush_tlb_page(mm, va);
if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
/**
* do_flush_tlb_all - Callback to completely invalidate a TLB
* @unused: Callback context (ignored).
*/
static void do_flush_tlb_all(void *unused)
{
local_flush_tlb_all();
}
/**
* flush_tlb_all - Completely invalidate TLBs on all CPUs
*/
void flush_tlb_all(void)
{
on_each_cpu(do_flush_tlb_all, 0, 1);
}