Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-10-28 14:58:52 +01:00)

Commit f6dfaef42e: "Fixed MTP to work with TWRP"
50820 changed files with 20846062 additions and 0 deletions. The new arch/m68k/mm/ files added by this commit are shown below.

arch/m68k/mm/Makefile (new file, 11 lines)
@@ -0,0 +1,11 @@
#
# Makefile for the linux m68k-specific parts of the memory manager.
#

obj-y				:= init.o

obj-$(CONFIG_MMU)		+= cache.o fault.o
obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o hwtest.o
obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o hwtest.o
obj-$(CONFIG_MMU_COLDFIRE)	+= kmap.o memory.o mcfmmu.o

arch/m68k/mm/cache.c (new file, 136 lines)
@@ -0,0 +1,136 @@
/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>


static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1"
			      : "=a&" (descaddr), "=m" (mmusr)
			      : "a" (vaddr), "d" (get_fs().seg));
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);

	} else if (CPU_IS_040_OR_060) {
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}

arch/m68k/mm/fault.c (new file, 220 lines)
@@ -0,0 +1,220 @@
/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

int send_fault_sig(struct pt_regs *regs)
{
	siginfo_t siginfo = { 0, 0, 0, };

	siginfo.si_signo = current->thread.signo;
	siginfo.si_code = current->thread.code;
	siginfo.si_addr = (void *)current->thread.faddr;
	pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
		 siginfo.si_signo, siginfo.si_code);

	if (user_mode(regs)) {
		force_sig_info(siginfo.si_signo,
			       &siginfo, current);
	} else {
		if (handle_kernel_fault(regs))
			return -1;

		//if (siginfo.si_signo == SIGBUS)
		//	force_sig_info(siginfo.si_signo,
		//		       &siginfo, current);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
			pr_alert("Unable to handle kernel NULL pointer dereference");
		else
			pr_alert("Unable to handle kernel access");
		pr_cont(" at virtual address %p\n", siginfo.si_addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}

/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto map_err;
	if (vma->vm_flags & VM_IO)
		goto acc_err;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto map_err;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug.  The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later.  */
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	pr_debug("do_page_fault: good_area\n");
	switch (error_code & 3) {
		default:	/* 3: write, present */
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto acc_err;
			flags |= FAULT_FLAG_WRITE;
			break;
		case 1:		/* read, present */
			goto acc_err;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
				goto acc_err;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(mm, vma, address, flags);
	pr_debug("handle_mm_fault returns %d\n", fault);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto map_err;
		else if (fault & VM_FAULT_SIGBUS)
			goto bus_err;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return 0;

no_context:
	current->thread.signo = SIGBUS;
	current->thread.faddr = address;
	return send_fault_sig(regs);

bus_err:
	current->thread.signo = SIGBUS;
	current->thread.code = BUS_ADRERR;
	current->thread.faddr = address;
	goto send_sig;

map_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;

acc_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_ACCERR;
	current->thread.faddr = address;

send_sig:
	up_read(&mm->mmap_sem);
	return send_fault_sig(regs);
}

arch/m68k/mm/hwtest.c (new file, 93 lines)
@@ -0,0 +1,93 @@
/* Tests for presence or absence of hardware registers.
 * This code was originally in atari/config.c, but I noticed
 * that it was also in drivers/nubus/nubus.c and I wanted to
 * use it in hp300/config.c, so it seemed sensible to pull it
 * out into its own file.
 *
 * The test is for use when trying to read a hardware register
 * that isn't present would cause a bus error. We set up a
 * temporary handler so that this doesn't kill the kernel.
 *
 * There is a test-by-reading and a test-by-writing; I present
 * them here complete with the comments from the original atari
 * config.c...
 *    -- PMM <pmaydell@chiark.greenend.org.uk>, 05/1998
 */

/* This function tests for the presence of an address, specially a
 * hardware register address. It is called very early in the kernel
 * initialization process, when the VBR register isn't set up yet. On
 * an Atari, it still points to address 0, which is unmapped. So a bus
 * error would cause another bus error while fetching the exception
 * vector, and the CPU would do nothing at all. So we needed to set up
 * a temporary VBR and a vector table for the duration of the test.
 */

#include <linux/module.h>

int hwreg_present(volatile void *regp)
{
	int ret = 0;
	unsigned long flags;
	long save_sp, save_vbr;
	long tmp_vectors[3];

	local_irq_save(flags);
	__asm__ __volatile__ (
		"movec %/vbr,%2\n\t"
		"movel #Lberr1,%4@(8)\n\t"
		"movec %4,%/vbr\n\t"
		"movel %/sp,%1\n\t"
		"moveq #0,%0\n\t"
		"tstb %3@\n\t"
		"nop\n\t"
		"moveq #1,%0\n"
	"Lberr1:\n\t"
		"movel %1,%/sp\n\t"
		"movec %2,%/vbr"
		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
		: "a" (regp), "a" (tmp_vectors)
	);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL(hwreg_present);

/* Basically the same, but writes a value into a word register, protected
 * by a bus error handler. Returns 1 if successful, 0 otherwise.
 */

int hwreg_write(volatile void *regp, unsigned short val)
{
	int ret;
	unsigned long flags;
	long save_sp, save_vbr;
	long tmp_vectors[3];

	local_irq_save(flags);
	__asm__ __volatile__ (
		"movec %/vbr,%2\n\t"
		"movel #Lberr2,%4@(8)\n\t"
		"movec %4,%/vbr\n\t"
		"movel %/sp,%1\n\t"
		"moveq #0,%0\n\t"
		"movew %5,%3@\n\t"
		"nop\n\t"
		/*
		 * If this nop isn't present, 'ret' may already be loaded
		 * with 1 at the time the bus error happens!
		 */
		"moveq #1,%0\n"
	"Lberr2:\n\t"
		"movel %1,%/sp\n\t"
		"movec %2,%/vbr"
		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
		: "a" (regp), "a" (tmp_vectors), "g" (val)
	);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL(hwreg_write);

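For context, a minimal sketch (not part of this commit) of how the two helpers above are typically consumed: a board-setup routine probes an optional register before touching it, so missing hardware is detected gracefully instead of raising an unhandled bus error. The register address and function names below are hypothetical.

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/init.h>

	extern int hwreg_present(volatile void *regp);

	#define MYBOARD_STATUS_REG	0x00f00000UL	/* hypothetical register address */

	static int __init myboard_detect(void)
	{
		/* probe under the temporary bus-error handler set up by hwreg_present() */
		if (!hwreg_present((volatile void *)MYBOARD_STATUS_REG))
			return -ENODEV;		/* access faulted: no such register */
		pr_info("myboard: status register found\n");
		return 0;
	}
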
arch/m68k/mm/init.c (new file, 181 lines)
@@ -0,0 +1,181 @@
/*
 *  linux/arch/m68k/mm/init.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 *
 *  Contains common initialization routines, specific init code moved
 *  to motorola.c and sun3mmu.c
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#include <asm/tlb.h>

/*
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
void *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
extern void init_pointer_table(unsigned long ptable);
extern pmd_t *zero_pgtable;
#endif

#ifdef CONFIG_MMU

pg_data_t pg_data_map[MAX_NUMNODES];
EXPORT_SYMBOL(pg_data_map);

int m68k_virt_to_node_shift;

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
pg_data_t *pg_data_table[65];
EXPORT_SYMBOL(pg_data_table);
#endif

void __init m68k_setup_node(int node)
{
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
	struct m68k_mem_info *info = m68k_memory + node;
	int i, end;

	i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
	end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
	for (; i <= end; i++) {
		if (pg_data_table[i])
			printk("overlap at %u for chunk %u\n", i, node);
		pg_data_table[i] = pg_data_map + node;
	}
#endif
	pg_data_map[node].bdata = bootmem_node_data + node;
	node_set_online(node);
}

#else /* CONFIG_MMU */

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
	unsigned long end_mem = memory_end & PAGE_MASK;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	high_memory = (void *) end_mem;

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs (USER_DS);

	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
	free_area_init(zones_size);
}

#endif /* CONFIG_MMU */

void free_initmem(void)
{
#ifndef CONFIG_MMU_SUN3
	free_initmem_default(-1);
#endif /* CONFIG_MMU_SUN3 */
}

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#define VECTORS	&vectors[0]
#else
#define VECTORS	_ramvec
#endif

void __init print_memmap(void)
{
#define UL(x) ((unsigned long) (x))
#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)

	pr_notice("Virtual kernel memory layout:\n"
		"    vector  : 0x%08lx - 0x%08lx   (%4ld KiB)\n"
		"    kmap    : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
		"      .init : 0x%p" " - 0x%p" "   (%4d KiB)\n"
		"      .text : 0x%p" " - 0x%p" "   (%4d KiB)\n"
		"      .data : 0x%p" " - 0x%p" "   (%4d KiB)\n"
		"      .bss  : 0x%p" " - 0x%p" "   (%4d KiB)\n",
		MLK(VECTORS, VECTORS + 256),
		MLM(KMAP_START, KMAP_END),
		MLM(VMALLOC_START, VMALLOC_END),
		MLM(PAGE_OFFSET, (unsigned long)high_memory),
		MLK_ROUNDUP(__init_begin, __init_end),
		MLK_ROUNDUP(_stext, _etext),
		MLK_ROUNDUP(_sdata, _edata),
		MLK_ROUNDUP(__bss_start, __bss_stop));
}

static inline void init_pointer_tables(void)
{
#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
	int i;

	/* insert pointer tables allocated so far into the tablelist */
	init_pointer_table((unsigned long)kernel_pg_dir);
	for (i = 0; i < PTRS_PER_PGD; i++) {
		if (pgd_present(kernel_pg_dir[i]))
			init_pointer_table(__pgd_page(kernel_pg_dir[i]));
	}

	/* insert also pointer table that we used to unmap the zero page */
	if (zero_pgtable)
		init_pointer_table((unsigned long)zero_pgtable);
#endif
}

void __init mem_init(void)
{
	/* this will put all memory onto the freelists */
	free_all_bootmem();
	init_pointer_tables();
	mem_init_print_info(NULL);
	print_memmap();
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

arch/m68k/mm/kmap.c (new file, 367 lines)
@@ -0,0 +1,367 @@
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changing to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

static struct vm_struct *iolist;

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			__iounmap(tmp->addr, tmp->size);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap unmaps nearly everything, so be careful
 * Currently it doesn't free pointer/page tables anymore but this
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);

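As a usage note, here is a hedged sketch (not from this commit) of the driver-side view of the interface above: map a device register window uncached, then tear the mapping down again. The physical base and length are invented for illustration; IOMAP_NOCACHE_SER is the serialized non-cached mode that __ioremap() already treats as its default.

	#include <linux/errno.h>
	#include <asm/io.h>

	#define MYDEV_PHYS_BASE	0x02000000UL	/* hypothetical device base */
	#define MYDEV_REG_LEN	0x1000UL	/* hypothetical window size */

	static void __iomem *mydev_regs;

	static int mydev_map(void)
	{
		mydev_regs = __ioremap(MYDEV_PHYS_BASE, MYDEV_REG_LEN, IOMAP_NOCACHE_SER);
		if (!mydev_regs)
			return -ENOMEM;
		return 0;
	}

	static void mydev_unmap(void)
	{
		/* releases the region tracked by get_io_area()/free_io_area() */
		iounmap(mydev_regs);
	}
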
arch/m68k/mm/mcfmmu.c (new file, 195 lines)
@@ -0,0 +1,195 @@
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
extern unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))  {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}

arch/m68k/mm/memory.c (new file, 298 lines)
@@ -0,0 +1,298 @@
/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>
#include <asm/machdep.h>


/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	init_page_count(PD_PAGE(dp));

	return;
}

pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}

int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list);
	}
	return 0;
}

/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* invalidate page in i-cache */
static inline void cleari040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%ic,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* push page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
static inline void push040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cpushp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);
	if (CPU_IS_060)
		clear040(paddr);
	local_irq_restore(flags);
}

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */


/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		clear_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if(mach_l2_flush)
		mach_l2_flush(0);
#endif
}
EXPORT_SYMBOL(cache_clear);


/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			push040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if(mach_l2_flush)
		mach_l2_flush(1);
#endif
}
EXPORT_SYMBOL(cache_push);

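A short sketch (not from this commit) of the calling pattern the comments above describe for a DMA path: push dirty lines before a device reads a buffer, and clear stale lines before the CPU reads data the device wrote. Both routines take physical addresses; buf_phys and BUF_LEN are assumptions for illustration.

	extern void cache_push(unsigned long paddr, int len);
	extern void cache_clear(unsigned long paddr, int len);

	#define BUF_LEN	4096	/* hypothetical DMA buffer size */

	static void dma_to_device(unsigned long buf_phys)
	{
		/* write back dirty cache data so the device sees current memory */
		cache_push(buf_phys, BUF_LEN);
		/* ... start the device's read of buf_phys here ... */
	}

	static void dma_from_device(unsigned long buf_phys)
	{
		/* ... device has finished writing buf_phys ... */
		/* drop stale cached copies; the DMA'd data replaces them */
		cache_clear(buf_phys, BUF_LEN);
	}
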
arch/m68k/mm/motorola.c (new file, 308 lines)
@@ -0,0 +1,308 @@
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}

static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem,
			  min(m68k_init_mapped_size, size) - (availmem - addr));
	map_node(0);
	if (size > m68k_init_mapped_size)
		free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
				  size - m68k_init_mapped_size);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}

arch/m68k/mm/sun3kmap.c (new file, 161 lines)
@@ -0,0 +1,161 @@
/*
 * linux/arch/m68k/mm/sun3kmap.c
 *
 * Copyright (C) 2002 Sam Creasey <sammy@sammy.net>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/sun3mmu.h>

#undef SUN3_KMAP_DEBUG

#ifdef SUN3_KMAP_DEBUG
extern void print_pte_vaddr(unsigned long vaddr);
#endif

extern void mmu_emu_map_pmeg(int context, int vaddr);

static inline void do_page_mapin(unsigned long phys, unsigned long virt,
				 unsigned long type)
{
	unsigned long pte;
	pte_t ptep;

	ptep = pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL);
	pte = pte_val(ptep);
	pte |= type;

	sun3_put_pte(virt, pte);

#ifdef SUN3_KMAP_DEBUG
	print_pte_vaddr(virt);
#endif

}

static inline void do_pmeg_mapin(unsigned long phys, unsigned long virt,
				 unsigned long type, int pages)
{

	if(sun3_get_segmap(virt & ~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg(sun3_get_context(), virt);

	while(pages) {
		do_page_mapin(phys, virt, type);
		phys += PAGE_SIZE;
		virt += PAGE_SIZE;
		pages--;
	}
}

void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
			   unsigned long type)
{
	struct vm_struct *area;
	unsigned long offset, virt, ret;
	int pages;

	if(!size)
		return NULL;

	/* page align */
	offset = phys & (PAGE_SIZE-1);
	phys &= ~(PAGE_SIZE-1);

	size += offset;
	size = PAGE_ALIGN(size);
	if((area = get_vm_area(size, VM_IOREMAP)) == NULL)
		return NULL;

#ifdef SUN3_KMAP_DEBUG
	printk("ioremap: got virt %p size %lx(%lx)\n",
	       area->addr, size, area->size);
#endif

	pages = size / PAGE_SIZE;
	virt = (unsigned long)area->addr;
	ret = virt + offset;

	while(pages) {
		int seg_pages;

		seg_pages = (SUN3_PMEG_SIZE - (virt & SUN3_PMEG_MASK)) / PAGE_SIZE;
		if(seg_pages > pages)
			seg_pages = pages;

		do_pmeg_mapin(phys, virt, type, seg_pages);

		pages -= seg_pages;
		phys += seg_pages * PAGE_SIZE;
		virt += seg_pages * PAGE_SIZE;
	}

	return (void __iomem *)ret;

}
EXPORT_SYMBOL(sun3_ioremap);


void __iomem *__ioremap(unsigned long phys, unsigned long size, int cache)
{

	return sun3_ioremap(phys, size, SUN3_PAGE_TYPE_IO);

}
EXPORT_SYMBOL(__ioremap);

void iounmap(void __iomem *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

/* sun3_map_test(addr, val) -- Reads a byte from addr, storing to val,
 * trapping the potential read fault.  Returns 0 if the access faulted,
 * 1 on success.
 *
 * This function is primarily used to check addresses on the VME bus.
 *
 * Mucking with the page fault handler seems a little hackish to me, but
 * SunOS, NetBSD, and Mach all implemented this check in such a manner,
 * so I figure we're allowed.
 */
int sun3_map_test(unsigned long addr, char *val)
{
	int ret = 0;

	__asm__ __volatile__
		(".globl _sun3_map_test_start\n"
		 "_sun3_map_test_start:\n"
		 "1: moveb (%2), (%0)\n"
		 "   moveq #1, %1\n"
		 "2:\n"
		 ".section .fixup,\"ax\"\n"
		 ".even\n"
		 "3: moveq #0, %1\n"
		 "   jmp 2b\n"
		 ".previous\n"
		 ".section __ex_table,\"a\"\n"
		 ".align 4\n"
		 ".long 1b,3b\n"
		 ".previous\n"
		 ".globl _sun3_map_test_end\n"
		 "_sun3_map_test_end:\n"
		 : "=a"(val), "=r"(ret)
		 : "a"(addr));

	return ret;
}
EXPORT_SYMBOL(sun3_map_test);

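The comment on sun3_map_test() above describes its use for probing VME bus addresses; a minimal hypothetical caller (the CSR address below is made up, not from this commit) might look like this.

	extern int sun3_map_test(unsigned long addr, char *val);

	#define MYVME_CSR	0x0ef00000UL	/* hypothetical VME register address */

	static int myvme_board_present(void)
	{
		char byte;

		/* returns 0 if the read faulted, 1 if the address responded */
		return sun3_map_test(MYVME_CSR, &byte);
	}
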
arch/m68k/mm/sun3mmu.c (new file, 98 lines)
@@ -0,0 +1,98 @@
/*
 * linux/arch/m68k/mm/sun3mmu.c
 *
 * Implementations of mm routines specific to the sun3 MMU.
 *
 * Moved here 8/20/1999 Sam Creasey
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>

extern void mmu_emu_init (unsigned long bootmem_end);

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long num_pages;

/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;

#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir, 0, sizeof (kernel_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	/* I really wish I knew why the following change made things better...  -- Sam */
	/* free_area_init(zones_size); */
	free_area_init_node(0, zones_size,
			    (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);


}
