Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 17:18:05 -04:00)
Commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions
arch/s390/mm/Makefile (new file, 10 lines)
@@ -0,0 +1,10 @@
#
# Makefile for the linux s390-specific parts of the memory manager.
#

obj-y	:= init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
obj-y	+= page-states.o gup.o extable.o pageattr.o mem_detect.o

obj-$(CONFIG_CMM)		+= cmm.o
obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
obj-$(CONFIG_S390_PTDUMP)	+= dump_pagetables.o
arch/s390/mm/cmm.c (new file, 495 lines)
@@ -0,0 +1,495 @@
/*
 *  Collaborative memory management interface.
 *
 *    Copyright IBM Corp 2003, 2010
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/diag.h>

#ifdef CONFIG_CMM_IUCV
static char *cmm_default_sender = "VMRMSVM";
#endif
static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
		 "Guest name that may send SMSG messages (default VMRMSVM)");

#include "../../../drivers/s390/net/smsgiucv.h"

#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)

struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long pages[CMM_NR_PAGES];
};

static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
static int cmm_suspended;

static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);

static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
static DEFINE_TIMER(cmm_timer, NULL, 0, 0);

static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);

static long cmm_alloc_pages(long nr, long *counter,
			    struct cmm_page_array **list)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;

	while (nr) {
		addr = __get_free_page(GFP_NOIO);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = *list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)
				__get_free_page(GFP_NOIO);
			if (!npa) {
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = *list;
			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				*list = pa;
			} else
				free_page((unsigned long) npa);
		}
		diag10_range(addr >> PAGE_SHIFT, 1);
		pa->pages[pa->index++] = addr;
		(*counter)++;
		spin_unlock(&cmm_lock);
		nr--;
	}
	return nr;
}

static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	spin_lock(&cmm_lock);
	pa = *list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->pages[--pa->index];
		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) *list);
			*list = pa;
		}
		free_page(addr);
		(*counter)--;
		nr--;
	}
	spin_unlock(&cmm_lock);
	return nr;
}

static int cmm_oom_notify(struct notifier_block *self,
			  unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;
	long nr = 256;

	nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
	if (nr > 0)
		nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
	cmm_pages_target = cmm_pages;
	cmm_timed_pages_target = cmm_timed_pages;
	*freed += 256 - nr;
	return NOTIFY_OK;
}

static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify,
};

static int cmm_thread(void *dummy)
{
	int rc;

	while (1) {
		rc = wait_event_interruptible(cmm_thread_wait,
			(!cmm_suspended && (cmm_pages != cmm_pages_target ||
			 cmm_timed_pages != cmm_timed_pages_target)) ||
			 kthread_should_stop());
		if (kthread_should_stop() || rc == -ERESTARTSYS) {
			cmm_pages_target = cmm_pages;
			cmm_timed_pages_target = cmm_timed_pages;
			break;
		}
		if (cmm_pages_target > cmm_pages) {
			if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
				cmm_pages_target = cmm_pages;
		} else if (cmm_pages_target < cmm_pages) {
			cmm_free_pages(1, &cmm_pages, &cmm_page_list);
		}
		if (cmm_timed_pages_target > cmm_timed_pages) {
			if (cmm_alloc_pages(1, &cmm_timed_pages,
					    &cmm_timed_page_list))
				cmm_timed_pages_target = cmm_timed_pages;
		} else if (cmm_timed_pages_target < cmm_timed_pages) {
			cmm_free_pages(1, &cmm_timed_pages,
				       &cmm_timed_page_list);
		}
		if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
			cmm_set_timer();
	}
	return 0;
}

static void cmm_kick_thread(void)
{
	wake_up(&cmm_thread_wait);
}

static void cmm_set_timer(void)
{
	if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
		if (timer_pending(&cmm_timer))
			del_timer(&cmm_timer);
		return;
	}
	if (timer_pending(&cmm_timer)) {
		if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
			return;
	}
	cmm_timer.function = cmm_timer_fn;
	cmm_timer.data = 0;
	cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
	add_timer(&cmm_timer);
}

static void cmm_timer_fn(unsigned long ignored)
{
	long nr;

	nr = cmm_timed_pages_target - cmm_timeout_pages;
	if (nr < 0)
		cmm_timed_pages_target = 0;
	else
		cmm_timed_pages_target = nr;
	cmm_kick_thread();
	cmm_set_timer();
}

static void cmm_set_pages(long nr)
{
	cmm_pages_target = nr;
	cmm_kick_thread();
}

static long cmm_get_pages(void)
{
	return cmm_pages;
}

static void cmm_add_timed_pages(long nr)
{
	cmm_timed_pages_target += nr;
	cmm_kick_thread();
}

static long cmm_get_timed_pages(void)
{
	return cmm_timed_pages;
}

static void cmm_set_timeout(long nr, long seconds)
{
	cmm_timeout_pages = nr;
	cmm_timeout_seconds = seconds;
	cmm_set_timer();
}

static int cmm_skip_blanks(char *cp, char **endp)
{
	char *str;

	for (str = cp; *str == ' ' || *str == '\t'; str++)
		;
	*endp = str;
	return str != cp;
}

static struct ctl_table cmm_table[];

static int cmm_pages_handler(struct ctl_table *ctl, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[16], *p;
	unsigned int len;
	long nr;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		if (ctl == &cmm_table[0])
			cmm_set_pages(nr);
		else
			cmm_add_timed_pages(nr);
	} else {
		if (ctl == &cmm_table[0])
			nr = cmm_get_pages();
		else
			nr = cmm_get_timed_pages();
		len = sprintf(buf, "%ld\n", nr);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static int cmm_timeout_handler(struct ctl_table *ctl, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[64], *p;
	long nr, seconds;
	unsigned int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		cmm_skip_blanks(p, &p);
		seconds = simple_strtoul(p, &p, 0);
		cmm_set_timeout(nr, seconds);
	} else {
		len = sprintf(buf, "%ld %ld\n",
			      cmm_timeout_pages, cmm_timeout_seconds);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table cmm_table[] = {
	{
		.procname	= "cmm_pages",
		.mode		= 0644,
		.proc_handler	= cmm_pages_handler,
	},
	{
		.procname	= "cmm_timed_pages",
		.mode		= 0644,
		.proc_handler	= cmm_pages_handler,
	},
	{
		.procname	= "cmm_timeout",
		.mode		= 0644,
		.proc_handler	= cmm_timeout_handler,
	},
	{ }
};

static struct ctl_table cmm_dir_table[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= cmm_table,
	},
	{ }
};

#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg)
{
	long nr, seconds;

	if (strlen(sender) > 0 && strcmp(from, sender) != 0)
		return;
	if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
		return;
	if (strncmp(msg, "SHRINK", 6) == 0) {
		if (!cmm_skip_blanks(msg + 6, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_pages(nr);
	} else if (strncmp(msg, "RELEASE", 7) == 0) {
		if (!cmm_skip_blanks(msg + 7, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_add_timed_pages(nr);
	} else if (strncmp(msg, "REUSE", 5) == 0) {
		if (!cmm_skip_blanks(msg + 5, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		if (!cmm_skip_blanks(msg, &msg))
			return;
		seconds = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_timeout(nr, seconds);
	}
}
#endif

static struct ctl_table_header *cmm_sysctl_header;

static int cmm_suspend(void)
{
	cmm_suspended = 1;
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
	return 0;
}

static int cmm_resume(void)
{
	cmm_suspended = 0;
	cmm_kick_thread();
	return 0;
}

static int cmm_power_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	switch (event) {
	case PM_POST_HIBERNATION:
		return cmm_resume();
	case PM_HIBERNATION_PREPARE:
		return cmm_suspend();
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block cmm_power_notifier = {
	.notifier_call = cmm_power_event,
};

static int __init cmm_init(void)
{
	int rc = -ENOMEM;

	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
	if (!cmm_sysctl_header)
		goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
	/* convert sender to uppercase characters */
	if (sender) {
		int len = strlen(sender);
		while (len--)
			sender[len] = toupper(sender[len]);
	} else {
		sender = cmm_default_sender;
	}

	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
	if (rc < 0)
		goto out_smsg;
#endif
	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_oom_notify;
	rc = register_pm_notifier(&cmm_power_notifier);
	if (rc)
		goto out_pm;
	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	if (!IS_ERR(cmm_thread_ptr))
		return 0;

	rc = PTR_ERR(cmm_thread_ptr);
	unregister_pm_notifier(&cmm_power_notifier);
out_pm:
	unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
	unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
	del_timer_sync(&cmm_timer);
	return rc;
}
module_init(cmm_init);

static void __exit cmm_exit(void)
{
	unregister_sysctl_table(cmm_sysctl_header);
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
	unregister_pm_notifier(&cmm_power_notifier);
	unregister_oom_notifier(&cmm_oom_nb);
	kthread_stop(cmm_thread_ptr);
	del_timer_sync(&cmm_timer);
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
module_exit(cmm_exit);

MODULE_LICENSE("GPL");
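The three sysctl entries registered above surface under /proc/sys/vm once the module is loaded: writing a number to cmm_pages raises cmm_pages_target, and cmm_thread then allocates pages one at a time until the counter matches the target. As a quick illustration (not part of the commit), a minimal userspace sketch that sets a target and reads the current count back might look like this; it assumes CONFIG_CMM is enabled and /proc/sys/vm/cmm_pages exists:

/* Minimal userspace sketch, not kernel code: ask CMM to remove 1024
 * pages from the guest, then read back how many are currently held. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/cmm_pages", "w");
	char buf[32];

	if (!f)
		return 1;
	fputs("1024\n", f);	/* parsed by cmm_pages_handler() on write */
	fclose(f);

	f = fopen("/proc/sys/vm/cmm_pages", "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))	/* formatted as "%ld\n" on read */
		printf("pages currently removed: %s", buf);
	fclose(f);
	return 0;
}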
arch/s390/mm/dump_pagetables.c (new file, 245 lines)
@@ -0,0 +1,245 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

static unsigned long max_addr;

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
	VMEMMAP_NR,
	VMALLOC_NR,
#ifdef CONFIG_64BIT
	MODULES_NR,
#endif
};

static struct addr_marker address_markers[] = {
	[IDENTITY_NR]	  = {0, "Identity Mapping"},
	[KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
	[KERNEL_END_NR]	  = {(unsigned long)&_end, "Kernel Image End"},
	[VMEMMAP_NR]	  = {0, "vmemmap Area"},
	[VMALLOC_NR]	  = {0, "vmalloc Area"},
#ifdef CONFIG_64BIT
	[MODULES_NR]	  = {0, "Modules Area"},
#endif
	{ -1, NULL }
};

struct pg_state {
	int level;
	unsigned int current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
};

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		seq_printf(m, "I\n");
		return;
	}
	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
	seq_putc(m, '\n');
}

static void note_page(struct seq_file *m, struct pg_state *st,
		      unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx",
			   width, st->start_address,
			   width, st->current_address);
		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}

/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
{
	unsigned int prot;
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
		st->current_address = addr;
		pte = pte_offset_kernel(pmd, addr);
		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
		note_page(m, st, prot, 4);
		addr += PAGE_SIZE;
	}
}

#ifdef CONFIG_64BIT
#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
#else
#define _PMD_PROT_MASK 0
#endif

static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
			   pud_t *pud, unsigned long addr)
{
	unsigned int prot;
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
		st->current_address = addr;
		pmd = pmd_offset(pud, addr);
		if (!pmd_none(*pmd)) {
			if (pmd_large(*pmd)) {
				prot = pmd_val(*pmd) & _PMD_PROT_MASK;
				note_page(m, st, prot, 3);
			} else
				walk_pte_level(m, st, pmd, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 3);
		addr += PMD_SIZE;
	}
}

#ifdef CONFIG_64BIT
#define _PUD_PROT_MASK _REGION3_ENTRY_RO
#else
#define _PUD_PROT_MASK 0
#endif

static void walk_pud_level(struct seq_file *m, struct pg_state *st,
			   pgd_t *pgd, unsigned long addr)
{
	unsigned int prot;
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
		st->current_address = addr;
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud))
			if (pud_large(*pud)) {
				prot = pud_val(*pud) & _PUD_PROT_MASK;
				note_page(m, st, prot, 2);
			} else
				walk_pmd_level(m, st, pud, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += PUD_SIZE;
	}
}

static void walk_pgd_level(struct seq_file *m)
{
	unsigned long addr = 0;
	struct pg_state st;
	pgd_t *pgd;
	int i;

	memset(&st, 0, sizeof(st));
	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
		st.current_address = addr;
		pgd = pgd_offset_k(addr);
		if (!pgd_none(*pgd))
			walk_pud_level(m, &st, pgd, addr);
		else
			note_page(m, &st, _PAGE_INVALID, 1);
		addr += PGDIR_SIZE;
	}
	/* Flush out the last page */
	st.current_address = max_addr;
	note_page(m, &st, 0, 0);
}

static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
#ifdef CONFIG_32BIT
	max_addr = 1UL << 31;
#else
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
#endif
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(pt_dump_init);
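Everything this walker produces is read back through the debugfs file created in pt_dump_init(). A hedged userspace sketch (assuming debugfs is mounted at the conventional /sys/kernel/debug, the kernel was built with CONFIG_S390_PTDUMP, and the caller is root, since the file is mode 0400):

/* Illustrative reader, not part of the commit: streams the seq_file
 * output of ptdump_show(), e.g. "---[ Identity Mapping ]---" headers
 * followed by address ranges with RO/RW/I protection flags. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/kernel_page_tables", "r");
	char line[128];

	if (!f) {
		perror("kernel_page_tables");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}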
arch/s390/mm/extable.c (new file, 81 lines)
@@ -0,0 +1,81 @@
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/uaccess.h>

/*
 * Search one exception table for an entry corresponding to the
 * given instruction address, and return the address of the entry,
 * or NULL if none is found.
 * We use a binary search, and thus we assume that the table is
 * already sorted.
 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	const struct exception_table_entry *mid;
	unsigned long addr;

	while (first <= last) {
		mid = ((last - first) >> 1) + first;
		addr = extable_insn(mid);
		if (addr < value)
			first = mid + 1;
		else if (addr > value)
			last = mid - 1;
		else
			return mid;
	}
	return NULL;
}

/*
 * The exception table needs to be sorted so that the binary
 * search that we use to find entries in it works properly.
 * This is used both for the kernel exception table and for
 * the exception tables of modules that get loaded.
 *
 */
static int cmp_ex(const void *a, const void *b)
{
	const struct exception_table_entry *x = a, *y = b;

	/* This compare is only valid after normalization. */
	return x->insn - y->insn;
}

void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
	struct exception_table_entry *p;
	int i;

	/* Normalize entries to being relative to the start of the section */
	for (p = start, i = 0; p < finish; p++, i += 8)
		p->insn += i;
	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
	/* Denormalize all entries */
	for (p = start, i = 0; p < finish; p++, i += 8)
		p->insn -= i;
}

#ifdef CONFIG_MODULES
/*
 * If the exception table is sorted, any referring to the module init
 * will be at the beginning or the end.
 */
void trim_init_extable(struct module *m)
{
	/* Trim the beginning */
	while (m->num_exentries &&
	       within_module_init(extable_insn(&m->extable[0]), m)) {
		m->extable++;
		m->num_exentries--;
	}
	/* Trim the end */
	while (m->num_exentries &&
	       within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
		m->num_exentries--;
}
#endif /* CONFIG_MODULES */
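The normalize/sort/denormalize dance in sort_extable() exists because each insn field is stored relative to its own entry, so raw values from different entries are not directly comparable; biasing each by its offset within the section makes them section-relative, and subtracting the new offsets afterwards restores the self-relative encoding at each entry's new position. A self-contained mock (illustrative only; the bare struct and the fixed 8-byte stride are simplifications of the real exception_table_entry) demonstrates the idea:

/* Standalone demo, not kernel code: sort self-relative offsets by
 * normalizing them to a common base first, then denormalizing. */
#include <stdio.h>
#include <stdlib.h>

struct entry { int insn; };	/* offset relative to the entry itself */

static int cmp(const void *a, const void *b)
{
	/* only valid after normalization, as in cmp_ex() above */
	return ((const struct entry *)a)->insn -
	       ((const struct entry *)b)->insn;
}

int main(void)
{
	struct entry tab[] = { { 40 }, { 8 }, { -16 } };
	int n = 3, i;

	for (i = 0; i < n; i++)	/* normalize: entry i sits at byte 8*i */
		tab[i].insn += 8 * i;
	qsort(tab, n, sizeof(tab[0]), cmp);
	for (i = 0; i < n; i++)	/* denormalize back to self-relative */
		tab[i].insn -= 8 * i;
	for (i = 0; i < n; i++)
		printf("entry %d: insn=%d\n", i, tab[i].insn);
	return 0;
}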
arch/s390/mm/extmem.c (new file, 777 lines)
@@ -0,0 +1,777 @@
/*
 * Author(s)......: Carsten Otte <cotte@de.ibm.com>
 *		    Rob M van der Heij <rvdheij@nl.ibm.com>
 *		    Steven Shultz <shultzss@us.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 2002, 2004
 */

#define KMSG_COMPONENT "extmem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
#include <asm/cpcmd.h>
#include <asm/setup.h>

#define DCSS_LOADSHR	0x00
#define DCSS_LOADNSR	0x04
#define DCSS_PURGESEG	0x08
#define DCSS_FINDSEG	0x0c
#define DCSS_LOADNOLY	0x10
#define DCSS_SEGEXT	0x18
#define DCSS_LOADSHRX	0x20
#define DCSS_LOADNSRX	0x24
#define DCSS_FINDSEGX	0x2c
#define DCSS_SEGEXTX	0x38
#define DCSS_FINDSEGA	0x0c

struct qrange {
	unsigned long start;	/* last byte type */
	unsigned long end;	/* last byte reserved */
};

struct qout64 {
	unsigned long segstart;
	unsigned long segend;
	int segcnt;
	int segrcnt;
	struct qrange range[6];
};

#ifdef CONFIG_64BIT
struct qrange_old {
	unsigned int start;	/* last byte type */
	unsigned int end;	/* last byte reserved */
};

/* output area format for the Diag x'64' old subcode x'18' */
struct qout64_old {
	int segstart;
	int segend;
	int segcnt;
	int segrcnt;
	struct qrange_old range[6];
};
#endif

struct qin64 {
	char qopcode;
	char rsrv1[3];
	char qrcode;
	char rsrv2[3];
	char qname[8];
	unsigned int qoutptr;
	short int qoutlen;
};

struct dcss_segment {
	struct list_head list;
	char dcss_name[8];
	char res_name[15];
	unsigned long start_addr;
	unsigned long end;
	atomic_t ref_count;
	int do_nonshared;
	unsigned int vm_segtype;
	struct qrange range[6];
	int segcnt;
	struct resource *res;
};

static DEFINE_MUTEX(dcss_lock);
static LIST_HEAD(dcss_list);
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
					"EW/EN-MIXED" };
static int loadshr_scode, loadnsr_scode, findseg_scode;
static int segext_scode, purgeseg_scode;
static int scode_set;

/* set correct Diag x'64' subcodes. */
static int
dcss_set_subcodes(void)
{
#ifdef CONFIG_64BIT
	char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
	unsigned long rx, ry;
	int rc;

	if (name == NULL)
		return -ENOMEM;

	rx = (unsigned long) name;
	ry = DCSS_FINDSEGX;

	strcpy(name, "dummy");
	asm volatile(
		"	diag	%0,%1,0x64\n"
		"0:	ipm	%2\n"
		"	srl	%2,28\n"
		"	j	2f\n"
		"1:	la	%2,3\n"
		"2:\n"
		EX_TABLE(0b, 1b)
		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");

	kfree(name);
	/* Diag x'64' new subcodes are supported, set to new subcodes */
	if (rc != 3) {
		loadshr_scode = DCSS_LOADSHRX;
		loadnsr_scode = DCSS_LOADNSRX;
		purgeseg_scode = DCSS_PURGESEG;
		findseg_scode = DCSS_FINDSEGX;
		segext_scode = DCSS_SEGEXTX;
		return 0;
	}
#endif
	/* Diag x'64' new subcodes are not supported, set to old subcodes */
	loadshr_scode = DCSS_LOADNOLY;
	loadnsr_scode = DCSS_LOADNSR;
	purgeseg_scode = DCSS_PURGESEG;
	findseg_scode = DCSS_FINDSEG;
	segext_scode = DCSS_SEGEXT;
	return 0;
}

/*
 * Create the 8 bytes, ebcdic VM segment name from
 * an ascii name.
 */
static void
dcss_mkname(char *name, char *dcss_name)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (name[i] == '\0')
			break;
		dcss_name[i] = toupper(name[i]);
	};
	for (; i < 8; i++)
		dcss_name[i] = ' ';
	ASCEBC(dcss_name, 8);
}


/*
 * search all segments in dcss_list, and return the one
 * namend *name. If not found, return NULL.
 */
static struct dcss_segment *
segment_by_name (char *name)
{
	char dcss_name[9];
	struct list_head *l;
	struct dcss_segment *tmp, *retval = NULL;

	BUG_ON(!mutex_is_locked(&dcss_lock));
	dcss_mkname (name, dcss_name);
	list_for_each (l, &dcss_list) {
		tmp = list_entry (l, struct dcss_segment, list);
		if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
			retval = tmp;
			break;
		}
	}
	return retval;
}


/*
 * Perform a function on a dcss segment.
 */
static inline int
dcss_diag(int *func, void *parameter,
	  unsigned long *ret1, unsigned long *ret2)
{
	unsigned long rx, ry;
	int rc;

	if (scode_set == 0) {
		rc = dcss_set_subcodes();
		if (rc < 0)
			return rc;
		scode_set = 1;
	}
	rx = (unsigned long) parameter;
	ry = (unsigned long) *func;

#ifdef CONFIG_64BIT
	/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
	if (*func > DCSS_SEGEXT)
		asm volatile(
			"	diag	%0,%1,0x64\n"
			"	ipm	%2\n"
			"	srl	%2,28\n"
			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
	/* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
	else
		asm volatile(
			"	sam31\n"
			"	diag	%0,%1,0x64\n"
			"	sam64\n"
			"	ipm	%2\n"
			"	srl	%2,28\n"
			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#else
	asm volatile(
		"	diag	%0,%1,0x64\n"
		"	ipm	%2\n"
		"	srl	%2,28\n"
		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
#endif
	*ret1 = rx;
	*ret2 = ry;
	return rc;
}

static inline int
dcss_diag_translate_rc (int vm_rc) {
	if (vm_rc == 44)
		return -ENOENT;
	return -EIO;
}


/* do a diag to get info about a segment.
 * fills start_address, end and vm_segtype fields
 */
static int
query_segment_type (struct dcss_segment *seg)
{
	unsigned long dummy, vmrc;
	int diag_cc, rc, i;
	struct qout64 *qout;
	struct qin64 *qin;

	qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
	qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
	if ((qin == NULL) || (qout == NULL)) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* initialize diag input parameters */
	qin->qopcode = DCSS_FINDSEGA;
	qin->qoutptr = (unsigned long) qout;
	qin->qoutlen = sizeof(struct qout64);
	memcpy (qin->qname, seg->dcss_name, 8);

	diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc);

	if (diag_cc < 0) {
		rc = diag_cc;
		goto out_free;
	}
	if (diag_cc > 1) {
		pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
		rc = dcss_diag_translate_rc (vmrc);
		goto out_free;
	}

#ifdef CONFIG_64BIT
	/* Only old format of output area of Diagnose x'64' is supported,
	   copy data for the new format. */
	if (segext_scode == DCSS_SEGEXT) {
		struct qout64_old *qout_old;
		qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
		if (qout_old == NULL) {
			rc = -ENOMEM;
			goto out_free;
		}
		memcpy(qout_old, qout, sizeof(struct qout64_old));
		qout->segstart = (unsigned long) qout_old->segstart;
		qout->segend = (unsigned long) qout_old->segend;
		qout->segcnt = qout_old->segcnt;
		qout->segrcnt = qout_old->segrcnt;

		if (qout->segcnt > 6)
			qout->segrcnt = 6;
		for (i = 0; i < qout->segrcnt; i++) {
			qout->range[i].start =
				(unsigned long) qout_old->range[i].start;
			qout->range[i].end =
				(unsigned long) qout_old->range[i].end;
		}
		kfree(qout_old);
	}
#endif
	if (qout->segcnt > 6) {
		rc = -EOPNOTSUPP;
		goto out_free;
	}

	if (qout->segcnt == 1) {
		seg->vm_segtype = qout->range[0].start & 0xff;
	} else {
		/* multi-part segment. only one type supported here:
		    - all parts are contiguous
		    - all parts are either EW or EN type
		    - maximum 6 parts allowed */
		unsigned long start = qout->segstart >> PAGE_SHIFT;
		for (i=0; i<qout->segcnt; i++) {
			if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
			    ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) {
				rc = -EOPNOTSUPP;
				goto out_free;
			}
			if (start != qout->range[i].start >> PAGE_SHIFT) {
				rc = -EOPNOTSUPP;
				goto out_free;
			}
			start = (qout->range[i].end >> PAGE_SHIFT) + 1;
		}
		seg->vm_segtype = SEG_TYPE_EWEN;
	}

	/* analyze diag output and update seg */
	seg->start_addr = qout->segstart;
	seg->end = qout->segend;

	memcpy (seg->range, qout->range, 6*sizeof(struct qrange));
	seg->segcnt = qout->segcnt;

	rc = 0;

 out_free:
	kfree(qin);
	kfree(qout);
	return rc;
}

/*
 * get info about a segment
 * possible return values:
 * -ENOSYS  : we are not running on VM
 * -EIO     : could not perform query diagnose
 * -ENOENT  : no such segment
 * -EOPNOTSUPP: multi-part segment cannot be used with linux
 * -ENOMEM  : out of memory
 * 0 .. 6   : type of segment as defined in include/asm-s390/extmem.h
 */
int
segment_type (char* name)
{
	int rc;
	struct dcss_segment seg;

	if (!MACHINE_IS_VM)
		return -ENOSYS;

	dcss_mkname(name, seg.dcss_name);
	rc = query_segment_type (&seg);
	if (rc < 0)
		return rc;
	return seg.vm_segtype;
}

/*
 * check if segment collides with other segments that are currently loaded
 * returns 1 if this is the case, 0 if no collision was found
 */
static int
segment_overlaps_others (struct dcss_segment *seg)
{
	struct list_head *l;
	struct dcss_segment *tmp;

	BUG_ON(!mutex_is_locked(&dcss_lock));
	list_for_each(l, &dcss_list) {
		tmp = list_entry(l, struct dcss_segment, list);
		if ((tmp->start_addr >> 20) > (seg->end >> 20))
			continue;
		if ((tmp->end >> 20) < (seg->start_addr >> 20))
			continue;
		if (seg == tmp)
			continue;
		return 1;
	}
	return 0;
}

/*
 * real segment loading function, called from segment_load
 */
static int
__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
{
	unsigned long start_addr, end_addr, dummy;
	struct dcss_segment *seg;
	int rc, diag_cc;

	start_addr = end_addr = 0;
	seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
	if (seg == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	dcss_mkname (name, seg->dcss_name);
	rc = query_segment_type (seg);
	if (rc < 0)
		goto out_free;

	if (loadshr_scode == DCSS_LOADSHRX) {
		if (segment_overlaps_others(seg)) {
			rc = -EBUSY;
			goto out_free;
		}
	}

	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);

	if (rc)
		goto out_free;

	seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (seg->res == NULL) {
		rc = -ENOMEM;
		goto out_shared;
	}
	seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	seg->res->start = seg->start_addr;
	seg->res->end = seg->end;
	memcpy(&seg->res_name, seg->dcss_name, 8);
	EBCASC(seg->res_name, 8);
	seg->res_name[8] = '\0';
	strncat(seg->res_name, " (DCSS)", 7);
	seg->res->name = seg->res_name;
	rc = seg->vm_segtype;
	if (rc == SEG_TYPE_SC ||
	    ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
		seg->res->flags |= IORESOURCE_READONLY;
	if (request_resource(&iomem_resource, seg->res)) {
		rc = -EBUSY;
		kfree(seg->res);
		goto out_shared;
	}

	if (do_nonshared)
		diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	else
		diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	if (diag_cc < 0) {
		dcss_diag(&purgeseg_scode, seg->dcss_name,
				&dummy, &dummy);
		rc = diag_cc;
		goto out_resource;
	}
	if (diag_cc > 1) {
		pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
			   end_addr);
		rc = dcss_diag_translate_rc(end_addr);
		dcss_diag(&purgeseg_scode, seg->dcss_name,
				&dummy, &dummy);
		goto out_resource;
	}
	seg->start_addr = start_addr;
	seg->end = end_addr;
	seg->do_nonshared = do_nonshared;
	atomic_set(&seg->ref_count, 1);
	list_add(&seg->list, &dcss_list);
	*addr = seg->start_addr;
	*end  = seg->end;
	if (do_nonshared)
		pr_info("DCSS %s of range %p to %p and type %s loaded as "
			"exclusive-writable\n", name, (void*) seg->start_addr,
			(void*) seg->end, segtype_string[seg->vm_segtype]);
	else {
		pr_info("DCSS %s of range %p to %p and type %s loaded in "
			"shared access mode\n", name, (void*) seg->start_addr,
			(void*) seg->end, segtype_string[seg->vm_segtype]);
	}
	goto out;
 out_resource:
	release_resource(seg->res);
	kfree(seg->res);
 out_shared:
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
	kfree(seg);
 out:
	return rc;
}

/*
 * this function loads a DCSS segment
 * name         : name of the DCSS
 * do_nonshared : 0 indicates that the dcss should be shared with other linux images
 *                1 indicates that the dcss should be exclusive for this linux image
 * addr         : will be filled with start address of the segment
 * end          : will be filled with end address of the segment
 * return values:
 * -ENOSYS  : we are not running on VM
 * -EIO     : could not perform query or load diagnose
 * -ENOENT  : no such segment
 * -EOPNOTSUPP: multi-part segment cannot be used with linux
 * -ENOSPC  : segment cannot be used (overlaps with storage)
 * -EBUSY   : segment can temporarily not be used (overlaps with dcss)
 * -ERANGE  : segment cannot be used (exceeds kernel mapping range)
 * -EPERM   : segment is currently loaded with incompatible permissions
 * -ENOMEM  : out of memory
 * 0 .. 6   : type of segment as defined in include/asm-s390/extmem.h
 */
int
segment_load (char *name, int do_nonshared, unsigned long *addr,
	      unsigned long *end)
{
	struct dcss_segment *seg;
	int rc;

	if (!MACHINE_IS_VM)
		return -ENOSYS;

	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);
	if (seg == NULL)
		rc = __segment_load (name, do_nonshared, addr, end);
	else {
		if (do_nonshared == seg->do_nonshared) {
			atomic_inc(&seg->ref_count);
			*addr = seg->start_addr;
			*end  = seg->end;
			rc    = seg->vm_segtype;
		} else {
			*addr = *end = 0;
			rc    = -EPERM;
		}
	}
	mutex_unlock(&dcss_lock);
	return rc;
}

/*
 * this function modifies the shared state of a DCSS segment. note that
 * name         : name of the DCSS
 * do_nonshared : 0 indicates that the dcss should be shared with other linux images
 *                1 indicates that the dcss should be exclusive for this linux image
 * return values:
 * -EIO     : could not perform load diagnose (segment gone!)
 * -ENOENT  : no such segment (segment gone!)
 * -EAGAIN  : segment is in use by other exploiters, try later
 * -EINVAL  : no segment with the given name is currently loaded - name invalid
 * -EBUSY   : segment can temporarily not be used (overlaps with dcss)
 * 0	    : operation succeeded
 */
int
segment_modify_shared (char *name, int do_nonshared)
{
	struct dcss_segment *seg;
	unsigned long start_addr, end_addr, dummy;
	int rc, diag_cc;

	start_addr = end_addr = 0;
	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);
	if (seg == NULL) {
		rc = -EINVAL;
		goto out_unlock;
	}
	if (do_nonshared == seg->do_nonshared) {
		pr_info("DCSS %s is already in the requested access "
			"mode\n", name);
		rc = 0;
		goto out_unlock;
	}
	if (atomic_read (&seg->ref_count) != 1) {
		pr_warning("DCSS %s is in use and cannot be reloaded\n",
			   name);
		rc = -EAGAIN;
		goto out_unlock;
	}
	release_resource(seg->res);
	if (do_nonshared)
		seg->res->flags &= ~IORESOURCE_READONLY;
	else
		if (seg->vm_segtype == SEG_TYPE_SR ||
		    seg->vm_segtype == SEG_TYPE_ER)
			seg->res->flags |= IORESOURCE_READONLY;

	if (request_resource(&iomem_resource, seg->res)) {
		pr_warning("DCSS %s overlaps with used memory resources "
			   "and cannot be reloaded\n", name);
		rc = -EBUSY;
		kfree(seg->res);
		goto out_del_mem;
	}

	dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
	if (do_nonshared)
		diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	else
		diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name,
				&start_addr, &end_addr);
	if (diag_cc < 0) {
		rc = diag_cc;
		goto out_del_res;
	}
	if (diag_cc > 1) {
		pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
			   end_addr);
		rc = dcss_diag_translate_rc(end_addr);
		goto out_del_res;
	}
	seg->start_addr = start_addr;
	seg->end = end_addr;
	seg->do_nonshared = do_nonshared;
	rc = 0;
	goto out_unlock;
 out_del_res:
	release_resource(seg->res);
	kfree(seg->res);
 out_del_mem:
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
 out_unlock:
	mutex_unlock(&dcss_lock);
	return rc;
}

/*
 * Decrease the use count of a DCSS segment and remove
 * it from the address space if nobody is using it
 * any longer.
 */
void
segment_unload(char *name)
{
	unsigned long dummy;
	struct dcss_segment *seg;

	if (!MACHINE_IS_VM)
		return;

	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);
	if (seg == NULL) {
		pr_err("Unloading unknown DCSS %s failed\n", name);
		goto out_unlock;
	}
	if (atomic_dec_return(&seg->ref_count) != 0)
		goto out_unlock;
	release_resource(seg->res);
	kfree(seg->res);
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
out_unlock:
	mutex_unlock(&dcss_lock);
}

/*
 * save segment content permanently
 */
void
segment_save(char *name)
{
	struct dcss_segment *seg;
	char cmd1[160];
	char cmd2[80];
	int i, response;

	if (!MACHINE_IS_VM)
		return;

	mutex_lock(&dcss_lock);
	seg = segment_by_name (name);

	if (seg == NULL) {
		pr_err("Saving unknown DCSS %s failed\n", name);
		goto out;
	}

	sprintf(cmd1, "DEFSEG %s", name);
	for (i=0; i<seg->segcnt; i++) {
		sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
			seg->range[i].start >> PAGE_SHIFT,
			seg->range[i].end >> PAGE_SHIFT,
			segtype_string[seg->range[i].start & 0xff]);
	}
	sprintf(cmd2, "SAVESEG %s", name);
	response = 0;
	cpcmd(cmd1, NULL, 0, &response);
	if (response) {
		pr_err("Saving a DCSS failed with DEFSEG response code "
		       "%i\n", response);
		goto out;
	}
	cpcmd(cmd2, NULL, 0, &response);
	if (response) {
		pr_err("Saving a DCSS failed with SAVESEG response code "
		       "%i\n", response);
		goto out;
	}
out:
	mutex_unlock(&dcss_lock);
}

/*
 * print appropriate error message for segment_load()/segment_type()
 * return code
 */
void segment_warning(int rc, char *seg_name)
{
	switch (rc) {
	case -ENOENT:
		pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
		break;
	case -ENOSYS:
		pr_err("DCSS %s cannot be loaded or queried without "
		       "z/VM\n", seg_name);
		break;
	case -EIO:
		pr_err("Loading or querying DCSS %s resulted in a "
		       "hardware error\n", seg_name);
		break;
	case -EOPNOTSUPP:
		pr_err("DCSS %s has multiple page ranges and cannot be "
		       "loaded or queried\n", seg_name);
		break;
	case -ENOSPC:
		pr_err("DCSS %s overlaps with used storage and cannot "
		       "be loaded\n", seg_name);
		break;
	case -EBUSY:
		pr_err("%s needs used memory resources and cannot be "
		       "loaded or queried\n", seg_name);
		break;
	case -EPERM:
		pr_err("DCSS %s is already loaded in a different access "
		       "mode\n", seg_name);
		break;
	case -ENOMEM:
		pr_err("There is not enough memory to load or query "
		       "DCSS %s\n", seg_name);
		break;
	case -ERANGE:
		pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
		       "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
		break;
	default:
		break;
	}
}

EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared);
EXPORT_SYMBOL(segment_warning);
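The exported interface above is meant to be driven by other kernel code. A hypothetical client module (sketch only; "MYDCSS" is a made-up segment name) would follow the documented return convention: negative values are errors that segment_warning() can report, 0..6 is the segment type:

/* Hypothetical in-kernel client, not part of the commit. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/extmem.h>

static unsigned long seg_start, seg_end;

static int __init dcss_demo_init(void)
{
	int rc;

	/* do_nonshared = 0: share the DCSS with other Linux images */
	rc = segment_load("MYDCSS", 0, &seg_start, &seg_end);
	if (rc < 0) {
		segment_warning(rc, "MYDCSS");
		return rc;
	}
	pr_info("MYDCSS type %d mapped at %lx-%lx\n", rc, seg_start, seg_end);
	return 0;
}

static void __exit dcss_demo_exit(void)
{
	segment_unload("MYDCSS");	/* drops the reference taken above */
}

module_init(dcss_demo_init);
module_exit(dcss_demo_exit);
MODULE_LICENSE("GPL");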
792
arch/s390/mm/fault.c
Normal file
792
arch/s390/mm/fault.c
Normal file
|
@ -0,0 +1,792 @@
|
|||
/*
|
||||
* S390 version
|
||||
* Copyright IBM Corp. 1999
|
||||
* Author(s): Hartmut Penner (hp@de.ibm.com)
|
||||
* Ulrich Weigand (uweigand@de.ibm.com)
|
||||
*
|
||||
* Derived from "arch/i386/mm/fault.c"
|
||||
* Copyright (C) 1995 Linus Torvalds
|
||||
*/
|
||||
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/facility.h>
|
||||
#include "../kernel/entry.h"
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#define __FAIL_ADDR_MASK 0x7ffff000
|
||||
#define __SUBCODE_MASK 0x0200
|
||||
#define __PF_RES_FIELD 0ULL
|
||||
#else /* CONFIG_64BIT */
|
||||
#define __FAIL_ADDR_MASK -4096L
|
||||
#define __SUBCODE_MASK 0x0600
|
||||
#define __PF_RES_FIELD 0x8000000000000000ULL
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
#define VM_FAULT_BADCONTEXT 0x010000
|
||||
#define VM_FAULT_BADMAP 0x020000
|
||||
#define VM_FAULT_BADACCESS 0x040000
|
||||
#define VM_FAULT_SIGNAL 0x080000
|
||||
#define VM_FAULT_PFAULT 0x100000
|
||||
|
||||
static unsigned long store_indication __read_mostly;
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
static int __init fault_init(void)
|
||||
{
|
||||
if (test_facility(75))
|
||||
store_indication = 0xc00;
|
||||
return 0;
|
||||
}
|
||||
early_initcall(fault_init);
|
||||
#endif
|
||||
|
||||
static inline int notify_page_fault(struct pt_regs *regs)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* kprobe_running() needs smp_processor_id() */
|
||||
if (kprobes_built_in() && !user_mode(regs)) {
|
||||
preempt_disable();
|
||||
if (kprobe_running() && kprobe_fault_handler(regs, 14))
|
||||
ret = 1;
|
||||
preempt_enable();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Unlock any spinlocks which will prevent us from getting the
|
||||
* message out.
|
||||
*/
|
||||
void bust_spinlocks(int yes)
|
||||
{
|
||||
if (yes) {
|
||||
oops_in_progress = 1;
|
||||
} else {
|
||||
int loglevel_save = console_loglevel;
|
||||
console_unblank();
|
||||
oops_in_progress = 0;
|
||||
/*
|
||||
* OK, the message is on the console. Now we call printk()
|
||||
* without oops_in_progress set so that printk will give klogd
|
||||
* a poke. Hold onto your hats...
|
||||
*/
|
||||
console_loglevel = 15;
|
||||
printk(" ");
|
||||
console_loglevel = loglevel_save;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the address space associated with the fault.
|
||||
* Returns 0 for kernel space and 1 for user space.
|
||||
*/
|
||||
static inline int user_space_fault(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long trans_exc_code;
|
||||
|
||||
/*
|
||||
* The lowest two bits of the translation exception
|
||||
* identification indicate which paging table was used.
|
||||
*/
|
||||
trans_exc_code = regs->int_parm_long & 3;
|
||||
if (trans_exc_code == 3) /* home space -> kernel */
|
||||
return 0;
|
||||
if (user_mode(regs))
|
||||
return 1;
|
||||
if (trans_exc_code == 2) /* secondary space -> set_fs */
|
||||
return current->thread.mm_segment.ar4;
|
||||
if (current->flags & PF_VCPU)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bad_address(void *p)
|
||||
{
|
||||
unsigned long dummy;
|
||||
|
||||
return probe_kernel_address((unsigned long *)p, dummy);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
static void dump_pagetable(unsigned long asce, unsigned long address)
|
||||
{
|
||||
unsigned long *table = __va(asce & PAGE_MASK);
|
||||
|
||||
pr_alert("AS:%016lx ", asce);
|
||||
switch (asce & _ASCE_TYPE_MASK) {
|
||||
case _ASCE_TYPE_REGION1:
|
||||
table = table + ((address >> 53) & 0x7ff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont("R1:%016lx ", *table);
|
||||
if (*table & _REGION_ENTRY_INVALID)
|
||||
goto out;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
/* fallthrough */
|
||||
case _ASCE_TYPE_REGION2:
|
||||
table = table + ((address >> 42) & 0x7ff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont("R2:%016lx ", *table);
|
||||
if (*table & _REGION_ENTRY_INVALID)
|
||||
goto out;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
/* fallthrough */
|
||||
case _ASCE_TYPE_REGION3:
|
||||
table = table + ((address >> 31) & 0x7ff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont("R3:%016lx ", *table);
|
||||
if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
|
||||
goto out;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
/* fallthrough */
|
||||
case _ASCE_TYPE_SEGMENT:
|
||||
table = table + ((address >> 20) & 0x7ff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont(KERN_CONT "S:%016lx ", *table);
|
||||
if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
|
||||
goto out;
|
||||
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
}
|
||||
table = table + ((address >> 12) & 0xff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont("P:%016lx ", *table);
|
||||
out:
|
||||
pr_cont("\n");
|
||||
return;
|
||||
bad:
|
||||
pr_cont("BAD\n");
|
||||
}
|
||||
|
||||
#else /* CONFIG_64BIT */
|
||||
|
||||
static void dump_pagetable(unsigned long asce, unsigned long address)
|
||||
{
|
||||
unsigned long *table = __va(asce & PAGE_MASK);
|
||||
|
||||
pr_alert("AS:%08lx ", asce);
|
||||
table = table + ((address >> 20) & 0x7ff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont("S:%08lx ", *table);
|
||||
if (*table & _SEGMENT_ENTRY_INVALID)
|
||||
goto out;
|
||||
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
table = table + ((address >> 12) & 0xff);
|
||||
if (bad_address(table))
|
||||
goto bad;
|
||||
pr_cont("P:%08lx ", *table);
|
||||
out:
|
||||
pr_cont("\n");
|
||||
return;
|
||||
bad:
|
||||
pr_cont("BAD\n");
|
||||
}
|
||||
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
static void dump_fault_info(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long asce;
|
||||
|
||||
pr_alert("Fault in ");
|
||||
switch (regs->int_parm_long & 3) {
|
||||
case 3:
|
||||
pr_cont("home space ");
|
||||
break;
|
||||
case 2:
|
||||
pr_cont("secondary space ");
|
||||
break;
|
||||
case 1:
|
||||
pr_cont("access register ");
|
||||
break;
|
||||
case 0:
|
||||
pr_cont("primary space ");
|
||||
break;
|
||||
}
|
||||
pr_cont("mode while using ");
|
||||
if (!user_space_fault(regs)) {
|
||||
asce = S390_lowcore.kernel_asce;
|
||||
pr_cont("kernel ");
|
||||
}
|
||||
#ifdef CONFIG_PGSTE
|
||||
else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
|
||||
struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
|
||||
asce = gmap->asce;
|
||||
pr_cont("gmap ");
|
||||
}
|
||||
#endif
|
||||
else {
|
||||
asce = S390_lowcore.user_asce;
|
||||
pr_cont("user ");
|
||||
}
|
||||
pr_cont("ASCE.\n");
|
||||
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
|
||||
}
|
||||
|
||||
static inline void report_user_fault(struct pt_regs *regs, long signr)
|
||||
{
|
||||
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
|
||||
return;
|
||||
if (!unhandled_signal(current, signr))
|
||||
return;
|
||||
if (!printk_ratelimit())
|
||||
return;
|
||||
printk(KERN_ALERT "User process fault: interruption code 0x%X ",
|
||||
regs->int_code);
|
||||
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
|
||||
printk(KERN_CONT "\n");
|
||||
printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
|
||||
regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
|
||||
dump_fault_info(regs);
|
||||
show_regs(regs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Send SIGSEGV to task. This is an external routine
|
||||
* to keep the stack usage of do_page_fault small.
|
||||
*/
|
||||
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
|
||||
{
|
||||
struct siginfo si;
|
||||
|
||||
report_user_fault(regs, SIGSEGV);
|
||||
si.si_signo = SIGSEGV;
|
||||
si.si_code = si_code;
|
||||
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
|
||||
force_sig_info(SIGSEGV, &si, current);
|
||||
}
|
||||
|
||||
static noinline void do_no_context(struct pt_regs *regs)
|
||||
{
|
||||
const struct exception_table_entry *fixup;
|
||||
unsigned long address;
|
||||
|
||||
/* Are we prepared to handle this kernel fault? */
|
||||
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
|
||||
if (fixup) {
|
||||
regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Oops. The kernel tried to access some bad page. We'll have to
|
||||
* terminate things with extreme prejudice.
|
||||
*/
|
||||
address = regs->int_parm_long & __FAIL_ADDR_MASK;
|
||||
if (!user_space_fault(regs))
|
||||
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
|
||||
" in virtual kernel address space\n");
|
||||
else
|
||||
printk(KERN_ALERT "Unable to handle kernel paging request"
|
||||
" in virtual user address space\n");
|
||||
printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
|
||||
regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
|
||||
dump_fault_info(regs);
|
||||
die(regs, "Oops");
|
||||
do_exit(SIGKILL);
|
||||
}
|
||||
|
||||
static noinline void do_low_address(struct pt_regs *regs)
|
||||
{
|
||||
/* Low-address protection hit in kernel mode means
|
||||
NULL pointer write access in kernel mode. */
|
||||
if (regs->psw.mask & PSW_MASK_PSTATE) {
|
||||
/* Low-address protection hit in user mode 'cannot happen'. */
|
||||
die(regs, "Low-address protection");
|
||||
do_exit(SIGKILL);
|
||||
}
|
||||
|
||||
do_no_context(regs);
|
||||
}
|
||||
|
||||
static noinline void do_sigbus(struct pt_regs *regs)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
struct siginfo si;
|
||||
|
||||
/*
|
||||
* Send a sigbus, regardless of whether we were in kernel
|
||||
* or user mode.
|
||||
*/
|
||||
si.si_signo = SIGBUS;
|
||||
si.si_errno = 0;
|
||||
si.si_code = BUS_ADRERR;
|
||||
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
|
||||
force_sig_info(SIGBUS, &si, tsk);
|
||||
}
|
||||
|
||||
static noinline void do_fault_error(struct pt_regs *regs, int fault)
|
||||
{
|
||||
int si_code;
|
||||
|
||||
switch (fault) {
|
||||
case VM_FAULT_BADACCESS:
|
||||
case VM_FAULT_BADMAP:
|
||||
/* Bad memory access. Check if it is kernel or user space. */
|
||||
if (user_mode(regs)) {
|
||||
/* User mode accesses just cause a SIGSEGV */
|
||||
si_code = (fault == VM_FAULT_BADMAP) ?
|
||||
SEGV_MAPERR : SEGV_ACCERR;
|
||||
do_sigsegv(regs, si_code);
|
||||
return;
|
||||
}
|
||||
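/* fallthrough: kernel mode faults end up in do_no_context() */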
case VM_FAULT_BADCONTEXT:
|
||||
case VM_FAULT_PFAULT:
|
||||
do_no_context(regs);
|
||||
break;
|
||||
case VM_FAULT_SIGNAL:
|
||||
if (!user_mode(regs))
|
||||
do_no_context(regs);
|
||||
break;
|
||||
default: /* fault & VM_FAULT_ERROR */
|
||||
if (fault & VM_FAULT_OOM) {
|
||||
if (!user_mode(regs))
|
||||
do_no_context(regs);
|
||||
else
|
||||
pagefault_out_of_memory();
|
||||
} else if (fault & VM_FAULT_SIGSEGV) {
|
||||
/* Kernel mode? Handle exceptions or die */
|
||||
if (!user_mode(regs))
|
||||
do_no_context(regs);
|
||||
else
|
||||
do_sigsegv(regs, SEGV_MAPERR);
|
||||
} else if (fault & VM_FAULT_SIGBUS) {
|
||||
/* Kernel mode? Handle exceptions or die */
|
||||
if (!user_mode(regs))
|
||||
do_no_context(regs);
|
||||
else
|
||||
do_sigbus(regs);
|
||||
} else
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine handles page faults. It determines the address,
|
||||
* and the problem, and then passes it off to one of the appropriate
|
||||
* routines.
|
||||
*
|
||||
* interruption code (int_code):
|
||||
* 04 Protection -> Write-Protection (suppression)
|
||||
* 10 Segment translation -> Not present (nullification)
|
||||
* 11 Page translation -> Not present (nullification)
|
||||
* 3b Region third trans. -> Not present (nullification)
|
||||
*/
|
||||
static inline int do_exception(struct pt_regs *regs, int access)
|
||||
{
|
||||
#ifdef CONFIG_PGSTE
|
||||
struct gmap *gmap;
|
||||
#endif
|
||||
struct task_struct *tsk;
|
||||
struct mm_struct *mm;
|
||||
struct vm_area_struct *vma;
|
||||
unsigned long trans_exc_code;
|
||||
unsigned long address;
|
||||
unsigned int flags;
|
||||
int fault;
|
||||
|
||||
tsk = current;
|
||||
/*
|
||||
* The instruction that caused the program check has
|
||||
* been nullified. Don't signal single step via SIGTRAP.
|
||||
*/
|
||||
clear_pt_regs_flag(regs, PIF_PER_TRAP);
|
||||
|
||||
if (notify_page_fault(regs))
|
||||
return 0;
|
||||
|
||||
mm = tsk->mm;
|
||||
trans_exc_code = regs->int_parm_long;
|
||||
|
||||
/*
|
||||
* Verify that the fault happened in user space, that
|
||||
* we are not in an interrupt and that there is a
|
||||
* user context.
|
||||
*/
|
||||
fault = VM_FAULT_BADCONTEXT;
|
||||
if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
|
||||
goto out;
|
||||
|
||||
address = trans_exc_code & __FAIL_ADDR_MASK;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
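/* The access counts as a write if the caller asked for VM_WRITE or
 * the store indication bits in the TEID report a store access. */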
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
#ifdef CONFIG_PGSTE
|
||||
gmap = (current->flags & PF_VCPU) ?
|
||||
(struct gmap *) S390_lowcore.gmap : NULL;
|
||||
if (gmap) {
|
||||
current->thread.gmap_addr = address;
|
||||
address = __gmap_translate(gmap, address);
|
||||
if (address == -EFAULT) {
|
||||
fault = VM_FAULT_BADMAP;
|
||||
goto out_up;
|
||||
}
|
||||
if (gmap->pfault_enabled)
|
||||
flags |= FAULT_FLAG_RETRY_NOWAIT;
|
||||
}
|
||||
#endif
|
||||
|
||||
retry:
|
||||
fault = VM_FAULT_BADMAP;
|
||||
vma = find_vma(mm, address);
|
||||
if (!vma)
|
||||
goto out_up;
|
||||
|
||||
if (unlikely(vma->vm_start > address)) {
|
||||
if (!(vma->vm_flags & VM_GROWSDOWN))
|
||||
goto out_up;
|
||||
if (expand_stack(vma, address))
|
||||
goto out_up;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ok, we have a good vm_area for this memory access, so
|
||||
* we can handle it..
|
||||
*/
|
||||
fault = VM_FAULT_BADACCESS;
|
||||
if (unlikely(!(vma->vm_flags & access)))
|
||||
goto out_up;
|
||||
|
||||
if (is_vm_hugetlb_page(vma))
|
||||
address &= HPAGE_MASK;
|
||||
/*
|
||||
* If for any reason at all we couldn't handle the fault,
|
||||
* make sure we exit gracefully rather than endlessly redo
|
||||
* the fault.
|
||||
*/
|
||||
fault = handle_mm_fault(mm, vma, address, flags);
|
||||
/* No reason to continue if interrupted by SIGKILL. */
|
||||
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
|
||||
fault = VM_FAULT_SIGNAL;
|
||||
goto out;
|
||||
}
|
||||
if (unlikely(fault & VM_FAULT_ERROR))
|
||||
goto out_up;
|
||||
|
||||
/*
|
||||
* Major/minor page fault accounting is only done on the
|
||||
* initial attempt. If we go through a retry, it is extremely
|
||||
* likely that the page will be found in page cache at that point.
|
||||
*/
|
||||
if (flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
if (fault & VM_FAULT_MAJOR) {
|
||||
tsk->maj_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
|
||||
regs, address);
|
||||
} else {
|
||||
tsk->min_flt++;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
|
||||
regs, address);
|
||||
}
|
||||
if (fault & VM_FAULT_RETRY) {
|
||||
#ifdef CONFIG_PGSTE
|
||||
if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
|
||||
/* FAULT_FLAG_RETRY_NOWAIT has been set,
|
||||
* mmap_sem has not been released */
|
||||
current->thread.gmap_pfault = 1;
|
||||
fault = VM_FAULT_PFAULT;
|
||||
goto out_up;
|
||||
}
|
||||
#endif
|
||||
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
|
||||
* of starvation. */
|
||||
flags &= ~(FAULT_FLAG_ALLOW_RETRY |
|
||||
FAULT_FLAG_RETRY_NOWAIT);
|
||||
flags |= FAULT_FLAG_TRIED;
|
||||
down_read(&mm->mmap_sem);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
#ifdef CONFIG_PGSTE
|
||||
if (gmap) {
|
||||
address = __gmap_link(gmap, current->thread.gmap_addr,
|
||||
address);
|
||||
if (address == -EFAULT) {
|
||||
fault = VM_FAULT_BADMAP;
|
||||
goto out_up;
|
||||
}
|
||||
if (address == -ENOMEM) {
|
||||
fault = VM_FAULT_OOM;
|
||||
goto out_up;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
fault = 0;
|
||||
out_up:
|
||||
up_read(&mm->mmap_sem);
|
||||
out:
|
||||
return fault;
|
||||
}
|
||||
|
||||
void __kprobes do_protection_exception(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long trans_exc_code;
|
||||
int fault;
|
||||
|
||||
trans_exc_code = regs->int_parm_long;
|
||||
/*
|
||||
* Protection exceptions are suppressing, decrement psw address.
|
||||
* The exception to this rule are aborted transactions, for these
|
||||
* the PSW already points to the correct location.
|
||||
*/
|
||||
if (!(regs->int_code & 0x200))
|
||||
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
|
||||
/*
|
||||
* Check for low-address protection. This needs to be treated
|
||||
* as a special case because the translation exception code
|
||||
* field is not guaranteed to contain valid data in this case.
|
||||
*/
|
||||
if (unlikely(!(trans_exc_code & 4))) {
|
||||
do_low_address(regs);
|
||||
return;
|
||||
}
|
||||
fault = do_exception(regs, VM_WRITE);
|
||||
if (unlikely(fault))
|
||||
do_fault_error(regs, fault);
|
||||
}
|
||||
|
||||
void __kprobes do_dat_exception(struct pt_regs *regs)
|
||||
{
|
||||
int access, fault;
|
||||
|
||||
access = VM_READ | VM_EXEC | VM_WRITE;
|
||||
fault = do_exception(regs, access);
|
||||
if (unlikely(fault))
|
||||
do_fault_error(regs, fault);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PFAULT
|
||||
/*
|
||||
* 'pfault' pseudo page faults routines.
|
||||
*/
|
||||
static int pfault_disable;
|
||||
|
||||
static int __init nopfault(char *str)
|
||||
{
|
||||
pfault_disable = 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("nopfault", nopfault);
|
||||
|
||||
struct pfault_refbk {
|
||||
u16 refdiagc;
|
||||
u16 reffcode;
|
||||
u16 refdwlen;
|
||||
u16 refversn;
|
||||
u64 refgaddr;
|
||||
u64 refselmk;
|
||||
u64 refcmpmk;
|
||||
u64 reserved;
|
||||
} __attribute__ ((packed, aligned(8)));
|
||||
|
||||
int pfault_init(void)
|
||||
{
|
||||
struct pfault_refbk refbk = {
|
||||
.refdiagc = 0x258,
|
||||
.reffcode = 0,
|
||||
.refdwlen = 5,
|
||||
.refversn = 2,
|
||||
.refgaddr = __LC_CURRENT_PID,
|
||||
.refselmk = 1ULL << 48,
|
||||
.refcmpmk = 1ULL << 48,
|
||||
.reserved = __PF_RES_FIELD };
|
||||
int rc;
|
||||
|
||||
if (pfault_disable)
|
||||
return -1;
|
||||
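/* diag 0x258, function code 0, establishes the pfault token; the
 * exception table entry turns an unsupported diag into rc = 8. */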
asm volatile(
|
||||
" diag %1,%0,0x258\n"
|
||||
"0: j 2f\n"
|
||||
"1: la %0,8\n"
|
||||
"2:\n"
|
||||
EX_TABLE(0b,1b)
|
||||
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
|
||||
return rc;
|
||||
}
|
||||
|
||||
void pfault_fini(void)
|
||||
{
|
||||
struct pfault_refbk refbk = {
|
||||
.refdiagc = 0x258,
|
||||
.reffcode = 1,
|
||||
.refdwlen = 5,
|
||||
.refversn = 2,
|
||||
};
|
||||
|
||||
if (pfault_disable)
|
||||
return;
|
||||
asm volatile(
|
||||
" diag %0,0,0x258\n"
|
||||
"0:\n"
|
||||
EX_TABLE(0b,0b)
|
||||
: : "a" (&refbk), "m" (refbk) : "cc");
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(pfault_lock);
|
||||
static LIST_HEAD(pfault_list);
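
/*
 * thread.pfault_wait state, as maintained by the handler below:
 *   0  no pseudo page fault pending
 *   1  initial interrupt seen, task is queued on pfault_list and sleeping
 *  -1  completion interrupt arrived before the initial interrupt
 */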
|
||||
|
||||
static void pfault_interrupt(struct ext_code ext_code,
|
||||
unsigned int param32, unsigned long param64)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
__u16 subcode;
|
||||
pid_t pid;
|
||||
|
||||
/*
|
||||
* Get the external interruption subcode & pfault
|
||||
* initial/completion signal bit. VM stores this
|
||||
* in the 'cpu address' field associated with the
|
||||
* external interrupt.
|
||||
*/
|
||||
subcode = ext_code.subcode;
|
||||
if ((subcode & 0xff00) != __SUBCODE_MASK)
|
||||
return;
|
||||
inc_irq_stat(IRQEXT_PFL);
|
||||
/* Get the token (= pid of the affected task). */
|
||||
pid = sizeof(void *) == 4 ? param32 : param64;
|
||||
rcu_read_lock();
|
||||
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
|
||||
if (tsk)
|
||||
get_task_struct(tsk);
|
||||
rcu_read_unlock();
|
||||
if (!tsk)
|
||||
return;
|
||||
spin_lock(&pfault_lock);
|
||||
if (subcode & 0x0080) {
|
||||
/* signal bit is set -> a page has been swapped in by VM */
|
||||
if (tsk->thread.pfault_wait == 1) {
|
||||
/* Initial interrupt was faster than the completion
|
||||
* interrupt. pfault_wait is valid. Set pfault_wait
|
||||
* back to zero and wake up the process. This can
|
||||
* safely be done because the task is still sleeping
|
||||
* and can't produce new pfaults. */
|
||||
tsk->thread.pfault_wait = 0;
|
||||
list_del(&tsk->thread.list);
|
||||
wake_up_process(tsk);
|
||||
put_task_struct(tsk);
|
||||
} else {
|
||||
/* Completion interrupt was faster than initial
|
||||
* interrupt. Set pfault_wait to -1 so the initial
|
||||
* interrupt doesn't put the task to sleep.
|
||||
* If the task is not running, ignore the completion
|
||||
* interrupt since it must be a leftover of a PFAULT
|
||||
* CANCEL operation which didn't remove all pending
|
||||
* completion interrupts. */
|
||||
if (tsk->state == TASK_RUNNING)
|
||||
tsk->thread.pfault_wait = -1;
|
||||
}
|
||||
} else {
|
||||
/* signal bit not set -> a real page is missing. */
|
||||
if (WARN_ON_ONCE(tsk != current))
|
||||
goto out;
|
||||
if (tsk->thread.pfault_wait == 1) {
|
||||
/* Already on the list with a reference: put to sleep */
|
||||
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
|
||||
set_tsk_need_resched(tsk);
|
||||
} else if (tsk->thread.pfault_wait == -1) {
|
||||
/* Completion interrupt was faster than the initial
|
||||
* interrupt (pfault_wait == -1). Set pfault_wait
|
||||
* back to zero and exit. */
|
||||
tsk->thread.pfault_wait = 0;
|
||||
} else {
|
||||
/* Initial interrupt arrived before completion
|
||||
* interrupt. Let the task sleep.
|
||||
* An extra task reference is needed since a different
|
||||
* cpu may set the task state to TASK_RUNNING again
|
||||
* before the scheduler is reached. */
|
||||
get_task_struct(tsk);
|
||||
tsk->thread.pfault_wait = 1;
|
||||
list_add(&tsk->thread.list, &pfault_list);
|
||||
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
|
||||
set_tsk_need_resched(tsk);
|
||||
}
|
||||
}
|
||||
out:
|
||||
spin_unlock(&pfault_lock);
|
||||
put_task_struct(tsk);
|
||||
}
|
||||
|
||||
static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
|
||||
void *hcpu)
|
||||
{
|
||||
struct thread_struct *thread, *next;
|
||||
struct task_struct *tsk;
|
||||
|
||||
switch (action & ~CPU_TASKS_FROZEN) {
|
||||
case CPU_DEAD:
|
||||
spin_lock_irq(&pfault_lock);
|
||||
list_for_each_entry_safe(thread, next, &pfault_list, list) {
|
||||
thread->pfault_wait = 0;
|
||||
list_del(&thread->list);
|
||||
tsk = container_of(thread, struct task_struct, thread);
|
||||
wake_up_process(tsk);
|
||||
put_task_struct(tsk);
|
||||
}
|
||||
spin_unlock_irq(&pfault_lock);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int __init pfault_irq_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
|
||||
if (rc)
|
||||
goto out_extint;
|
||||
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
|
||||
if (rc)
|
||||
goto out_pfault;
|
||||
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
|
||||
hotcpu_notifier(pfault_cpu_notify, 0);
|
||||
return 0;
|
||||
|
||||
out_pfault:
|
||||
unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
|
||||
out_extint:
|
||||
pfault_disable = 1;
|
||||
return rc;
|
||||
}
|
||||
early_initcall(pfault_irq_init);
|
||||
|
||||
#endif /* CONFIG_PFAULT */
|
246
arch/s390/mm/gup.c
Normal file
|
@ -0,0 +1,246 @@
|
|||
/*
|
||||
* Lockless get_user_pages_fast for s390
|
||||
*
|
||||
* Copyright IBM Corp. 2010
|
||||
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/vmstat.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
/*
|
||||
* The performance critical leaf functions are made noinline otherwise gcc
|
||||
* inlines everything into a single function which results in too much
|
||||
* register pressure.
|
||||
*/
|
||||
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long mask;
|
||||
pte_t *ptep, pte;
|
||||
struct page *page;
|
||||
|
||||
mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
|
||||
|
||||
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
|
||||
do {
|
||||
pte = *ptep;
|
||||
barrier();
|
||||
if ((pte_val(pte) & mask) != 0)
|
||||
return 0;
|
||||
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
|
||||
page = pte_page(pte);
|
||||
if (!page_cache_get_speculative(page))
|
||||
return 0;
|
||||
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
|
||||
put_page(page);
|
||||
return 0;
|
||||
}
|
||||
pages[*nr] = page;
|
||||
(*nr)++;
|
||||
|
||||
} while (ptep++, addr += PAGE_SIZE, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long mask, result;
|
||||
struct page *head, *page, *tail;
|
||||
int refs;
|
||||
|
||||
result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
|
||||
mask = result | _SEGMENT_ENTRY_INVALID;
|
||||
if ((pmd_val(pmd) & mask) != result)
|
||||
return 0;
|
||||
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
|
||||
|
||||
refs = 0;
|
||||
head = pmd_page(pmd);
|
||||
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
|
||||
tail = page;
|
||||
do {
|
||||
VM_BUG_ON(compound_head(page) != head);
|
||||
pages[*nr] = page;
|
||||
(*nr)++;
|
||||
page++;
|
||||
refs++;
|
||||
} while (addr += PAGE_SIZE, addr != end);
|
||||
|
||||
if (!page_cache_add_speculative(head, refs)) {
|
||||
*nr -= refs;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
|
||||
*nr -= refs;
|
||||
while (refs--)
|
||||
put_page(head);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Any tail pages need their mapcount reference taken before we
|
||||
* return.
|
||||
*/
|
||||
while (refs--) {
|
||||
if (PageTail(tail))
|
||||
get_huge_page_tail(tail);
|
||||
tail++;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long next;
|
||||
pmd_t *pmdp, pmd;
|
||||
|
||||
pmdp = (pmd_t *) pudp;
|
||||
#ifdef CONFIG_64BIT
|
||||
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
|
||||
pmdp = (pmd_t *) pud_deref(pud);
|
||||
pmdp += pmd_index(addr);
|
||||
#endif
|
||||
do {
|
||||
pmd = *pmdp;
|
||||
barrier();
|
||||
next = pmd_addr_end(addr, end);
|
||||
/*
|
||||
* The pmd_trans_splitting() check below explains why
|
||||
* pmdp_splitting_flush() has to serialize with
|
||||
* smp_call_function() against our disabled IRQs, to stop
|
||||
* this gup-fast code from running while we set the
|
||||
* splitting bit in the pmd. Returning zero will take
|
||||
* the slow path that will call wait_split_huge_page()
|
||||
* if the pmd is still in splitting state.
|
||||
*/
|
||||
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
|
||||
return 0;
|
||||
if (unlikely(pmd_large(pmd))) {
|
||||
if (!gup_huge_pmd(pmdp, pmd, addr, next,
|
||||
write, pages, nr))
|
||||
return 0;
|
||||
} else if (!gup_pte_range(pmdp, pmd, addr, next,
|
||||
write, pages, nr))
|
||||
return 0;
|
||||
} while (pmdp++, addr = next, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long next;
|
||||
pud_t *pudp, pud;
|
||||
|
||||
pudp = (pud_t *) pgdp;
|
||||
#ifdef CONFIG_64BIT
|
||||
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
|
||||
pudp = (pud_t *) pgd_deref(pgd);
|
||||
pudp += pud_index(addr);
|
||||
#endif
|
||||
do {
|
||||
pud = *pudp;
|
||||
barrier();
|
||||
next = pud_addr_end(addr, end);
|
||||
if (pud_none(pud))
|
||||
return 0;
|
||||
if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
|
||||
return 0;
|
||||
} while (pudp++, addr = next, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
|
||||
* back to the regular GUP.
|
||||
*/
|
||||
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
struct page **pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr, len, end;
|
||||
unsigned long next, flags;
|
||||
pgd_t *pgdp, pgd;
|
||||
int nr = 0;
|
||||
|
||||
start &= PAGE_MASK;
|
||||
addr = start;
|
||||
len = (unsigned long) nr_pages << PAGE_SHIFT;
|
||||
end = start + len;
|
||||
if ((end <= start) || (end > TASK_SIZE))
|
||||
return 0;
|
||||
/*
|
||||
* local_irq_save() doesn't prevent pagetable teardown, but does
|
||||
* prevent the pagetables from being freed on s390.
|
||||
*
|
||||
* So long as we atomically load page table pointers versus teardown,
|
||||
* we can follow the address down to the page and take a ref on it.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
do {
|
||||
pgd = *pgdp;
|
||||
barrier();
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none(pgd))
|
||||
break;
|
||||
if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
|
||||
break;
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return nr;
|
||||
}
|
||||
|
||||
/**
|
||||
* get_user_pages_fast() - pin user pages in memory
|
||||
* @start: starting user address
|
||||
* @nr_pages: number of pages from start to pin
|
||||
* @write: whether pages will be written to
|
||||
* @pages: array that receives pointers to the pages pinned.
|
||||
* Should be at least nr_pages long.
|
||||
*
|
||||
* Attempt to pin user pages in memory without taking mm->mmap_sem.
|
||||
* If not successful, it will fall back to taking the lock and
|
||||
* calling get_user_pages().
|
||||
*
|
||||
* Returns number of pages pinned. This may be fewer than the number
|
||||
* requested. If nr_pages is 0 or negative, returns 0. If no pages
|
||||
* were pinned, returns -errno.
|
||||
*/
|
||||
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
struct page **pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
int nr, ret;
|
||||
|
||||
start &= PAGE_MASK;
|
||||
nr = __get_user_pages_fast(start, nr_pages, write, pages);
|
||||
if (nr == nr_pages)
|
||||
return nr;
|
||||
|
||||
/* Try to get the remaining pages with get_user_pages */
|
||||
start += nr << PAGE_SHIFT;
|
||||
pages += nr;
|
||||
down_read(&mm->mmap_sem);
|
||||
ret = get_user_pages(current, mm, start,
|
||||
nr_pages - nr, write, 0, pages, NULL);
|
||||
up_read(&mm->mmap_sem);
|
||||
/* Have to be a bit careful with return values */
|
||||
if (nr > 0)
|
||||
ret = (ret < 0) ? nr : ret + nr;
|
||||
return ret;
|
||||
}
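
/*
 * Illustrative sketch only (not part of the original file): how a caller
 * might pin a user buffer with get_user_pages_fast() and release it again.
 * The helper name pin_user_buffer is hypothetical.
 */
#if 0
static int pin_user_buffer(unsigned long uaddr, int nr, struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(uaddr, nr, 1, pages);
	if (pinned < 0)
		return pinned;		/* no pages pinned, -errno */
	/* ... access pages[0..pinned-1] ... */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the references taken above */
	return pinned;
}
#endif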
|
206
arch/s390/mm/hugetlbpage.c
Normal file
|
@ -0,0 +1,206 @@
|
|||
/*
|
||||
* IBM System z Huge TLB Page Support for Kernel.
|
||||
*
|
||||
* Copyright IBM Corp. 2007
|
||||
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
|
||||
static inline pmd_t __pte_to_pmd(pte_t pte)
|
||||
{
|
||||
pmd_t pmd;
|
||||
|
||||
/*
|
||||
* Convert encoding pte bits pmd bits
|
||||
* .IR...wrdytp dy..R...I...wr
|
||||
* empty .10...000000 -> 00..0...1...00
|
||||
* prot-none, clean, old .11...000001 -> 00..1...1...00
|
||||
* prot-none, clean, young .11...000101 -> 01..1...1...00
|
||||
* prot-none, dirty, old .10...001001 -> 10..1...1...00
|
||||
* prot-none, dirty, young .10...001101 -> 11..1...1...00
|
||||
* read-only, clean, old .11...010001 -> 00..1...1...01
|
||||
* read-only, clean, young .01...010101 -> 01..1...0...01
|
||||
* read-only, dirty, old .11...011001 -> 10..1...1...01
|
||||
* read-only, dirty, young .01...011101 -> 11..1...0...01
|
||||
* read-write, clean, old .11...110001 -> 00..0...1...11
|
||||
* read-write, clean, young .01...110101 -> 01..0...0...11
|
||||
* read-write, dirty, old .10...111001 -> 10..0...1...11
|
||||
* read-write, dirty, young .00...111101 -> 11..0...0...11
|
||||
*/
|
||||
if (pte_present(pte)) {
|
||||
pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
|
||||
pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
|
||||
pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
|
||||
pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
|
||||
pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
|
||||
pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
|
||||
pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
|
||||
} else
|
||||
pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
|
||||
return pmd;
|
||||
}
|
||||
|
||||
static inline pte_t __pmd_to_pte(pmd_t pmd)
|
||||
{
|
||||
pte_t pte;
|
||||
|
||||
/*
|
||||
* Convert encoding pmd bits pte bits
|
||||
* dy..R...I...wr .IR...wrdytp
|
||||
* empty 00..0...1...00 -> .10...001100
|
||||
* prot-none, clean, old 00..0...1...00 -> .10...000001
|
||||
* prot-none, clean, young 01..0...1...00 -> .10...000101
|
||||
* prot-none, dirty, old 10..0...1...00 -> .10...001001
|
||||
* prot-none, dirty, young 11..0...1...00 -> .10...001101
|
||||
* read-only, clean, old 00..1...1...01 -> .11...010001
|
||||
* read-only, clean, young 01..1...1...01 -> .11...010101
|
||||
* read-only, dirty, old 10..1...1...01 -> .11...011001
|
||||
* read-only, dirty, young 11..1...1...01 -> .11...011101
|
||||
* read-write, clean, old 00..0...1...11 -> .10...110001
|
||||
* read-write, clean, young 01..0...1...11 -> .10...110101
|
||||
* read-write, dirty, old 10..0...1...11 -> .10...111001
|
||||
* read-write, dirty, young 11..0...1...11 -> .10...111101
|
||||
*/
|
||||
if (pmd_present(pmd)) {
|
||||
pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
|
||||
pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
|
||||
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
|
||||
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
|
||||
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
|
||||
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
|
||||
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
|
||||
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
|
||||
} else
|
||||
pte_val(pte) = _PAGE_INVALID;
|
||||
return pte;
|
||||
}
|
||||
|
||||
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pte)
|
||||
{
|
||||
pmd_t pmd;
|
||||
|
||||
pmd = __pte_to_pmd(pte);
|
||||
if (!MACHINE_HAS_HPAGE) {
|
||||
/* Emulated huge ptes lose the dirty and young bit */
|
||||
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
|
||||
pmd_val(pmd) |= pte_page(pte)[1].index;
|
||||
} else
|
||||
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
|
||||
*(pmd_t *) ptep = pmd;
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get(pte_t *ptep)
|
||||
{
|
||||
unsigned long origin;
|
||||
pmd_t pmd;
|
||||
|
||||
pmd = *(pmd_t *) ptep;
|
||||
if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
|
||||
origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
|
||||
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
|
||||
pmd_val(pmd) |= *(unsigned long *) origin;
|
||||
/* Emulated huge ptes are young and dirty by definition */
|
||||
pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
|
||||
}
|
||||
return __pmd_to_pte(pmd);
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pmd_t *pmdp = (pmd_t *) ptep;
|
||||
pte_t pte = huge_ptep_get(ptep);
|
||||
|
||||
pmdp_flush_direct(mm, addr, pmdp);
|
||||
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
|
||||
return pte;
|
||||
}
|
||||
|
||||
int arch_prepare_hugepage(struct page *page)
|
||||
{
|
||||
unsigned long addr = page_to_phys(page);
|
||||
pte_t pte;
|
||||
pte_t *ptep;
|
||||
int i;
|
||||
|
||||
if (MACHINE_HAS_HPAGE)
|
||||
return 0;
|
||||
|
||||
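/* No hardware large pages: back the emulated huge page with a pte
 * table that maps each of its 4K frames (stored in page[1].index). */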
ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
|
||||
if (!ptep)
|
||||
return -ENOMEM;
|
||||
|
||||
pte_val(pte) = addr;
|
||||
for (i = 0; i < PTRS_PER_PTE; i++) {
|
||||
set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
|
||||
pte_val(pte) += PAGE_SIZE;
|
||||
}
|
||||
page[1].index = (unsigned long) ptep;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void arch_release_hugepage(struct page *page)
|
||||
{
|
||||
pte_t *ptep;
|
||||
|
||||
if (MACHINE_HAS_HPAGE)
|
||||
return;
|
||||
|
||||
ptep = (pte_t *) page[1].index;
|
||||
if (!ptep)
|
||||
return;
|
||||
clear_table((unsigned long *) ptep, _PAGE_INVALID,
|
||||
PTRS_PER_PTE * sizeof(pte_t));
|
||||
page_table_free(&init_mm, (unsigned long *) ptep);
|
||||
page[1].index = 0;
|
||||
}
|
||||
|
||||
pte_t *huge_pte_alloc(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long sz)
|
||||
{
|
||||
pgd_t *pgdp;
|
||||
pud_t *pudp;
|
||||
pmd_t *pmdp = NULL;
|
||||
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
pudp = pud_alloc(mm, pgdp, addr);
|
||||
if (pudp)
|
||||
pmdp = pmd_alloc(mm, pudp, addr);
|
||||
return (pte_t *) pmdp;
|
||||
}
|
||||
|
||||
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
pgd_t *pgdp;
|
||||
pud_t *pudp;
|
||||
pmd_t *pmdp = NULL;
|
||||
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
if (pgd_present(*pgdp)) {
|
||||
pudp = pud_offset(pgdp, addr);
|
||||
if (pud_present(*pudp))
|
||||
pmdp = pmd_offset(pudp, addr);
|
||||
}
|
||||
return (pte_t *) pmdp;
|
||||
}
|
||||
|
||||
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pmd_huge(pmd_t pmd)
|
||||
{
|
||||
if (!MACHINE_HAS_HPAGE)
|
||||
return 0;
|
||||
|
||||
return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
|
||||
}
|
||||
|
||||
int pud_huge(pud_t pud)
|
||||
{
|
||||
return 0;
|
||||
}
|
232
arch/s390/mm/init.c
Normal file
|
@ -0,0 +1,232 @@
|
|||
/*
|
||||
* S390 version
|
||||
* Copyright IBM Corp. 1999
|
||||
* Author(s): Hartmut Penner (hp@de.ibm.com)
|
||||
*
|
||||
* Derived from "arch/i386/mm/init.c"
|
||||
* Copyright (C) 1995 Linus Torvalds
|
||||
*/
|
||||
|
||||
#include <linux/signal.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/memory.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/poison.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/ctl_reg.h>
|
||||
#include <asm/sclp.h>
|
||||
|
||||
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
|
||||
|
||||
unsigned long empty_zero_page, zero_page_mask;
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
EXPORT_SYMBOL(zero_page_mask);
|
||||
|
||||
static void __init setup_zero_pages(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
unsigned int order;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
switch (cpu_id.machine) {
|
||||
case 0x9672: /* g5 */
|
||||
case 0x2064: /* z900 */
|
||||
case 0x2066: /* z900 */
|
||||
case 0x2084: /* z990 */
|
||||
case 0x2086: /* z990 */
|
||||
case 0x2094: /* z9-109 */
|
||||
case 0x2096: /* z9-109 */
|
||||
order = 0;
|
||||
break;
|
||||
case 0x2097: /* z10 */
|
||||
case 0x2098: /* z10 */
|
||||
case 0x2817: /* z196 */
|
||||
case 0x2818: /* z196 */
|
||||
order = 2;
|
||||
break;
|
||||
case 0x2827: /* zEC12 */
|
||||
case 0x2828: /* zEC12 */
|
||||
default:
|
||||
order = 5;
|
||||
break;
|
||||
}
|
||||
/* Limit number of empty zero pages for small memory sizes */
|
||||
if (order > 2 && totalram_pages <= 16384)
|
||||
order = 2;
|
||||
|
||||
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
|
||||
if (!empty_zero_page)
|
||||
panic("Out of memory in setup_zero_pages");
|
||||
|
||||
page = virt_to_page((void *) empty_zero_page);
|
||||
split_page(page, order);
|
||||
for (i = 1 << order; i > 0; i--) {
|
||||
mark_page_reserved(page);
|
||||
page++;
|
||||
}
|
||||
|
||||
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
|
||||
}
|
||||
|
||||
/*
|
||||
* paging_init() sets up the page tables
|
||||
*/
|
||||
void __init paging_init(void)
|
||||
{
|
||||
unsigned long max_zone_pfns[MAX_NR_ZONES];
|
||||
unsigned long pgd_type, asce_bits;
|
||||
|
||||
init_mm.pgd = swapper_pg_dir;
|
||||
#ifdef CONFIG_64BIT
|
||||
if (VMALLOC_END > (1UL << 42)) {
|
||||
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
|
||||
pgd_type = _REGION2_ENTRY_EMPTY;
|
||||
} else {
|
||||
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
|
||||
pgd_type = _REGION3_ENTRY_EMPTY;
|
||||
}
|
||||
#else
|
||||
asce_bits = _ASCE_TABLE_LENGTH;
|
||||
pgd_type = _SEGMENT_ENTRY_EMPTY;
|
||||
#endif
|
||||
S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
|
||||
clear_table((unsigned long *) init_mm.pgd, pgd_type,
|
||||
sizeof(unsigned long)*2048);
|
||||
vmem_map_init();
|
||||
|
||||
/* enable virtual mapping in kernel mode */
|
||||
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
|
||||
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
|
||||
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
|
||||
arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
|
||||
|
||||
sparse_memory_present_with_active_regions(MAX_NUMNODES);
|
||||
sparse_init();
|
||||
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
|
||||
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
|
||||
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
|
||||
free_area_init_nodes(max_zone_pfns);
|
||||
}
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
if (MACHINE_HAS_TLB_LC)
|
||||
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
|
||||
cpumask_set_cpu(0, mm_cpumask(&init_mm));
|
||||
atomic_set(&init_mm.context.attach_count, 1);
|
||||
|
||||
max_mapnr = max_low_pfn;
|
||||
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
|
||||
|
||||
/* Setup guest page hinting */
|
||||
cmma_init();
|
||||
|
||||
/* this will put all low memory onto the freelists */
|
||||
free_all_bootmem();
|
||||
setup_zero_pages(); /* Setup zeroed pages. */
|
||||
|
||||
mem_init_print_info(NULL);
|
||||
printk("Write protected kernel read-only data: %#lx - %#lx\n",
|
||||
(unsigned long)&_stext,
|
||||
PFN_ALIGN((unsigned long)&_eshared) - 1);
|
||||
}
|
||||
|
||||
void free_initmem(void)
|
||||
{
|
||||
free_initmem_default(POISON_FREE_INITMEM);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void __init free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
|
||||
"initrd");
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
int arch_add_memory(int nid, u64 start, u64 size)
|
||||
{
|
||||
unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
|
||||
unsigned long start_pfn = PFN_DOWN(start);
|
||||
unsigned long size_pages = PFN_DOWN(size);
|
||||
struct zone *zone;
|
||||
int rc;
|
||||
|
||||
rc = vmem_add_mapping(start, size);
|
||||
if (rc)
|
||||
return rc;
|
||||
for_each_zone(zone) {
|
||||
if (zone_idx(zone) != ZONE_MOVABLE) {
|
||||
/* Add range within existing zone limits */
|
||||
zone_start_pfn = zone->zone_start_pfn;
|
||||
zone_end_pfn = zone->zone_start_pfn +
|
||||
zone->spanned_pages;
|
||||
} else {
|
||||
/* Add remaining range to ZONE_MOVABLE */
|
||||
zone_start_pfn = start_pfn;
|
||||
zone_end_pfn = start_pfn + size_pages;
|
||||
}
|
||||
if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
|
||||
continue;
|
||||
nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
|
||||
zone_end_pfn - start_pfn : size_pages;
|
||||
rc = __add_pages(nid, zone, start_pfn, nr_pages);
|
||||
if (rc)
|
||||
break;
|
||||
start_pfn += nr_pages;
|
||||
size_pages -= nr_pages;
|
||||
if (!size_pages)
|
||||
break;
|
||||
}
|
||||
if (rc)
|
||||
vmem_remove_mapping(start, size);
|
||||
return rc;
|
||||
}
|
||||
|
||||
unsigned long memory_block_size_bytes(void)
|
||||
{
|
||||
/*
|
||||
* Make sure the memory block size is always greater
|
||||
* than or equal to the memory increment size.
|
||||
*/
|
||||
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTREMOVE
|
||||
int arch_remove_memory(u64 start, u64 size)
|
||||
{
|
||||
/*
|
||||
* There is no hardware or firmware interface which could trigger a
|
||||
* hot memory remove on s390. So there is nothing that needs to be
|
||||
* implemented.
|
||||
*/
|
||||
return -EBUSY;
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
204
arch/s390/mm/maccess.c
Normal file
|
@ -0,0 +1,204 @@
|
|||
/*
|
||||
* Access kernel memory without faulting -- s390 specific implementation.
|
||||
*
|
||||
* Copyright IBM Corp. 2009
|
||||
*
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <asm/ctl_reg.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
/*
|
||||
* This function writes to kernel memory bypassing DAT and possible
|
||||
* write protection. It copies one to four bytes from src to dst
|
||||
* using the stura instruction.
|
||||
* Returns the number of bytes copied or -EFAULT.
|
||||
*/
|
||||
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
|
||||
{
|
||||
unsigned long count, aligned;
|
||||
int offset, mask;
|
||||
int rc = -EFAULT;
|
||||
|
||||
aligned = (unsigned long) dst & ~3UL;
|
||||
offset = (unsigned long) dst & 3;
|
||||
count = min_t(unsigned long, 4 - offset, size);
|
||||
mask = (0xf << (4 - count)) & 0xf;
|
||||
mask >>= offset;
|
||||
asm volatile(
|
||||
" bras 1,0f\n"
|
||||
" icm 0,0,0(%3)\n"
|
||||
"0: l 0,0(%1)\n"
|
||||
" lra %1,0(%1)\n"
|
||||
"1: ex %2,0(1)\n"
|
||||
"2: stura 0,%1\n"
|
||||
" la %0,0\n"
|
||||
"3:\n"
|
||||
EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
|
||||
: "+d" (rc), "+a" (aligned)
|
||||
: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
|
||||
return rc ? rc : count;
|
||||
}
|
||||
|
||||
long probe_kernel_write(void *dst, const void *src, size_t size)
|
||||
{
|
||||
long copied = 0;
|
||||
|
||||
while (size) {
|
||||
copied = probe_kernel_write_odd(dst, src, size);
|
||||
if (copied < 0)
|
||||
break;
|
||||
dst += copied;
|
||||
src += copied;
|
||||
size -= copied;
|
||||
}
|
||||
return copied < 0 ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static int __memcpy_real(void *dest, void *src, size_t count)
|
||||
{
|
||||
register unsigned long _dest asm("2") = (unsigned long) dest;
|
||||
register unsigned long _len1 asm("3") = (unsigned long) count;
|
||||
register unsigned long _src asm("4") = (unsigned long) src;
|
||||
register unsigned long _len2 asm("5") = (unsigned long) count;
|
||||
int rc = -EFAULT;
|
||||
|
||||
asm volatile (
|
||||
"0: mvcle %1,%2,0x0\n"
|
||||
"1: jo 0b\n"
|
||||
" lhi %0,0x0\n"
|
||||
"2:\n"
|
||||
EX_TABLE(1b,2b)
|
||||
: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
|
||||
"+d" (_len2), "=m" (*((long *) dest))
|
||||
: "m" (*((long *) src))
|
||||
: "cc", "memory");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy memory in real mode (kernel to kernel)
|
||||
*/
|
||||
int memcpy_real(void *dest, void *src, size_t count)
|
||||
{
|
||||
unsigned long flags;
|
||||
int rc;
|
||||
|
||||
if (!count)
|
||||
return 0;
|
||||
local_irq_save(flags);
|
||||
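/* clear the DAT bit in the PSW system mask: run the copy on real addresses */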
__arch_local_irq_stnsm(0xfbUL);
|
||||
rc = __memcpy_real(dest, src, count);
|
||||
local_irq_restore(flags);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy memory in absolute mode (kernel to kernel)
|
||||
*/
|
||||
void memcpy_absolute(void *dest, void *src, size_t count)
|
||||
{
|
||||
unsigned long cr0, flags, prefix;
|
||||
|
||||
flags = arch_local_irq_save();
|
||||
__ctl_store(cr0, 0, 0);
|
||||
__ctl_clear_bit(0, 28); /* disable lowcore protection */
|
||||
prefix = store_prefix();
|
||||
if (prefix) {
|
||||
local_mcck_disable();
|
||||
set_prefix(0);
|
||||
memcpy(dest, src, count);
|
||||
set_prefix(prefix);
|
||||
local_mcck_enable();
|
||||
} else {
|
||||
memcpy(dest, src, count);
|
||||
}
|
||||
__ctl_load(cr0, 0, 0);
|
||||
arch_local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy memory from kernel (real) to user (virtual)
|
||||
*/
|
||||
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
|
||||
{
|
||||
int offs = 0, size, rc;
|
||||
char *buf;
|
||||
|
||||
buf = (char *) __get_free_page(GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
rc = -EFAULT;
|
||||
while (offs < count) {
|
||||
size = min(PAGE_SIZE, count - offs);
|
||||
if (memcpy_real(buf, src + offs, size))
|
||||
goto out;
|
||||
if (copy_to_user(dest + offs, buf, size))
|
||||
goto out;
|
||||
offs += size;
|
||||
}
|
||||
rc = 0;
|
||||
out:
|
||||
free_page((unsigned long) buf);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if physical address is within prefix or zero page
|
||||
*/
|
||||
static int is_swapped(unsigned long addr)
|
||||
{
|
||||
unsigned long lc;
|
||||
int cpu;
|
||||
|
||||
if (addr < sizeof(struct _lowcore))
|
||||
return 1;
|
||||
for_each_online_cpu(cpu) {
|
||||
lc = (unsigned long) lowcore_ptr[cpu];
|
||||
if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
|
||||
continue;
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert a physical pointer for /dev/mem access
|
||||
*
|
||||
* For swapped prefix pages a new buffer is returned that contains a copy of
|
||||
* the absolute memory. The buffer is at most one page in size.
|
||||
*/
|
||||
void *xlate_dev_mem_ptr(unsigned long addr)
|
||||
{
|
||||
void *bounce = (void *) addr;
|
||||
unsigned long size;
|
||||
|
||||
get_online_cpus();
|
||||
preempt_disable();
|
||||
if (is_swapped(addr)) {
|
||||
size = PAGE_SIZE - (addr & ~PAGE_MASK);
|
||||
bounce = (void *) __get_free_page(GFP_ATOMIC);
|
||||
if (bounce)
|
||||
memcpy_absolute(bounce, (void *) addr, size);
|
||||
}
|
||||
preempt_enable();
|
||||
put_online_cpus();
|
||||
return bounce;
|
||||
}
|
||||
|
||||
/*
|
||||
* Free converted buffer for /dev/mem access (if necessary)
|
||||
*/
|
||||
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
|
||||
{
|
||||
if ((void *) addr != buf)
|
||||
free_page((unsigned long) buf);
|
||||
}
|
65
arch/s390/mm/mem_detect.c
Normal file
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Copyright IBM Corp. 2008, 2009
|
||||
*
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <asm/ipl.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
#define ADDR2G (1ULL << 31)
|
||||
|
||||
#define CHUNK_READ_WRITE 0
|
||||
#define CHUNK_READ_ONLY 1
|
||||
|
||||
static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
|
||||
{
|
||||
memblock_add_range(&memblock.memory, start, size, 0, 0);
|
||||
memblock_add_range(&memblock.physmem, start, size, 0, 0);
|
||||
}
|
||||
|
||||
void __init detect_memory_memblock(void)
|
||||
{
|
||||
unsigned long long memsize, rnmax, rzm;
|
||||
unsigned long addr, size;
|
||||
int type;
|
||||
|
||||
rzm = sclp_get_rzm();
|
||||
rnmax = sclp_get_rnmax();
|
||||
memsize = rzm * rnmax;
|
||||
if (!rzm)
|
||||
rzm = 1ULL << 17;
|
||||
if (IS_ENABLED(CONFIG_32BIT)) {
|
||||
rzm = min(ADDR2G, rzm);
|
||||
memsize = min(ADDR2G, memsize);
|
||||
}
|
||||
max_physmem_end = memsize;
|
||||
addr = 0;
|
||||
/* keep memblock lists close to the kernel */
|
||||
memblock_set_bottom_up(true);
|
||||
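/* probe in increment-size (rzm) steps; a change of the tprot()
 * access type marks a chunk boundary */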
do {
|
||||
size = 0;
|
||||
type = tprot(addr);
|
||||
do {
|
||||
size += rzm;
|
||||
if (max_physmem_end && addr + size >= max_physmem_end)
|
||||
break;
|
||||
} while (type == tprot(addr + size));
|
||||
if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
|
||||
if (max_physmem_end && (addr + size > max_physmem_end))
|
||||
size = max_physmem_end - addr;
|
||||
memblock_physmem_add(addr, size);
|
||||
}
|
||||
addr += size;
|
||||
} while (addr < max_physmem_end);
|
||||
memblock_set_bottom_up(false);
|
||||
if (!max_physmem_end)
|
||||
max_physmem_end = memblock_end_of_DRAM();
|
||||
}
|
180
arch/s390/mm/mmap.c
Normal file
|
@ -0,0 +1,180 @@
|
|||
/*
|
||||
* flexible mmap layout support
|
||||
*
|
||||
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*
|
||||
* Started by Ingo Molnar <mingo@elte.hu>
|
||||
*/
|
||||
|
||||
#include <linux/personality.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/compat.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
static unsigned long stack_maxrandom_size(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
return 0;
|
||||
if (current->personality & ADDR_NO_RANDOMIZE)
|
||||
return 0;
|
||||
return STACK_RND_MASK << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Top of mmap area (just below the process stack).
|
||||
*
|
||||
* Leave at least a ~32 MB hole.
|
||||
*/
|
||||
#define MIN_GAP (32*1024*1024)
|
||||
#define MAX_GAP (STACK_TOP/6*5)
|
||||
|
||||
static inline int mmap_is_legacy(void)
|
||||
{
|
||||
if (current->personality & ADDR_COMPAT_LAYOUT)
|
||||
return 1;
|
||||
if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
|
||||
return 1;
|
||||
return sysctl_legacy_va_layout;
|
||||
}
|
||||
|
||||
static unsigned long mmap_rnd(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
return 0;
|
||||
/* 8MB randomization for mmap_base */
|
||||
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static unsigned long mmap_base_legacy(void)
|
||||
{
|
||||
return TASK_UNMAPPED_BASE + mmap_rnd();
|
||||
}
|
||||
|
||||
static inline unsigned long mmap_base(void)
|
||||
{
|
||||
unsigned long gap = rlimit(RLIMIT_STACK);
|
||||
|
||||
if (gap < MIN_GAP)
|
||||
gap = MIN_GAP;
|
||||
else if (gap > MAX_GAP)
|
||||
gap = MAX_GAP;
|
||||
gap &= PAGE_MASK;
|
||||
return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
|
||||
/*
|
||||
* This function, called very early during the creation of a new
|
||||
* process VM image, sets up which VM layout function to use:
|
||||
*/
|
||||
void arch_pick_mmap_layout(struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* Fall back to the standard layout if the personality
|
||||
* bit is set, or if the expected stack growth is unlimited:
|
||||
*/
|
||||
if (mmap_is_legacy()) {
|
||||
mm->mmap_base = mmap_base_legacy();
|
||||
mm->get_unmapped_area = arch_get_unmapped_area;
|
||||
} else {
|
||||
mm->mmap_base = mmap_base();
|
||||
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
|
||||
{
|
||||
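/* A fixed mapping above the current limit needs an upgrade to a page
 * table with one more level (up to a 1UL << 53 address space). */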
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
|
||||
return 0;
|
||||
if (!(flags & MAP_FIXED))
|
||||
addr = 0;
|
||||
if ((addr + len) >= TASK_SIZE)
|
||||
return crst_table_upgrade(current->mm, 1UL << 53);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
s390_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff, unsigned long flags)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long area;
|
||||
int rc;
|
||||
|
||||
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
|
||||
if (!(area & ~PAGE_MASK))
|
||||
return area;
|
||||
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
|
||||
/* Upgrade the page table to 4 levels and retry. */
|
||||
rc = crst_table_upgrade(mm, 1UL << 53);
|
||||
if (rc)
|
||||
return (unsigned long) rc;
|
||||
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
|
||||
}
|
||||
return area;
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
|
||||
const unsigned long len, const unsigned long pgoff,
|
||||
const unsigned long flags)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long area;
|
||||
int rc;
|
||||
|
||||
area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
|
||||
if (!(area & ~PAGE_MASK))
|
||||
return area;
|
||||
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
|
||||
/* Upgrade the page table to 4 levels and retry. */
|
||||
rc = crst_table_upgrade(mm, 1UL << 53);
|
||||
if (rc)
|
||||
return (unsigned long) rc;
|
||||
area = arch_get_unmapped_area_topdown(filp, addr, len,
|
||||
pgoff, flags);
|
||||
}
|
||||
return area;
|
||||
}
|
||||
/*
|
||||
* This function, called very early during the creation of a new
|
||||
* process VM image, sets up which VM layout function to use:
|
||||
*/
|
||||
void arch_pick_mmap_layout(struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* Fall back to the standard layout if the personality
|
||||
* bit is set, or if the expected stack growth is unlimited:
|
||||
*/
|
||||
if (mmap_is_legacy()) {
|
||||
mm->mmap_base = mmap_base_legacy();
|
||||
mm->get_unmapped_area = s390_get_unmapped_area;
|
||||
} else {
|
||||
mm->mmap_base = mmap_base();
|
||||
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
114
arch/s390/mm/page-states.c
Normal file
|
@ -0,0 +1,114 @@
|
|||
/*
|
||||
* Copyright IBM Corp. 2008
|
||||
*
|
||||
* Guest page hinting for unused pages.
|
||||
*
|
||||
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#define ESSA_SET_STABLE 1
|
||||
#define ESSA_SET_UNUSED 2
|
||||
|
||||
static int cmma_flag = 1;
|
||||
|
||||
static int __init cmma(char *str)
|
||||
{
|
||||
char *parm;
|
||||
|
||||
parm = strstrip(str);
|
||||
if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
|
||||
cmma_flag = 1;
|
||||
return 1;
|
||||
}
|
||||
cmma_flag = 0;
|
||||
if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
__setup("cmma=", cmma);
|
||||
|
||||
void __init cmma_init(void)
|
||||
{
|
||||
register unsigned long tmp asm("0") = 0;
|
||||
register int rc asm("1") = -EOPNOTSUPP;
|
||||
|
||||
if (!cmma_flag)
|
||||
return;
|
||||
asm volatile(
|
||||
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
|
||||
"0: la %0,0\n"
|
||||
"1:\n"
|
||||
EX_TABLE(0b,1b)
|
||||
: "+&d" (rc), "+&d" (tmp));
|
||||
if (rc)
|
||||
cmma_flag = 0;
|
||||
}
|
||||
|
||||
static inline void set_page_unstable(struct page *page, int order)
|
||||
{
|
||||
int i, rc;
|
||||
|
||||
for (i = 0; i < (1 << order); i++)
|
||||
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
|
||||
: "=&d" (rc)
|
||||
: "a" (page_to_phys(page + i)),
|
||||
"i" (ESSA_SET_UNUSED));
|
||||
}
|
||||
|
||||
void arch_free_page(struct page *page, int order)
|
||||
{
|
||||
if (!cmma_flag)
|
||||
return;
|
||||
set_page_unstable(page, order);
|
||||
}
|
||||
|
||||
static inline void set_page_stable(struct page *page, int order)
|
||||
{
|
||||
int i, rc;
|
||||
|
||||
for (i = 0; i < (1 << order); i++)
|
||||
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
|
||||
: "=&d" (rc)
|
||||
: "a" (page_to_phys(page + i)),
|
||||
"i" (ESSA_SET_STABLE));
|
||||
}
|
||||
|
||||
void arch_alloc_page(struct page *page, int order)
|
||||
{
|
||||
if (!cmma_flag)
|
||||
return;
|
||||
set_page_stable(page, order);
|
||||
}
|
||||
|
||||
void arch_set_page_states(int make_stable)
|
||||
{
|
||||
unsigned long flags, order, t;
|
||||
struct list_head *l;
|
||||
struct page *page;
|
||||
struct zone *zone;
|
||||
|
||||
if (!cmma_flag)
|
||||
return;
|
||||
if (make_stable)
|
||||
drain_local_pages(NULL);
|
||||
for_each_populated_zone(zone) {
|
||||
spin_lock_irqsave(&zone->lock, flags);
|
||||
for_each_migratetype_order(order, t) {
|
||||
list_for_each(l, &zone->free_area[order].free_list[t]) {
|
||||
page = list_entry(l, struct page, lru);
|
||||
if (make_stable)
|
||||
set_page_stable(page, order);
|
||||
else
|
||||
set_page_unstable(page, order);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&zone->lock, flags);
|
||||
}
|
||||
}
|
170
arch/s390/mm/pageattr.c
Normal file
|
@ -0,0 +1,170 @@
|
|||
/*
|
||||
* Copyright IBM Corp. 2011
|
||||
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
|
||||
*/
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/facility.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
#if PAGE_DEFAULT_KEY
|
||||
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
|
||||
{
|
||||
asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
|
||||
: [addr] "+a" (addr) : [skey] "d" (skey));
|
||||
return addr;
|
||||
}
|
||||
|
||||
void __storage_key_init_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long boundary, size;
|
||||
|
||||
while (start < end) {
|
||||
if (MACHINE_HAS_EDAT1) {
|
||||
/* set storage keys for a 1MB frame */
|
||||
size = 1UL << 20;
|
||||
boundary = (start + size) & ~(size - 1);
|
||||
if (boundary <= end) {
|
||||
do {
|
||||
start = sske_frame(start, PAGE_DEFAULT_KEY);
|
||||
} while (start < boundary);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static pte_t *walk_page_table(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgdp;
|
||||
pud_t *pudp;
|
||||
pmd_t *pmdp;
|
||||
pte_t *ptep;
|
||||
|
||||
pgdp = pgd_offset_k(addr);
|
||||
if (pgd_none(*pgdp))
|
||||
return NULL;
|
||||
pudp = pud_offset(pgdp, addr);
|
||||
if (pud_none(*pudp) || pud_large(*pudp))
|
||||
return NULL;
|
||||
pmdp = pmd_offset(pudp, addr);
|
||||
if (pmd_none(*pmdp) || pmd_large(*pmdp))
|
||||
return NULL;
|
||||
ptep = pte_offset_kernel(pmdp, addr);
|
||||
if (pte_none(*ptep))
|
||||
return NULL;
|
||||
return ptep;
|
||||
}
|
||||
|
||||
static void change_page_attr(unsigned long addr, int numpages,
|
||||
pte_t (*set) (pte_t))
|
||||
{
|
||||
pte_t *ptep, pte;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < numpages; i++) {
|
||||
ptep = walk_page_table(addr);
|
||||
if (WARN_ON_ONCE(!ptep))
|
||||
break;
|
||||
pte = *ptep;
|
||||
pte = set(pte);
|
||||
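/* invalidate the old translation before installing the modified pte */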
__ptep_ipte(addr, ptep);
|
||||
*ptep = pte;
|
||||
addr += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
int set_memory_ro(unsigned long addr, int numpages)
|
||||
{
|
||||
change_page_attr(addr, numpages, pte_wrprotect);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int set_memory_rw(unsigned long addr, int numpages)
|
||||
{
|
||||
change_page_attr(addr, numpages, pte_mkwrite);
|
||||
return 0;
|
||||
}
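A minimal usage sketch for the two helpers above (hypothetical buffer; real callers pass page-aligned ranges of the kernel mapping they own):

/* Hypothetical usage sketch: write-protect one page, then restore it. */
static char example_buf[PAGE_SIZE] __aligned(PAGE_SIZE);

static void example_toggle_protection(void)
{
	set_memory_ro((unsigned long)example_buf, 1);
	/* any store to example_buf would fault here */
	set_memory_rw((unsigned long)example_buf, 1);
}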

/* not possible */
int set_memory_nx(unsigned long addr, int numpages)
{
	return 0;
}

int set_memory_x(unsigned long addr, int numpages)
{
	return 0;
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
		__ptep_ipte_range(address, nr - 1, pte);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte);
		address += PAGE_SIZE;
		pte++;
	}
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	int nr, i, j;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (i = 0; i < numpages;) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		nr = (unsigned long)pte >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte_val(*pte) = __pa(address);
				address += PAGE_SIZE;
				pte++;
			}
		} else {
			ipte_range(pte, address, nr);
		}
		i += nr;
	}
}
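The two `nr` assignments above clamp each batch to the end of the current page table, so neither the enable loop nor ipte_range() ever walks past a table boundary. The same arithmetic in isolation (illustrative only):

/* Illustrative only: number of pte slots left in the table that
 * contains *pte, mirroring the nr computation in kernel_map_pages(). */
static int example_ptes_left(pte_t *pte)
{
	unsigned long idx = (unsigned long)pte >> ilog2(sizeof(long));

	return PTRS_PER_PTE - (idx & (PTRS_PER_PTE - 1));
}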

#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */
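In kernel_page_present() above, `lra` attempts the address translation and sets the condition code, and `ipm`/`srl` move that code into `cc`, so `cc == 0` means the page is currently mapped. A hedged caller sketch (hypothetical function name):

/* Hypothetical caller sketch: hibernation must skip pages that
 * DEBUG_PAGEALLOC has unmapped from the kernel 1:1 mapping. */
static bool example_page_copyable(struct page *page)
{
	return kernel_page_present(page);
}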
1457
arch/s390/mm/pgtable.c
Normal file
File diff suppressed because it is too large
422
arch/s390/mm/vmem.c
Normal file
@@ -0,0 +1,422 @@
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}
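The allocators above have to work both before and after the slab allocator comes up, which is why they are marked `__ref` and branch on slab_is_available(). The same early-vs-late pattern in isolation (illustrative only, hypothetical helper name):

/* Illustrative only: allocate one zeroed table page early (bootmem)
 * or late (buddy allocator), mirroring vmem_alloc_pages(). */
static void __ref *example_alloc_table_page(void)
{
	if (slab_is_available())
		return (void *)get_zeroed_page(GFP_KERNEL);
	return alloc_bootmem_pages(PAGE_SIZE);
}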

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}
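Both large-mapping shortcuts in vmem_add_mem() fire only when the address sits exactly on the block boundary and the whole block fits before `end`. The eligibility test restated in isolation (illustrative only; note that ~PUD_MASK and ~PMD_MASK each equal block_size - 1):

/* Illustrative only: can one large block of block_size bytes be
 * mapped at addr without running past end?  Mirrors the EDAT1/EDAT2
 * conditions in vmem_add_mem(). */
static bool example_large_block_fits(unsigned long addr, unsigned long end,
				     unsigned long block_size)
{
	return addr && !(addr & (block_size - 1)) &&
	       addr + block_size <= end;
}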

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used: otherwise we would also end up
			 * with page tables, since vmemmap_populate() is
			 * called for each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
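The loop above is a plain half-open interval overlap check, restated in isolation (illustrative only):

/* Illustrative only: [a, a + alen) and [b, b + blen) overlap unless
 * one range ends at or before the start of the other. */
static bool example_ranges_overlap(unsigned long a, unsigned long alen,
				   unsigned long b, unsigned long blen)
{
	return !(a + alen <= b || b + blen <= a);
}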

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
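A minimal sketch of how a caller might pair the two exported entry points (hypothetical function name, addresses, and sizes; the DCSS/extmem code is the typical real user):

/* Hypothetical usage sketch: map a 256 MB physical segment into the
 * kernel 1:1 mapping, use it through __va(), then tear it down. */
static int example_use_segment(void)
{
	unsigned long start = 512UL << 20, size = 256UL << 20;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	/* ... access the range via __va(start) ... */
	return vmem_remove_mapping(start, size);
}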

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size - 1;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}
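vmem_map_init() splits each memblock around the read-only region [ro_start, ro_end), mapping the overlap read-only and everything else read-write. The case analysis restated (illustrative only, hypothetical helper name):

/* Illustrative only: how many mappings vmem_map_init() creates for a
 * region overlapping RO = [ro_start, ro_end). */
static int example_mapping_pieces(unsigned long start, unsigned long end,
				  unsigned long ro_start, unsigned long ro_end)
{
	if (start >= ro_end || end <= ro_start)
		return 1;	/* disjoint: one read-write mapping */
	if (start >= ro_start && end <= ro_end)
		return 1;	/* contained: one read-only mapping */
	if (start >= ro_start || end < ro_end)
		return 2;	/* overlaps one edge of RO */
	return 3;		/* straddles RO: rw + ro + rw */
}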

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);