Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,4 @@
obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o


@@ -0,0 +1,236 @@
/*
* Copyright (C) 2013 Intel Corporation; author Matt Fleming
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*/
#include <linux/console.h>
#include <linux/efi.h>
#include <linux/font.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/setup.h>
static const struct font_desc *font;
static u32 efi_x, efi_y;
static void *efi_fb;
static bool early_efi_keep;
/*
* EFI earlyprintk needs early_ioremap() to map the framebuffer. But
* early_ioremap() is not usable for earlyprintk=efi,keep; ioremap() should
* be used instead. ioremap() becomes available after paging_init(), which
* runs earlier than the initcall callbacks. Thus this early initcall,
* early_efi_map_fb(), maps the whole EFI framebuffer.
*/
static __init int early_efi_map_fb(void)
{
unsigned long base, size;
if (!early_efi_keep)
return 0;
base = boot_params.screen_info.lfb_base;
size = boot_params.screen_info.lfb_size;
efi_fb = ioremap(base, size);
return efi_fb ? 0 : -ENOMEM;
}
early_initcall(early_efi_map_fb);
/*
* early_efi_map() maps the EFI framebuffer region [start, start + len - 1].
* In the earlyprintk=efi,keep case the whole framebuffer is already mapped,
* so just return the offset efi_fb + start.
*/
static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
{
unsigned long base;
base = boot_params.screen_info.lfb_base;
if (efi_fb)
return (efi_fb + start);
else
return early_ioremap(base + start, len);
}
static __init_refok void early_efi_unmap(void *addr, unsigned long len)
{
if (!efi_fb)
early_iounmap(addr, len);
}
static void early_efi_clear_scanline(unsigned int y)
{
unsigned long *dst;
u16 len;
len = boot_params.screen_info.lfb_linelength;
dst = early_efi_map(y*len, len);
if (!dst)
return;
memset(dst, 0, len);
early_efi_unmap(dst, len);
}
static void early_efi_scroll_up(void)
{
unsigned long *dst, *src;
u16 len;
u32 i, height;
len = boot_params.screen_info.lfb_linelength;
height = boot_params.screen_info.lfb_height;
for (i = 0; i < height - font->height; i++) {
dst = early_efi_map(i*len, len);
if (!dst)
return;
src = early_efi_map((i + font->height) * len, len);
if (!src) {
early_efi_unmap(dst, len);
return;
}
memmove(dst, src, len);
early_efi_unmap(src, len);
early_efi_unmap(dst, len);
}
}
static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
{
const u32 color_black = 0x00000000;
const u32 color_white = 0x00ffffff;
const u8 *src;
u8 s8;
int m;
src = font->data + c * font->height;
s8 = *(src + h);
for (m = 0; m < 8; m++) {
if ((s8 >> (7 - m)) & 1)
*dst = color_white;
else
*dst = color_black;
dst++;
}
}
static void
early_efi_write(struct console *con, const char *str, unsigned int num)
{
struct screen_info *si;
unsigned int len;
const char *s;
void *dst;
si = &boot_params.screen_info;
len = si->lfb_linelength;
while (num) {
unsigned int linemax;
unsigned int h, count = 0;
for (s = str; *s && *s != '\n'; s++) {
if (count == num)
break;
count++;
}
linemax = (si->lfb_width - efi_x) / font->width;
if (count > linemax)
count = linemax;
for (h = 0; h < font->height; h++) {
unsigned int n, x;
dst = early_efi_map((efi_y + h) * len, len);
if (!dst)
return;
s = str;
n = count;
x = efi_x;
while (n-- > 0) {
early_efi_write_char(dst + x*4, *s, h);
x += font->width;
s++;
}
early_efi_unmap(dst, len);
}
num -= count;
efi_x += count * font->width;
str += count;
if (num > 0 && *s == '\n') {
efi_x = 0;
efi_y += font->height;
str++;
num--;
}
if (efi_x >= si->lfb_width) {
efi_x = 0;
efi_y += font->height;
}
if (efi_y + font->height > si->lfb_height) {
u32 i;
efi_y -= font->height;
early_efi_scroll_up();
for (i = 0; i < font->height; i++)
early_efi_clear_scanline(efi_y + i);
}
}
}
static __init int early_efi_setup(struct console *con, char *options)
{
struct screen_info *si;
u16 xres, yres;
u32 i;
si = &boot_params.screen_info;
xres = si->lfb_width;
yres = si->lfb_height;
/*
* early_efi_write_char() implicitly assumes a framebuffer with
* 32-bits per pixel.
*/
if (si->lfb_depth != 32)
return -ENODEV;
font = get_default_font(xres, yres, -1, -1);
if (!font)
return -ENODEV;
efi_y = rounddown(yres, font->height) - font->height;
for (i = 0; i < (yres - efi_y) / font->height; i++)
early_efi_scroll_up();
/* early_console_register will unset CON_BOOT in case ,keep */
if (!(con->flags & CON_BOOT))
early_efi_keep = true;
return 0;
}
struct console early_efi_console = {
.name = "earlyefi",
.write = early_efi_write,
.setup = early_efi_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
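/*
 * A minimal sketch of the offset arithmetic used by early_efi_write() and
 * early_efi_write_char() above, assuming the 32 bits-per-pixel framebuffer
 * that early_efi_setup() requires; cell_offset() is a hypothetical helper,
 * not part of the console code above:
 */
static inline unsigned long cell_offset(struct screen_info *si,
					unsigned int x, unsigned int y,
					unsigned int h)
{
	/* scanline (y + h) starts at (y + h) * lfb_linelength bytes, */
	/* and pixel column x lies x * 4 bytes into that line (32bpp) */
	return (unsigned long)(y + h) * si->lfb_linelength + x * 4;
}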


@@ -0,0 +1,105 @@
/*
* Copyright 2012 Intel Corporation
* Author: Josh Triplett <josh@joshtriplett.org>
*
* Based on the bgrt driver:
* Copyright 2012 Red Hat, Inc <mjg@redhat.com>
* Author: Matthew Garrett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
struct acpi_table_bgrt *bgrt_tab;
void *__initdata bgrt_image;
size_t __initdata bgrt_image_size;
struct bmp_header {
u16 id;
u32 size;
} __packed;
void __init efi_bgrt_init(void)
{
acpi_status status;
void __iomem *image;
bool ioremapped = false;
struct bmp_header bmp_header;
if (acpi_disabled)
return;
status = acpi_get_table("BGRT", 0,
(struct acpi_table_header **)&bgrt_tab);
if (ACPI_FAILURE(status))
return;
if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
bgrt_tab->header.length, sizeof(*bgrt_tab));
return;
}
if (bgrt_tab->version != 1) {
pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
bgrt_tab->version);
return;
}
if (bgrt_tab->status != 1) {
pr_err("Ignoring BGRT: invalid status %u (expected 1)\n",
bgrt_tab->status);
return;
}
if (bgrt_tab->image_type != 0) {
pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
bgrt_tab->image_type);
return;
}
if (!bgrt_tab->image_address) {
pr_err("Ignoring BGRT: null image address\n");
return;
}
image = efi_lookup_mapped_addr(bgrt_tab->image_address);
if (!image) {
image = early_memremap(bgrt_tab->image_address,
sizeof(bmp_header));
ioremapped = true;
if (!image) {
pr_err("Ignoring BGRT: failed to map image header memory\n");
return;
}
}
memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
if (ioremapped)
early_iounmap(image, sizeof(bmp_header));
bgrt_image_size = bmp_header.size;
bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
if (!bgrt_image) {
pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
bgrt_image_size);
return;
}
if (ioremapped) {
image = early_memremap(bgrt_tab->image_address,
bmp_header.size);
if (!image) {
pr_err("Ignoring BGRT: failed to map image memory\n");
kfree(bgrt_image);
bgrt_image = NULL;
return;
}
}
memcpy_fromio(bgrt_image, image, bgrt_image_size);
if (ioremapped)
early_iounmap(image, bmp_header.size);
}
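/*
 * A minimal sketch of the BMP header fields consumed above, assuming a
 * standard BMP file: the two-byte "BM" magic is followed by a 32-bit
 * little-endian total file size, which efi_bgrt_init() uses as
 * bgrt_image_size. bmp_header_looks_valid() is a hypothetical helper and
 * is not called by the code above.
 */
static inline bool bmp_header_looks_valid(const struct bmp_header *hdr)
{
	return hdr->id == 0x4d42;	/* 'B' (0x42) then 'M' (0x4d), little-endian */
}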

arch/x86/platform/efi/efi.c

@@ -0,0 +1,945 @@
/*
* Common EFI (Extensible Firmware Interface) support functions
* Based on Extensible Firmware Interface Specification version 1.0
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999-2002 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2005-2008 Intel Co.
* Fenghua Yu <fenghua.yu@intel.com>
* Bibo Mao <bibo.mao@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
* Huang Ying <ying.huang@intel.com>
* Copyright (C) 2013 SuSE Labs
* Borislav Petkov <bp@suse.de> - runtime services VA mapping
*
* Copied from efi_32.c to eliminate the duplicated code between EFI
* 32/64 support code. --ying 2007-10-26
*
* All EFI Runtime Services are not implemented yet as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed
* in a future version. --drummond 1999-07-20
*
* Implemented EFI runtime services and virtual mode calls. --davidm
*
* Goutham Rao: <goutham.rao@intel.com>
* Skip non-WB memory and ignore empty memory ranges.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
#include <asm/uv/uv.h>
#define EFI_DEBUG
struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
static efi_config_table_type_t arch_tables[] __initdata = {
#ifdef CONFIG_X86_UV
{UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
#endif
{NULL_GUID, NULL, NULL},
};
u64 efi_setup; /* efi setup_data physical address */
static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
add_efi_memmap = 1;
return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);
static efi_status_t __init phys_efi_set_virtual_address_map(
unsigned long memory_map_size,
unsigned long descriptor_size,
u32 descriptor_version,
efi_memory_desc_t *virtual_map)
{
efi_status_t status;
efi_call_phys_prolog();
status = efi_call_phys(efi_phys.set_virtual_address_map,
memory_map_size, descriptor_size,
descriptor_version, virtual_map);
efi_call_phys_epilog();
return status;
}
void efi_get_time(struct timespec *now)
{
efi_status_t status;
efi_time_t eft;
efi_time_cap_t cap;
status = efi.get_time(&eft, &cap);
if (status != EFI_SUCCESS)
pr_err("Oops: efitime: can't read time!\n");
now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
eft.minute, eft.second);
now->tv_nsec = 0;
}
/*
* Tell the kernel about the EFI memory map. This might include
* more than the max 128 entries that can fit in the e820 legacy
* (zeropage) memory map.
*/
static void __init do_add_efi_memmap(void)
{
void *p;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
int e820_type;
switch (md->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
if (md->attribute & EFI_MEMORY_WB)
e820_type = E820_RAM;
else
e820_type = E820_RESERVED;
break;
case EFI_ACPI_RECLAIM_MEMORY:
e820_type = E820_ACPI;
break;
case EFI_ACPI_MEMORY_NVS:
e820_type = E820_NVS;
break;
case EFI_UNUSABLE_MEMORY:
e820_type = E820_UNUSABLE;
break;
default:
/*
* EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
* EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
* EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
*/
e820_type = E820_RESERVED;
break;
}
e820_add_region(start, size, e820_type);
}
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
int __init efi_memblock_x86_reserve_range(void)
{
struct efi_info *e = &boot_params.efi_info;
unsigned long pmap;
if (efi_enabled(EFI_PARAVIRT))
return 0;
#ifdef CONFIG_X86_32
/* Can't handle data above 4GB at this time */
if (e->efi_memmap_hi) {
pr_err("Memory map is above 4GB, disabling EFI.\n");
return -EINVAL;
}
pmap = e->efi_memmap;
#else
pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
memmap.phys_map = (void *)pmap;
memmap.nr_map = e->efi_memmap_size /
e->efi_memdesc_size;
memmap.desc_size = e->efi_memdesc_size;
memmap.desc_version = e->efi_memdesc_version;
memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
efi.memmap = &memmap;
return 0;
}
static void __init print_efi_memmap(void)
{
#ifdef EFI_DEBUG
efi_memory_desc_t *md;
void *p;
int i;
for (p = memmap.map, i = 0;
p < memmap.map_end;
p += memmap.desc_size, i++) {
char buf[64];
md = p;
pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
i, efi_md_typeattr_format(buf, sizeof(buf), md),
md->phys_addr,
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
}
#endif /* EFI_DEBUG */
}
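/*
 * Worked example for the size printed above: with EFI_PAGE_SHIFT == 12,
 * (md->num_pages >> (20 - EFI_PAGE_SHIFT)) shifts the 4 KiB page count
 * right by 8, i.e. divides it by 256, converting pages to MiB
 * (num_pages * 4096 / 1048576).
 */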
void __init efi_unmap_memmap(void)
{
clear_bit(EFI_MEMMAP, &efi.flags);
if (memmap.map) {
early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
memmap.map = NULL;
}
}
static int __init efi_systab_init(void *phys)
{
if (efi_enabled(EFI_64BIT)) {
efi_system_table_64_t *systab64;
struct efi_setup_data *data = NULL;
u64 tmp = 0;
if (efi_setup) {
data = early_memremap(efi_setup, sizeof(*data));
if (!data)
return -ENOMEM;
}
systab64 = early_memremap((unsigned long)phys,
sizeof(*systab64));
if (systab64 == NULL) {
pr_err("Couldn't map the system table!\n");
if (data)
early_memunmap(data, sizeof(*data));
return -ENOMEM;
}
efi_systab.hdr = systab64->hdr;
efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
systab64->fw_vendor;
tmp |= data ? data->fw_vendor : systab64->fw_vendor;
efi_systab.fw_revision = systab64->fw_revision;
efi_systab.con_in_handle = systab64->con_in_handle;
tmp |= systab64->con_in_handle;
efi_systab.con_in = systab64->con_in;
tmp |= systab64->con_in;
efi_systab.con_out_handle = systab64->con_out_handle;
tmp |= systab64->con_out_handle;
efi_systab.con_out = systab64->con_out;
tmp |= systab64->con_out;
efi_systab.stderr_handle = systab64->stderr_handle;
tmp |= systab64->stderr_handle;
efi_systab.stderr = systab64->stderr;
tmp |= systab64->stderr;
efi_systab.runtime = data ?
(void *)(unsigned long)data->runtime :
(void *)(unsigned long)systab64->runtime;
tmp |= data ? data->runtime : systab64->runtime;
efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
tmp |= systab64->boottime;
efi_systab.nr_tables = systab64->nr_tables;
efi_systab.tables = data ? (unsigned long)data->tables :
systab64->tables;
tmp |= data ? data->tables : systab64->tables;
early_memunmap(systab64, sizeof(*systab64));
if (data)
early_memunmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
if (tmp >> 32) {
pr_err("EFI data located above 4GB, disabling EFI.\n");
return -EINVAL;
}
#endif
} else {
efi_system_table_32_t *systab32;
systab32 = early_memremap((unsigned long)phys,
sizeof(*systab32));
if (systab32 == NULL) {
pr_err("Couldn't map the system table!\n");
return -ENOMEM;
}
efi_systab.hdr = systab32->hdr;
efi_systab.fw_vendor = systab32->fw_vendor;
efi_systab.fw_revision = systab32->fw_revision;
efi_systab.con_in_handle = systab32->con_in_handle;
efi_systab.con_in = systab32->con_in;
efi_systab.con_out_handle = systab32->con_out_handle;
efi_systab.con_out = systab32->con_out;
efi_systab.stderr_handle = systab32->stderr_handle;
efi_systab.stderr = systab32->stderr;
efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
efi_systab.nr_tables = systab32->nr_tables;
efi_systab.tables = systab32->tables;
early_memunmap(systab32, sizeof(*systab32));
}
efi.systab = &efi_systab;
/*
* Verify the EFI Table
*/
if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
pr_err("System table signature incorrect!\n");
return -EINVAL;
}
if ((efi.systab->hdr.revision >> 16) == 0)
pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
set_bit(EFI_SYSTEM_TABLES, &efi.flags);
return 0;
}
static int __init efi_runtime_init32(void)
{
efi_runtime_services_32_t *runtime;
runtime = early_memremap((unsigned long)efi.systab->runtime,
sizeof(efi_runtime_services_32_t));
if (!runtime) {
pr_err("Could not map the runtime service table!\n");
return -ENOMEM;
}
/*
* We will only need *early* access to the SetVirtualAddressMap
* EFI runtime service. All other runtime services will be called
* via the virtual mapping.
*/
efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *)
(unsigned long)runtime->set_virtual_address_map;
early_memunmap(runtime, sizeof(efi_runtime_services_32_t));
return 0;
}
static int __init efi_runtime_init64(void)
{
efi_runtime_services_64_t *runtime;
runtime = early_memremap((unsigned long)efi.systab->runtime,
sizeof(efi_runtime_services_64_t));
if (!runtime) {
pr_err("Could not map the runtime service table!\n");
return -ENOMEM;
}
/*
* We will only need *early* access to the SetVirtualAddressMap
* EFI runtime service. All other runtime services will be called
* via the virtual mapping.
*/
efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *)
(unsigned long)runtime->set_virtual_address_map;
early_memunmap(runtime, sizeof(efi_runtime_services_64_t));
return 0;
}
static int __init efi_runtime_init(void)
{
int rv;
/*
* Check out the runtime services table. We need to map
* the runtime services table so that we can grab the physical
* address of several of the EFI runtime functions, needed to
* set the firmware into virtual mode.
*
* When EFI_PARAVIRT is in force we cannot map the runtime service
* memory region because we do not have direct access to it. However,
* runtime services are available through proxy functions (e.g. the Xen
* dom0 EFI implementation issues a special hypercall which executes the
* relevant EFI function), which is why they are always enabled.
*/
if (!efi_enabled(EFI_PARAVIRT)) {
if (efi_enabled(EFI_64BIT))
rv = efi_runtime_init64();
else
rv = efi_runtime_init32();
if (rv)
return rv;
}
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return 0;
}
static int __init efi_memmap_init(void)
{
if (efi_enabled(EFI_PARAVIRT))
return 0;
/* Map the EFI memory map */
memmap.map = early_memremap((unsigned long)memmap.phys_map,
memmap.nr_map * memmap.desc_size);
if (memmap.map == NULL) {
pr_err("Could not map the memory map!\n");
return -ENOMEM;
}
memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
if (add_efi_memmap)
do_add_efi_memmap();
set_bit(EFI_MEMMAP, &efi.flags);
return 0;
}
void __init efi_init(void)
{
efi_char16_t *c16;
char vendor[100] = "unknown";
int i = 0;
void *tmp;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
boot_params.efi_info.efi_memmap_hi) {
pr_info("Table located above 4GB, disabling EFI.\n");
return;
}
efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
#else
efi_phys.systab = (efi_system_table_t *)
(boot_params.efi_info.efi_systab |
((__u64)boot_params.efi_info.efi_systab_hi<<32));
#endif
if (efi_systab_init(efi_phys.systab))
return;
efi.config_table = (unsigned long)efi.systab->tables;
efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
efi.runtime = (unsigned long)efi.systab->runtime;
/*
* Show what we know for posterity
*/
c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
if (c16) {
for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = *c16++;
vendor[i] = '\0';
} else
pr_err("Could not map the firmware vendor!\n");
early_memunmap(tmp, 2);
pr_info("EFI v%u.%.02u by %s\n",
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
return;
if (efi_config_init(arch_tables))
return;
/*
* Note: We currently don't support runtime services on an EFI
* that doesn't match the kernel 32/64-bit mode.
*/
if (!efi_runtime_supported())
pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
else {
if (efi_runtime_disabled() || efi_runtime_init())
return;
}
if (efi_memmap_init())
return;
print_efi_memmap();
}
void __init efi_late_init(void)
{
efi_bgrt_init();
}
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
u64 addr, npages;
addr = md->virt_addr;
npages = md->num_pages;
memrange_efi_to_native(&addr, &npages);
if (executable)
set_memory_x(addr, npages);
else
set_memory_nx(addr, npages);
}
void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
void *p;
/* Make EFI runtime service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (md->type != EFI_RUNTIME_SERVICES_CODE)
continue;
efi_set_executable(md, true);
}
}
void __init efi_memory_uc(u64 addr, unsigned long size)
{
unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
u64 npages;
npages = round_up(size, page_shift) / page_shift;
memrange_efi_to_native(&addr, &npages);
set_memory_uc(addr, npages);
}
void __init old_map_region(efi_memory_desc_t *md)
{
u64 start_pfn, end_pfn, end;
unsigned long size;
void *va;
start_pfn = PFN_DOWN(md->phys_addr);
size = md->num_pages << PAGE_SHIFT;
end = md->phys_addr + size;
end_pfn = PFN_UP(end);
if (pfn_range_is_mapped(start_pfn, end_pfn)) {
va = __va(md->phys_addr);
if (!(md->attribute & EFI_MEMORY_WB))
efi_memory_uc((u64)(unsigned long)va, size);
} else
va = efi_ioremap(md->phys_addr, size,
md->type, md->attribute);
md->virt_addr = (u64) (unsigned long) va;
if (!va)
pr_err("ioremap of 0x%llX failed!\n",
(unsigned long long)md->phys_addr);
}
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
void *p;
efi_memory_desc_t *md, *prev_md = NULL;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
u64 prev_size;
md = p;
if (!prev_md) {
prev_md = md;
continue;
}
if (prev_md->type != md->type ||
prev_md->attribute != md->attribute) {
prev_md = md;
continue;
}
prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
prev_md->num_pages += md->num_pages;
md->type = EFI_RESERVED_TYPE;
md->attribute = 0;
continue;
}
prev_md = md;
}
}
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
unsigned long size;
u64 end, systab;
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
systab = (u64)(unsigned long)efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;
efi.systab = (efi_system_table_t *)(unsigned long)systab;
}
}
static void __init save_runtime_map(void)
{
#ifdef CONFIG_KEXEC
efi_memory_desc_t *md;
void *tmp, *p, *q = NULL;
int count = 0;
if (efi_enabled(EFI_OLD_MEMMAP))
return;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
(md->type == EFI_BOOT_SERVICES_CODE) ||
(md->type == EFI_BOOT_SERVICES_DATA))
continue;
tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
if (!tmp)
goto out;
q = tmp;
memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
count++;
}
efi_runtime_map_setup(q, count, memmap.desc_size);
return;
out:
kfree(q);
pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
#endif
}
static void *realloc_pages(void *old_memmap, int old_shift)
{
void *ret;
ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
if (!ret)
goto out;
/*
* A first-time allocation doesn't have anything to copy.
*/
if (!old_memmap)
return ret;
memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
out:
free_pages((unsigned long)old_memmap, old_shift);
return ret;
}
/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
*/
static void * __init efi_map_regions(int *count, int *pg_shift)
{
void *p, *new_memmap = NULL;
unsigned long left = 0;
efi_memory_desc_t *md;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
#endif
continue;
}
efi_map_region(md);
get_systab_virt_addr(md);
if (left < memmap.desc_size) {
new_memmap = realloc_pages(new_memmap, *pg_shift);
if (!new_memmap)
return NULL;
left += PAGE_SIZE << *pg_shift;
(*pg_shift)++;
}
memcpy(new_memmap + (*count * memmap.desc_size), md,
memmap.desc_size);
left -= memmap.desc_size;
(*count)++;
}
return new_memmap;
}
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC
efi_memory_desc_t *md;
void *p;
efi.systab = NULL;
/*
* We don't do virtual mode, since we don't do runtime services, on
* non-native EFI
*/
if (!efi_is_native()) {
efi_unmap_memmap();
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
}
/*
* Map the EFI regions which were passed via setup_data. The virt_addr is a
* fixed address which was used in the first kernel of a kexec boot.
*/
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
efi_map_region_fixed(md); /* FIXME: add error handling */
get_systab_virt_addr(md);
}
save_runtime_map();
BUG_ON(!efi.systab);
efi_sync_low_kernel_mappings();
/*
* Now that EFI is in virtual mode, update the function
* pointers in the runtime service table to the new virtual addresses.
*
* Call EFI services through wrapper functions.
*/
efi.runtime_version = efi_systab.hdr.revision;
efi_native_runtime_setup();
efi.set_virtual_address_map = NULL;
if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
/* clean DUMMY object */
efi_delete_dummy_variable();
#endif
}
/*
* This function will switch the EFI runtime services to virtual mode.
* Essentially, we look through the EFI memmap and map every region that
* has the runtime attribute bit set in its memory descriptor into the
* ->trampoline_pgd page table using a top-down VA allocation scheme.
*
* The old method which used to update that memory descriptor with the
* virtual address obtained from ioremap() is still supported when the
* kernel is booted with efi=old_map on its command line. That old method
* also enabled the runtime services to be called without having to thunk
* back into physical mode for every invocation.
*
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
* function. For function arguments passing we do copy the PGDs of the
* kernel page table into ->trampoline_pgd prior to each call.
*
* Specifically for a kexec boot, the EFI runtime maps of the previous
* kernel should be passed in via setup_data. In that case the runtime
* ranges will be mapped to the same virtual addresses as in the first kernel, see
* kexec_enter_virtual_mode().
*/
static void __init __efi_enter_virtual_mode(void)
{
int count = 0, pg_shift = 0;
void *new_memmap = NULL;
efi_status_t status;
efi.systab = NULL;
efi_merge_regions();
new_memmap = efi_map_regions(&count, &pg_shift);
if (!new_memmap) {
pr_err("Error reallocating memory, EFI runtime non-functional!\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
}
save_runtime_map();
BUG_ON(!efi.systab);
if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
}
efi_sync_low_kernel_mappings();
efi_dump_pagetable();
if (efi_is_native()) {
status = phys_efi_set_virtual_address_map(
memmap.desc_size * count,
memmap.desc_size,
memmap.desc_version,
(efi_memory_desc_t *)__pa(new_memmap));
} else {
status = efi_thunk_set_virtual_address_map(
efi_phys.set_virtual_address_map,
memmap.desc_size * count,
memmap.desc_size,
memmap.desc_version,
(efi_memory_desc_t *)__pa(new_memmap));
}
if (status != EFI_SUCCESS) {
pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
status);
panic("EFI call to SetVirtualAddressMap() failed!");
}
/*
* Now that EFI is in virtual mode, update the function
* pointers in the runtime service table to the new virtual addresses.
*
* Call EFI services through wrapper functions.
*/
efi.runtime_version = efi_systab.hdr.revision;
if (efi_is_native())
efi_native_runtime_setup();
else
efi_thunk_runtime_setup();
efi.set_virtual_address_map = NULL;
efi_runtime_mkexec();
/*
* We mapped the descriptor array into the EFI pagetable above but we're
* not unmapping it here. Here's why:
*
* We're copying select PGDs from the kernel page table to the EFI page
* table and when we do so and make changes to those PGDs like unmapping
* stuff from them, those changes appear in the kernel page table and we
* go boom.
*
* From setup_real_mode():
*
* ...
* trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
*
* In this particular case, our allocation is in PGD 0 of the EFI page
* table but we've copied that PGD from PGD[272] of the EFI page table:
*
* pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
*
* where the direct memory mapping in kernel space is.
*
* new_memmap's VA comes from that direct mapping, so clearing it here
* would clear it in the kernel page table too.
*
* efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
*/
free_pages((unsigned long)new_memmap, pg_shift);
/* clean DUMMY object */
efi_delete_dummy_variable();
}
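/*
 * For the PGD arithmetic in the comment above: with 4-level paging a PGD
 * entry covers 2^39 bytes, so pgd_index(0xffff880000000000) =
 * (0x880000000000 >> 39) & 0x1ff = 0x110 = 272, i.e. the direct-mapping
 * PGD slot that trampoline_pgd[0] is copied from.
 */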
void __init efi_enter_virtual_mode(void)
{
if (efi_enabled(EFI_PARAVIRT))
return;
if (efi_setup)
kexec_enter_virtual_mode();
else
__efi_enter_virtual_mode();
}
/*
* Convenience functions to obtain memory types and attributes
*/
u32 efi_mem_type(unsigned long phys_addr)
{
efi_memory_desc_t *md;
void *p;
if (!efi_enabled(EFI_MEMMAP))
return 0;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if ((md->phys_addr <= phys_addr) &&
(phys_addr < (md->phys_addr +
(md->num_pages << EFI_PAGE_SHIFT))))
return md->type;
}
return 0;
}
u64 efi_mem_attributes(unsigned long phys_addr)
{
efi_memory_desc_t *md;
void *p;
if (!efi_enabled(EFI_MEMMAP))
return 0;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if ((md->phys_addr <= phys_addr) &&
(phys_addr < (md->phys_addr +
(md->num_pages << EFI_PAGE_SHIFT))))
return md->attribute;
}
return 0;
}
static int __init arch_parse_efi_cmdline(char *str)
{
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
return 0;
}
early_param("efi", arch_parse_efi_cmdline);


@@ -0,0 +1,92 @@
/*
* Extensible Firmware Interface
*
* Based on Extensible Firmware Interface Specification version 1.0
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999-2002 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* All EFI Runtime Services are not implemented yet as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed
* in a future version. --drummond 1999-07-20
*
* Implemented EFI runtime services and virtual mode calls. --davidm
*
* Goutham Rao: <goutham.rao@intel.com>
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/efi.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
/*
* To call an EFI runtime service in physical addressing mode we need a
* prolog/epilog around the invocation to disable interrupts, claim the
* EFI runtime service handler exclusively, and duplicate the memory
* mapping into the low memory space, i.e. 0 - 3G.
*/
static unsigned long efi_rt_eflags;
void efi_sync_low_kernel_mappings(void) {}
void __init efi_dump_pagetable(void) {}
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
return 0;
}
void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
}
void __init efi_map_region(efi_memory_desc_t *md)
{
old_map_region(md);
}
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
void __init efi_call_phys_prolog(void)
{
struct desc_ptr gdt_descr;
local_irq_save(efi_rt_eflags);
load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
}
void __init efi_call_phys_epilog(void)
{
struct desc_ptr gdt_descr;
gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
load_cr3(swapper_pg_dir);
__flush_tlb_all();
local_irq_restore(efi_rt_eflags);
}
void __init efi_runtime_mkexec(void)
{
if (__supported_pte_mask & _PAGE_NX)
runtime_code_page_mkexec();
}


@@ -0,0 +1,604 @@
/*
* x86_64 specific EFI support functions
* Based on Extensible Firmware Interface Specification version 1.0
*
* Copyright (C) 2005-2008 Intel Co.
* Fenghua Yu <fenghua.yu@intel.com>
* Bibo Mao <bibo.mao@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
* Huang Ying <ying.huang@intel.com>
*
* Code to convert EFI to E820 map has been implemented in elilo bootloader
* based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
* is setup appropriately for EFI runtime code.
* - mouli 06/14/2007.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;
/*
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
* 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
*/
static u64 efi_va = -4 * (1UL << 30);
#define EFI_VA_END (-68 * (1UL << 30))
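/*
 * Worked values for the window above: efi_va starts at -4 GiB, i.e.
 * 0xffffffff00000000, and EFI_VA_END is -68 GiB, i.e. 0xffffffef00000000,
 * leaving a 64 GiB top-down allocation window for runtime regions.
 */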
/*
* Scratch space used for switching the pagetable in the EFI stub
*/
struct efi_scratch {
u64 r15;
u64 prev_cr3;
pgd_t *efi_pgt;
bool use_pgd;
u64 phys_stack;
} __packed;
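/*
 * Because the struct is __packed, the byte offsets are fixed: r15 at 0,
 * prev_cr3 at 8, efi_pgt at 16, use_pgd at 24 and phys_stack at 25. The
 * assembly stubs in this commit rely on exactly these offsets when they
 * access efi_scratch+8, +16, +24 and +25.
 */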
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
void *p;
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
}
}
void __init efi_call_phys_prolog(void)
{
unsigned long vaddress;
int pgd;
int n_pgds;
if (!efi_enabled(EFI_OLD_MEMMAP))
return;
early_code_mapping_set_exec(1);
local_irq_save(efi_flags);
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
for (pgd = 0; pgd < n_pgds; pgd++) {
save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
}
__flush_tlb_all();
}
void __init efi_call_phys_epilog(void)
{
/*
* After the lock is released, the original page table is restored.
*/
int pgd;
int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
if (!efi_enabled(EFI_OLD_MEMMAP))
return;
for (pgd = 0; pgd < n_pgds; pgd++)
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
kfree(save_pgd);
__flush_tlb_all();
local_irq_restore(efi_flags);
early_code_mapping_set_exec(0);
}
/*
* Add low kernel mappings for passing arguments to EFI functions.
*/
void efi_sync_low_kernel_mappings(void)
{
unsigned num_pgds;
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
if (efi_enabled(EFI_OLD_MEMMAP))
return;
num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
memcpy(pgd + pgd_index(PAGE_OFFSET),
init_mm.pgd + pgd_index(PAGE_OFFSET),
sizeof(pgd_t) * num_pgds);
}
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
unsigned long text;
struct page *page;
unsigned npages;
pgd_t *pgd;
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
pgd = __va(efi_scratch.efi_pgt);
/*
* It can happen that the physical address of new_memmap lands in memory
* which is not mapped in the EFI page table. Therefore we need to go
* and ident-map those pages containing the map before calling
* phys_efi_set_virtual_address_map().
*/
if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1;
}
efi_scratch.use_pgd = true;
/*
* When making calls to the firmware everything needs to be 1:1
* mapped and addressable with 32-bit pointers. Map the kernel
* text and allocate a new stack because we can't rely on the
* stack pointer being < 4GB.
*/
if (!IS_ENABLED(CONFIG_EFI_MIXED))
return 0;
page = alloc_page(GFP_KERNEL|__GFP_DMA32);
if (!page)
panic("Unable to allocate EFI runtime stack < 4GB\n");
efi_scratch.phys_stack = virt_to_phys(page_address(page));
efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
npages = (_end - _text) >> PAGE_SHIFT;
text = __pa(_text);
if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
pr_err("Failed to map kernel text 1:1\n");
return 1;
}
return 0;
}
void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
}
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
unsigned long pf = 0;
if (!(md->attribute & EFI_MEMORY_WB))
pf |= _PAGE_PCD;
if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, va);
}
void __init efi_map_region(efi_memory_desc_t *md)
{
unsigned long size = md->num_pages << PAGE_SHIFT;
u64 pa = md->phys_addr;
if (efi_enabled(EFI_OLD_MEMMAP))
return old_map_region(md);
/*
* Make sure the 1:1 mappings are present as a catch-all for b0rked
* firmware which doesn't update all internal pointers after switching
* to virtual mode and would otherwise crap on us.
*/
__map_region(md, md->phys_addr);
/*
* Enforce the 1:1 mapping as the default virtual address when
* booting in EFI mixed mode, because even though we may be
* running a 64-bit kernel, the firmware may only be 32-bit.
*/
if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
md->virt_addr = md->phys_addr;
return;
}
efi_va -= size;
/* Is PA 2M-aligned? */
if (!(pa & (PMD_SIZE - 1))) {
efi_va &= PMD_MASK;
} else {
u64 pa_offset = pa & (PMD_SIZE - 1);
u64 prev_va = efi_va;
/* get us the same offset within this 2M page */
efi_va = (efi_va & PMD_MASK) + pa_offset;
if (efi_va > prev_va)
efi_va -= PMD_SIZE;
}
if (efi_va < EFI_VA_END) {
pr_warn(FW_WARN "VA address range overflow!\n");
return;
}
/* Do the VA map */
__map_region(md, efi_va);
md->virt_addr = efi_va;
}
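/*
 * A worked example of the unaligned case above (numbers are hypothetical):
 * with size = 0x3000 and pa = 0x7f201000, starting from
 * efi_va = 0xffffffff00000000, "efi_va -= size" gives 0xfffffffeffffd000;
 * pa_offset = pa & (PMD_SIZE - 1) = 0x1000, so efi_va becomes
 * (0xfffffffeffffd000 & PMD_MASK) + 0x1000 = 0xfffffffeffe01000, which is
 * congruent to pa modulo 2 MiB and therefore eligible for large-page
 * mappings.
 */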
/*
* kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
* md->virt_addr is the original virtual address which had been mapped in kexec
* 1st kernel.
*/
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
__map_region(md, md->virt_addr);
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type, u64 attribute)
{
unsigned long last_map_pfn;
if (type == EFI_MEMORY_MAPPED_IO)
return ioremap(phys_addr, size);
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
unsigned long top = last_map_pfn << PAGE_SHIFT;
efi_ioremap(top, size - (top - phys_addr), type, attribute);
}
if (!(attribute & EFI_MEMORY_WB))
efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
return (void __iomem *)__va(phys_addr);
}
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
efi_setup = phys_addr + sizeof(struct setup_data);
}
void __init efi_runtime_mkexec(void)
{
if (!efi_enabled(EFI_OLD_MEMMAP))
return;
if (__supported_pte_mask & _PAGE_NX)
runtime_code_page_mkexec();
}
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
ptdump_walk_pgd_level(NULL, pgd);
#endif
}
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);
#define runtime_service32(func) \
({ \
u32 table = (u32)(unsigned long)efi.systab; \
u32 *rt, *___f; \
\
rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
*___f; \
})
/*
* Switch to the EFI page tables early so that we can access the 1:1
* runtime services mappings which are not mapped in any other page
* tables. This function must be called before runtime_service32().
*
* Also, disable interrupts because the IDT points to 64-bit handlers,
* which aren't going to function correctly when we switch to 32-bit.
*/
#define efi_thunk(f, ...) \
({ \
efi_status_t __s; \
unsigned long flags; \
u32 func; \
\
efi_sync_low_kernel_mappings(); \
local_irq_save(flags); \
\
efi_scratch.prev_cr3 = read_cr3(); \
write_cr3((unsigned long)efi_scratch.efi_pgt); \
__flush_tlb_all(); \
\
func = runtime_service32(f); \
__s = efi64_thunk(func, __VA_ARGS__); \
\
write_cr3(efi_scratch.prev_cr3); \
__flush_tlb_all(); \
local_irq_restore(flags); \
\
__s; \
})
efi_status_t efi_thunk_set_virtual_address_map(
void *phys_set_virtual_address_map,
unsigned long memory_map_size,
unsigned long descriptor_size,
u32 descriptor_version,
efi_memory_desc_t *virtual_map)
{
efi_status_t status;
unsigned long flags;
u32 func;
efi_sync_low_kernel_mappings();
local_irq_save(flags);
efi_scratch.prev_cr3 = read_cr3();
write_cr3((unsigned long)efi_scratch.efi_pgt);
__flush_tlb_all();
func = (u32)(unsigned long)phys_set_virtual_address_map;
status = efi64_thunk(func, memory_map_size, descriptor_size,
descriptor_version, virtual_map);
write_cr3(efi_scratch.prev_cr3);
__flush_tlb_all();
local_irq_restore(flags);
return status;
}
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
u32 phys_tm, phys_tc;
spin_lock(&rtc_lock);
phys_tm = virt_to_phys(tm);
phys_tc = virt_to_phys(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
spin_unlock(&rtc_lock);
return status;
}
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
spin_lock(&rtc_lock);
phys_tm = virt_to_phys(tm);
status = efi_thunk(set_time, phys_tm);
spin_unlock(&rtc_lock);
return status;
}
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
efi_time_t *tm)
{
efi_status_t status;
u32 phys_enabled, phys_pending, phys_tm;
spin_lock(&rtc_lock);
phys_enabled = virt_to_phys(enabled);
phys_pending = virt_to_phys(pending);
phys_tm = virt_to_phys(tm);
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
spin_unlock(&rtc_lock);
return status;
}
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
efi_status_t status;
u32 phys_tm;
spin_lock(&rtc_lock);
phys_tm = virt_to_phys(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
spin_unlock(&rtc_lock);
return status;
}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 *attr, unsigned long *data_size, void *data)
{
efi_status_t status;
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
phys_data_size = virt_to_phys(data_size);
phys_vendor = virt_to_phys(vendor);
phys_name = virt_to_phys(name);
phys_attr = virt_to_phys(attr);
phys_data = virt_to_phys(data);
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
return status;
}
static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data)
{
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
phys_name = virt_to_phys(name);
phys_vendor = virt_to_phys(vendor);
phys_data = virt_to_phys(data);
/* If data_size is > sizeof(u32) we've got problems */
status = efi_thunk(set_variable, phys_name, phys_vendor,
attr, data_size, phys_data);
return status;
}
static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
phys_name_size = virt_to_phys(name_size);
phys_vendor = virt_to_phys(vendor);
phys_name = virt_to_phys(name);
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
return status;
}
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
efi_status_t status;
u32 phys_count;
phys_count = virt_to_phys(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
return status;
}
static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data)
{
u32 phys_data;
phys_data = virt_to_phys(data);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
unsigned long count, unsigned long sg_list)
{
/*
* To properly support this function we would need to repackage
* 'capsules' because the firmware doesn't understand 64-bit
* pointers.
*/
return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
u64 *remaining_space,
u64 *max_variable_size)
{
efi_status_t status;
u32 phys_storage, phys_remaining, phys_max;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
phys_storage = virt_to_phys(storage_space);
phys_remaining = virt_to_phys(remaining_space);
phys_max = virt_to_phys(max_variable_size);
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
return status;
}
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
unsigned long count, u64 *max_size,
int *reset_type)
{
/*
* To properly support this function we would need to repackage
* 'capsules' because the firmware doesn't understand 64-bit
* pointers.
*/
return EFI_UNSUPPORTED;
}
void efi_thunk_runtime_setup(void)
{
efi.get_time = efi_thunk_get_time;
efi.set_time = efi_thunk_set_time;
efi.get_wakeup_time = efi_thunk_get_wakeup_time;
efi.set_wakeup_time = efi_thunk_set_wakeup_time;
efi.get_variable = efi_thunk_get_variable;
efi.get_next_variable = efi_thunk_get_next_variable;
efi.set_variable = efi_thunk_set_variable;
efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
efi.reset_system = efi_thunk_reset_system;
efi.query_variable_info = efi_thunk_query_variable_info;
efi.update_capsule = efi_thunk_update_capsule;
efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
#endif /* CONFIG_EFI_MIXED */


@@ -0,0 +1,123 @@
/*
* EFI call stub for IA32.
*
* This stub allows us to make EFI calls in physical mode with interrupts
* turned off.
*/
#include <linux/linkage.h>
#include <asm/page_types.h>
/*
* efi_call_phys(void *, ...) is a function with variable parameters.
* All the callers of this function assure that all the parameters are 4-bytes.
*/
/*
* In the gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee-saved.
* So we save all of them at the beginning of this function and restore them at
* the end, no matter how many we use, because we cannot assume that the EFI
* runtime service functions comply with the gcc calling convention either.
*/
.text
ENTRY(efi_call_phys)
/*
* 0. This function can only be called from the Linux kernel, so CS has been
* set to 0x0010 and DS and SS have been set to 0x0018. In EFI the values of
* these registers are the same and the corresponding GDT entries are
* identical, so nothing needs to change about the segment registers or the
* GDT apart from the GDT base register, which is switched in the prolog and epilog.
*/
/*
* 1. We are currently running with EIP = <physical address> + PAGE_OFFSET.
* To make the switch from virtual mode to flat mode smooth, a mapping of
* the lower virtual memory range has been set up by the prolog (and is
* torn down again by the epilog).
*/
movl $1f, %edx
subl $__PAGE_OFFSET, %edx
jmp *%edx
1:
/*
* 2. Now on the top of stack is the return
* address in the caller of efi_call_phys(), then parameter 1,
* parameter 2, ..., param n. To make things easy, we save the return
* address of efi_call_phys in a global variable.
*/
popl %edx
movl %edx, saved_return_addr
/* get the function pointer into ECX*/
popl %ecx
movl %ecx, efi_rt_function_ptr
movl $2f, %edx
subl $__PAGE_OFFSET, %edx
pushl %edx
/*
* 3. Clear PG bit in %CR0.
*/
movl %cr0, %edx
andl $0x7fffffff, %edx
movl %edx, %cr0
jmp 1f
1:
/*
* 4. Adjust stack pointer.
*/
subl $__PAGE_OFFSET, %esp
/*
* 5. Call the physical function.
*/
jmp *%ecx
2:
/*
* 6. After EFI runtime service returns, control will return to
* following instruction. We'd better readjust stack pointer first.
*/
addl $__PAGE_OFFSET, %esp
/*
* 7. Restore PG bit
*/
movl %cr0, %edx
orl $0x80000000, %edx
movl %edx, %cr0
jmp 1f
1:
/*
* 8. Now restore the virtual mode from flat mode by
* adding EIP with PAGE_OFFSET.
*/
movl $1f, %edx
jmp *%edx
1:
/*
* 9. Balance the stack. And because EAX contains the return value,
* we'd better not clobber it.
*/
leal efi_rt_function_ptr, %edx
movl (%edx), %ecx
pushl %ecx
/*
* 10. Push the saved return address onto the stack and return.
*/
leal saved_return_addr, %edx
movl (%edx), %ecx
pushl %ecx
ret
ENDPROC(efi_call_phys)
.previous
.data
saved_return_addr:
.long 0
efi_rt_function_ptr:
.long 0


@@ -0,0 +1,98 @@
/*
* Function calling ABI conversion from Linux to EFI for x86_64
*
* Copyright (C) 2007 Intel Corp
* Bibo Mao <bibo.mao@intel.com>
* Huang Ying <ying.huang@intel.com>
*/
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/page_types.h>
#define SAVE_XMM \
mov %rsp, %rax; \
subq $0x70, %rsp; \
and $~0xf, %rsp; \
mov %rax, (%rsp); \
mov %cr0, %rax; \
clts; \
mov %rax, 0x8(%rsp); \
movaps %xmm0, 0x60(%rsp); \
movaps %xmm1, 0x50(%rsp); \
movaps %xmm2, 0x40(%rsp); \
movaps %xmm3, 0x30(%rsp); \
movaps %xmm4, 0x20(%rsp); \
movaps %xmm5, 0x10(%rsp)
#define RESTORE_XMM \
movaps 0x60(%rsp), %xmm0; \
movaps 0x50(%rsp), %xmm1; \
movaps 0x40(%rsp), %xmm2; \
movaps 0x30(%rsp), %xmm3; \
movaps 0x20(%rsp), %xmm4; \
movaps 0x10(%rsp), %xmm5; \
mov 0x8(%rsp), %rsi; \
mov %rsi, %cr0; \
mov (%rsp), %rsp
/* stolen from gcc */
.macro FLUSH_TLB_ALL
movq %r15, efi_scratch(%rip)
movq %r14, efi_scratch+8(%rip)
movq %cr4, %r15
movq %r15, %r14
andb $0x7f, %r14b
movq %r14, %cr4
movq %r15, %cr4
movq efi_scratch+8(%rip), %r14
movq efi_scratch(%rip), %r15
.endm
.macro SWITCH_PGT
cmpb $0, efi_scratch+24(%rip)
je 1f
movq %r15, efi_scratch(%rip) # r15
# save previous CR3
movq %cr3, %r15
movq %r15, efi_scratch+8(%rip) # prev_cr3
movq efi_scratch+16(%rip), %r15 # EFI pgt
movq %r15, %cr3
1:
.endm
.macro RESTORE_PGT
cmpb $0, efi_scratch+24(%rip)
je 2f
movq efi_scratch+8(%rip), %r15
movq %r15, %cr3
movq efi_scratch(%rip), %r15
FLUSH_TLB_ALL
2:
.endm
ENTRY(efi_call)
SAVE_XMM
mov (%rsp), %rax
mov 8(%rax), %rax
subq $48, %rsp
mov %r9, 32(%rsp)
mov %rax, 40(%rsp)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
SWITCH_PGT
call *%rdi
RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
ENDPROC(efi_call)
.data
ENTRY(efi_scratch)
.fill 3,8,0
.byte 0
.quad 0


@@ -0,0 +1,152 @@
/*
* Copyright (C) 2014 Intel Corporation; author Matt Fleming
*
* Support for invoking 32-bit EFI runtime services from a 64-bit
* kernel.
*
* The below thunking functions are only used after ExitBootServices()
* has been called. This simplifies things considerably as compared with
* the early EFI thunking because we can leave all the kernel state
* intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
* services from __KERNEL32_CS. This means we can continue to service
* interrupts across an EFI mixed mode call.
*
* We do, however, need to handle the fact that we're running in a full
* 64-bit virtual address space. Things like the stack and instruction
* addresses need to be accessible by the 32-bit firmware, so we rely on
* using the identity mappings in the EFI page table to access the stack
* and kernel text (see efi_setup_page_tables()).
*/
#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/segment.h>
.text
.code64
ENTRY(efi64_thunk)
push %rbp
push %rbx
/*
* Switch to 1:1 mapped 32-bit stack pointer.
*/
movq %rsp, efi_saved_sp(%rip)
movq efi_scratch+25(%rip), %rsp
/*
* Calculate the physical address of the kernel text.
*/
movq $__START_KERNEL_map, %rax
subq phys_base(%rip), %rax
/*
* Push some physical addresses onto the stack. This is easier
* to do now in a code64 section while the assembler can address
* 64-bit values. Note that all the addresses on the stack are
* 32-bit.
*/
subq $16, %rsp
leaq efi_exit32(%rip), %rbx
subq %rax, %rbx
movl %ebx, 8(%rsp)
leaq __efi64_thunk(%rip), %rbx
subq %rax, %rbx
call *%rbx
movq efi_saved_sp(%rip), %rsp
pop %rbx
pop %rbp
retq
ENDPROC(efi64_thunk)
/*
* We run this function from the 1:1 mapping.
*
* This function must be invoked with a 1:1 mapped stack.
*/
ENTRY(__efi64_thunk)
movl %ds, %eax
push %rax
movl %es, %eax
push %rax
movl %ss, %eax
push %rax
subq $32, %rsp
movl %esi, 0x0(%rsp)
movl %edx, 0x4(%rsp)
movl %ecx, 0x8(%rsp)
movq %r8, %rsi
movl %esi, 0xc(%rsp)
movq %r9, %rsi
movl %esi, 0x10(%rsp)
leaq 1f(%rip), %rbx
movq %rbx, func_rt_ptr(%rip)
/* Switch to 32-bit descriptor */
pushq $__KERNEL32_CS
leaq efi_enter32(%rip), %rax
pushq %rax
lretq
1: addq $32, %rsp
pop %rbx
movl %ebx, %ss
pop %rbx
movl %ebx, %es
pop %rbx
movl %ebx, %ds
/*
* Convert 32-bit status code into 64-bit.
*/
test %rax, %rax
jz 1f
movl %eax, %ecx
andl $0x0fffffff, %ecx
andl $0xf0000000, %eax
shl $32, %rax
or %rcx, %rax
1:
ret
ENDPROC(__efi64_thunk)
ENTRY(efi_exit32)
movq func_rt_ptr(%rip), %rax
push %rax
mov %rdi, %rax
ret
ENDPROC(efi_exit32)
.code32
/*
* EFI service pointer must be in %edi.
*
* The stack should represent the 32-bit calling convention.
*/
ENTRY(efi_enter32)
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
call *%edi
/* We must preserve return value */
movl %eax, %edi
movl 72(%esp), %eax
pushl $__KERNEL_CS
pushl %eax
lret
ENDPROC(efi_enter32)
.data
.balign 8
func_rt_ptr: .quad 0
efi_saved_sp: .quad 0


@@ -0,0 +1,290 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/efi.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <asm/efi.h>
#include <asm/uv/uv.h>
#define EFI_MIN_RESERVE 5120
#define EFI_DUMMY_GUID \
EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
static bool efi_no_storage_paranoia;
/*
* Some firmware implementations refuse to boot if there's insufficient
* space in the variable store. The implementation of garbage collection
* in some FW versions causes stale (deleted) variables to take up space
* longer than intended and space is only freed once the store becomes
* almost completely full.
*
* Enabling this option disables the space checks in
* efi_query_variable_store() and forces garbage collection.
*
* Only enable this option if deleting EFI variables does not free up
* space in your variable store, e.g. if despite deleting variables
* you're unable to create new ones.
*/
static int __init setup_storage_paranoia(char *arg)
{
efi_no_storage_paranoia = true;
return 0;
}
early_param("efi_no_storage_paranoia", setup_storage_paranoia);
/*
* Deleting the dummy variable which kicks off garbage collection
*/
void efi_delete_dummy_variable(void)
{
efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
0, NULL);
}
/*
* Some firmware implementations refuse to boot if there's insufficient space
* in the variable store. Ensure that we never use more than a safe limit.
*
* Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
* store.
*/
efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
{
efi_status_t status;
u64 storage_size, remaining_size, max_size;
if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
return 0;
status = efi.query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
if (status != EFI_SUCCESS)
return status;
/*
* We account for that by refusing the write if permitting it would
* reduce the available space to under 5KB. This figure was provided by
* Samsung, so should be safe.
*/
if ((remaining_size - size < EFI_MIN_RESERVE) &&
!efi_no_storage_paranoia) {
/*
* Triggering garbage collection may require that the firmware
* generate a real EFI_OUT_OF_RESOURCES error. We can force
* that by attempting to use more space than is available.
*/
unsigned long dummy_size = remaining_size + 1024;
void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
if (!dummy)
return EFI_OUT_OF_RESOURCES;
status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
dummy_size, dummy);
if (status == EFI_SUCCESS) {
/*
* This should have failed, so if it didn't make sure
* that we delete it...
*/
efi_delete_dummy_variable();
}
kfree(dummy);
/*
* The runtime code may now have triggered a garbage collection
* run, so check the variable info again
*/
status = efi.query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
if (status != EFI_SUCCESS)
return status;
/*
* There still isn't enough room, so return an error
*/
if (remaining_size - size < EFI_MIN_RESERVE)
return EFI_OUT_OF_RESOURCES;
}
return EFI_SUCCESS;
}
EXPORT_SYMBOL_GPL(efi_query_variable_store);
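/*
 * Worked example for the check above: with EFI_MIN_RESERVE == 5120 bytes,
 * remaining_size == 6144 and a requested size of 2048, the write would
 * leave only 4096 bytes free, so the dummy-variable garbage-collection
 * path runs (unless efi_no_storage_paranoia was given on the command line).
 */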
/*
* The UEFI specification makes it clear that the operating system is free to do
* whatever it wants with boot services code after ExitBootServices() has been
* called. Ignoring this recommendation, a significant number of EFI
* implementations continue calling into boot services code
* (SetVirtualAddressMap). To work around such buggy implementations we
* reserve the boot services region during EFI init and make sure it stays
* executable. Then, after SetVirtualAddressMap(), it is discarded.
*/
void __init efi_reserve_boot_services(void)
{
void *p;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
/* Only reserve where possible:
* - Not within any already allocated areas
* - Not over any memory area (really needed, if above?)
* - Not within any part of the kernel
* - Not the bios reserved area
*/
if ((start + size > __pa_symbol(_text)
&& start <= __pa_symbol(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
memblock_is_region_reserved(start, size)) {
/* Could not reserve, skip it */
md->num_pages = 0;
memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
start, start+size-1);
} else
memblock_reserve(start, size);
}
}
void __init efi_free_boot_services(void)
{
void *p;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
/* Could not reserve boot area */
if (!size)
continue;
free_bootmem_late(start, size);
}
efi_unmap_memmap();
}
/*
* A number of config table entries get remapped to virtual addresses
* after entering EFI virtual mode. However, the kexec kernel requires
* their physical addresses, therefore we pass them via setup_data and
* correct those entries to their respective physical addresses here.
*
* Currently this only handles SMBIOS, which is necessary for some firmware
* implementations.
*/
int __init efi_reuse_config(u64 tables, int nr_tables)
{
int i, sz, ret = 0;
void *p, *tablep;
struct efi_setup_data *data;
if (!efi_setup)
return 0;
if (!efi_enabled(EFI_64BIT))
return 0;
data = early_memremap(efi_setup, sizeof(*data));
if (!data) {
ret = -ENOMEM;
goto out;
}
if (!data->smbios)
goto out_memremap;
sz = sizeof(efi_config_table_64_t);
p = tablep = early_memremap(tables, nr_tables * sz);
if (!p) {
pr_err("Could not map Configuration table!\n");
ret = -ENOMEM;
goto out_memremap;
}
for (i = 0; i < efi.systab->nr_tables; i++) {
efi_guid_t guid;
guid = ((efi_config_table_64_t *)p)->guid;
if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
((efi_config_table_64_t *)p)->table = data->smbios;
p += sz;
}
early_memunmap(tablep, nr_tables * sz);
out_memremap:
early_memunmap(data, sizeof(*data));
out:
return ret;
}
void __init efi_apply_memmap_quirks(void)
{
/*
* Once setup is done earlier, unmap the EFI memory map on mismatched
* firmware/kernel architectures since there is no support for runtime
* services.
*/
if (!efi_runtime_supported()) {
pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
efi_unmap_memmap();
}
/*
* UV doesn't support the new EFI pagetable mapping yet.
*/
if (is_uv_system())
set_bit(EFI_OLD_MEMMAP, &efi.flags);
}
/*
* For most modern platforms the preferred method of powering off is via
* ACPI. However, there are some that are known to require the use of
* EFI runtime services and for which ACPI does not work at all.
*
* Using EFI is a last resort, to be used only if no other option
* exists.
*/
bool efi_reboot_required(void)
{
if (!acpi_gbl_reduced_hardware)
return false;
efi_reboot_quirk_mode = EFI_RESET_WARM;
return true;
}
bool efi_poweroff_required(void)
{
return !!acpi_gbl_reduced_hardware;
}