Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-09 01:28:05 -04:00)

Commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions
arch/x86/boot/compressed/Makefile (new file, 87 lines)
@@ -0,0 +1,87 @@
#
# linux/arch/x86/boot/compressed/Makefile
#
# create a compressed vmlinux image from the original vmlinux
#

targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4

KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)

KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n

LDFLAGS := -m elf_$(UTS_MACHINE)
LDFLAGS_vmlinux := -T

hostprogs-y := mkpiggy
HOST_EXTRACFLAGS += -I$(srctree)/tools/include

vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
	$(obj)/string.o $(obj)/cmdline.o \
	$(obj)/piggy.o $(obj)/cpuflags.o

vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/aslr.o

$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone

vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o

$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
	$(call if_changed,ld)
	@:

OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
	$(call if_changed,objcopy)

targets += $(patsubst $(obj)/%,%,$(vmlinux-objs-y)) vmlinux.bin.all vmlinux.relocs

CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS  $@
      cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
$(obj)/vmlinux.relocs: vmlinux FORCE
	$(call if_changed,relocs)

vmlinux.bin.all-y := $(obj)/vmlinux.bin
vmlinux.bin.all-$(CONFIG_X86_NEED_RELOCS) += $(obj)/vmlinux.relocs

$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
	$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
	$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
	$(call if_changed,lzma)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
	$(call if_changed,xzkern)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
	$(call if_changed,lzo)
$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
	$(call if_changed,lz4)

suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_XZ) := xz
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4

RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
	     $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
quiet_cmd_mkpiggy = MKPIGGY $@
      cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )

targets += piggy.S
$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
	$(call if_changed,mkpiggy)
arch/x86/boot/compressed/aslr.c (new file, 336 lines)
@@ -0,0 +1,336 @@
#include "misc.h"
|
||||
|
||||
#include <asm/msr.h>
|
||||
#include <asm/archrandom.h>
|
||||
#include <asm/e820.h>
|
||||
|
||||
#include <generated/compile.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/uts.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
/* Simplified build-specific string for starting entropy. */
|
||||
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
|
||||
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
|
||||
|
||||
#define I8254_PORT_CONTROL 0x43
|
||||
#define I8254_PORT_COUNTER0 0x40
|
||||
#define I8254_CMD_READBACK 0xC0
|
||||
#define I8254_SELECT_COUNTER0 0x02
|
||||
#define I8254_STATUS_NOTREADY 0x40
|
||||
static inline u16 i8254(void)
|
||||
{
|
||||
u16 status, timer;
|
||||
|
||||
do {
|
||||
outb(I8254_PORT_CONTROL,
|
||||
I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
|
||||
status = inb(I8254_PORT_COUNTER0);
|
||||
timer = inb(I8254_PORT_COUNTER0);
|
||||
timer |= inb(I8254_PORT_COUNTER0) << 8;
|
||||
} while (status & I8254_STATUS_NOTREADY);
|
||||
|
||||
return timer;
|
||||
}
|
||||
|
||||
static unsigned long rotate_xor(unsigned long hash, const void *area,
|
||||
size_t size)
|
||||
{
|
||||
size_t i;
|
||||
unsigned long *ptr = (unsigned long *)area;
|
||||
|
||||
for (i = 0; i < size / sizeof(hash); i++) {
|
||||
/* Rotate by odd number of bits and XOR. */
|
||||
hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
|
||||
hash ^= ptr[i];
|
||||
}
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
/* Attempt to create a simple but unpredictable starting entropy. */
|
||||
static unsigned long get_random_boot(void)
|
||||
{
|
||||
unsigned long hash = 0;
|
||||
|
||||
hash = rotate_xor(hash, build_str, sizeof(build_str));
|
||||
hash = rotate_xor(hash, real_mode, sizeof(*real_mode));
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
static unsigned long get_random_long(void)
|
||||
{
|
||||
#ifdef CONFIG_X86_64
|
||||
const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
|
||||
#else
|
||||
const unsigned long mix_const = 0x3f39e593UL;
|
||||
#endif
|
||||
unsigned long raw, random = get_random_boot();
|
||||
bool use_i8254 = true;
|
||||
|
||||
debug_putstr("KASLR using");
|
||||
|
||||
if (has_cpuflag(X86_FEATURE_RDRAND)) {
|
||||
debug_putstr(" RDRAND");
|
||||
if (rdrand_long(&raw)) {
|
||||
random ^= raw;
|
||||
use_i8254 = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (has_cpuflag(X86_FEATURE_TSC)) {
|
||||
debug_putstr(" RDTSC");
|
||||
rdtscll(raw);
|
||||
|
||||
random ^= raw;
|
||||
use_i8254 = false;
|
||||
}
|
||||
|
||||
if (use_i8254) {
|
||||
debug_putstr(" i8254");
|
||||
random ^= i8254();
|
||||
}
|
||||
|
||||
/* Circular multiply for better bit diffusion */
|
||||
asm("mul %3"
|
||||
: "=a" (random), "=d" (raw)
|
||||
: "a" (random), "rm" (mix_const));
|
||||
random += raw;
|
||||
|
||||
debug_putstr("...\n");
|
||||
|
||||
return random;
|
||||
}
|
||||
|
||||
struct mem_vector {
|
||||
unsigned long start;
|
||||
unsigned long size;
|
||||
};
|
||||
|
||||
#define MEM_AVOID_MAX 5
|
||||
static struct mem_vector mem_avoid[MEM_AVOID_MAX];
|
||||
|
||||
static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
|
||||
{
|
||||
/* Item at least partially before region. */
|
||||
if (item->start < region->start)
|
||||
return false;
|
||||
/* Item at least partially after region. */
|
||||
if (item->start + item->size > region->start + region->size)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
|
||||
{
|
||||
/* Item one is entirely before item two. */
|
||||
if (one->start + one->size <= two->start)
|
||||
return false;
|
||||
/* Item one is entirely after item two. */
|
||||
if (one->start >= two->start + two->size)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void mem_avoid_init(unsigned long input, unsigned long input_size,
|
||||
unsigned long output, unsigned long output_size)
|
||||
{
|
||||
u64 initrd_start, initrd_size;
|
||||
u64 cmd_line, cmd_line_size;
|
||||
unsigned long unsafe, unsafe_len;
|
||||
char *ptr;
|
||||
|
||||
/*
|
||||
* Avoid the region that is unsafe to overlap during
|
||||
* decompression (see calculations at top of misc.c).
|
||||
*/
|
||||
unsafe_len = (output_size >> 12) + 32768 + 18;
|
||||
unsafe = (unsigned long)input + input_size - unsafe_len;
|
||||
mem_avoid[0].start = unsafe;
|
||||
mem_avoid[0].size = unsafe_len;
|
||||
|
||||
/* Avoid initrd. */
|
||||
initrd_start = (u64)real_mode->ext_ramdisk_image << 32;
|
||||
initrd_start |= real_mode->hdr.ramdisk_image;
|
||||
initrd_size = (u64)real_mode->ext_ramdisk_size << 32;
|
||||
initrd_size |= real_mode->hdr.ramdisk_size;
|
||||
mem_avoid[1].start = initrd_start;
|
||||
mem_avoid[1].size = initrd_size;
|
||||
|
||||
/* Avoid kernel command line. */
|
||||
cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32;
|
||||
cmd_line |= real_mode->hdr.cmd_line_ptr;
|
||||
/* Calculate size of cmd_line. */
|
||||
ptr = (char *)(unsigned long)cmd_line;
|
||||
for (cmd_line_size = 0; ptr[cmd_line_size++]; )
|
||||
;
|
||||
mem_avoid[2].start = cmd_line;
|
||||
mem_avoid[2].size = cmd_line_size;
|
||||
|
||||
/* Avoid heap memory. */
|
||||
mem_avoid[3].start = (unsigned long)free_mem_ptr;
|
||||
mem_avoid[3].size = BOOT_HEAP_SIZE;
|
||||
|
||||
/* Avoid stack memory. */
|
||||
mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
|
||||
mem_avoid[4].size = BOOT_STACK_SIZE;
|
||||
}
|
||||
|
||||
/* Does this memory vector overlap a known avoided area? */
|
||||
static bool mem_avoid_overlap(struct mem_vector *img)
|
||||
{
|
||||
int i;
|
||||
struct setup_data *ptr;
|
||||
|
||||
for (i = 0; i < MEM_AVOID_MAX; i++) {
|
||||
if (mem_overlaps(img, &mem_avoid[i]))
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Avoid all entries in the setup_data linked list. */
|
||||
ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
|
||||
while (ptr) {
|
||||
struct mem_vector avoid;
|
||||
|
||||
avoid.start = (unsigned long)ptr;
|
||||
avoid.size = sizeof(*ptr) + ptr->len;
|
||||
|
||||
if (mem_overlaps(img, &avoid))
|
||||
return true;
|
||||
|
||||
ptr = (struct setup_data *)(unsigned long)ptr->next;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
|
||||
CONFIG_PHYSICAL_ALIGN];
|
||||
static unsigned long slot_max;
|
||||
|
||||
static void slots_append(unsigned long addr)
|
||||
{
|
||||
/* Overflowing the slots list should be impossible. */
|
||||
if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
|
||||
CONFIG_PHYSICAL_ALIGN)
|
||||
return;
|
||||
|
||||
slots[slot_max++] = addr;
|
||||
}
|
||||
|
||||
static unsigned long slots_fetch_random(void)
|
||||
{
|
||||
/* Handle case of no slots stored. */
|
||||
if (slot_max == 0)
|
||||
return 0;
|
||||
|
||||
return slots[get_random_long() % slot_max];
|
||||
}
|
||||
|
||||
static void process_e820_entry(struct e820entry *entry,
|
||||
unsigned long minimum,
|
||||
unsigned long image_size)
|
||||
{
|
||||
struct mem_vector region, img;
|
||||
|
||||
/* Skip non-RAM entries. */
|
||||
if (entry->type != E820_RAM)
|
||||
return;
|
||||
|
||||
/* Ignore entries entirely above our maximum. */
|
||||
if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
|
||||
return;
|
||||
|
||||
/* Ignore entries entirely below our minimum. */
|
||||
if (entry->addr + entry->size < minimum)
|
||||
return;
|
||||
|
||||
region.start = entry->addr;
|
||||
region.size = entry->size;
|
||||
|
||||
/* Potentially raise address to minimum location. */
|
||||
if (region.start < minimum)
|
||||
region.start = minimum;
|
||||
|
||||
/* Potentially raise address to meet alignment requirements. */
|
||||
region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
|
||||
|
||||
/* Did we raise the address above the bounds of this e820 region? */
|
||||
if (region.start > entry->addr + entry->size)
|
||||
return;
|
||||
|
||||
/* Reduce size by any delta from the original address. */
|
||||
region.size -= region.start - entry->addr;
|
||||
|
||||
/* Reduce maximum size to fit end of image within maximum limit. */
|
||||
if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
|
||||
region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;
|
||||
|
||||
/* Walk each aligned slot and check for avoided areas. */
|
||||
for (img.start = region.start, img.size = image_size ;
|
||||
mem_contains(®ion, &img) ;
|
||||
img.start += CONFIG_PHYSICAL_ALIGN) {
|
||||
if (mem_avoid_overlap(&img))
|
||||
continue;
|
||||
slots_append(img.start);
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long find_random_addr(unsigned long minimum,
|
||||
unsigned long size)
|
||||
{
|
||||
int i;
|
||||
unsigned long addr;
|
||||
|
||||
/* Make sure minimum is aligned. */
|
||||
minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
|
||||
|
||||
/* Verify potential e820 positions, appending to slots list. */
|
||||
for (i = 0; i < real_mode->e820_entries; i++) {
|
||||
process_e820_entry(&real_mode->e820_map[i], minimum, size);
|
||||
}
|
||||
|
||||
return slots_fetch_random();
|
||||
}
|
||||
|
||||
unsigned char *choose_kernel_location(unsigned char *input,
|
||||
unsigned long input_size,
|
||||
unsigned char *output,
|
||||
unsigned long output_size)
|
||||
{
|
||||
unsigned long choice = (unsigned long)output;
|
||||
unsigned long random;
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
if (!cmdline_find_option_bool("kaslr")) {
|
||||
debug_putstr("KASLR disabled by default...\n");
|
||||
goto out;
|
||||
}
|
||||
#else
|
||||
if (cmdline_find_option_bool("nokaslr")) {
|
||||
debug_putstr("KASLR disabled by cmdline...\n");
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Record the various known unsafe memory ranges. */
|
||||
mem_avoid_init((unsigned long)input, input_size,
|
||||
(unsigned long)output, output_size);
|
||||
|
||||
/* Walk e820 and find a random address. */
|
||||
random = find_random_addr(choice, output_size);
|
||||
if (!random) {
|
||||
debug_putstr("KASLR could not find suitable E820 region...\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Always enforce the minimum. */
|
||||
if (random < choice)
|
||||
goto out;
|
||||
|
||||
choice = random;
|
||||
out:
|
||||
return (unsigned char *)choice;
|
||||
}
|
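The starting entropy used by KASLR above is nothing more than a rotate-and-XOR hash over the build string and the boot_params page, which get_random_long() then mixes with RDRAND/RDTSC/i8254 input and a circular multiply. A minimal stand-alone sketch of that rotate_xor() mixing step (hypothetical user-space harness, not part of the kernel sources above):

#include <stdio.h>
#include <string.h>

/* Stand-alone illustration of the rotate-and-XOR mixing used by aslr.c:
 * rotate the running hash left by (word bits - 7), i.e. right by 7,
 * then XOR in the next word of the input area. Test harness only. */
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}
	return hash;
}

int main(void)
{
	/* Stand-in for the UTS_RELEASE/LINUX_COMPILE_* build string. */
	const char build_str[] = "3.10.0 (builder@host) (gcc) #1 SMP";
	unsigned long hash = rotate_xor(0, build_str, sizeof(build_str));

	printf("starting entropy: %#lx\n", hash);
	return 0;
}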
arch/x86/boot/compressed/cmdline.c (new file, 33 lines)
@@ -0,0 +1,33 @@
#include "misc.h"

#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE

static unsigned long fs;
static inline void set_fs(unsigned long seg)
{
	fs = seg << 4;  /* shift it back */
}
typedef unsigned long addr_t;
static inline char rdfs8(addr_t addr)
{
	return *((char *)(fs + addr));
}
#include "../cmdline.c"
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = real_mode->hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)real_mode->ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}
int cmdline_find_option(const char *option, char *buffer, int bufsize)
{
	return __cmdline_find_option(get_cmd_line_ptr(), option, buffer, bufsize);
}
int cmdline_find_option_bool(const char *option)
{
	return __cmdline_find_option_bool(get_cmd_line_ptr(), option);
}

#endif
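cmdline_find_option_bool() above is how aslr.c honours "kaslr"/"nokaslr" on the boot command line. A hedged user-space approximation of such a boolean lookup (the real __cmdline_find_option_bool in arch/x86/boot/cmdline.c is a character-by-character state machine that reads through rdfs8(); this strstr()-based version only illustrates the idea):

#include <stdio.h>
#include <string.h>

/* Returns nonzero if `option` appears as a whole word on the command line.
 * Sketch only: bare flags, no "option=value" handling. */
static int find_option_bool(const char *cmdline, const char *option)
{
	size_t len = strlen(option);
	const char *p = cmdline;

	while ((p = strstr(p, option)) != NULL) {
		int at_start = (p == cmdline) || (p[-1] == ' ');
		int at_end = (p[len] == '\0') || (p[len] == ' ');

		if (at_start && at_end)
			return 1;
		p += len;
	}
	return 0;
}

int main(void)
{
	const char *cmdline = "console=ttyS0 nokaslr quiet";

	printf("nokaslr present: %d\n", find_option_bool(cmdline, "nokaslr"));
	return 0;
}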
arch/x86/boot/compressed/cpuflags.c (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifdef CONFIG_RANDOMIZE_BASE

#include "../cpuflags.c"

bool has_cpuflag(int flag)
{
	get_cpuflags();

	return test_bit(flag, cpu.flags);
}

#endif
arch/x86/boot/compressed/early_serial_console.c (new file, 5 lines)
@@ -0,0 +1,5 @@
#include "misc.h"

int early_serial_base;

#include "../early_serial_console.c"
arch/x86/boot/compressed/eboot.c (new file, 1514 lines)
(File diff suppressed because it is too large.)
arch/x86/boot/compressed/eboot.h (new file, 122 lines)
@@ -0,0 +1,122 @@
#ifndef BOOT_COMPRESSED_EBOOT_H
|
||||
#define BOOT_COMPRESSED_EBOOT_H
|
||||
|
||||
#define SEG_TYPE_DATA (0 << 3)
|
||||
#define SEG_TYPE_READ_WRITE (1 << 1)
|
||||
#define SEG_TYPE_CODE (1 << 3)
|
||||
#define SEG_TYPE_EXEC_READ (1 << 1)
|
||||
#define SEG_TYPE_TSS ((1 << 3) | (1 << 0))
|
||||
#define SEG_OP_SIZE_32BIT (1 << 0)
|
||||
#define SEG_GRANULARITY_4KB (1 << 0)
|
||||
|
||||
#define DESC_TYPE_CODE_DATA (1 << 0)
|
||||
|
||||
#define EFI_CONSOLE_OUT_DEVICE_GUID \
|
||||
EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, \
|
||||
0x3f, 0xc1, 0x4d)
|
||||
|
||||
#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
|
||||
#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
|
||||
#define PIXEL_BIT_MASK 2
|
||||
#define PIXEL_BLT_ONLY 3
|
||||
#define PIXEL_FORMAT_MAX 4
|
||||
|
||||
struct efi_pixel_bitmask {
|
||||
u32 red_mask;
|
||||
u32 green_mask;
|
||||
u32 blue_mask;
|
||||
u32 reserved_mask;
|
||||
};
|
||||
|
||||
struct efi_graphics_output_mode_info {
|
||||
u32 version;
|
||||
u32 horizontal_resolution;
|
||||
u32 vertical_resolution;
|
||||
int pixel_format;
|
||||
struct efi_pixel_bitmask pixel_information;
|
||||
u32 pixels_per_scan_line;
|
||||
} __packed;
|
||||
|
||||
struct efi_graphics_output_protocol_mode_32 {
|
||||
u32 max_mode;
|
||||
u32 mode;
|
||||
u32 info;
|
||||
u32 size_of_info;
|
||||
u64 frame_buffer_base;
|
||||
u32 frame_buffer_size;
|
||||
} __packed;
|
||||
|
||||
struct efi_graphics_output_protocol_mode_64 {
|
||||
u32 max_mode;
|
||||
u32 mode;
|
||||
u64 info;
|
||||
u64 size_of_info;
|
||||
u64 frame_buffer_base;
|
||||
u64 frame_buffer_size;
|
||||
} __packed;
|
||||
|
||||
struct efi_graphics_output_protocol_mode {
|
||||
u32 max_mode;
|
||||
u32 mode;
|
||||
unsigned long info;
|
||||
unsigned long size_of_info;
|
||||
u64 frame_buffer_base;
|
||||
unsigned long frame_buffer_size;
|
||||
} __packed;
|
||||
|
||||
struct efi_graphics_output_protocol_32 {
|
||||
u32 query_mode;
|
||||
u32 set_mode;
|
||||
u32 blt;
|
||||
u32 mode;
|
||||
};
|
||||
|
||||
struct efi_graphics_output_protocol_64 {
|
||||
u64 query_mode;
|
||||
u64 set_mode;
|
||||
u64 blt;
|
||||
u64 mode;
|
||||
};
|
||||
|
||||
struct efi_graphics_output_protocol {
|
||||
void *query_mode;
|
||||
unsigned long set_mode;
|
||||
unsigned long blt;
|
||||
struct efi_graphics_output_protocol_mode *mode;
|
||||
};
|
||||
|
||||
struct efi_uga_draw_protocol_32 {
|
||||
u32 get_mode;
|
||||
u32 set_mode;
|
||||
u32 blt;
|
||||
};
|
||||
|
||||
struct efi_uga_draw_protocol_64 {
|
||||
u64 get_mode;
|
||||
u64 set_mode;
|
||||
u64 blt;
|
||||
};
|
||||
|
||||
struct efi_uga_draw_protocol {
|
||||
void *get_mode;
|
||||
void *set_mode;
|
||||
void *blt;
|
||||
};
|
||||
|
||||
struct efi_config {
|
||||
u64 image_handle;
|
||||
u64 table;
|
||||
u64 allocate_pool;
|
||||
u64 allocate_pages;
|
||||
u64 get_memory_map;
|
||||
u64 free_pool;
|
||||
u64 free_pages;
|
||||
u64 locate_handle;
|
||||
u64 handle_protocol;
|
||||
u64 exit_boot_services;
|
||||
u64 text_output;
|
||||
efi_status_t (*call)(unsigned long, ...);
|
||||
bool is64;
|
||||
} __packed;
|
||||
|
||||
#endif /* BOOT_COMPRESSED_EBOOT_H */
|
arch/x86/boot/compressed/efi_stub_32.S (new file, 86 lines)
@@ -0,0 +1,86 @@
/*
|
||||
* EFI call stub for IA32.
|
||||
*
|
||||
* This stub allows us to make EFI calls in physical mode with interrupts
|
||||
* turned off. Note that this implementation is different from the one in
|
||||
* arch/x86/platform/efi/efi_stub_32.S because we're _already_ in physical
|
||||
* mode at this point.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/page_types.h>
|
||||
|
||||
/*
|
||||
* efi_call_phys(void *, ...) is a function with variable parameters.
|
||||
* All the callers of this function assure that all the parameters are 4-bytes.
|
||||
*/
|
||||
|
||||
/*
|
||||
* In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save.
|
||||
* So we'd better save all of them at the beginning of this function and restore
|
||||
* at the end no matter how many we use, because we can not assure EFI runtime
|
||||
* service functions will comply with gcc calling convention, too.
|
||||
*/
|
||||
|
||||
.text
|
||||
ENTRY(efi_call_phys)
|
||||
/*
|
||||
* 0. The function can only be called in Linux kernel. So CS has been
|
||||
* set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
|
||||
* the values of these registers are the same. And, the corresponding
|
||||
* GDT entries are identical. So I will do nothing about segment reg
|
||||
* and GDT, but change GDT base register in prelog and epilog.
|
||||
*/
|
||||
|
||||
/*
|
||||
* 1. Because we haven't been relocated by this point we need to
|
||||
* use relative addressing.
|
||||
*/
|
||||
call 1f
|
||||
1: popl %edx
|
||||
subl $1b, %edx
|
||||
|
||||
/*
|
||||
* 2. Now on the top of stack is the return
|
||||
* address in the caller of efi_call_phys(), then parameter 1,
|
||||
* parameter 2, ..., param n. To make things easy, we save the return
|
||||
* address of efi_call_phys in a global variable.
|
||||
*/
|
||||
popl %ecx
|
||||
movl %ecx, saved_return_addr(%edx)
|
||||
/* get the function pointer into ECX*/
|
||||
popl %ecx
|
||||
movl %ecx, efi_rt_function_ptr(%edx)
|
||||
|
||||
/*
|
||||
* 3. Call the physical function.
|
||||
*/
|
||||
call *%ecx
|
||||
|
||||
/*
|
||||
* 4. Balance the stack. And because EAX contain the return value,
|
||||
* we'd better not clobber it. We need to calculate our address
|
||||
* again because %ecx and %edx are not preserved across EFI function
|
||||
* calls.
|
||||
*/
|
||||
call 1f
|
||||
1: popl %edx
|
||||
subl $1b, %edx
|
||||
|
||||
movl efi_rt_function_ptr(%edx), %ecx
|
||||
pushl %ecx
|
||||
|
||||
/*
|
||||
* 10. Push the saved return address onto the stack and return.
|
||||
*/
|
||||
movl saved_return_addr(%edx), %ecx
|
||||
pushl %ecx
|
||||
ret
|
||||
ENDPROC(efi_call_phys)
|
||||
.previous
|
||||
|
||||
.data
|
||||
saved_return_addr:
|
||||
.long 0
|
||||
efi_rt_function_ptr:
|
||||
.long 0
|
arch/x86/boot/compressed/efi_stub_64.S (new file, 5 lines)
@@ -0,0 +1,5 @@
#include <asm/segment.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>

#include "../../platform/efi/efi_stub_64.S"
arch/x86/boot/compressed/efi_thunk_64.S (new file, 196 lines)
@@ -0,0 +1,196 @@
/*
|
||||
* Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
|
||||
*
|
||||
* Early support for invoking 32-bit EFI services from a 64-bit kernel.
|
||||
*
|
||||
* Because this thunking occurs before ExitBootServices() we have to
|
||||
* restore the firmware's 32-bit GDT before we make EFI service calls,
|
||||
* since the firmware's 32-bit IDT is still currently installed and it
|
||||
* needs to be able to service interrupts.
|
||||
*
|
||||
* On the plus side, we don't have to worry about mangling 64-bit
|
||||
* addresses into 32-bits because we're executing with an identity
|
||||
* mapped pagetable and haven't transitioned to 64-bit virtual addresses
|
||||
* yet.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/page_types.h>
|
||||
#include <asm/processor-flags.h>
|
||||
#include <asm/segment.h>
|
||||
|
||||
.code64
|
||||
.text
|
||||
ENTRY(efi64_thunk)
|
||||
push %rbp
|
||||
push %rbx
|
||||
|
||||
subq $8, %rsp
|
||||
leaq efi_exit32(%rip), %rax
|
||||
movl %eax, 4(%rsp)
|
||||
leaq efi_gdt64(%rip), %rax
|
||||
movl %eax, (%rsp)
|
||||
movl %eax, 2(%rax) /* Fixup the gdt base address */
|
||||
|
||||
movl %ds, %eax
|
||||
push %rax
|
||||
movl %es, %eax
|
||||
push %rax
|
||||
movl %ss, %eax
|
||||
push %rax
|
||||
|
||||
/*
|
||||
* Convert x86-64 ABI params to i386 ABI
|
||||
*/
|
||||
subq $32, %rsp
|
||||
movl %esi, 0x0(%rsp)
|
||||
movl %edx, 0x4(%rsp)
|
||||
movl %ecx, 0x8(%rsp)
|
||||
movq %r8, %rsi
|
||||
movl %esi, 0xc(%rsp)
|
||||
movq %r9, %rsi
|
||||
movl %esi, 0x10(%rsp)
|
||||
|
||||
sgdt save_gdt(%rip)
|
||||
|
||||
leaq 1f(%rip), %rbx
|
||||
movq %rbx, func_rt_ptr(%rip)
|
||||
|
||||
/*
|
||||
* Switch to gdt with 32-bit segments. This is the firmware GDT
|
||||
* that was installed when the kernel started executing. This
|
||||
* pointer was saved at the EFI stub entry point in head_64.S.
|
||||
*/
|
||||
leaq efi32_boot_gdt(%rip), %rax
|
||||
lgdt (%rax)
|
||||
|
||||
pushq $__KERNEL_CS
|
||||
leaq efi_enter32(%rip), %rax
|
||||
pushq %rax
|
||||
lretq
|
||||
|
||||
1: addq $32, %rsp
|
||||
|
||||
lgdt save_gdt(%rip)
|
||||
|
||||
pop %rbx
|
||||
movl %ebx, %ss
|
||||
pop %rbx
|
||||
movl %ebx, %es
|
||||
pop %rbx
|
||||
movl %ebx, %ds
|
||||
|
||||
/*
|
||||
* Convert 32-bit status code into 64-bit.
|
||||
*/
|
||||
test %rax, %rax
|
||||
jz 1f
|
||||
movl %eax, %ecx
|
||||
andl $0x0fffffff, %ecx
|
||||
andl $0xf0000000, %eax
|
||||
shl $32, %rax
|
||||
or %rcx, %rax
|
||||
1:
|
||||
addq $8, %rsp
|
||||
pop %rbx
|
||||
pop %rbp
|
||||
ret
|
||||
ENDPROC(efi64_thunk)
|
||||
|
||||
ENTRY(efi_exit32)
|
||||
movq func_rt_ptr(%rip), %rax
|
||||
push %rax
|
||||
mov %rdi, %rax
|
||||
ret
|
||||
ENDPROC(efi_exit32)
|
||||
|
||||
.code32
|
||||
/*
|
||||
* EFI service pointer must be in %edi.
|
||||
*
|
||||
* The stack should represent the 32-bit calling convention.
|
||||
*/
|
||||
ENTRY(efi_enter32)
|
||||
movl $__KERNEL_DS, %eax
|
||||
movl %eax, %ds
|
||||
movl %eax, %es
|
||||
movl %eax, %ss
|
||||
|
||||
/* Reload pgtables */
|
||||
movl %cr3, %eax
|
||||
movl %eax, %cr3
|
||||
|
||||
/* Disable paging */
|
||||
movl %cr0, %eax
|
||||
btrl $X86_CR0_PG_BIT, %eax
|
||||
movl %eax, %cr0
|
||||
|
||||
/* Disable long mode via EFER */
|
||||
movl $MSR_EFER, %ecx
|
||||
rdmsr
|
||||
btrl $_EFER_LME, %eax
|
||||
wrmsr
|
||||
|
||||
call *%edi
|
||||
|
||||
/* We must preserve return value */
|
||||
movl %eax, %edi
|
||||
|
||||
/*
|
||||
* Some firmware will return with interrupts enabled. Be sure to
|
||||
* disable them before we switch GDTs.
|
||||
*/
|
||||
cli
|
||||
|
||||
movl 56(%esp), %eax
|
||||
movl %eax, 2(%eax)
|
||||
lgdtl (%eax)
|
||||
|
||||
movl %cr4, %eax
|
||||
btsl $(X86_CR4_PAE_BIT), %eax
|
||||
movl %eax, %cr4
|
||||
|
||||
movl %cr3, %eax
|
||||
movl %eax, %cr3
|
||||
|
||||
movl $MSR_EFER, %ecx
|
||||
rdmsr
|
||||
btsl $_EFER_LME, %eax
|
||||
wrmsr
|
||||
|
||||
xorl %eax, %eax
|
||||
lldt %ax
|
||||
|
||||
movl 60(%esp), %eax
|
||||
pushl $__KERNEL_CS
|
||||
pushl %eax
|
||||
|
||||
/* Enable paging */
|
||||
movl %cr0, %eax
|
||||
btsl $X86_CR0_PG_BIT, %eax
|
||||
movl %eax, %cr0
|
||||
lret
|
||||
ENDPROC(efi_enter32)
|
||||
|
||||
.data
|
||||
.balign 8
|
||||
.global efi32_boot_gdt
|
||||
efi32_boot_gdt: .word 0
|
||||
.quad 0
|
||||
|
||||
save_gdt: .word 0
|
||||
.quad 0
|
||||
func_rt_ptr: .quad 0
|
||||
|
||||
.global efi_gdt64
|
||||
efi_gdt64:
|
||||
.word efi_gdt64_end - efi_gdt64
|
||||
.long 0 /* Filled out by user */
|
||||
.word 0
|
||||
.quad 0x0000000000000000 /* NULL descriptor */
|
||||
.quad 0x00af9a000000ffff /* __KERNEL_CS */
|
||||
.quad 0x00cf92000000ffff /* __KERNEL_DS */
|
||||
.quad 0x0080890000000000 /* TS descriptor */
|
||||
.quad 0x0000000000000000 /* TS continued */
|
||||
efi_gdt64_end:
|
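After the 32-bit EFI service returns, efi64_thunk above widens the 32-bit status in %eax into the 64-bit EFI_STATUS encoding by moving the top nibble (which carries the error bit) up to the top of the 64-bit word while keeping the low 28 bits. A stand-alone C sketch of that same conversion, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the andl/shl/or sequence in efi64_thunk: zero stays zero,
 * otherwise bits 31..28 of the 32-bit status become bits 63..60. */
static uint64_t efi32_status_to_efi64(uint32_t status32)
{
	uint64_t status = status32;

	if (status == 0)
		return 0;
	return ((status & 0xf0000000ULL) << 32) | (status & 0x0fffffff);
}

int main(void)
{
	/* EFI_INVALID_PARAMETER is 0x80000002 on 32-bit firmware. */
	printf("0x%016llx\n",
	       (unsigned long long)efi32_status_to_efi64(0x80000002u));
	return 0;
}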
arch/x86/boot/compressed/head_32.S (new file, 247 lines)
@@ -0,0 +1,247 @@
/*
|
||||
* linux/boot/head.S
|
||||
*
|
||||
* Copyright (C) 1991, 1992, 1993 Linus Torvalds
|
||||
*/
|
||||
|
||||
/*
|
||||
* head.S contains the 32-bit startup code.
|
||||
*
|
||||
* NOTE!!! Startup happens at absolute address 0x00001000, which is also where
|
||||
* the page directory will exist. The startup code will be overwritten by
|
||||
* the page directory. [According to comments etc elsewhere on a compressed
|
||||
* kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
|
||||
*
|
||||
* Page 0 is deliberately kept safe, since System Management Mode code in
|
||||
* laptops may need to access the BIOS data stored there. This is also
|
||||
* useful for future device drivers that either access the BIOS via VM86
|
||||
* mode.
|
||||
*/
|
||||
|
||||
/*
|
||||
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
|
||||
*/
|
||||
.text
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page_types.h>
|
||||
#include <asm/boot.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
__HEAD
|
||||
ENTRY(startup_32)
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
jmp preferred_addr
|
||||
|
||||
/*
|
||||
* We don't need the return address, so set up the stack so
|
||||
* efi_main() can find its arguments.
|
||||
*/
|
||||
ENTRY(efi_pe_entry)
|
||||
add $0x4, %esp
|
||||
|
||||
call 1f
|
||||
1: popl %esi
|
||||
subl $1b, %esi
|
||||
|
||||
popl %ecx
|
||||
movl %ecx, efi32_config(%esi) /* Handle */
|
||||
popl %ecx
|
||||
movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */
|
||||
|
||||
/* Relocate efi_config->call() */
|
||||
leal efi32_config(%esi), %eax
|
||||
add %esi, 88(%eax)
|
||||
pushl %eax
|
||||
|
||||
call make_boot_params
|
||||
cmpl $0, %eax
|
||||
je fail
|
||||
movl %esi, BP_code32_start(%eax)
|
||||
popl %ecx
|
||||
pushl %eax
|
||||
pushl %ecx
|
||||
jmp 2f /* Skip efi_config initialization */
|
||||
|
||||
ENTRY(efi32_stub_entry)
|
||||
add $0x4, %esp
|
||||
popl %ecx
|
||||
popl %edx
|
||||
|
||||
call 1f
|
||||
1: popl %esi
|
||||
subl $1b, %esi
|
||||
|
||||
movl %ecx, efi32_config(%esi) /* Handle */
|
||||
movl %edx, efi32_config+8(%esi) /* EFI System table pointer */
|
||||
|
||||
/* Relocate efi_config->call() */
|
||||
leal efi32_config(%esi), %eax
|
||||
add %esi, 88(%eax)
|
||||
pushl %eax
|
||||
2:
|
||||
call efi_main
|
||||
cmpl $0, %eax
|
||||
movl %eax, %esi
|
||||
jne 2f
|
||||
fail:
|
||||
/* EFI init failed, so hang. */
|
||||
hlt
|
||||
jmp fail
|
||||
2:
|
||||
movl BP_code32_start(%esi), %eax
|
||||
leal preferred_addr(%eax), %eax
|
||||
jmp *%eax
|
||||
|
||||
preferred_addr:
|
||||
#endif
|
||||
cld
|
||||
/*
|
||||
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
|
||||
* us to not reload segments
|
||||
*/
|
||||
testb $(1<<6), BP_loadflags(%esi)
|
||||
jnz 1f
|
||||
|
||||
cli
|
||||
movl $__BOOT_DS, %eax
|
||||
movl %eax, %ds
|
||||
movl %eax, %es
|
||||
movl %eax, %fs
|
||||
movl %eax, %gs
|
||||
movl %eax, %ss
|
||||
1:
|
||||
|
||||
/*
|
||||
* Calculate the delta between where we were compiled to run
|
||||
* at and where we were actually loaded at. This can only be done
|
||||
* with a short local call on x86. Nothing else will tell us what
|
||||
* address we are running at. The reserved chunk of the real-mode
|
||||
* data at 0x1e4 (defined as a scratch field) is used as the stack
|
||||
* for this calculation. Only 4 bytes are needed.
|
||||
*/
|
||||
leal (BP_scratch+4)(%esi), %esp
|
||||
call 1f
|
||||
1: popl %ebp
|
||||
subl $1b, %ebp
|
||||
|
||||
/*
|
||||
* %ebp contains the address we are loaded at by the boot loader and %ebx
|
||||
* contains the address where we should move the kernel image temporarily
|
||||
* for safe in-place decompression.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
movl %ebp, %ebx
|
||||
movl BP_kernel_alignment(%esi), %eax
|
||||
decl %eax
|
||||
addl %eax, %ebx
|
||||
notl %eax
|
||||
andl %eax, %ebx
|
||||
cmpl $LOAD_PHYSICAL_ADDR, %ebx
|
||||
jge 1f
|
||||
#endif
|
||||
movl $LOAD_PHYSICAL_ADDR, %ebx
|
||||
1:
|
||||
|
||||
/* Target address to relocate to for decompression */
|
||||
addl $z_extract_offset, %ebx
|
||||
|
||||
/* Set up the stack */
|
||||
leal boot_stack_end(%ebx), %esp
|
||||
|
||||
/* Zero EFLAGS */
|
||||
pushl $0
|
||||
popfl
|
||||
|
||||
/*
|
||||
* Copy the compressed kernel to the end of our buffer
|
||||
* where decompression in place becomes safe.
|
||||
*/
|
||||
pushl %esi
|
||||
leal (_bss-4)(%ebp), %esi
|
||||
leal (_bss-4)(%ebx), %edi
|
||||
movl $(_bss - startup_32), %ecx
|
||||
shrl $2, %ecx
|
||||
std
|
||||
rep movsl
|
||||
cld
|
||||
popl %esi
|
||||
|
||||
/*
|
||||
* Jump to the relocated address.
|
||||
*/
|
||||
leal relocated(%ebx), %eax
|
||||
jmp *%eax
|
||||
ENDPROC(startup_32)
|
||||
|
||||
.text
|
||||
relocated:
|
||||
|
||||
/*
|
||||
* Clear BSS (stack is currently empty)
|
||||
*/
|
||||
xorl %eax, %eax
|
||||
leal _bss(%ebx), %edi
|
||||
leal _ebss(%ebx), %ecx
|
||||
subl %edi, %ecx
|
||||
shrl $2, %ecx
|
||||
rep stosl
|
||||
|
||||
/*
|
||||
* Adjust our own GOT
|
||||
*/
|
||||
leal _got(%ebx), %edx
|
||||
leal _egot(%ebx), %ecx
|
||||
1:
|
||||
cmpl %ecx, %edx
|
||||
jae 2f
|
||||
addl %ebx, (%edx)
|
||||
addl $4, %edx
|
||||
jmp 1b
|
||||
2:
|
||||
|
||||
/*
|
||||
* Do the decompression, and jump to the new kernel..
|
||||
*/
|
||||
/* push arguments for decompress_kernel: */
|
||||
pushl $z_run_size /* size of kernel with .bss and .brk */
|
||||
pushl $z_output_len /* decompressed length, end of relocs */
|
||||
leal z_extract_offset_negative(%ebx), %ebp
|
||||
pushl %ebp /* output address */
|
||||
pushl $z_input_len /* input_len */
|
||||
leal input_data(%ebx), %eax
|
||||
pushl %eax /* input_data */
|
||||
leal boot_heap(%ebx), %eax
|
||||
pushl %eax /* heap area */
|
||||
pushl %esi /* real mode pointer */
|
||||
call decompress_kernel /* returns kernel location in %eax */
|
||||
addl $28, %esp
|
||||
|
||||
/*
|
||||
* Jump to the decompressed kernel.
|
||||
*/
|
||||
xorl %ebx, %ebx
|
||||
jmp *%eax
|
||||
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
.data
|
||||
efi32_config:
|
||||
.fill 11,8,0
|
||||
.long efi_call_phys
|
||||
.long 0
|
||||
.byte 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Stack and heap for uncompression
|
||||
*/
|
||||
.bss
|
||||
.balign 4
|
||||
boot_heap:
|
||||
.fill BOOT_HEAP_SIZE, 1, 0
|
||||
boot_stack:
|
||||
.fill BOOT_STACK_SIZE, 1, 0
|
||||
boot_stack_end:
|
arch/x86/boot/compressed/head_64.S (new file, 479 lines)
@@ -0,0 +1,479 @@
/*
|
||||
* linux/boot/head.S
|
||||
*
|
||||
* Copyright (C) 1991, 1992, 1993 Linus Torvalds
|
||||
*/
|
||||
|
||||
/*
|
||||
* head.S contains the 32-bit startup code.
|
||||
*
|
||||
* NOTE!!! Startup happens at absolute address 0x00001000, which is also where
|
||||
* the page directory will exist. The startup code will be overwritten by
|
||||
* the page directory. [According to comments etc elsewhere on a compressed
|
||||
* kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
|
||||
*
|
||||
* Page 0 is deliberately kept safe, since System Management Mode code in
|
||||
* laptops may need to access the BIOS data stored there. This is also
|
||||
* useful for future device drivers that either access the BIOS via VM86
|
||||
* mode.
|
||||
*/
|
||||
|
||||
/*
|
||||
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
|
||||
*/
|
||||
.code32
|
||||
.text
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/boot.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/processor-flags.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
__HEAD
|
||||
.code32
|
||||
ENTRY(startup_32)
|
||||
/*
|
||||
* 32bit entry is 0 and it is ABI so immutable!
|
||||
* If we come here directly from a bootloader,
|
||||
* kernel(text+data+bss+brk) ramdisk, zero_page, command line
|
||||
* all need to be under the 4G limit.
|
||||
*/
|
||||
cld
|
||||
/*
|
||||
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
|
||||
* us to not reload segments
|
||||
*/
|
||||
testb $(1<<6), BP_loadflags(%esi)
|
||||
jnz 1f
|
||||
|
||||
cli
|
||||
movl $(__BOOT_DS), %eax
|
||||
movl %eax, %ds
|
||||
movl %eax, %es
|
||||
movl %eax, %ss
|
||||
1:
|
||||
|
||||
/*
|
||||
* Calculate the delta between where we were compiled to run
|
||||
* at and where we were actually loaded at. This can only be done
|
||||
* with a short local call on x86. Nothing else will tell us what
|
||||
* address we are running at. The reserved chunk of the real-mode
|
||||
* data at 0x1e4 (defined as a scratch field) is used as the stack
|
||||
* for this calculation. Only 4 bytes are needed.
|
||||
*/
|
||||
leal (BP_scratch+4)(%esi), %esp
|
||||
call 1f
|
||||
1: popl %ebp
|
||||
subl $1b, %ebp
|
||||
|
||||
/* setup a stack and make sure cpu supports long mode. */
|
||||
movl $boot_stack_end, %eax
|
||||
addl %ebp, %eax
|
||||
movl %eax, %esp
|
||||
|
||||
call verify_cpu
|
||||
testl %eax, %eax
|
||||
jnz no_longmode
|
||||
|
||||
/*
|
||||
* Compute the delta between where we were compiled to run at
|
||||
* and where the code will actually run at.
|
||||
*
|
||||
* %ebp contains the address we are loaded at by the boot loader and %ebx
|
||||
* contains the address where we should move the kernel image temporarily
|
||||
* for safe in-place decompression.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
movl %ebp, %ebx
|
||||
movl BP_kernel_alignment(%esi), %eax
|
||||
decl %eax
|
||||
addl %eax, %ebx
|
||||
notl %eax
|
||||
andl %eax, %ebx
|
||||
cmpl $LOAD_PHYSICAL_ADDR, %ebx
|
||||
jge 1f
|
||||
#endif
|
||||
movl $LOAD_PHYSICAL_ADDR, %ebx
|
||||
1:
|
||||
|
||||
/* Target address to relocate to for decompression */
|
||||
addl $z_extract_offset, %ebx
|
||||
|
||||
/*
|
||||
* Prepare for entering 64 bit mode
|
||||
*/
|
||||
|
||||
/* Load new GDT with the 64bit segments using 32bit descriptor */
|
||||
leal gdt(%ebp), %eax
|
||||
movl %eax, gdt+2(%ebp)
|
||||
lgdt gdt(%ebp)
|
||||
|
||||
/* Enable PAE mode */
|
||||
movl %cr4, %eax
|
||||
orl $X86_CR4_PAE, %eax
|
||||
movl %eax, %cr4
|
||||
|
||||
/*
|
||||
* Build early 4G boot pagetable
|
||||
*/
|
||||
/* Initialize Page tables to 0 */
|
||||
leal pgtable(%ebx), %edi
|
||||
xorl %eax, %eax
|
||||
movl $((4096*6)/4), %ecx
|
||||
rep stosl
|
||||
|
||||
/* Build Level 4 */
|
||||
leal pgtable + 0(%ebx), %edi
|
||||
leal 0x1007 (%edi), %eax
|
||||
movl %eax, 0(%edi)
|
||||
|
||||
/* Build Level 3 */
|
||||
leal pgtable + 0x1000(%ebx), %edi
|
||||
leal 0x1007(%edi), %eax
|
||||
movl $4, %ecx
|
||||
1: movl %eax, 0x00(%edi)
|
||||
addl $0x00001000, %eax
|
||||
addl $8, %edi
|
||||
decl %ecx
|
||||
jnz 1b
|
||||
|
||||
/* Build Level 2 */
|
||||
leal pgtable + 0x2000(%ebx), %edi
|
||||
movl $0x00000183, %eax
|
||||
movl $2048, %ecx
|
||||
1: movl %eax, 0(%edi)
|
||||
addl $0x00200000, %eax
|
||||
addl $8, %edi
|
||||
decl %ecx
|
||||
jnz 1b
|
||||
|
||||
/* Enable the boot page tables */
|
||||
leal pgtable(%ebx), %eax
|
||||
movl %eax, %cr3
|
||||
|
||||
/* Enable Long mode in EFER (Extended Feature Enable Register) */
|
||||
movl $MSR_EFER, %ecx
|
||||
rdmsr
|
||||
btsl $_EFER_LME, %eax
|
||||
wrmsr
|
||||
|
||||
/* After gdt is loaded */
|
||||
xorl %eax, %eax
|
||||
lldt %ax
|
||||
movl $0x20, %eax
|
||||
ltr %ax
|
||||
|
||||
/*
|
||||
* Setup for the jump to 64bit mode
|
||||
*
|
||||
* When the jump is performed we will be in long mode but
|
||||
* in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
|
||||
* (and in turn EFER.LMA = 1). To jump into 64bit mode we use
|
||||
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
|
||||
* We place all of the values on our mini stack so lret can
|
||||
* be used to perform that far jump.
|
||||
*/
|
||||
pushl $__KERNEL_CS
|
||||
leal startup_64(%ebp), %eax
|
||||
#ifdef CONFIG_EFI_MIXED
|
||||
movl efi32_config(%ebp), %ebx
|
||||
cmp $0, %ebx
|
||||
jz 1f
|
||||
leal handover_entry(%ebp), %eax
|
||||
1:
|
||||
#endif
|
||||
pushl %eax
|
||||
|
||||
/* Enter paged protected Mode, activating Long Mode */
|
||||
movl $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
|
||||
movl %eax, %cr0
|
||||
|
||||
/* Jump from 32bit compatibility mode into 64bit mode. */
|
||||
lret
|
||||
ENDPROC(startup_32)
|
||||
|
||||
#ifdef CONFIG_EFI_MIXED
|
||||
.org 0x190
|
||||
ENTRY(efi32_stub_entry)
|
||||
add $0x4, %esp /* Discard return address */
|
||||
popl %ecx
|
||||
popl %edx
|
||||
popl %esi
|
||||
|
||||
leal (BP_scratch+4)(%esi), %esp
|
||||
call 1f
|
||||
1: pop %ebp
|
||||
subl $1b, %ebp
|
||||
|
||||
movl %ecx, efi32_config(%ebp)
|
||||
movl %edx, efi32_config+8(%ebp)
|
||||
sgdtl efi32_boot_gdt(%ebp)
|
||||
|
||||
leal efi32_config(%ebp), %eax
|
||||
movl %eax, efi_config(%ebp)
|
||||
|
||||
jmp startup_32
|
||||
ENDPROC(efi32_stub_entry)
|
||||
#endif
|
||||
|
||||
.code64
|
||||
.org 0x200
|
||||
ENTRY(startup_64)
|
||||
/*
|
||||
* 64bit entry is 0x200 and it is ABI so immutable!
|
||||
* We come here either from startup_32 or directly from a
|
||||
* 64bit bootloader.
|
||||
* If we come here from a bootloader, kernel(text+data+bss+brk),
|
||||
* ramdisk, zero_page, command line could be above 4G.
|
||||
* We depend on an identity mapped page table being provided
|
||||
* that maps our entire kernel(text+data+bss+brk), zero page
|
||||
* and command line.
|
||||
*/
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
/*
|
||||
* The entry point for the PE/COFF executable is efi_pe_entry, so
|
||||
* only legacy boot loaders will execute this jmp.
|
||||
*/
|
||||
jmp preferred_addr
|
||||
|
||||
ENTRY(efi_pe_entry)
|
||||
movq %rcx, efi64_config(%rip) /* Handle */
|
||||
movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
|
||||
|
||||
leaq efi64_config(%rip), %rax
|
||||
movq %rax, efi_config(%rip)
|
||||
|
||||
call 1f
|
||||
1: popq %rbp
|
||||
subq $1b, %rbp
|
||||
|
||||
/*
|
||||
* Relocate efi_config->call().
|
||||
*/
|
||||
addq %rbp, efi64_config+88(%rip)
|
||||
|
||||
movq %rax, %rdi
|
||||
call make_boot_params
|
||||
cmpq $0,%rax
|
||||
je fail
|
||||
mov %rax, %rsi
|
||||
leaq startup_32(%rip), %rax
|
||||
movl %eax, BP_code32_start(%rsi)
|
||||
jmp 2f /* Skip the relocation */
|
||||
|
||||
handover_entry:
|
||||
call 1f
|
||||
1: popq %rbp
|
||||
subq $1b, %rbp
|
||||
|
||||
/*
|
||||
* Relocate efi_config->call().
|
||||
*/
|
||||
movq efi_config(%rip), %rax
|
||||
addq %rbp, 88(%rax)
|
||||
2:
|
||||
movq efi_config(%rip), %rdi
|
||||
call efi_main
|
||||
movq %rax,%rsi
|
||||
cmpq $0,%rax
|
||||
jne 2f
|
||||
fail:
|
||||
/* EFI init failed, so hang. */
|
||||
hlt
|
||||
jmp fail
|
||||
2:
|
||||
movl BP_code32_start(%esi), %eax
|
||||
leaq preferred_addr(%rax), %rax
|
||||
jmp *%rax
|
||||
|
||||
preferred_addr:
|
||||
#endif
|
||||
|
||||
/* Setup data segments. */
|
||||
xorl %eax, %eax
|
||||
movl %eax, %ds
|
||||
movl %eax, %es
|
||||
movl %eax, %ss
|
||||
movl %eax, %fs
|
||||
movl %eax, %gs
|
||||
|
||||
/*
|
||||
* Compute the decompressed kernel start address. It is where
|
||||
* we were loaded at aligned to a 2M boundary. %rbp contains the
|
||||
* decompressed kernel start address.
|
||||
*
|
||||
* If it is a relocatable kernel then decompress and run the kernel
|
||||
* from load address aligned to 2MB addr, otherwise decompress and
|
||||
* run the kernel from LOAD_PHYSICAL_ADDR
|
||||
*
|
||||
* We cannot rely on the calculation done in 32-bit mode, since we
|
||||
* may have been invoked via the 64-bit entry point.
|
||||
*/
|
||||
|
||||
/* Start with the delta to where the kernel will run at. */
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
leaq startup_32(%rip) /* - $startup_32 */, %rbp
|
||||
movl BP_kernel_alignment(%rsi), %eax
|
||||
decl %eax
|
||||
addq %rax, %rbp
|
||||
notq %rax
|
||||
andq %rax, %rbp
|
||||
cmpq $LOAD_PHYSICAL_ADDR, %rbp
|
||||
jge 1f
|
||||
#endif
|
||||
movq $LOAD_PHYSICAL_ADDR, %rbp
|
||||
1:
|
||||
|
||||
/* Target address to relocate to for decompression */
|
||||
leaq z_extract_offset(%rbp), %rbx
|
||||
|
||||
/* Set up the stack */
|
||||
leaq boot_stack_end(%rbx), %rsp
|
||||
|
||||
/* Zero EFLAGS */
|
||||
pushq $0
|
||||
popfq
|
||||
|
||||
/*
|
||||
* Copy the compressed kernel to the end of our buffer
|
||||
* where decompression in place becomes safe.
|
||||
*/
|
||||
pushq %rsi
|
||||
leaq (_bss-8)(%rip), %rsi
|
||||
leaq (_bss-8)(%rbx), %rdi
|
||||
movq $_bss /* - $startup_32 */, %rcx
|
||||
shrq $3, %rcx
|
||||
std
|
||||
rep movsq
|
||||
cld
|
||||
popq %rsi
|
||||
|
||||
/*
|
||||
* Jump to the relocated address.
|
||||
*/
|
||||
leaq relocated(%rbx), %rax
|
||||
jmp *%rax
|
||||
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
.org 0x390
|
||||
ENTRY(efi64_stub_entry)
|
||||
movq %rdi, efi64_config(%rip) /* Handle */
|
||||
movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */
|
||||
|
||||
leaq efi64_config(%rip), %rax
|
||||
movq %rax, efi_config(%rip)
|
||||
|
||||
movq %rdx, %rsi
|
||||
jmp handover_entry
|
||||
ENDPROC(efi64_stub_entry)
|
||||
#endif
|
||||
|
||||
.text
|
||||
relocated:
|
||||
|
||||
/*
|
||||
* Clear BSS (stack is currently empty)
|
||||
*/
|
||||
xorl %eax, %eax
|
||||
leaq _bss(%rip), %rdi
|
||||
leaq _ebss(%rip), %rcx
|
||||
subq %rdi, %rcx
|
||||
shrq $3, %rcx
|
||||
rep stosq
|
||||
|
||||
/*
|
||||
* Adjust our own GOT
|
||||
*/
|
||||
leaq _got(%rip), %rdx
|
||||
leaq _egot(%rip), %rcx
|
||||
1:
|
||||
cmpq %rcx, %rdx
|
||||
jae 2f
|
||||
addq %rbx, (%rdx)
|
||||
addq $8, %rdx
|
||||
jmp 1b
|
||||
2:
|
||||
|
||||
/*
|
||||
* Do the decompression, and jump to the new kernel..
|
||||
*/
|
||||
pushq %rsi /* Save the real mode argument */
|
||||
movq $z_run_size, %r9 /* size of kernel with .bss and .brk */
|
||||
pushq %r9
|
||||
movq %rsi, %rdi /* real mode address */
|
||||
leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
|
||||
leaq input_data(%rip), %rdx /* input_data */
|
||||
movl $z_input_len, %ecx /* input_len */
|
||||
movq %rbp, %r8 /* output target address */
|
||||
movq $z_output_len, %r9 /* decompressed length, end of relocs */
|
||||
call decompress_kernel /* returns kernel location in %rax */
|
||||
popq %r9
|
||||
popq %rsi
|
||||
|
||||
/*
|
||||
* Jump to the decompressed kernel.
|
||||
*/
|
||||
jmp *%rax
|
||||
|
||||
.code32
|
||||
no_longmode:
|
||||
/* This isn't an x86-64 CPU so hang */
|
||||
1:
|
||||
hlt
|
||||
jmp 1b
|
||||
|
||||
#include "../../kernel/verify_cpu.S"
|
||||
|
||||
.data
|
||||
gdt:
|
||||
.word gdt_end - gdt
|
||||
.long gdt
|
||||
.word 0
|
||||
.quad 0x0000000000000000 /* NULL descriptor */
|
||||
.quad 0x00af9a000000ffff /* __KERNEL_CS */
|
||||
.quad 0x00cf92000000ffff /* __KERNEL_DS */
|
||||
.quad 0x0080890000000000 /* TS descriptor */
|
||||
.quad 0x0000000000000000 /* TS continued */
|
||||
gdt_end:
|
||||
|
||||
#ifdef CONFIG_EFI_STUB
|
||||
efi_config:
|
||||
.quad 0
|
||||
|
||||
#ifdef CONFIG_EFI_MIXED
|
||||
.global efi32_config
|
||||
efi32_config:
|
||||
.fill 11,8,0
|
||||
.quad efi64_thunk
|
||||
.byte 0
|
||||
#endif
|
||||
|
||||
.global efi64_config
|
||||
efi64_config:
|
||||
.fill 11,8,0
|
||||
.quad efi_call
|
||||
.byte 1
|
||||
#endif /* CONFIG_EFI_STUB */
|
||||
|
||||
/*
|
||||
* Stack and heap for uncompression
|
||||
*/
|
||||
.bss
|
||||
.balign 4
|
||||
boot_heap:
|
||||
.fill BOOT_HEAP_SIZE, 1, 0
|
||||
boot_stack:
|
||||
.fill BOOT_STACK_SIZE, 1, 0
|
||||
boot_stack_end:
|
||||
|
||||
/*
|
||||
* Space for page tables (not in .bss so not zeroed)
|
||||
*/
|
||||
.section ".pgtable","a",@nobits
|
||||
.balign 4096
|
||||
pgtable:
|
||||
.fill 6*4096, 1, 0
|
arch/x86/boot/compressed/misc.c (new file, 422 lines)
@@ -0,0 +1,422 @@
/*
|
||||
* misc.c
|
||||
*
|
||||
* This is a collection of several routines from gzip-1.0.3
|
||||
* adapted for Linux.
|
||||
*
|
||||
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
|
||||
* puts by Nick Holloway 1993, better puts by Martin Mares 1995
|
||||
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
|
||||
*/
|
||||
|
||||
#include "misc.h"
|
||||
#include "../string.h"
|
||||
|
||||
/* WARNING!!
|
||||
* This code is compiled with -fPIC and it is relocated dynamically
|
||||
* at run time, but no relocation processing is performed.
|
||||
* This means that it is not safe to place pointers in static structures.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Getting to provable safe in place decompression is hard.
|
||||
* Worst case behaviours need to be analyzed.
|
||||
* Background information:
|
||||
*
|
||||
* The file layout is:
|
||||
* magic[2]
|
||||
* method[1]
|
||||
* flags[1]
|
||||
* timestamp[4]
|
||||
* extraflags[1]
|
||||
* os[1]
|
||||
* compressed data blocks[N]
|
||||
* crc[4] orig_len[4]
|
||||
*
|
||||
* resulting in 18 bytes of non compressed data overhead.
|
||||
*
|
||||
* Files divided into blocks
|
||||
* 1 bit (last block flag)
|
||||
* 2 bits (block type)
|
||||
*
|
||||
* 1 block occurs every 32K -1 bytes or when there 50% compression
|
||||
* has been achieved. The smallest block type encoding is always used.
|
||||
*
|
||||
* stored:
|
||||
* 32 bits length in bytes.
|
||||
*
|
||||
* fixed:
|
||||
* magic fixed tree.
|
||||
* symbols.
|
||||
*
|
||||
* dynamic:
|
||||
* dynamic tree encoding.
|
||||
* symbols.
|
||||
*
|
||||
*
|
||||
* The buffer for decompression in place is the length of the
|
||||
* uncompressed data, plus a small amount extra to keep the algorithm safe.
|
||||
* The compressed data is placed at the end of the buffer. The output
|
||||
* pointer is placed at the start of the buffer and the input pointer
|
||||
* is placed where the compressed data starts. Problems will occur
|
||||
* when the output pointer overruns the input pointer.
|
||||
*
|
||||
* The output pointer can only overrun the input pointer if the input
|
||||
* pointer is moving faster than the output pointer. A condition only
|
||||
* triggered by data whose compressed form is larger than the uncompressed
|
||||
* form.
|
||||
*
|
||||
* The worst case at the block level is a growth of the compressed data
|
||||
* of 5 bytes per 32767 bytes.
|
||||
*
|
||||
* The worst case internal to a compressed block is very hard to figure.
|
||||
* The worst case can at least be bounded by having one bit that represents
|
||||
* 32764 bytes and then all of the rest of the bytes representing the very
|
||||
* very last byte.
|
||||
*
|
||||
* All of which is enough to compute an amount of extra data that is required
|
||||
* to be safe. To avoid problems at the block level allocating 5 extra bytes
|
||||
* per 32767 bytes of data is sufficient. To avoid problems internal to a
|
||||
* block adding an extra 32767 bytes (the worst case uncompressed block size)
|
||||
* is sufficient, to ensure that in the worst case the decompressed data for
|
||||
* block will stop the byte before the compressed data for a block begins.
|
||||
* To avoid problems with the compressed data's meta information an extra 18
|
||||
* bytes are needed. Leading to the formula:
|
||||
*
|
||||
* extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
|
||||
*
|
||||
* Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
|
||||
* Adding 32768 instead of 32767 just makes for round numbers.
|
||||
* Adding the decompressor_size is necessary as it must live after all
|
||||
* of the data as well. Last I measured the decompressor is about 14K.
|
||||
* 10K of actual data and 4K of bss.
|
||||
*
|
||||
*/
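A small, hedged sketch of the safety-margin formula spelled out in the comment above (the 14 KiB decompressor size is only the rough figure quoted there, not a measured value):

#include <stdio.h>

/* extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size,
 * exactly as described in the comment block above. */
static unsigned long safety_margin(unsigned long uncompressed_size,
				   unsigned long decompressor_size)
{
	return (uncompressed_size >> 12) + 32768 + 18 + decompressor_size;
}

int main(void)
{
	unsigned long vmlinux_bin = 20 * 1024 * 1024;	/* example 20 MiB image */

	printf("extra bytes needed: %lu\n",
	       safety_margin(vmlinux_bin, 14 * 1024));
	return 0;
}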
|
||||
|
||||
/*
|
||||
* gzip declarations
|
||||
*/
|
||||
#define STATIC static
|
||||
|
||||
#undef memcpy
|
||||
|
||||
/*
|
||||
* Use a normal definition of memset() from string.c. There are already
|
||||
* included header files which expect a definition of memset() and by
|
||||
* the time we define memset macro, it is too late.
|
||||
*/
|
||||
#undef memset
|
||||
#define memzero(s, n) memset((s), 0, (n))
|
||||
|
||||
|
||||
static void error(char *m);
|
||||
|
||||
/*
|
||||
* This is set up by the setup-routine at boot-time
|
||||
*/
|
||||
struct boot_params *real_mode; /* Pointer to real-mode data */
|
||||
|
||||
memptr free_mem_ptr;
|
||||
memptr free_mem_end_ptr;
|
||||
|
||||
static char *vidmem;
|
||||
static int vidport;
|
||||
static int lines, cols;
|
||||
|
||||
#ifdef CONFIG_KERNEL_GZIP
|
||||
#include "../../../../lib/decompress_inflate.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_BZIP2
|
||||
#include "../../../../lib/decompress_bunzip2.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_LZMA
|
||||
#include "../../../../lib/decompress_unlzma.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_XZ
|
||||
#include "../../../../lib/decompress_unxz.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_LZO
|
||||
#include "../../../../lib/decompress_unlzo.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_LZ4
|
||||
#include "../../../../lib/decompress_unlz4.c"
|
||||
#endif
|
||||
|
||||
static void scroll(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
|
||||
for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
|
||||
vidmem[i] = ' ';
|
||||
}
|
||||
|
||||
#define XMTRDY 0x20
|
||||
|
||||
#define TXR 0 /* Transmit register (WRITE) */
|
||||
#define LSR 5 /* Line Status */
|
||||
static void serial_putchar(int ch)
|
||||
{
|
||||
unsigned timeout = 0xffff;
|
||||
|
||||
while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
|
||||
cpu_relax();
|
||||
|
||||
outb(ch, early_serial_base + TXR);
|
||||
}
|
||||
|
||||
void __putstr(const char *s)
|
||||
{
|
||||
int x, y, pos;
|
||||
char c;
|
||||
|
||||
if (early_serial_base) {
|
||||
const char *str = s;
|
||||
while (*str) {
|
||||
if (*str == '\n')
|
||||
serial_putchar('\r');
|
||||
serial_putchar(*str++);
|
||||
}
|
||||
}
|
||||
|
||||
if (real_mode->screen_info.orig_video_mode == 0 &&
|
||||
lines == 0 && cols == 0)
|
||||
return;
|
||||
|
||||
x = real_mode->screen_info.orig_x;
|
||||
y = real_mode->screen_info.orig_y;
|
||||
|
||||
while ((c = *s++) != '\0') {
|
||||
if (c == '\n') {
|
||||
x = 0;
|
||||
if (++y >= lines) {
|
||||
scroll();
|
||||
y--;
|
||||
}
|
||||
} else {
|
||||
vidmem[(x + cols * y) * 2] = c;
|
||||
if (++x >= cols) {
|
||||
x = 0;
|
||||
if (++y >= lines) {
|
||||
scroll();
|
||||
y--;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
real_mode->screen_info.orig_x = x;
|
||||
real_mode->screen_info.orig_y = y;
|
||||
|
||||
pos = (x + cols * y) * 2; /* Update cursor position */
|
||||
outb(14, vidport);
|
||||
outb(0xff & (pos >> 9), vidport+1);
|
||||
outb(15, vidport);
|
||||
outb(0xff & (pos >> 1), vidport+1);
|
||||
}
|
||||
|
||||
static void error(char *x)
|
||||
{
|
||||
error_putstr("\n\n");
|
||||
error_putstr(x);
|
||||
error_putstr("\n\n -- System halted");
|
||||
|
||||
while (1)
|
||||
asm("hlt");
|
||||
}

#if CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len)
{
	int *reloc;
	unsigned long delta, map, ptr;
	unsigned long min_addr = (unsigned long)output;
	unsigned long max_addr = min_addr + output_len;

	/*
	 * Calculate the delta between where vmlinux was linked to load
	 * and where it was actually loaded.
	 */
	delta = min_addr - LOAD_PHYSICAL_ADDR;
	if (!delta) {
		debug_putstr("No relocation needed... ");
		return;
	}
	debug_putstr("Performing relocations... ");

	/*
	 * The kernel contains a table of relocation addresses. Those
	 * addresses have the final load address of the kernel in virtual
	 * memory. We are currently working in the self map. So we need to
	 * create an adjustment for kernel memory addresses to the self map.
	 * This will involve subtracting out the base address of the kernel.
	 */
	map = delta - __START_KERNEL_map;

	/*
	 * Process relocations: 32 bit relocations first then 64 bit after.
	 * Two sets of binary relocations are added to the end of the kernel
	 * before compression. Each relocation table entry is the kernel
	 * address of the location which needs to be updated stored as a
	 * 32-bit value which is sign extended to 64 bits.
	 *
	 * Format is:
	 *
	 * kernel bits...
	 * 0 - zero terminator for 64 bit relocations
	 * 64 bit relocation repeated
	 * 0 - zero terminator for 32 bit relocations
	 * 32 bit relocation repeated
	 *
	 * So we work backwards from the end of the decompressed image.
	 */
	for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
		int extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("32-bit relocation outside of kernel!\n");

		*(uint32_t *)ptr += delta;
	}
#ifdef CONFIG_X86_64
	for (reloc--; *reloc; reloc--) {
		long extended = *reloc;
		extended += map;

		ptr = (unsigned long)extended;
		if (ptr < min_addr || ptr > max_addr)
			error("64-bit relocation outside of kernel!\n");

		*(uint64_t *)ptr += delta;
	}
#endif
}
#else
static inline void handle_relocations(void *output, unsigned long output_len)
{ }
#endif
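
/*
 * Worked example with made-up numbers (illustrative only): assume
 * LOAD_PHYSICAL_ADDR = 0x1000000, __START_KERNEL_map = 0xffffffff80000000,
 * and a kernel actually decompressed at output = 0x2000000, so
 * delta = 0x1000000 and map = delta - __START_KERNEL_map.  A table entry
 * holding the link-time virtual address 0xffffffff81234567 then gives
 * ptr = 0xffffffff81234567 + map = 0x2234567, i.e. the same byte inside
 * the freshly decompressed image, and the 32-bit value stored there is
 * bumped by delta.
 */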

static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
	Elf64_Ehdr ehdr;
	Elf64_Phdr *phdrs, *phdr;
#else
	Elf32_Ehdr ehdr;
	Elf32_Phdr *phdrs, *phdr;
#endif
	void *dest;
	int i;

	memcpy(&ehdr, output, sizeof(ehdr));
	if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
	    ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
	    ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
	    ehdr.e_ident[EI_MAG3] != ELFMAG3) {
		error("Kernel is not a valid ELF file");
		return;
	}

	debug_putstr("Parsing ELF... ");

	phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
	if (!phdrs)
		error("Failed to allocate space for phdrs");

	memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);

	for (i = 0; i < ehdr.e_phnum; i++) {
		phdr = &phdrs[i];

		switch (phdr->p_type) {
		case PT_LOAD:
#ifdef CONFIG_RELOCATABLE
			dest = output;
			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
#else
			dest = (void *)(phdr->p_paddr);
#endif
			memcpy(dest,
			       output + phdr->p_offset,
			       phdr->p_filesz);
			break;
		default: /* Ignore other PT_* */ break;
		}
	}

	free(phdrs);
}
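
/*
 * Illustrative only (made-up numbers): with LOAD_PHYSICAL_ADDR = 0x1000000
 * and a relocatable kernel decompressed at output = 0x2000000, a PT_LOAD
 * segment whose p_paddr is 0x1200000 is copied to
 * output + (0x1200000 - 0x1000000) = 0x2200000; without CONFIG_RELOCATABLE
 * it would go to p_paddr itself.
 */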

asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
				  unsigned char *input_data,
				  unsigned long input_len,
				  unsigned char *output,
				  unsigned long output_len,
				  unsigned long run_size)
{
	unsigned char *output_orig = output;

	real_mode = rmode;

	sanitize_boot_params(real_mode);

	if (real_mode->screen_info.orig_video_mode == 7) {
		vidmem = (char *) 0xb0000;
		vidport = 0x3b4;
	} else {
		vidmem = (char *) 0xb8000;
		vidport = 0x3d4;
	}

	lines = real_mode->screen_info.orig_video_lines;
	cols = real_mode->screen_info.orig_video_cols;

	console_init();
	debug_putstr("early console in decompress_kernel\n");

	free_mem_ptr     = heap;	/* Heap */
	free_mem_end_ptr = heap + BOOT_HEAP_SIZE;

	/*
	 * The memory hole needed for the kernel is the larger of either
	 * the entire decompressed kernel plus relocation table, or the
	 * entire decompressed kernel plus .bss and .brk sections.
	 */
	output = choose_kernel_location(input_data, input_len, output,
					output_len > run_size ? output_len
							      : run_size);

	/* Validate memory location choices. */
	if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
		error("Destination address inappropriately aligned");
#ifdef CONFIG_X86_64
	if (heap > 0x3fffffffffffUL)
		error("Destination address too large");
#else
	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
		error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
	if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
		error("Wrong destination address");
#endif

	debug_putstr("\nDecompressing Linux... ");
	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
	parse_elf(output);
	/*
	 * 32-bit always performs relocations. 64-bit relocations are only
	 * needed if kASLR has chosen a different load address.
	 */
	if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
		handle_relocations(output, output_len);
	debug_putstr("done.\nBooting the kernel.\n");
	return output;
}
86
arch/x86/boot/compressed/misc.h
Normal file
@@ -0,0 +1,86 @@
#ifndef BOOT_COMPRESSED_MISC_H
#define BOOT_COMPRESSED_MISC_H

/*
 * we have to be careful, because no indirections are allowed here, and
 * paravirt_ops is a kind of one. As it will only run in baremetal anyway,
 * we just keep it from happening
 */
#undef CONFIG_PARAVIRT
#ifdef CONFIG_X86_32
#define _ASM_X86_DESC_H 1
#endif

#include <linux/linkage.h>
#include <linux/screen_info.h>
#include <linux/elf.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
#include <asm/bootparam_utils.h>

#define BOOT_BOOT_H
#include "../ctype.h"

#ifdef CONFIG_X86_64
#define memptr long
#else
#define memptr unsigned
#endif

/* misc.c */
extern memptr free_mem_ptr;
extern memptr free_mem_end_ptr;
extern struct boot_params *real_mode;		/* Pointer to real-mode data */
void __putstr(const char *s);
#define error_putstr(__x)  __putstr(__x)

#ifdef CONFIG_X86_VERBOSE_BOOTUP

#define debug_putstr(__x)  __putstr(__x)

#else

static inline void debug_putstr(const char *s)
{ }

#endif

#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
/* cmdline.c */
int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
#endif

#if CONFIG_RANDOMIZE_BASE
/* aslr.c */
unsigned char *choose_kernel_location(unsigned char *input,
				      unsigned long input_size,
				      unsigned char *output,
				      unsigned long output_size);
/* cpuflags.c */
bool has_cpuflag(int flag);
#else
static inline
unsigned char *choose_kernel_location(unsigned char *input,
				      unsigned long input_size,
				      unsigned char *output,
				      unsigned long output_size)
{
	return output;
}
#endif

#ifdef CONFIG_EARLY_PRINTK
/* early_serial_console.c */
extern int early_serial_base;
void console_init(void);
#else
static const int early_serial_base;
static inline void console_init(void)
{ }
#endif

#endif
104
arch/x86/boot/compressed/mkpiggy.c
Normal file
@@ -0,0 +1,104 @@
/* ----------------------------------------------------------------------- *
 *
 *  Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License version
 *  2 as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *  02110-1301, USA.
 *
 *  H. Peter Anvin <hpa@linux.intel.com>
 *
 * ----------------------------------------------------------------------- */

/*
 * Compute the desired load offset from a compressed program; outputs
 * a small assembly wrapper with the appropriate symbols defined.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <tools/le_byteshift.h>

int main(int argc, char *argv[])
{
	uint32_t olen;
	long ilen;
	unsigned long offs;
	unsigned long run_size;
	FILE *f = NULL;
	int retval = 1;

	if (argc < 3) {
		fprintf(stderr, "Usage: %s compressed_file run_size\n",
			argv[0]);
		goto bail;
	}

	/* Get the information for the compressed kernel image first */

	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		goto bail;
	}

	if (fseek(f, -4L, SEEK_END)) {
		perror(argv[1]);
		goto bail;	/* cannot seek to the 4-byte size field */
	}

	if (fread(&olen, sizeof(olen), 1, f) != 1) {
		perror(argv[1]);
		goto bail;
	}

	ilen = ftell(f);
	olen = get_unaligned_le32(&olen);

	/*
	 * Now we have the input (compressed) and output (uncompressed)
	 * sizes, compute the necessary decompression offset...
	 */

	offs = (olen > ilen) ? olen - ilen : 0;
	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
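	/*
	 * Purely illustrative numbers: for olen = 0x1000000 (16 MiB
	 * uncompressed) and ilen = 0x400000 (4 MiB compressed file),
	 * offs = 0xc00000 + 0x1000 + 0x10080 = 0xc11080, which rounds
	 * up to 0xc12000.
	 */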
	run_size = atoi(argv[2]);

	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
	printf(".globl z_input_len\n");
	printf("z_input_len = %lu\n", ilen);
	printf(".globl z_output_len\n");
	printf("z_output_len = %lu\n", (unsigned long)olen);
	printf(".globl z_extract_offset\n");
	printf("z_extract_offset = 0x%lx\n", offs);
	/* z_extract_offset_negative allows simplification of head_32.S */
	printf(".globl z_extract_offset_negative\n");
	printf("z_extract_offset_negative = -0x%lx\n", offs);
	printf(".globl z_run_size\n");
	printf("z_run_size = %lu\n", run_size);

	printf(".globl input_data, input_data_end\n");
	printf("input_data:\n");
	printf(".incbin \"%s\"\n", argv[1]);
	printf("input_data_end:\n");

	retval = 0;
bail:
	if (f)
		fclose(f);
	return retval;
}
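
/*
 * With the hypothetical sizes from the example above, the emitted wrapper
 * looks roughly like this (actual values and the input file name vary per
 * build); the build assembles it alongside the .incbin'd compressed image:
 *
 *	.section ".rodata..compressed","a",@progbits
 *	.globl z_input_len
 *	z_input_len = 4194304
 *	.globl z_output_len
 *	z_output_len = 16777216
 *	.globl z_extract_offset
 *	z_extract_offset = 0xc12000
 *	...
 *	.globl input_data, input_data_end
 *	input_data:
 *	.incbin "arch/x86/boot/compressed/vmlinux.bin.gz"
 *	input_data_end:
 */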
41
arch/x86/boot/compressed/string.c
Normal file
@@ -0,0 +1,41 @@
#include "../string.c"
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
void *memcpy(void *dest, const void *src, size_t n)
|
||||
{
|
||||
int d0, d1, d2;
|
||||
asm volatile(
|
||||
"rep ; movsl\n\t"
|
||||
"movl %4,%%ecx\n\t"
|
||||
"rep ; movsb\n\t"
|
||||
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
|
||||
: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
|
||||
: "memory");
|
||||
|
||||
return dest;
|
||||
}
|
||||
#else
|
||||
void *memcpy(void *dest, const void *src, size_t n)
|
||||
{
|
||||
long d0, d1, d2;
|
||||
asm volatile(
|
||||
"rep ; movsq\n\t"
|
||||
"movq %4,%%rcx\n\t"
|
||||
"rep ; movsb\n\t"
|
||||
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
|
||||
: "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src)
|
||||
: "memory");
|
||||
|
||||
return dest;
|
||||
}
|
||||
#endif
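
/*
 * Both variants above split the copy the same way: e.g. for n = 13 the
 * 32-bit version moves 13 >> 2 = 3 words with rep movsl and the remaining
 * 13 & 3 = 1 byte with rep movsb (the 64-bit version does the equivalent
 * with 8-byte quadwords).
 */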

void *memset(void *s, int c, size_t n)
{
	int i;
	char *ss = s;

	for (i = 0; i < n; i++)
		ss[i] = c;
	return s;
}
74
arch/x86/boot/compressed/vmlinux.lds.S
Normal file
@@ -0,0 +1,74 @@
#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#undef i386

#include <asm/cache.h>
#include <asm/page_types.h>

#ifdef CONFIG_X86_64
OUTPUT_ARCH(i386:x86-64)
ENTRY(startup_64)
#else
OUTPUT_ARCH(i386)
ENTRY(startup_32)
#endif

SECTIONS
{
	/* Be careful parts of head_64.S assume startup_32 is at
	 * address 0.
	 */
	. = 0;
	.head.text : {
		_head = . ;
		HEAD_TEXT
		_ehead = . ;
	}
	.rodata..compressed : {
		*(.rodata..compressed)
	}
	.text :	{
		_text = .; 	/* Text */
		*(.text)
		*(.text.*)
		_etext = . ;
	}
	.rodata : {
		_rodata = . ;
		*(.rodata)	 /* read-only data */
		*(.rodata.*)
		_erodata = . ;
	}
	.got : {
		_got = .;
		KEEP(*(.got.plt))
		KEEP(*(.got))
		_egot = .;
	}
	.data :	{
		_data = . ;
		*(.data)
		*(.data.*)
		_edata = . ;
	}
	. = ALIGN(L1_CACHE_BYTES);
	.bss : {
		_bss = . ;
		*(.bss)
		*(.bss.*)
		*(COMMON)
		. = ALIGN(8);	/* For convenience during zeroing */
		_ebss = .;
	}
#ifdef CONFIG_X86_64
	. = ALIGN(PAGE_SIZE);
	.pgtable : {
		_pgtable = . ;
		*(.pgtable)
		_epgtable = . ;
	}
#endif
	_end = .;
}