Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,12 @@
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += dma-contiguous.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
generic-y += scatterlist.h


@@ -0,0 +1,65 @@
/* a.out coredump register dumper
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_X86_A_OUT_CORE_H
#define _ASM_X86_A_OUT_CORE_H
#ifdef __KERNEL__
#ifdef CONFIG_X86_32
#include <linux/user.h>
#include <linux/elfcore.h>
#include <asm/debugreg.h>
/*
* fill in the user structure for an a.out core dump
*/
static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
{
/* changed the size calculations - should hopefully work better. lbt */
dump->magic = CMAGIC;
dump->start_code = 0;
dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
>> PAGE_SHIFT;
dump->u_dsize -= dump->u_tsize;
dump->u_ssize = 0;
aout_dump_debugregs(dump);
if (dump->start_stack < TASK_SIZE)
dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
>> PAGE_SHIFT;
dump->regs.bx = regs->bx;
dump->regs.cx = regs->cx;
dump->regs.dx = regs->dx;
dump->regs.si = regs->si;
dump->regs.di = regs->di;
dump->regs.bp = regs->bp;
dump->regs.ax = regs->ax;
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
dump->regs.gs = get_user_gs(regs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
dump->regs.flags = regs->flags;
dump->regs.sp = regs->sp;
dump->regs.ss = (u16)regs->ss;
dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
#endif /* CONFIG_X86_32 */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_A_OUT_CORE_H */


@@ -0,0 +1,45 @@
/*
* X86 specific ACPICA environments and implementation
*
* Copyright (C) 2014, Intel Corporation
* Author: Lv Zheng <lv.zheng@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_X86_ACENV_H
#define _ASM_X86_ACENV_H
#include <asm/special_insns.h>
/* Asm macros */
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = __acpi_release_global_lock(&facs->global_lock))
/*
* Math helper asm macros
*/
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
asm("divl %2;" \
: "=a"(q32), "=d"(r32) \
: "r"(d32), \
"0"(n_lo), "1"(n_hi))
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
asm("shrl $1,%2 ;" \
"rcrl $1,%3;" \
: "=r"(n_hi), "=r"(n_lo) \
: "0"(n_hi), "1"(n_lo))
#endif /* _ASM_X86_ACENV_H */
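
A hedged usage sketch of the divl helper above, not part of this commit: example_div64_by_32() and its values are invented, and the operation is only defined when the high word is smaller than the (non-zero) divisor, otherwise divl raises a divide error.

/* Illustrative only: split a 64-bit value into 32-bit quotient and remainder. */
static inline unsigned int example_div64_by_32(unsigned long long n,
					       unsigned int d32,
					       unsigned int *rem)
{
	unsigned int n_hi = (unsigned int)(n >> 32);	/* loaded into %edx */
	unsigned int n_lo = (unsigned int)n;		/* loaded into %eax */
	unsigned int q32, r32;

	/* Requires n_hi < d32 so the quotient fits in 32 bits. */
	ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32);
	*rem = r32;	/* remainder comes back in %edx */
	return q32;	/* quotient comes back in %eax */
}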

arch/x86/include/asm/acpi.h (new file, 149 lines)

@@ -0,0 +1,149 @@
#ifndef _ASM_X86_ACPI_H
#define _ASM_X86_ACPI_H
/*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <acpi/pdc_intel.h>
#include <asm/numa.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
#include <asm/realmode.h>
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
acpi_pci_disabled = 1;
acpi_noirq_set();
}
/* Low-level suspend routine. */
extern int (*acpi_suspend_lowlevel)(void);
/* Physical address to resume after wakeup */
#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
/*
* Check if the CPU can handle C2 and deeper
*/
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
/*
* Early models (<=5) of AMD Opterons are not supposed to go into
* C2 state.
*
* Steppings 0x0A and later are good
*/
if (boot_cpu_data.x86 == 0x0F &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_mask < 0x0A)
return 1;
else if (amd_e400_c1e_detected)
return 1;
else
return max_cstate;
}
static inline bool arch_has_acpi_pdc(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
return (c->x86_vendor == X86_VENDOR_INTEL ||
c->x86_vendor == X86_VENDOR_CENTAUR);
}
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
struct cpuinfo_x86 *c = &cpu_data(0);
buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
if (cpu_has(c, X86_FEATURE_ACPI))
buf[2] |= ACPI_PDC_T_FFH;
/*
* If mwait/monitor is unsupported, C2/C3_FFH will be disabled
*/
if (!cpu_has(c, X86_FEATURE_MWAIT))
buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
}
static inline bool acpi_has_cpu_in_madt(void)
{
return !!acpi_lapic;
}
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
#define acpi_ioapic 0
#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 1
#ifdef CONFIG_ACPI_NUMA
extern int acpi_numa;
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
#define acpi_unlazy_tlb(x) leave_mm(x)
#endif /* _ASM_X86_ACPI_H */


@@ -0,0 +1,31 @@
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent with the MMU. The
* GART gives the CPU a physical alias of pages in memory. The alias
* region is mapped uncacheable. Make sure there are no conflicting
mappings with different cacheability attributes for the same
* page. This avoids data corruption on some CPUs.
*/
#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
/*
* Could use CLFLUSH here if the cpu supports it. But then it would
* need to be called for each cacheline of the whole page so it may
* not be worth it. Would need a page for it.
*/
#define flush_agp_cache() wbinvd()
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif /* _ASM_X86_AGP_H */
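
A hedged sketch of the calling pattern the comment above describes; it is not part of this commit and the example_* names are invented. The point is the ordering: drop the cacheable mapping before the GART aliases the page, flush the CPU caches once the table is written, and restore write-back caching on teardown.

static inline void example_agp_bind_page(struct page *page)
{
	map_page_into_agp(page);	/* set_pages_uc(): remove the cached alias */

	/* ... the driver writes the page's physical address into the GATT ... */

	flush_agp_cache();		/* wbinvd(): push out stale cache lines */
}

static inline void example_agp_unbind_page(struct page *page)
{
	unmap_page_from_agp(page);	/* set_pages_wb(): back to write-back */
}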


@@ -0,0 +1,31 @@
#ifndef _ASM_X86_ALTERNATIVE_ASM_H
#define _ASM_X86_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
#include <asm/asm.h>
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
672: lock
.pushsection .smp_locks,"a"
.balign 4
.long 672b - .
.popsection
.endm
#else
.macro LOCK_PREFIX
.endm
#endif
.macro altinstruction_entry orig alt feature orig_len alt_len
.long \orig - .
.long \alt - .
.word \feature
.byte \orig_len
.byte \alt_len
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_ALTERNATIVE_ASM_H */


@@ -0,0 +1,245 @@
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#include <asm/ptrace.h>
/*
* Alternative inline assembly for SMP.
*
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
* SMP alternatives use the same data structures as the other
* alternatives and the X86_FEATURE_UP flag to indicate the case of a
* UP system running a SMP kernel. The existing apply_alternatives()
* works fine for patching a SMP kernel for UP.
*
* The SMP alternative tables can be kept after boot and contain both
* UP and SMP versions of the instructions to allow switching back to
* SMP at runtime, when hotplugging in a new CPU, which is especially
* useful in virtualized environments.
*
* The very common lock prefix is handled as special case in a
* separate table which is a pure address list without replacement ptr
* and size information. That keeps the table sizes small.
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
".pushsection .smp_locks,\"a\"\n" \
".balign 4\n" \
".long 671f - .\n" /* offset */ \
".popsection\n" \
"671:"
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
struct alt_instr {
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
};
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_enable_smp(void);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_enable_smp(void) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
return 0;
}
#endif /* CONFIG_SMP */
#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
#define b_replacement(number) "663"#number
#define e_replacement(number) "664"#number
#define alt_slen "662b-661b"
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
#define ALTINSTR_ENTRY(feature, number) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(number)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_slen "\n" /* source len */ \
" .byte " alt_rlen(number) "\n" /* replacement len */
#define DISCARD_ENTRY(number) /* rlen <= slen */ \
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"
/*
* This must be included *after* the definition of ALTERNATIVE due to
* <asm/arch_hweight.h>
*/
#include <asm/cpufeature.h>
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows the use of optimized instructions even on generic binary
* kernels.
*
* The length of oldinstr must be greater than or equal to the length of
* newinstr; the replacement can be padded with NOPs as needed.
*
* For non-barrier-like inlines, please define new variants
* without the volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
/*
* Alternative inline assembly with input.
*
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* It is best to use constraints that produce fixed-size code (like "(%1)" with "r").
* If you use variable-sized constraints like "m" or "g" in the
* replacement, make sure to pad to the worst-case length.
* Argument 0 is left unused to keep API compatibility.
*/
#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: : "i" (0), ## input)
/*
* This is similar to alternative_input. But it has two features and
* respective instructions.
*
* If CPU has feature2, newinstr2 is used.
* Otherwise, if CPU has feature1, newinstr1 is used.
* Otherwise, oldinstr is used.
*/
#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2, \
feature2, input...) \
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, \
newinstr2, feature2) \
: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: output : "i" (0), ## input)
/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, feature, output, input...) \
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
* Like alternative_call, but there are two features and respective functions.
* If CPU has feature2, function2 is used.
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
output, input...) \
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2) \
: output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)
/*
* Use this macro if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a...) a
/*
* use this macro if you need clobbers but no inputs in
* alternative_{input,io,call}()
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
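
As a hedged illustration of the macros above (not part of this commit), two sketches loosely modelled on existing users, the 32-bit barrier macros and prefetchw(); the example_* names are invented. Note that each replacement is no longer than the instruction it replaces, as the length rule above requires.

static inline void example_wmb(void)
{
	/* 5-byte locked add as the generic form, 3-byte sfence where SSE exists. */
	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM);
}

static inline void example_prefetchw(const void *p)
{
	/* %0 is the mandatory dummy "i" (0) operand, so the pointer is %1. */
	alternative_input("prefetcht0 (%1)", "prefetchw (%1)",
			  X86_FEATURE_3DNOW, "r" (p));
}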
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{}
#define __parainstructions NULL
#define __parainstructions_end NULL
#endif
extern void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
* Side-effect: any interrupt handler running between save and restore will have
* the ability to write to read-only pages.
*
* Warning:
* Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
* no thread can be preempted in the instructions being modified (no iret to an
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
* On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
#endif /* _ASM_X86_ALTERNATIVE_H */


@@ -0,0 +1,110 @@
#ifndef _ASM_X86_AMD_NB_H
#define _ASM_X86_AMD_NB_H
#include <linux/ioport.h>
#include <linux/pci.h>
struct amd_nb_bus_dev_range {
u8 bus;
u8 dev_base;
u8 dev_limit;
};
extern const struct pci_device_id amd_nb_misc_ids[];
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
extern bool early_is_amd_nb(u32 value);
extern struct resource *amd_get_mmconfig_range(struct resource *res);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, unsigned long);
struct amd_l3_cache {
unsigned indices;
u8 subcaches[4];
};
struct threshold_block {
unsigned int block;
unsigned int bank;
unsigned int cpu;
u32 address;
u16 interrupt_enable;
bool interrupt_capable;
u16 threshold_limit;
struct kobject kobj;
struct list_head miscj;
};
struct threshold_bank {
struct kobject *kobj;
struct threshold_block *blocks;
/* initialized to the number of CPUs on the node sharing this bank */
atomic_t cpus;
};
struct amd_northbridge {
struct pci_dev *misc;
struct pci_dev *link;
struct amd_l3_cache l3_cache;
struct threshold_bank *bank4;
};
struct amd_northbridge_info {
u16 num;
u64 flags;
struct amd_northbridge *nb;
};
extern struct amd_northbridge_info amd_northbridges;
#define AMD_NB_GART BIT(0)
#define AMD_NB_L3_INDEX_DISABLE BIT(1)
#define AMD_NB_L3_PARTITIONING BIT(2)
#ifdef CONFIG_AMD_NB
static inline u16 amd_nb_num(void)
{
return amd_northbridges.num;
}
static inline bool amd_nb_has_feature(unsigned feature)
{
return ((amd_northbridges.flags & feature) == feature);
}
static inline struct amd_northbridge *node_to_amd_nb(int node)
{
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
static inline u16 amd_get_node_id(struct pci_dev *pdev)
{
struct pci_dev *misc;
int i;
for (i = 0; i != amd_nb_num(); i++) {
misc = node_to_amd_nb(i)->misc;
if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
return i;
}
WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
return 0;
}
#else
#define amd_nb_num(x) 0
#define amd_nb_has_feature(x) false
#define node_to_amd_nb(x) NULL
#endif
#endif /* _ASM_X86_AMD_NB_H */
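
A hedged usage sketch, not part of this commit: walking the cached northbridges in the same spirit as amd_get_node_id() above. It assumes CONFIG_AMD_NB=y (so amd_nb_num() and node_to_amd_nb() are the real helpers rather than the stubs) and the example_* name is invented.

static inline void example_for_each_nb_misc(void (*fn)(struct pci_dev *misc))
{
	int i;

	/* Populate amd_northbridges once; a negative return means none found. */
	if (amd_cache_northbridges() < 0)
		return;

	for (i = 0; i < amd_nb_num(); i++)
		fn(node_to_amd_nb(i)->misc);	/* per-node misc PCI device */
}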


@@ -0,0 +1,49 @@
/*
* apb_timer.h: Driver for Langwell APB timer based on Synopsys DesignWare
*
* (C) Copyright 2009 Intel Corporation
* Author: Jacob Pan (jacob.jun.pan@intel.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*
* Note:
*/
#ifndef ASM_X86_APBT_H
#define ASM_X86_APBT_H
#include <linux/sfi.h>
#ifdef CONFIG_APB_TIMER
/* default memory mapped register base */
#define LNW_SCU_ADDR 0xFF100000
#define LNW_EXT_TIMER_OFFSET 0x1B800
#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
#define LNW_EXT_TIMER_PGOFFSET 0x800
/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
#define APBT_MAX_FREQ 50000000
#define APBT_MIN_FREQ 1000000
#define APBT_MMAP_SIZE 1024
#define APBT_DEV_USED 1
extern void apbt_time_init(void);
extern unsigned long apbt_quick_calibrate(void);
extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
extern void apbt_setup_secondary_clock(void);
extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
extern int sfi_mtimer_num;
#else /* CONFIG_APB_TIMER */
static inline unsigned long apbt_quick_calibrate(void) {return 0; }
static inline void apbt_time_init(void) { }
#endif
#endif /* ASM_X86_APBT_H */

arch/x86/include/asm/apic.h (new file, 672 lines)

@@ -0,0 +1,672 @@
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H
#include <linux/cpumask.h>
#include <linux/pm.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/idle.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
/*
* Debugging macros
*/
#define APIC_QUIET 0
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
/*
* Define the default level of output to be very little
* This can be turned up by using apic=verbose for more
* information and apic=debug for _lots_ of information.
* apic_verbosity is defined in apic.c
*/
#define apic_printk(v, s, a...) do { \
if ((v) <= apic_verbosity) \
printk(s, ##a); \
} while (0)
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
#else
static inline void generic_apic_probe(void)
{
}
#endif
#ifdef CONFIG_X86_LOCAL_APIC
extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
extern unsigned int lapic_timer_frequency;
#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */
static inline void default_inquire_remote_apic(int apicid)
{
if (apic_verbosity >= APIC_DEBUG)
__inquire_remote_apic(apicid);
}
/*
* With the 82489DX we can't rely on the APIC feature bit
* retrieved via CPUID, but we still have to deal with
* such an APIC chip, so we assume that the SMP configuration
* was found in the MP table (the 64-bit case mostly uses ACPI,
* which sets the SMP presence flag as well, so this helper is
* safe to use there too).
*/
static inline bool apic_from_smp_config(void)
{
return smp_found_config && !disable_apic;
}
/*
* Basic functions accessing APICs.
*/
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
extern int setup_profiling_timer(unsigned int);
static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
ASM_OUTPUT2("=r" (v), "=m" (*addr)),
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
}
extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);
extern int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
/*
* Make previous memory operations globally visible before
* sending the IPI through x2apic wrmsr. We need a serializing instruction or
* mfence for this.
*/
static inline void x2apic_wrmsr_fence(void)
{
asm volatile("mfence" : : : "memory");
}
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
reg == APIC_LVR)
return;
wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}
static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
{
wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}
static inline u32 native_apic_msr_read(u32 reg)
{
u64 msr;
if (reg == APIC_DFR)
return -1;
rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
return (u32)msr;
}
static inline void native_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return;
}
static inline u32 native_safe_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return 0;
}
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;
rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
extern int x2apic_phys;
extern int x2apic_preenabled;
extern void check_x2apic(void);
extern void enable_x2apic(void);
static inline int x2apic_enabled(void)
{
u64 msr;
if (!cpu_has_x2apic)
return 0;
rdmsrl(MSR_IA32_APICBASE, msr);
if (msr & X2APIC_ENABLE)
return 1;
return 0;
}
#define x2apic_supported() (cpu_has_x2apic)
static inline void x2apic_force_phys(void)
{
x2apic_phys = 1;
}
#else
static inline void disable_x2apic(void)
{
}
static inline void check_x2apic(void)
{
}
static inline void enable_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
return 0;
}
static inline void x2apic_force_phys(void)
{
}
#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif
extern void enable_IR_x2apic(void);
extern int get_physical_broadcast(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern int verify_local_APIC(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
extern void end_local_APIC_setup(void);
extern void bsp_end_local_APIC_setup(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern int apic_force_enable(unsigned long addr);
/*
* On 32bit this is mach-xxx local
*/
#ifdef CONFIG_X86_64
extern int apic_is_clustered_box(void);
#else
static inline int apic_is_clustered_box(void)
{
return 0;
}
#endif
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }
# define setup_boot_APIC_clock x86_init_noop
# define setup_secondary_APIC_clock x86_init_noop
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_64
#define SET_APIC_ID(x) (apic->set_apic_id(x))
#else
#endif
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct apic {
char *name;
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
int (*apic_id_valid)(int apicid);
int (*apic_id_registered)(void);
u32 irq_delivery_mode;
u32 irq_dest_mode;
const struct cpumask *(*target_cpus)(void);
int disable_esr;
int dest_logical;
unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
const struct cpumask *mask);
void (*init_apic_ldr)(void);
void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
void (*setup_apic_routing)(void);
int (*cpu_present_to_apicid)(int mps_cpu);
void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
int (*check_phys_apicid_present)(int phys_apicid);
int (*phys_pkg_id)(int cpuid_apic, int index_msb);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* wakeup_secondary_cpu */
int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
bool wait_for_init_deassert;
void (*inquire_remote_apic)(int apicid);
/* apic ops */
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
/*
* ->eoi_write() has the same signature as ->write().
*
* Drivers can support both ->eoi_write() and ->write() by passing the same
* callback value. The kernel can override ->eoi_write() and fall back
* on ->write() for EOI.
*/
void (*eoi_write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
#ifdef CONFIG_X86_32
/*
* Called very early during boot from get_smp_config(). It should
* return the logical apicid. x86_[bios]_cpu_to_apicid is
* initialized before this function is called.
*
* If logical apicid can't be determined that early, the function
* may return BAD_APICID. Logical apicid will be configured after
* init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
* won't be applied properly during early boot in this case.
*/
int (*x86_32_early_logical_apicid)(int cpu);
#endif
};
/*
* Pointer to the local APIC driver in use on this system (there's
* always just one such driver in use - the kernel decides via an
* early probing process which one it picks - and then sticks to it):
*/
extern struct apic *apic;
/*
* APIC drivers are probed based on how they are listed in the .apicdrivers
* section. So the order is important and enforced by the ordering
* of different apic driver files in the Makefile.
*
* For files that contain two apic drivers, we use apic_drivers()
* to enforce the order within them.
*/
#define apic_driver(sym) \
static const struct apic *__apicdrivers_##sym __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym }
#define apic_drivers(sym1, sym2) \
static struct apic *__apicdrivers_##sym1##sym2[2] __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym1, &sym2 }
extern struct apic *__apicdrivers[], *__apicdrivers_end[];
/*
* APIC functionality to boot other CPUs - only used on SMP:
*/
#ifdef CONFIG_SMP
extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
}
static inline void apic_write(u32 reg, u32 val)
{
apic->write(reg, val);
}
static inline void apic_eoi(void)
{
apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
}
static inline u64 apic_icr_read(void)
{
return apic->icr_read();
}
static inline void apic_icr_write(u32 low, u32 high)
{
apic->icr_write(low, high);
}
static inline void apic_wait_icr_idle(void)
{
apic->wait_icr_idle();
}
static inline u32 safe_apic_wait_icr_idle(void)
{
return apic->safe_wait_icr_idle();
}
extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
#else /* CONFIG_X86_LOCAL_APIC */
static inline u32 apic_read(u32 reg) { return 0; }
static inline void apic_write(u32 reg, u32 val) { }
static inline void apic_eoi(void) { }
static inline u64 apic_icr_read(void) { return 0; }
static inline void apic_icr_write(u32 low, u32 high) { }
static inline void apic_wait_icr_idle(void) { }
static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
#endif /* CONFIG_X86_LOCAL_APIC */
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
apic_eoi();
}
static inline unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
return (x >> 24) & 0xFF;
else
return (x >> 24) & 0x0F;
}
/*
* Warm reset vector position:
*/
#define TRAMPOLINE_PHYS_LOW 0x467
#define TRAMPOLINE_PHYS_HIGH 0x469
#ifdef CONFIG_X86_64
extern void apic_send_IPI_self(int vector);
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
extern void generic_bigsmp_probe(void);
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_mask;
#else
return cpumask_of(0);
#endif
}
static inline const struct cpumask *online_target_cpus(void)
{
return cpu_online_mask;
}
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
static inline unsigned int read_apic_id(void)
{
unsigned int reg;
reg = apic_read(APIC_ID);
return apic->get_apic_id(reg);
}
static inline int default_apic_id_valid(int apicid)
{
return (apicid < 255);
}
extern int default_acpi_madt_oem_check(char *, char *);
extern void default_setup_apic_routing(void);
extern struct apic apic_noop;
#ifdef CONFIG_X86_32
static inline int noop_x86_32_early_logical_apicid(int cpu)
{
return BAD_APICID;
}
/*
* Set up the logical destination ID.
*
* Intel recommends setting DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
extern void default_init_apic_ldr(void);
static inline int default_apic_id_registered(void)
{
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif
static inline int
flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
{
unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
cpumask_bits(andmask)[0] &
cpumask_bits(cpu_online_mask)[0] &
APIC_ALL_CPUS;
if (likely(cpu_mask)) {
*apicid = (unsigned int)cpu_mask;
return 0;
} else {
return -EINVAL;
}
}
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid);
static inline void
flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
static inline void
default_vector_allocation_domain(int cpu, struct cpumask *retmask,
const struct cpumask *mask)
{
cpumask_copy(retmask, cpumask_of(cpu));
}
static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
{
return physid_isset(apicid, *map);
}
static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
*retmap = *phys_map;
}
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline int
__default_check_phys_apicid_present(int phys_apicid)
{
return physid_isset(phys_apicid, phys_cpu_present_map);
}
#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
return __default_cpu_present_to_apicid(mps_cpu);
}
static inline int
default_check_phys_apicid_present(int phys_apicid)
{
return __default_check_phys_apicid_present(phys_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
#endif /* CONFIG_X86_LOCAL_APIC */
extern void irq_enter(void);
extern void irq_exit(void);
static inline void entering_irq(void)
{
irq_enter();
exit_idle();
}
static inline void entering_ack_irq(void)
{
ack_APIC_irq();
entering_irq();
}
static inline void exiting_irq(void)
{
irq_exit();
}
static inline void exiting_ack_irq(void)
{
irq_exit();
/* Ack only at the end to avoid potential reentry */
ack_APIC_irq();
}
extern void ioapic_zap_locks(void);
#endif /* _ASM_X86_APIC_H */
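
To make the ops structure concrete, a hedged sketch (not part of this commit) of the minimal "send one fixed IPI to a physical APIC ID" sequence built from the wrappers above; the real senders live in the apic drivers, and the example_* name and vector choice are purely illustrative.

static inline void example_send_fixed_ipi(u32 apicid, unsigned int vector)
{
	u32 low = APIC_DM_FIXED | APIC_DEST_PHYSICAL | vector;

	apic_wait_icr_idle();		/* the previous IPI must have left the ICR */
	apic_icr_write(low, apicid);	/* destination goes in the high half */
}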


@@ -0,0 +1,7 @@
#ifndef _ASM_X86_APIC_FLAT_64_H
#define _ASM_X86_APIC_FLAT_64_H
extern void flat_init_apic_ldr(void);
#endif


@@ -0,0 +1,445 @@
#ifndef _ASM_X86_APICDEF_H
#define _ASM_X86_APICDEF_H
/*
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
* Alan Cox <Alan.Cox@linux.org>, 1995.
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
/*
* This is the IO-APIC register space as specified
* by Intel docs:
*/
#define IO_APIC_SLOT_SIZE 1024
#define APIC_ID 0x20
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define APIC_LVR_DIRECTED_EOI (1 << 24)
#define GET_APIC_VERSION(x) ((x) & 0xFFu)
#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
# define APIC_INTEGRATED(x) ((x) & 0xF0u)
#else
# define APIC_INTEGRATED(x) (1)
#endif
#define APIC_XAPIC(x) ((x) >= 0x14)
#define APIC_EXT_SPACE(x) ((x) & 0x80000000)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
#define APIC_EOI_ACK 0x0 /* Docs say 0 for future compat. */
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu << 24)
#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_DIRECTED_EOI (1 << 12)
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
#define APIC_ESR_SEND_CS 0x00001
#define APIC_ESR_RECV_CS 0x00002
#define APIC_ESR_SEND_ACC 0x00004
#define APIC_ESR_RECV_ACC 0x00008
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_LVTCMCI 0x2f0
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
#define APIC_ICR_RR_MASK 0x30000
#define APIC_ICR_RR_INVALID 0x00000
#define APIC_ICR_RR_INPROG 0x10000
#define APIC_ICR_RR_VALID 0x20000
#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
#define APIC_ICR_BUSY 0x01000
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
#define APIC_DM_FIXED_MASK 0x00700
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
#define APIC_DM_NMI 0x00400
#define APIC_DM_INIT 0x00500
#define APIC_DM_STARTUP 0x00600
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
#define SET_APIC_DEST_FIELD(x) ((x) << 24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
#define SET_APIC_TIMER_BASE(x) (((x) << 18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
#define APIC_LVT_TIMER_ONESHOT (0 << 17)
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE (2 << 17)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
#define APIC_LVT_REMOTE_IRR (1 << 14)
#define APIC_INPUT_POLARITY (1 << 13)
#define APIC_SEND_PENDING (1 << 12)
#define APIC_MODE_MASK 0x700
#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
#define APIC_LVT1 0x360
#define APIC_LVTERR 0x370
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
#define APIC_TDR_DIV_8 0x2
#define APIC_TDR_DIV_16 0x3
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_EFEAT 0x400
#define APIC_ECTRL 0x410
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
#define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H
#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
#define APIC_EILVT_MASKED (1 << 16)
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800
#define XAPIC_ENABLE (1UL << 11)
#define X2APIC_ENABLE (1UL << 10)
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
# define MAX_LOCAL_APIC 256
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif
/*
* All x86-64 systems are xAPIC compatible.
* In the following, "apicid" is a physical APIC ID.
*/
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
/*
* The local APIC register structure, memory mapped. Not terribly well
* tested, but we might eventually use it in the future - the
* reason we cannot use it right now is the P5 APIC: it has an
* erratum whereby it cannot take 8-bit reads and writes, only 32-bit ones ...
*/
#define u32 unsigned int
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved[4]; } __reserved_02;
/*020*/ struct { /* APIC ID Register */
u32 __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
u32 __reserved[3];
} id;
/*030*/ const
struct { /* APIC Version Register */
u32 version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
u32 __reserved[3];
} version;
/*040*/ struct { u32 __reserved[4]; } __reserved_03;
/*050*/ struct { u32 __reserved[4]; } __reserved_04;
/*060*/ struct { u32 __reserved[4]; } __reserved_05;
/*070*/ struct { u32 __reserved[4]; } __reserved_06;
/*080*/ struct { /* Task Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} tpr;
/*090*/ const
struct { /* Arbitration Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} apr;
/*0A0*/ const
struct { /* Processor Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} ppr;
/*0B0*/ struct { /* End Of Interrupt Register */
u32 eoi;
u32 __reserved[3];
} eoi;
/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
/*0D0*/ struct { /* Logical Destination Register */
u32 __reserved_1 : 24,
logical_dest : 8;
u32 __reserved_2[3];
} ldr;
/*0E0*/ struct { /* Destination Format Register */
u32 __reserved_1 : 28,
model : 4;
u32 __reserved_2[3];
} dfr;
/*0F0*/ struct { /* Spurious Interrupt Vector Register */
u32 spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
u32 __reserved_3[3];
} svr;
/*100*/ struct { /* In Service Register */
/*170*/ u32 bitfield;
u32 __reserved[3];
} isr [8];
/*180*/ struct { /* Trigger Mode Register */
/*1F0*/ u32 bitfield;
u32 __reserved[3];
} tmr [8];
/*200*/ struct { /* Interrupt Request Register */
/*270*/ u32 bitfield;
u32 __reserved[3];
} irr [8];
/*280*/ union { /* Error Status Register */
struct {
u32 send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
u32 __reserved_3[3];
} error_bits;
struct {
u32 errors;
u32 __reserved_3[3];
} all_errors;
} esr;
/*290*/ struct { u32 __reserved[4]; } __reserved_08;
/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
/*300*/ struct { /* Interrupt Command Register 1 */
u32 vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
u32 __reserved_4[3];
} icr1;
/*310*/ struct { /* Interrupt Command Register 2 */
union {
u32 __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
u32 __reserved_3 : 24,
logical_dest : 8;
} dest;
u32 __reserved_4[3];
} icr2;
/*320*/ struct { /* LVT - Timer */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
u32 __reserved_4[3];
} lvt_timer;
/*330*/ struct { /* LVT - Thermal Sensor */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_thermal;
/*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_pc;
/*350*/ struct { /* LVT - LINT0 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint0;
/*360*/ struct { /* LVT - LINT1 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint1;
/*370*/ struct { /* LVT - Error */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_error;
/*380*/ struct { /* Timer Initial Count Register */
u32 initial_count;
u32 __reserved_2[3];
} timer_icr;
/*390*/ const
struct { /* Timer Current Count Register */
u32 curr_count;
u32 __reserved_2[3];
} timer_ccr;
/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
/*3E0*/ struct { /* Timer Divide Configuration Register */
u32 divisor : 4,
__reserved_1 : 28;
u32 __reserved_2[3];
} timer_dcr;
/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
#undef u32
#ifdef CONFIG_X86_32
#define BAD_APICID 0xFFu
#else
#define BAD_APICID 0xFFFFu
#endif
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
#endif /* _ASM_X86_APICDEF_H */
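
A hedged worked example of how the ICR bit fields combine (not part of this commit): the two halves of a STARTUP IPI aimed at physical APIC ID 3 with start page 0x9, roughly what the SMP boot path programs. The EXAMPLE_* names are invented and only the register layout is shown.

enum {
	/* ICR2: destination field occupies bits 31..24 -> 0x03000000 */
	EXAMPLE_ICR_HIGH = SET_APIC_DEST_FIELD(3),
	/* ICR: STARTUP delivery mode, physical destination, vector 0x09 -> 0x00000609 */
	EXAMPLE_ICR_LOW = APIC_DM_STARTUP | APIC_DEST_PHYSICAL | 0x09,
};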


@@ -0,0 +1,73 @@
/*
* Machine-specific APM BIOS functions for the generic (default) machine.
* Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
*/
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
"pushl %%es\n\t" \
"xorl %%edx, %%edx\n\t" \
"mov %%dx, %%ds\n\t" \
"mov %%dx, %%es\n\t" \
"mov %%dx, %%fs\n\t" \
"mov %%dx, %%gs\n\t"
# define APM_DO_POP_SEGS \
"popl %%es\n\t" \
"popl %%ds\n\t"
#else
# define APM_DO_ZERO_SEGS
# define APM_DO_POP_SEGS
#endif
static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
u32 *eax, u32 *ebx, u32 *ecx,
u32 *edx, u32 *esi)
{
/*
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
"lcall *%%cs:apm_bios_entry\n\t"
"setc %%al\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
APM_DO_POP_SEGS
: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
}
static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
u32 ecx_in, u32 *eax)
{
int cx, dx, si;
u8 error;
/*
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
"lcall *%%cs:apm_bios_entry\n\t"
"setc %%bl\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
APM_DO_POP_SEGS
: "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
return error;
}
#endif /* _ASM_X86_MACH_DEFAULT_APM_H */


@@ -0,0 +1,61 @@
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H
#ifdef CONFIG_64BIT
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
/* popcnt %rdi, %rax */
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
#define REG_OUT "a"
#else
/* popcnt %eax, %eax */
#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
#define REG_IN "a"
#define REG_OUT "a"
#endif
/*
* __sw_hweightXX are called from within the alternatives below
* and callee-clobbered registers need to be taken care of. See
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
* compiler switches.
*/
static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res = 0;
asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
return res;
}
static inline unsigned int __arch_hweight16(unsigned int w)
{
return __arch_hweight32(w & 0xffff);
}
static inline unsigned int __arch_hweight8(unsigned int w)
{
return __arch_hweight32(w & 0xff);
}
static inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res = 0;
#ifdef CONFIG_X86_32
return __arch_hweight32((u32)w) +
__arch_hweight32((u32)(w >> 32));
#else
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
#endif /* CONFIG_X86_32 */
return res;
}
#endif
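
A hedged sketch, not part of this commit, of what callers get out of this file: the generic hweight*() helpers resolve to these routines, so a population count becomes a single patched-in POPCNT on capable CPUs and a __sw_hweight* call elsewhere. The example_* name is invented.

static inline unsigned int example_popcount_demo(void)
{
	/* 0x00F0F00F has twelve set bits, whichever path computes it. */
	return __arch_hweight32(0x00F0F00F);
}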


@@ -0,0 +1,136 @@
/*
* This file is part of the Linux kernel.
*
* Copyright (c) 2011-2014, Intel Corporation
* Authors: Fenghua Yu <fenghua.yu@intel.com>,
* H. Peter Anvin <hpa@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef ASM_X86_ARCHRANDOM_H
#define ASM_X86_ARCHRANDOM_H
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/nops.h>
#define RDRAND_RETRY_LOOPS 10
#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
#define RDSEED_INT ".byte 0x0f,0xc7,0xf8"
#ifdef CONFIG_X86_64
# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
# define RDSEED_LONG ".byte 0x48,0x0f,0xc7,0xf8"
#else
# define RDRAND_LONG RDRAND_INT
# define RDSEED_LONG RDSEED_INT
#endif
#ifdef CONFIG_ARCH_RANDOM
/* Instead of arch_get_random_long() when alternatives haven't run. */
static inline int rdrand_long(unsigned long *v)
{
int ok;
asm volatile("1: " RDRAND_LONG "\n\t"
"jc 2f\n\t"
"decl %0\n\t"
"jnz 1b\n\t"
"2:"
: "=r" (ok), "=a" (*v)
: "0" (RDRAND_RETRY_LOOPS));
return ok;
}
/* A single attempt at RDSEED */
static inline bool rdseed_long(unsigned long *v)
{
unsigned char ok;
asm volatile(RDSEED_LONG "\n\t"
"setc %0"
: "=qm" (ok), "=a" (*v));
return ok;
}
#define GET_RANDOM(name, type, rdrand, nop) \
static inline int name(type *v) \
{ \
int ok; \
alternative_io("movl $0, %0\n\t" \
nop, \
"\n1: " rdrand "\n\t" \
"jc 2f\n\t" \
"decl %0\n\t" \
"jnz 1b\n\t" \
"2:", \
X86_FEATURE_RDRAND, \
ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
"0" (RDRAND_RETRY_LOOPS)); \
return ok; \
}
#define GET_SEED(name, type, rdseed, nop) \
static inline int name(type *v) \
{ \
unsigned char ok; \
alternative_io("movb $0, %0\n\t" \
nop, \
rdseed "\n\t" \
"setc %0", \
X86_FEATURE_RDSEED, \
ASM_OUTPUT2("=q" (ok), "=a" (*v))); \
return ok; \
}
#ifdef CONFIG_X86_64
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
#else
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
#endif /* CONFIG_X86_64 */
#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
#else
static inline int rdrand_long(unsigned long *v)
{
return 0;
}
static inline bool rdseed_long(unsigned long *v)
{
return 0;
}
#endif /* CONFIG_ARCH_RANDOM */
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
#endif /* ASM_X86_ARCHRANDOM_H */
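
A hedged usage sketch, not part of this commit, assuming CONFIG_ARCH_RANDOM=y: both instructions may legitimately fail (returning 0 even after the retry loop), so callers are expected to keep a non-hardware fallback. The example_* name is invented.

static inline bool example_gather_hw_seed(unsigned long *out)
{
	if (arch_get_random_seed_long(out))	/* RDSEED, a single attempt */
		return true;
	if (arch_get_random_long(out))		/* RDRAND, with the retry loop */
		return true;
	return false;	/* no hardware entropy; mix in another source */
}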


@@ -0,0 +1 @@
#include <generated/asm-offsets.h>


@@ -0,0 +1,83 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_FORM_RAW(x) x
# define __ASM_FORM_COMMA(x) x,
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_FORM_RAW(x) #x
# define __ASM_FORM_COMMA(x) " " #x ","
#endif
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
inst##q##__VA_ARGS__)
#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
#define _ASM_SP __ASM_REG(sp)
#define _ASM_BP __ASM_REG(bp)
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . ; \
.popsection
# define _ASM_EXTABLE_EX(from,to) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . + 0x7ffffff0 ; \
.popsection
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
_ASM_PTR (entry); \
.popsection
#else
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - .\n" \
" .popsection\n"
# define _ASM_EXTABLE_EX(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
#endif /* _ASM_X86_ASM_H */
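
The exception-table macros are clearest alongside the canonical pattern they support, so here is a hedged sketch (not part of this commit) modelled on the uaccess helpers: a load that may fault at label 1, a fixup at label 3 that records -EFAULT, and _ASM_EXTABLE tying the two together. example_get_user_u32() is invented.

static inline int example_get_user_u32(unsigned int *val, const unsigned int *uptr)
{
	int err = 0;

	asm volatile("1:	movl %2, %1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3, %0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "+r" (err), "=r" (*val)
		     : "m" (*uptr), "i" (-14 /* -EFAULT */));
	return err;
}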


@@ -0,0 +1,238 @@
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
/**
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}
/**
* atomic_add_return - add integer and return
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
/**
* atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
static inline int atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
}
/**
* __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c;
}
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type short int
*
* Atomically adds 1 to @v
* Returns the new value of @v
*/
static inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif
#endif /* _ASM_X86_ATOMIC_H */
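/*
 * Illustrative sketch, not part of the original header: a typical reference
 * count built on the primitives above.  All names are hypothetical.
 */
struct example_obj {
	atomic_t refcount;
};

extern void example_obj_free(struct example_obj *obj);	/* hypothetical */

static inline void example_obj_get(struct example_obj *obj)
{
	atomic_inc(&obj->refcount);		/* atomic, no barrier implied */
}

static inline void example_obj_put(struct example_obj *obj)
{
	/* atomic_dec_and_test() returns true when the count reaches zero. */
	if (atomic_dec_and_test(&obj->refcount))
		example_obj_free(obj);
}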

View file

@ -0,0 +1,316 @@
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
//#include <asm/cmpxchg.h>
/* A 64-bit atomic type */
typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
ATOMIC64_EXPORT(atomic64_##sym)
#endif
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
asm volatile("call %P[func]" \
: out : [func] "i" (atomic64_##g##_cx8), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
ATOMIC64_DECL_ONE(sym##_386)
ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif
#define alternative_atomic64(f, out, in...) \
__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);
#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT
/**
* atomic64_cmpxchg - cmpxchg atomic64 variable
* @v: pointer to type atomic64_t
* @o: expected value
* @n: new value
*
* Atomically sets @v to @n if it was equal to @o and returns
* the old value.
*/
static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
return cmpxchg64(&v->counter, o, n);
}
/**
* atomic64_xchg - xchg atomic64 variable
* @v: pointer to type atomic64_t
* @n: value to assign
*
* Atomically xchgs the value of @v to @n and returns
* the old value.
*/
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
long long o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;
alternative_atomic64(xchg, "=&A" (o),
"S" (v), "b" (low), "c" (high)
: "memory");
return o;
}
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: value to assign
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic64_set(atomic64_t *v, long long i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
alternative_atomic64(set, /* no output */,
"S" (v), "b" (low), "c" (high)
: "eax", "edx", "memory");
}
/**
* atomic64_read - read atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically reads the value of @v and returns it.
*/
static inline long long atomic64_read(const atomic64_t *v)
{
long long r;
alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
return r;
}
/**
* atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns @i + *@v
*/
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
alternative_atomic64(add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
/*
* Other variants with different arithmetic operators:
*/
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
alternative_atomic64(sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
static inline long long atomic64_inc_return(atomic64_t *v)
{
long long a;
alternative_atomic64(inc_return, "=&A" (a),
"S" (v) : "memory", "ecx");
return a;
}
static inline long long atomic64_dec_return(atomic64_t *v)
{
long long a;
alternative_atomic64(dec_return, "=&A" (a),
"S" (v) : "memory", "ecx");
return a;
}
/**
* atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static inline long long atomic64_add(long long i, atomic64_t *v)
{
__alternative_atomic64(add, add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
/**
* atomic64_sub - subtract integer from atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static inline long long atomic64_sub(long long i, atomic64_t *v)
{
__alternative_atomic64(sub, sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
{
return atomic64_sub_return(i, v) == 0;
}
/**
* atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static inline void atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
}
/**
* atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
}
/**
* atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
return atomic64_dec_return(v) == 0;
}
/**
* atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_inc_and_test(atomic64_t *v)
{
return atomic64_inc_return(v) == 0;
}
/**
* atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic64_add_negative(long long i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
}
/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if the add was done, zero otherwise.
*/
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned low = (unsigned)u;
unsigned high = (unsigned)(u >> 32);
alternative_atomic64(add_unless,
ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
"S" (v) : "memory");
return (int)a;
}
static inline int atomic64_inc_not_zero(atomic64_t *v)
{
int r;
alternative_atomic64(inc_not_zero, "=&a" (r),
"S" (v) : "ecx", "edx", "memory");
return r;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
alternative_atomic64(dec_if_positive, "=&A" (r),
"S" (v) : "ecx", "memory");
return r;
}
#undef alternative_atomic64
#undef __alternative_atomic64
#endif /* _ASM_X86_ATOMIC64_32_H */

View file

@ -0,0 +1,223 @@
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
/* The 64-bit atomic type */
#define ATOMIC64_INIT(i) { (i) }
/**
* atomic64_read - read atomic64 variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
static inline long atomic64_read(const atomic64_t *v)
{
return ACCESS_ONCE((v)->counter);
}
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic64_set(atomic64_t *v, long i)
{
v->counter = i;
}
/**
* atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static inline void atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
}
/**
* atomic64_sub - subtract integer from atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
}
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
}
/**
* atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static inline void atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
}
/**
* atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_inc_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
}
/**
* atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
}
/**
* atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline long atomic64_add_return(long i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
{
return atomic64_add_return(-i, v);
}
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
return cmpxchg(&v->counter, old, new);
}
static inline long atomic64_xchg(atomic64_t *v, long new)
{
return xchg(&v->counter, new);
}
/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
long c, old, dec;
c = atomic64_read(v);
for (;;) {
dec = c - 1;
if (unlikely(dec < 0))
break;
old = atomic64_cmpxchg((v), c, dec);
if (likely(old == c))
break;
c = old;
}
return dec;
}
#endif /* _ASM_X86_ATOMIC64_64_H */
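/*
 * Illustrative sketch, not part of the original header: a 64-bit counter
 * that must not be revived once it has dropped to zero, built from
 * atomic64_add_unless() and atomic64_dec_if_positive().  Names are
 * hypothetical.
 */
static inline int example_counter_get(atomic64_t *cnt)
{
	/* Succeeds (returns non-zero) only if the counter was not zero. */
	return atomic64_add_unless(cnt, 1, 0);
}

static inline long example_counter_put(atomic64_t *cnt)
{
	/* Returns the old value minus one; negative means it was already 0. */
	return atomic64_dec_if_positive(cnt);
}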

View file

@ -0,0 +1,157 @@
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H
#include <asm/alternative.h>
#include <asm/nops.h>
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#define smp_wmb() barrier()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
#if defined(CONFIG_X86_PPRO_FENCE)
/*
* For this option x86 doesn't have a strong TSO memory
* model and we should fall back to full barriers.
*/
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
})
#else /* regular x86 TSO memory ordering */
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
})
#endif
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static __always_inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif /* _ASM_X86_BARRIER_H */
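/*
 * Illustrative sketch, not part of the original header: the message-passing
 * pattern smp_store_release()/smp_load_acquire() are designed for.  The two
 * global variables are hypothetical.
 */
extern int example_payload;
extern int example_flag;

static inline void example_producer(int value)
{
	example_payload = value;
	/* Publish: the payload store is ordered before the flag store. */
	smp_store_release(&example_flag, 1);
}

static inline int example_consumer(void)
{
	/* Acquire: if the flag is seen, the payload store is seen too. */
	if (smp_load_acquire(&example_flag))
		return example_payload;
	return -1;		/* nothing published yet */
}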

View file

@ -0,0 +1,60 @@
#ifndef _ASM_X86_BIOS_EBDA_H
#define _ASM_X86_BIOS_EBDA_H
#include <asm/io.h>
/*
* Returns physical address of EBDA. Returns 0 if there is no EBDA.
*/
static inline unsigned int get_bios_ebda(void)
{
/*
* There is a real-mode segmented pointer pointing to the
* 4K EBDA area at 0x40E.
*/
unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
address <<= 4;
return address; /* 0 means none */
}
/*
* Return the sanitized length of the EBDA in bytes, if it exists.
*/
static inline unsigned int get_bios_ebda_length(void)
{
unsigned int address;
unsigned int length;
address = get_bios_ebda();
if (!address)
return 0;
/* EBDA length is byte 0 of the EBDA (stored in KiB) */
length = *(unsigned char *)phys_to_virt(address);
length <<= 10;
/* Trim the length if it extends beyond 640KiB */
length = min_t(unsigned int, (640 * 1024) - address, length);
return length;
}
void reserve_ebda_region(void);
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
/*
* This is obviously not a great place for this, but we want to be
* able to scatter it around anywhere in the kernel.
*/
void check_for_bios_corruption(void);
void start_periodic_check_for_corruption(void);
#else
static inline void check_for_bios_corruption(void)
{
}
static inline void start_periodic_check_for_corruption(void)
{
}
#endif
#endif /* _ASM_X86_BIOS_EBDA_H */
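/*
 * Illustrative sketch, not part of the original header: reserving the EBDA
 * range during early boot.  Assumes memblock_reserve() from
 * <linux/memblock.h>; error handling is elided.
 */
static inline void example_reserve_ebda(void)
{
	unsigned int start = get_bios_ebda();
	unsigned int len = get_bios_ebda_length();

	if (start && len)
		memblock_reserve(start, len);
}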

View file

@ -0,0 +1,509 @@
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* Note: inlines with more than a single statement should be marked
* __always_inline to avoid problems with older gcc's inlining heuristics.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif
#define BIT_64(n) (U64_C(1) << (n))
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
#define ADDR BITOP_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
* Note: there are no guarantees that this function will not be reordered
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr))
: "memory");
} else {
asm volatile(LOCK_PREFIX "bts %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
}
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btr %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/*
* clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* __clear_bit() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btc %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
/**
* test_and_set_bit_lock - Set a bit and return its old value for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This is the same as test_and_set_bit on x86.
*/
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
asm("bts %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*
* Note: the operation is performed atomically with respect to
* the local CPU, but not other CPUs. Portable code should not
* rely on this behaviour.
* KVM relies on this behaviour on x86 for modifying memory that is also
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr) : "memory");
return oldbit;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
int oldbit;
asm volatile("bt %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
#define test_bit(nr, addr) \
(__builtin_constant_p((nr)) \
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
/**
* ffz - find first zero bit in word
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline unsigned long ffz(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
/*
* __fls: find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
#undef ADDR
#ifdef __KERNEL__
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
* routines, therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static inline int ffs(int x)
{
int r;
#ifdef CONFIG_X86_64
/*
* AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before, except that the
* top 32 bits will be cleared.
*
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
asm("bsfl %1,%0"
: "=r" (r)
: "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
/**
* fls - find last set bit in word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffs, but returns the position of the most significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static inline int fls(int x)
{
int r;
#ifdef CONFIG_X86_64
/*
* AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before, except that the
* top 32 bits will be cleared.
*
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
asm("bsrl %1,%0"
: "=r" (r)
: "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
#else
asm("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
/**
* fls64 - find last set bit in a 64-bit word
* @x: the word to search
*
* This is defined in a similar way as the libc and compiler builtin
* ffsll, but returns the position of the most significant set bit.
*
* fls64(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 64.
*/
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
int bitpos = -1;
/*
* AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before.
*/
asm("bsrq %1,%q0"
: "+r" (bitpos)
: "rm" (x));
return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */
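/*
 * Illustrative sketch, not part of the original header: driving a bitmap of
 * pending-work flags with the operations above.  The callers and flag
 * numbers are hypothetical.
 */
static inline void example_mark_pending(unsigned long *flags, int nr)
{
	set_bit(nr, flags);		/* atomic, safe against other CPUs */
}

static inline int example_consume_pending(unsigned long *flags, int nr)
{
	/* Atomically clear bit @nr and report whether it was set. */
	return test_and_clear_bit(nr, flags);
}

static inline unsigned long example_first_pending(const unsigned long *flags)
{
	/* Caller must check *flags != 0 first: __ffs() is undefined for 0. */
	return __ffs(*flags);
}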

View file

@ -0,0 +1,40 @@
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H
#include <asm/pgtable_types.h>
#include <uapi/asm/boot.h>
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
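/*
 * Worked example, not part of the original header: with
 * CONFIG_PHYSICAL_START = 0x1000000 and CONFIG_PHYSICAL_ALIGN = 0x200000,
 *
 *	(0x1000000 + 0x1fffff) & ~0x1fffff == 0x1000000
 *	(0x1100000 + 0x1fffff) & ~0x1fffff == 0x1200000
 *
 * i.e. an already-aligned start address is kept as-is, anything else is
 * rounded up to the next CONFIG_PHYSICAL_ALIGN boundary.
 */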
/* Minimum kernel alignment, as a power of two */
#ifdef CONFIG_X86_64
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
#else
#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER)
#endif
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
(CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
#error "Invalid value for CONFIG_PHYSICAL_ALIGN"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
#else /* !CONFIG_KERNEL_BZIP2 */
#define BOOT_HEAP_SIZE 0x8000
#endif /* !CONFIG_KERNEL_BZIP2 */
#ifdef CONFIG_X86_64
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_STACK_SIZE 0x1000
#endif
#endif /* _ASM_X86_BOOT_H */

View file

@ -0,0 +1,54 @@
#ifndef _ASM_X86_BOOTPARAM_UTILS_H
#define _ASM_X86_BOOTPARAM_UTILS_H
#include <asm/bootparam.h>
/*
* This file is included from multiple environments. Do not
* add completing #includes to make it standalone.
*/
/*
* Deal with bootloaders which fail to initialize unknown fields in
* boot_params to zero. The fields in this list are taken from
* analysis of kexec-tools; if other broken bootloaders initialize a
* different set of fields we will need to figure out how to disambiguate.
*
* Note: efi_info is commonly left uninitialized, but that field has a
* private magic, so it is better to leave it unchanged.
*/
static void sanitize_boot_params(struct boot_params *boot_params)
{
/*
* IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
* this field. The purpose of this field is to guarantee
* compliance with the x86 boot spec located in
* Documentation/x86/boot.txt . That spec says that the
* *whole* structure should be cleared, after which only the
* portion defined by struct setup_header (boot_params->hdr)
* should be copied in.
*
* If you're having an issue because the sentinel is set, you
* need to change the whole structure to be cleared, not this
* (or any other) individual field, or you will soon have
* problems again.
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
(char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
memset(&boot_params->_pad7[0], 0,
(char *)&boot_params->edd_mbr_sig_buffer[0] -
(char *)&boot_params->_pad7[0]);
memset(&boot_params->_pad8[0], 0,
(char *)&boot_params->eddbuf[0] -
(char *)&boot_params->_pad8[0]);
memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
}
}
#endif /* _ASM_X86_BOOTPARAM_UTILS_H */

View file

@ -0,0 +1,37 @@
#ifndef _ASM_X86_BUG_H
#define _ASM_X86_BUG_H
#define HAVE_ARCH_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifdef CONFIG_X86_32
# define __BUG_C0 "2:\t.long 1b, %c0\n"
#else
# define __BUG_C0 "2:\t.long 1b - 2b, %c0 - 2b\n"
#endif
#define BUG() \
do { \
asm volatile("1:\tud2\n" \
".pushsection __bug_table,\"a\"\n" \
__BUG_C0 \
"\t.word %c1, 0\n" \
"\t.org 2b+%c2\n" \
".popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (sizeof(struct bug_entry))); \
unreachable(); \
} while (0)
#else
#define BUG() \
do { \
asm volatile("ud2"); \
unreachable(); \
} while (0)
#endif
#include <asm-generic/bug.h>
#endif /* _ASM_X86_BUG_H */
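/*
 * Illustrative sketch, not part of the original header: BUG() is for
 * invariants that must never be violated; it emits ud2 and does not return.
 * The function below is hypothetical.
 */
static inline void example_check_state(int state)
{
	if (state < 0)
		BUG();	/* records file/line when CONFIG_DEBUG_BUGVERBOSE is set */
}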

View file

@ -0,0 +1,12 @@
#ifndef _ASM_X86_BUGS_H
#define _ASM_X86_BUGS_H
extern void check_bugs(void);
#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
int ppro_with_ram_bug(void);
#else
static inline int ppro_with_ram_bug(void) { return 0; }
#endif
#endif /* _ASM_X86_BUGS_H */

View file

@ -0,0 +1,23 @@
#ifndef _ASM_X86_CACHE_H
#define _ASM_X86_CACHE_H
#include <linux/linkage.h>
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp \
__attribute__((__aligned__(INTERNODE_CACHE_BYTES))) \
__page_aligned_data
#endif
#endif
#endif /* _ASM_X86_CACHE_H */

View file

@ -0,0 +1,166 @@
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#ifdef CONFIG_X86_PAT
/*
* X86 PAT uses page flags WC and Uncached together to keep track of
* memory type of pages that have backing page struct. X86 PAT supports 3
* different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
* _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
* been changed from its default (value of -1 used to denote this).
* Note we do not support _PAGE_CACHE_UC here.
*/
#define _PGMT_DEFAULT 0
#define _PGMT_WC (1UL << PG_arch_1)
#define _PGMT_UC_MINUS (1UL << PG_uncached)
#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
static inline unsigned long get_page_memtype(struct page *pg)
{
unsigned long pg_flags = pg->flags & _PGMT_MASK;
if (pg_flags == _PGMT_DEFAULT)
return -1;
else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
else if (pg_flags == _PGMT_UC_MINUS)
return _PAGE_CACHE_UC_MINUS;
else
return _PAGE_CACHE_WB;
}
static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
unsigned long memtype_flags = _PGMT_DEFAULT;
unsigned long old_flags;
unsigned long new_flags;
switch (memtype) {
case _PAGE_CACHE_WC:
memtype_flags = _PGMT_WC;
break;
case _PAGE_CACHE_UC_MINUS:
memtype_flags = _PGMT_UC_MINUS;
break;
case _PAGE_CACHE_WB:
memtype_flags = _PGMT_WB;
break;
}
do {
old_flags = pg->flags;
new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
/*
* The set_memory_* API can be used to change various attributes of a virtual
* address range. The attributes include:
* Cacheability : UnCached, WriteCombining, WriteBack
* Executability : eXecutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
*
* Within a category, the attributes are mutually exclusive.
*
* The implementation of this API will take care of various aspects that
* are associated with changing such attributes, such as:
* - Flushing TLBs
* - Flushing CPU caches
* - Making sure aliases of the memory behind the mapping don't violate
* coherency rules as defined by the CPU in the system.
*
* What this API does not do:
* - Provide exclusion between various callers - including callers that
* operate on other mappings of the same physical page
* - Restore default attributes when a page is freed
* - Guarantee that mappings other than the requested one are
* in any state, other than that these do not violate rules for
* the CPU you have. Do not depend on any effects on other mappings,
* CPUs other than the one you have may have more relaxed rules.
* The caller is required to take care of these.
*/
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
* These functions operate ONLY on the 1:1 kernel mapping of the
* memory that the struct page represents, and internally just
* call the set_memory_* function. See the description of the
* set_memory_* function for more details on conventions.
*
* These APIs should be considered *deprecated* and are likely going to
* be removed in the future.
* The reason for this is the implicit operation on the 1:1 mapping only,
* making this not a generally useful API.
*
* Specifically, many users of the old APIs had a virtual address,
* called virt_to_page() or vmalloc_to_page() on that address to
* get a struct page* that the old API required.
* To convert these cases, use set_memory_*() on the original
* virtual address, do not use these functions.
*/
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
void clflush_cache_range(void *addr, unsigned int size);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
return 0;
}
#endif
#endif /* _ASM_X86_CACHEFLUSH_H */
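/*
 * Illustrative sketch, not part of the original header: write-protecting a
 * buffer in the kernel 1:1 mapping with the set_memory_* API described
 * above.  Assumes PAGE_SIZE/PAGE_SHIFT from <asm/page.h>; the function name
 * is hypothetical.
 */
static inline int example_protect_buffer(unsigned long addr, unsigned long size)
{
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int ret;

	ret = set_memory_ro(addr, numpages);	/* drop write permission */
	if (ret)
		return ret;

	return set_memory_nx(addr, numpages);	/* and drop execute permission */
}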

View file

@ -0,0 +1,70 @@
/*
* Derived from include/asm-powerpc/iommu.h
*
* Copyright IBM Corporation, 2006-2007
*
* Author: Jon Mason <jdmason@us.ibm.com>
* Author: Muli Ben-Yehuda <muli@il.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_CALGARY_H
#define _ASM_X86_CALGARY_H
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <asm/types.h>
struct iommu_table {
struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
unsigned long it_base; /* mapped address of tce table */
unsigned long it_hint; /* Hint for next alloc */
unsigned long *it_map; /* A simple allocation bitmap for now */
void __iomem *bbar; /* Bridge BAR */
u64 tar_val; /* Table Address Register */
struct timer_list watchdog_timer;
spinlock_t it_lock; /* Protects it_map */
unsigned int it_size; /* Size of iommu table in entries */
unsigned char it_busno; /* Bus number this table belongs to */
};
struct cal_chipset_ops {
void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
void (*tce_cache_blast)(struct iommu_table *tbl);
void (*dump_error_regs)(struct iommu_table *tbl);
};
#define TCE_TABLE_SIZE_UNSPECIFIED ~0
#define TCE_TABLE_SIZE_64K 0
#define TCE_TABLE_SIZE_128K 1
#define TCE_TABLE_SIZE_256K 2
#define TCE_TABLE_SIZE_512K 3
#define TCE_TABLE_SIZE_1M 4
#define TCE_TABLE_SIZE_2M 5
#define TCE_TABLE_SIZE_4M 6
#define TCE_TABLE_SIZE_8M 7
extern int use_calgary;
#ifdef CONFIG_CALGARY_IOMMU
extern int detect_calgary(void);
#else
static inline int detect_calgary(void) { return -ENODEV; }
#endif
#endif /* _ASM_X86_CALGARY_H */

View file

@ -0,0 +1,248 @@
/*
x86 function call convention, 64-bit:
-------------------------------------
arguments | callee-saved | extra caller-saved | return
[callee-clobbered] | | [callee-clobbered] |
---------------------------------------------------------------------------
rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**]
( rsp is obviously invariant across normal function calls. (gcc can 'merge'
functions when it sees tail-call optimization possibilities) rflags is
clobbered. Leftover arguments are passed over the stack frame.)
[*] In the frame-pointers case rbp is fixed to the stack frame.
[**] for struct return values wider than 64 bits the return convention is a
bit more complex: up to 128 bits width we return small structures
straight in rax, rdx. For structures larger than that (3 words or
larger) the caller puts a pointer to an on-stack return struct
[allocated in the caller's stack frame] into the first argument - i.e.
into rdi. All other arguments shift up by one in this case.
Fortunately this case is rare in the kernel.
For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:
x86 function calling convention, 32-bit:
----------------------------------------
arguments | callee-saved | extra caller-saved | return
[callee-clobbered] | | [callee-clobbered] |
-------------------------------------------------------------------------
eax edx ecx | ebx edi esi ebp [*] | <none> | eax, edx [**]
( here too esp is obviously invariant across normal function calls. eflags
is clobbered. Leftover arguments are passed over the stack frame. )
[*] In the frame-pointers case ebp is fixed to the stack frame.
[**] We build with -freg-struct-return, which on 32-bit means similar
semantics as on 64-bit: edx can be used for a second return value
(i.e. covering integer and structure sizes up to 64 bits) - after that
it gets more complex and more expensive: 3-word or larger struct returns
get done in the caller's frame and the pointer to the return struct goes
into regparm0, i.e. eax - the other arguments shift up and the
function's register parameters degenerate to regparm=2 in essence.
*/
#include <asm/dwarf2.h>
#ifdef CONFIG_X86_64
/*
* 64-bit system call stack frame layout defines and helpers,
* for assembly code:
*/
#define R15 0
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
/* arguments: interrupts/non tracing syscalls only save up to here: */
#define R11 48
#define R10 56
#define R9 64
#define R8 72
#define RAX 80
#define RCX 88
#define RDX 96
#define RSI 104
#define RDI 112
#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall: */
#define RIP 128
#define CS 136
#define EFLAGS 144
#define RSP 152
#define SS 160
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
subq $9*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 9*8+\addskip
movq_cfi rdi, 8*8
movq_cfi rsi, 7*8
movq_cfi rdx, 6*8
.if \save_rcx
movq_cfi rcx, 5*8
.endif
.if \rax_enosys
movq $-ENOSYS, 4*8(%rsp)
.else
movq_cfi rax, 4*8
.endif
.if \save_r891011
movq_cfi r8, 3*8
movq_cfi r9, 2*8
movq_cfi r10, 1*8
movq_cfi r11, 0*8
.endif
.endm
#define ARG_SKIP (9*8)
.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
rstor_r8910=1, rstor_rdx=1
.if \rstor_r11
movq_cfi_restore 0*8, r11
.endif
.if \rstor_r8910
movq_cfi_restore 1*8, r10
movq_cfi_restore 2*8, r9
movq_cfi_restore 3*8, r8
.endif
.if \rstor_rax
movq_cfi_restore 4*8, rax
.endif
.if \rstor_rcx
movq_cfi_restore 5*8, rcx
.endif
.if \rstor_rdx
movq_cfi_restore 6*8, rdx
.endif
movq_cfi_restore 7*8, rsi
movq_cfi_restore 8*8, rdi
.if ARG_SKIP+\addskip > 0
addq $ARG_SKIP+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
.endif
.endm
.macro LOAD_ARGS offset, skiprax=0
movq \offset(%rsp), %r11
movq \offset+8(%rsp), %r10
movq \offset+16(%rsp), %r9
movq \offset+24(%rsp), %r8
movq \offset+40(%rsp), %rcx
movq \offset+48(%rsp), %rdx
movq \offset+56(%rsp), %rsi
movq \offset+64(%rsp), %rdi
.if \skiprax
.else
movq \offset+72(%rsp), %rax
.endif
.endm
#define REST_SKIP (6*8)
.macro SAVE_REST
subq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq_cfi rbx, 5*8
movq_cfi rbp, 4*8
movq_cfi r12, 3*8
movq_cfi r13, 2*8
movq_cfi r14, 1*8
movq_cfi r15, 0*8
.endm
.macro RESTORE_REST
movq_cfi_restore 0*8, r15
movq_cfi_restore 1*8, r14
movq_cfi_restore 2*8, r13
movq_cfi_restore 3*8, r12
movq_cfi_restore 4*8, rbp
movq_cfi_restore 5*8, rbx
addq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm
.macro SAVE_ALL
SAVE_ARGS
SAVE_REST
.endm
.macro RESTORE_ALL addskip=0
RESTORE_REST
RESTORE_ARGS 1, \addskip
.endm
.macro icebp
.byte 0xf1
.endm
#else /* CONFIG_X86_64 */
/*
* For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
* are different from the entry_32.S versions in not changing the segment
* registers. So only suitable for in kernel use, not when transitioning
* from or to user space. The resulting stack frame is not a standard
* pt_regs frame. The main use case is calling C code from assembler
* when all the registers need to be preserved.
*/
.macro SAVE_ALL
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
.endm
.macro RESTORE_ALL
popl_cfi %ebx
CFI_RESTORE ebx
popl_cfi %ecx
CFI_RESTORE ecx
popl_cfi %edx
CFI_RESTORE edx
popl_cfi %esi
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
.endm
#endif /* CONFIG_X86_64 */

View file

@ -0,0 +1,6 @@
#ifndef _ASM_CE4100_H_
#define _ASM_CE4100_H_
int ce4100_pci_init(void);
#endif

View file

@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include <asm/checksum_32.h>
#else
# include <asm/checksum_64.h>
#endif

View file

@ -0,0 +1,202 @@
#ifndef _ASM_X86_CHECKSUM_32_H
#define _ASM_X86_CHECKSUM_32_H
#include <linux/in6.h>
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline __wsum csum_partial_copy_from_user(const void __user *src,
void *dst,
int len, __wsum sum,
int *err_ptr)
{
__wsum ret;
might_sleep();
stac();
ret = csum_partial_copy_generic((__force void *)src, dst,
len, sum, err_ptr, NULL);
clac();
return ret;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
asm volatile("movl (%1), %0 ;\n"
"subl $4, %2 ;\n"
"jbe 2f ;\n"
"addl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0;\n"
"1: adcl 16(%1), %0 ;\n"
"lea 4(%1), %1 ;\n"
"decl %2 ;\n"
"jne 1b ;\n"
"adcl $0, %0 ;\n"
"movl %0, %2 ;\n"
"shrl $16, %0 ;\n"
"addw %w2, %w0 ;\n"
"adcl $0, %0 ;\n"
"notl %0 ;\n"
"2: ;\n"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum sum)
{
asm("addl %1, %0 ;\n"
"adcl $0xffff, %0 ;\n"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
asm("addl %1, %0 ;\n"
"adcl %2, %0 ;\n"
"adcl %3, %0 ;\n"
"adcl $0, %0 ;\n"
: "=r" (sum)
: "g" (daddr), "g"(saddr),
"g" ((len + proto) << 8), "0" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
asm("addl 0(%1), %0 ;\n"
"adcl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0 ;\n"
"adcl 0(%2), %0 ;\n"
"adcl 4(%2), %0 ;\n"
"adcl 8(%2), %0 ;\n"
"adcl 12(%2), %0 ;\n"
"adcl %3, %0 ;\n"
"adcl %4, %0 ;\n"
"adcl $0, %0 ;\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "memory");
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
{
__wsum ret;
might_sleep();
if (access_ok(VERIFY_WRITE, dst, len)) {
stac();
ret = csum_partial_copy_generic(src, (__force void *)dst,
len, sum, NULL, err_ptr);
clac();
return ret;
}
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif /* _ASM_X86_CHECKSUM_32_H */
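/*
 * A minimal sketch, not part of this header: the 32-bit -> 16-bit
 * one's-complement fold that csum_fold() performs in inline assembly above,
 * restated in portable C.  The name fold16_example is invented here.
 */
static inline unsigned short fold16_example(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the upper half back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible final carry */
	return (unsigned short)~sum;		/* invert, as csum_fold() does */
}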

View file

@ -0,0 +1,198 @@
#ifndef _ASM_X86_CHECKSUM_64_H
#define _ASM_X86_CHECKSUM_64_H
/*
* Checksums for x86-64
* Copyright 2002 by Andi Kleen, SuSE Labs
* with some code from asm-x86/checksum.h
*/
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
*
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
static inline __sum16 csum_fold(__wsum sum)
{
asm(" addl %1,%0\n"
" adcl $0xffff,%0"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
/**
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
* iph: ipv4 header
* ihl: length of header / 4
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
asm(" movl (%1), %0\n"
" subl $4, %2\n"
" jbe 2f\n"
" addl 4(%1), %0\n"
" adcl 8(%1), %0\n"
" adcl 12(%1), %0\n"
"1: adcl 16(%1), %0\n"
" lea 4(%1), %1\n"
" decl %2\n"
" jne 1b\n"
" adcl $0, %0\n"
" movl %0, %2\n"
" shrl $16, %0\n"
" addw %w2, %w0\n"
" adcl $0, %0\n"
" notl %0\n"
"2:"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
/**
* csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the pseudo header checksum of the input data. The result is
* 32bit unfolded.
*/
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
asm(" addl %1, %0\n"
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
: "=r" (sum)
: "g" (daddr), "g" (saddr),
"g" ((len + proto)<<8), "0" (sum));
return sum;
}
/**
* csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the 16bit pseudo header checksum of the input data, already
* complemented and ready to be filled in.
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/**
* csum_partial - Compute an internet checksum.
* @buff: buffer to be checksummed
* @len: length of buffer.
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the 32bit unfolded internet checksum of the buffer.
* It needs to be csum_fold()'ed before being filled in.
* buff should be aligned to a 64bit boundary if possible.
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
#define HAVE_CSUM_COPY_USER 1
/* Do not call this directly. Use the wrappers below */
extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/* Old names. To be removed. */
#define csum_and_copy_to_user csum_partial_copy_to_user
#define csum_and_copy_from_user csum_partial_copy_from_user
/**
* ip_compute_csum - Compute a 16bit IP checksum.
* @buff: buffer address.
* @len: length of buffer.
*
* Returns the 16bit folded/inverted checksum of the passed buffer.
* Ready to fill in.
*/
extern __sum16 ip_compute_csum(const void *buff, int len);
/**
* csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: protocol of packet
* @sum: initial sum (32bit unfolded) to be added in
*
* Computes an IPv6 pseudo header checksum. This sum is added into the
* checksum of UDP/TCP packets and covers the addresses, length and protocol.
* Returns the unfolded 32bit checksum.
*/
struct in6_addr;
#define _HAVE_ARCH_IPV6_CSUM 1
extern __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
__u32 len, unsigned short proto, __wsum sum);
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
: "0" (a), "rm" (b));
return a;
}
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
return (__force __wsum)add32_with_carry((__force unsigned)csum,
(__force unsigned)addend);
}
#endif /* _ASM_X86_CHECKSUM_64_H */
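/*
 * A minimal sketch, not part of this header: a portable C equivalent of
 * add32_with_carry()/csum_add() above.  The carry out of the 32-bit add is
 * wrapped back in (end-around carry), which is what keeps one's-complement
 * checksums composable.  The name add32_end_around_example is invented here.
 */
static inline unsigned int add32_end_around_example(unsigned int a, unsigned int b)
{
	unsigned long long t = (unsigned long long)a + b;

	return (unsigned int)(t + (t >> 32));	/* fold the carry-out back in */
}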

View file

@ -0,0 +1,15 @@
/* x86-specific clocksource additions */
#ifndef _ASM_X86_CLOCKSOURCE_H
#define _ASM_X86_CLOCKSOURCE_H
#define VCLOCK_NONE 0 /* No vDSO clock available. */
#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
struct arch_clocksource_data {
int vclock_mode;
};
#endif /* _ASM_X86_CLOCKSOURCE_H */
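/*
 * A minimal sketch, not part of this header: how a clocksource driver might
 * tag itself for the vDSO via arch_clocksource_data.  Assumes the struct
 * clocksource definition from <linux/clocksource.h>; the name example_cs and
 * the "example-tsc" string are invented here.
 */
static struct clocksource example_cs = {
	.name		= "example-tsc",
	.archdata	= { .vclock_mode = VCLOCK_TSC },	/* let the vDSO read the TSC */
};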

View file

@ -0,0 +1,6 @@
#ifndef _ASM_X86_CMDLINE_H
#define _ASM_X86_CMDLINE_H
int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
#endif /* _ASM_X86_CMDLINE_H */
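/*
 * A minimal sketch, not part of this header: checking for a boot flag with
 * cmdline_find_option_bool(), which returns a non-zero position when the
 * option is present.  boot_command_line is the kernel's saved command line
 * (assumed to be visible via <linux/init.h>); the "noexample" option name and
 * the function name are invented here.
 */
static inline bool example_flag_on_cmdline(void)
{
	return cmdline_find_option_bool(boot_command_line, "noexample") > 0;
}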

View file

@ -0,0 +1,233 @@
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H
#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
#define __HAVE_ARCH_CMPXCHG 1
/*
* Non-existent functions to indicate usage errors at link time
* (or at compile time if the compiler implements __compiletime_error()).
*/
extern void __xchg_wrong_size(void)
__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
__compiletime_error("Bad argument size for add");
/*
* Constants for operation sizes. On 32-bit, the 64-bit size is set to
* -1 because sizeof will never return -1, thereby making those switch
* case statements guaranteed dead code which the compiler will
* eliminate, and allowing the "missing symbol in the default case" to
* indicate a usage error.
*/
#define __X86_CASE_B 1
#define __X86_CASE_W 2
#define __X86_CASE_L 4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q 8
#else
#define __X86_CASE_Q -1 /* sizeof will never return -1 */
#endif
/*
* An exchange-type operation, which takes a value and a pointer, and
* returns the old value.
*/
#define __xchg_op(ptr, arg, op, lock) \
({ \
__typeof__ (*(ptr)) __ret = (arg); \
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock #op "b %b0, %1\n" \
: "+q" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_W: \
asm volatile (lock #op "w %w0, %1\n" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_L: \
asm volatile (lock #op "l %0, %1\n" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_Q: \
asm volatile (lock #op "q %q0, %1\n" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
default: \
__ ## op ## _wrong_size(); \
} \
__ret; \
})
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
* Since this is generally used to protect other memory information, we
* use "asm volatile" and "memory" clobbers to prevent gcc from moving
* information around.
*/
#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case __X86_CASE_B: \
{ \
volatile u8 *__ptr = (volatile u8 *)(ptr); \
asm volatile(lock "cmpxchgb %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "q" (__new), "0" (__old) \
: "memory"); \
break; \
} \
case __X86_CASE_W: \
{ \
volatile u16 *__ptr = (volatile u16 *)(ptr); \
asm volatile(lock "cmpxchgw %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
break; \
} \
case __X86_CASE_L: \
{ \
volatile u32 *__ptr = (volatile u32 *)(ptr); \
asm volatile(lock "cmpxchgl %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
break; \
} \
case __X86_CASE_Q: \
{ \
volatile u64 *__ptr = (volatile u64 *)(ptr); \
asm volatile(lock "cmpxchgq %2,%1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif
#define cmpxchg(ptr, old, new) \
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
#define sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
/*
* xadd() adds "inc" to "*ptr" and atomically returns the previous
* value of "*ptr".
*
* xadd() is locked when multiple CPUs are online
* xadd_sync() is always locked
* xadd_local() is never locked
*/
#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
#define __add(ptr, inc, lock) \
({ \
__typeof__ (*(ptr)) __ret = (inc); \
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock "addb %b1, %0\n" \
: "+m" (*(ptr)) : "qi" (inc) \
: "memory", "cc"); \
break; \
case __X86_CASE_W: \
asm volatile (lock "addw %w1, %0\n" \
: "+m" (*(ptr)) : "ri" (inc) \
: "memory", "cc"); \
break; \
case __X86_CASE_L: \
asm volatile (lock "addl %1, %0\n" \
: "+m" (*(ptr)) : "ri" (inc) \
: "memory", "cc"); \
break; \
case __X86_CASE_Q: \
asm volatile (lock "addq %1, %0\n" \
: "+m" (*(ptr)) : "ri" (inc) \
: "memory", "cc"); \
break; \
default: \
__add_wrong_size(); \
} \
__ret; \
})
/*
* add_*() adds "inc" to "*ptr"
*
* __add() takes a lock prefix
* add_smp() is locked when multiple CPUs are online
* add_sync() is always locked
*/
#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
({ \
bool __ret; \
__typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \
__typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \
VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \
asm volatile(pfx "cmpxchg%c4b %2; sete %0" \
: "=a" (__ret), "+d" (__old2), \
"+m" (*(p1)), "+m" (*(p2)) \
: "i" (2 * sizeof(long)), "a" (__old1), \
"b" (__new1), "c" (__new2)); \
__ret; \
})
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
#endif /* ASM_X86_CMPXCHG_H */
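/*
 * A minimal sketch, not part of this header: the usual compare-and-swap
 * retry loop built on the cmpxchg() macro above.  It atomically adds @inc to
 * *@p and returns the previous value; the function name is invented here
 * (the xadd() helpers above already do this more directly).
 */
static inline unsigned long example_fetch_add(unsigned long *p, unsigned long inc)
{
	unsigned long old, seen;

	do {
		old = *p;				/* snapshot the current value */
		seen = cmpxchg(p, old, old + inc);	/* try to install old + inc */
	} while (seen != old);				/* lost a race: retry */

	return old;
}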

View file

@ -0,0 +1,114 @@
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H
/*
* Note: if you use set64_bit(), __cmpxchg64(), or their variants,
* you need to test for the feature in boot_cpu_data.
*/
/*
* CMPXCHG8B only writes to the target if we had the previous
* value in registers, otherwise it acts as a read and gives us the
* "new previous" value. That is why there is a loop. Preloading
* EDX:EAX is a performance optimization: in the common case it means
* we need only one locked operation.
*
* A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
* least an FPU save and/or %cr0.ts manipulation.
*
* cmpxchg8b must be used with the lock prefix here to allow the
* instruction to be executed atomically. We need the reader side to
* see a coherent 64bit value.
*/
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
u32 low = value;
u32 high = value >> 32;
u64 prev = *ptr;
asm volatile("\n1:\t"
LOCK_PREFIX "cmpxchg8b %0\n\t"
"jnz 1b"
: "=m" (*ptr), "+A" (prev)
: "b" (low), "c" (high)
: "memory");
}
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#endif
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
u64 prev;
asm volatile(LOCK_PREFIX "cmpxchg8b %1"
: "=A" (prev),
"+m" (*ptr)
: "b" ((u32)new),
"c" ((u32)(new >> 32)),
"0" (old)
: "memory");
return prev;
}
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
u64 prev;
asm volatile("cmpxchg8b %1"
: "=A" (prev),
"+m" (*ptr)
: "b" ((u32)new),
"c" ((u32)(new >> 32)),
"0" (old)
: "memory");
return prev;
}
#ifndef CONFIG_X86_CMPXCHG64
/*
* When building a kernel capable of running on the 80386 and 80486, it may
* be necessary to simulate cmpxchg8b on those CPUs.
*/
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
alternative_io(LOCK_PREFIX_HERE \
"call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
"S" ((ptr)), "0" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
__ret; })
#define cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
alternative_io("call cmpxchg8b_emu", \
"cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
"S" ((ptr)), "0" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
__ret; })
#endif
#define system_has_cmpxchg_double() cpu_has_cx8
#endif /* _ASM_X86_CMPXCHG_32_H */
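/*
 * A minimal sketch, not part of this header: the cmpxchg8b loop that
 * set_64bit() open-codes above, restated on top of __cmpxchg64().  Keep
 * retrying until the value we last observed is still the one in memory when
 * our store lands.  The function name is invented here.
 */
static inline void example_store_64bit(volatile u64 *ptr, u64 value)
{
	u64 old = *ptr;

	for (;;) {
		u64 seen = __cmpxchg64(ptr, old, value);

		if (seen == old)	/* our store took effect */
			break;
		old = seen;		/* somebody else changed it: retry */
	}
}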

View file

@ -0,0 +1,23 @@
#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H
static inline void set_64bit(volatile u64 *ptr, u64 val)
{
*ptr = val;
}
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#define system_has_cmpxchg_double() cpu_has_cx16
#endif /* _ASM_X86_CMPXCHG_64_H */

View file

@ -0,0 +1,324 @@
#ifndef _ASM_X86_COMPAT_H
#define _ASM_X86_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/user32.h>
#include <asm/unistd.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0"
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_timer_t;
typedef s32 compat_key_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 __attribute__((aligned(4))) compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 __attribute__((aligned(4))) compat_u64;
typedef u32 compat_uptr_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_flags;
int f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
} compat_sigval_t;
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[128/sizeof(int) - 3];
/* kill() */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
int _overrun_incr; /* amount to add to overrun */
} _timer;
/* POSIX.1b signals */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
unsigned int _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGCHLD (x32 version) */
struct {
unsigned int _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_s64 _utime;
compat_s64 _stime;
} _sigchld_x32;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
unsigned int _addr; /* faulting insn/memory ref. */
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
struct {
unsigned int _call_addr; /* calling insn */
int _syscall; /* triggering system call number */
unsigned int _arch; /* AUDIT_ARCH_* of syscall */
} _sigsys;
} _sifields;
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
#ifdef CONFIG_X86_X32_ABI
typedef struct user_regs_struct compat_elf_gregset_t;
#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
#define SET_PR_FPVALID(S,V) \
do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
while (0)
#define COMPAT_USE_64BIT_TIME \
(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
#else
typedef struct user_regs_struct32 compat_elf_gregset_t;
#endif
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
static inline void __user *arch_compat_alloc_user_space(long len)
{
compat_uptr_t sp;
if (test_thread_flag(TIF_IA32)) {
sp = task_pt_regs(current)->sp;
} else {
/* -128 for the x32 ABI redzone */
sp = this_cpu_read(old_rsp) - 128;
}
return (void __user *)round_down(sp - len, 16);
}
static inline bool is_x32_task(void)
{
#ifdef CONFIG_X86_X32_ABI
if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
return true;
#endif
return false;
}
static inline bool is_compat_task(void)
{
return is_ia32_task() || is_x32_task();
}
#endif /* _ASM_X86_COMPAT_H */
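/*
 * A minimal sketch, not part of this header: how a 32-bit user pointer
 * travels through the helpers above in compat code.  The struct and
 * function names are invented here.
 */
struct example_compat_req {
	compat_uptr_t	buf;	/* 32-bit user pointer from a compat task */
	compat_size_t	len;
};

static inline void __user *example_req_buffer(const struct example_compat_req *req)
{
	return compat_ptr(req->buf);	/* widen to a native __user pointer */
}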

View file

@ -0,0 +1,10 @@
#ifndef _ASM_X86_CONTEXT_TRACKING_H
#define _ASM_X86_CONTEXT_TRACKING_H
#ifdef CONFIG_CONTEXT_TRACKING
# define SCHEDULE_USER call schedule_user
#else
# define SCHEDULE_USER call schedule
#endif
#endif

View file

@ -0,0 +1,41 @@
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
#ifdef CONFIG_SMP
extern void prefill_possible_map(void);
#else /* CONFIG_SMP */
static inline void prefill_possible_map(void) {}
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0
#endif /* CONFIG_SMP */
struct x86_cpu {
struct cpu cpu;
};
#ifdef CONFIG_HOTPLUG_CPU
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
extern void start_cpu0(void);
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
extern int _debug_hotplug_cpu(int cpu, int action);
#endif
#endif
DECLARE_PER_CPU(int, cpu_state);
int mwait_usable(const struct cpuinfo_x86 *);
#endif /* _ASM_X86_CPU_H */

View file

@ -0,0 +1,13 @@
#ifndef _CPU_DEVICE_ID
#define _CPU_DEVICE_ID 1
/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
*/
#include <linux/mod_devicetable.h>
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
#endif
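/*
 * A minimal sketch, not part of this header: the usual x86_match_cpu()
 * pattern -- an empty-terminated x86_cpu_id table matched against the boot
 * CPU.  X86_VENDOR_ANY/X86_FAMILY_ANY/X86_MODEL_ANY come from
 * <linux/mod_devicetable.h> and X86_FEATURE_AES from <asm/cpufeature.h>;
 * the table and function names are invented here.
 */
static const struct x86_cpu_id example_cpu_ids[] = {
	{ X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_AES },
	{}	/* terminating empty entry */
};

static inline bool example_cpu_supported(void)
{
	return x86_match_cpu(example_cpu_ids) != NULL;
}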

View file

@ -0,0 +1,581 @@
/*
* Defines x86 CPU feature bits
*/
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif
#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif
#define NCAPINTS 11 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
*/
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
/* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc, word 7
*/
#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <asm/asm.h>
#include <linux/bitops.h>
#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define X86_CAP_FMT "%s"
#define x86_cap_flag(flag) x86_cap_flags[flag]
#else
#define X86_CAP_FMT "%d:%d"
#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31)
#endif
/*
* In order to save room, we index into this array by doing
* X86_BUG_<name> - NCAPINTS*32.
*/
extern const char * const x86_bug_flags[NBUGINTS*32];
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
#define REQUIRED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
#define DISABLED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
(((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
(((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
test_cpu_cap(c, bit))
#define this_cpu_has(bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
/*
* This macro is for detection of features which need kernel
* infrastructure to be used. It may *not* directly test the CPU
* itself. Use the cpu_has() family if you want true runtime
* testing of CPU features, like in hypervisor code where you are
* supporting a possible guest feature where host support for it
* is not relevant.
*/
#define cpu_feature_enabled(bit) \
(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
cpu_has(&boot_cpu_data, bit))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
* patch the target code for additional performance.
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
/*
* Catch too early usage of this before alternatives
* have run.
*/
asm_volatile_goto("1: jmp %l[t_warn]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
#endif
asm_volatile_goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no);
return true;
t_no:
return false;
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
warn_pre_alternatives();
return false;
#endif
#else /* CC_HAVE_ASM_GOTO */
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $0,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 3f - .\n"
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: movb $1,%0\n"
"4:\n"
".previous\n"
: "=qm" (flag) : "i" (bit));
return flag;
#endif /* CC_HAVE_ASM_GOTO */
}
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
__builtin_constant_p(bit) ? \
__static_cpu_has(bit) : \
boot_cpu_has(bit) \
)
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
/*
* We need to spell out the jumps to the compiler because, depending on the
* offset, the replacement jump can be bigger than the original jump, and
* this we cannot have. Thus, we force the jump to use the widest, 4-byte,
* signed relative offset even though it would often fit in fewer bytes.
*/
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 3f - .\n" /* repl offset */
" .word %P1\n" /* always replace */
" .byte 2b - 1b\n" /* src len */
" .byte 4f - 3f\n" /* repl len */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: .byte 0xe9\n .long %l[t_no] - 2b\n"
"4:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* src len */
" .byte 0\n" /* repl len */
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no);
return true;
t_no:
return false;
t_dynamic:
return __static_cpu_has_safe(bit);
#else
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $2,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 3f - .\n" /* repl offset */
" .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: movb $0,%0\n"
"4:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 5f - .\n" /* repl offset */
" .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"5: movb $1,%0\n"
"6:\n"
".previous\n"
: "=qm" (flag)
: "i" (bit), "i" (X86_FEATURE_ALWAYS));
return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
#endif /* CC_HAVE_ASM_GOTO */
}
#define static_cpu_has_safe(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
_static_cpu_has_safe(bit) \
)
#else
/*
* gcc 3.x is too stupid to do the static test; fall back to dynamic.
*/
#define static_cpu_has(bit) boot_cpu_has(bit)
#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
#define static_cpu_has_bug(bit) static_cpu_has((bit))
#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
#define MAX_CPU_FEATURES (NCAPINTS * 32)
#define cpu_have_feature boot_cpu_has
#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X"
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
boot_cpu_data.x86_model
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
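/*
 * A minimal sketch, not part of this header: typical use of the helpers
 * above.  boot_cpu_has() is a plain runtime bit test and is safe anywhere;
 * static_cpu_has() only pays off in hot paths and only after alternatives
 * have been applied.  The function name is invented here.
 */
static inline bool example_can_use_clflushopt(void)
{
	return static_cpu_has(X86_FEATURE_CLFLUSHOPT);	/* patched to a direct branch */
}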

View file

@ -0,0 +1,14 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;
extern void setup_cpu_local_masks(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */

View file

@ -0,0 +1,9 @@
#ifndef _ASM_X86_CRASH_H
#define _ASM_X86_CRASH_H
int crash_load_segments(struct kimage *image);
int crash_copy_backup_region(struct kimage *image);
int crash_setup_memmap_entries(struct kimage *image,
struct boot_params *params);
#endif /* _ASM_X86_CRASH_H */

View file

@ -0,0 +1,11 @@
#ifndef ASM_X86_AES_H
#define ASM_X86_AES_H
#include <linux/crypto.h>
#include <crypto/aes.h>
void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
const u8 *src);
void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
const u8 *src);
#endif
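/*
 * A minimal sketch, not part of this header: encrypting one block with the
 * asm-accelerated helper above.  crypto_aes_expand_key() is assumed to come
 * from <crypto/aes.h>; the function name is invented here and error handling
 * is omitted.
 */
static inline void example_aes_encrypt_block(const u8 *key, unsigned int key_len,
					     const u8 *in, u8 *out)
{
	struct crypto_aes_ctx ctx;

	crypto_aes_expand_key(&ctx, key, key_len);	/* derive the round keys */
	crypto_aes_encrypt_x86(&ctx, out, in);		/* one 16-byte block */
}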

View file

@ -0,0 +1,101 @@
#ifndef ASM_X86_CAMELLIA_H
#define ASM_X86_CAMELLIA_H
#include <linux/kernel.h>
#include <linux/crypto.h>
#define CAMELLIA_MIN_KEY_SIZE 16
#define CAMELLIA_MAX_KEY_SIZE 32
#define CAMELLIA_BLOCK_SIZE 16
#define CAMELLIA_TABLE_BYTE_LEN 272
#define CAMELLIA_PARALLEL_BLOCKS 2
struct camellia_ctx {
u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
u32 key_length;
};
struct camellia_lrw_ctx {
struct lrw_table_ctx lrw_table;
struct camellia_ctx camellia_ctx;
};
struct camellia_xts_ctx {
struct camellia_ctx tweak_ctx;
struct camellia_ctx crypt_ctx;
};
extern int __camellia_setkey(struct camellia_ctx *cctx,
const unsigned char *key,
unsigned int key_len, u32 *flags);
extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
/* regular block cipher functions */
asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
/* 2-way parallel cipher functions */
asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
/* 16-way parallel cipher functions (avx/aes-ni) */
asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
const u8 *src)
{
__camellia_enc_blk(ctx, dst, src, false);
}
static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
const u8 *src)
{
__camellia_enc_blk(ctx, dst, src, true);
}
static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src)
{
__camellia_enc_blk_2way(ctx, dst, src, false);
}
static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src)
{
__camellia_enc_blk_2way(ctx, dst, src, true);
}
/* glue helpers */
extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
#endif /* ASM_X86_CAMELLIA_H */

View file

@ -0,0 +1,145 @@
/*
* Shared glue code for 128bit block ciphers
*/
#ifndef _CRYPTO_GLUE_HELPER_H
#define _CRYPTO_GLUE_HELPER_H
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <asm/i387.h>
#include <crypto/b128ops.h>
typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
struct common_glue_func_entry {
unsigned int num_blocks; /* number of blocks that @fn will process */
union {
common_glue_func_t ecb;
common_glue_cbc_func_t cbc;
common_glue_ctr_func_t ctr;
common_glue_xts_func_t xts;
} fn_u;
};
struct common_glue_ctx {
unsigned int num_funcs;
int fpu_blocks_limit; /* -1 means fpu not needed at all */
/*
* First funcs entry must have largest num_blocks and last funcs entry
* must have num_blocks == 1!
*/
struct common_glue_func_entry funcs[];
};
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
struct blkcipher_desc *desc,
bool fpu_enabled, unsigned int nbytes)
{
if (likely(fpu_blocks_limit < 0))
return false;
if (fpu_enabled)
return true;
/*
* Vector registers are only used when the chunk to be processed is large
* enough, so do not enable the FPU until it is necessary.
*/
if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
return false;
if (desc) {
/* prevent sleeping if FPU is in use */
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
}
kernel_fpu_begin();
return true;
}
static inline void glue_fpu_end(bool fpu_enabled)
{
if (fpu_enabled)
kernel_fpu_end();
}
static inline void le128_to_be128(be128 *dst, const le128 *src)
{
dst->a = cpu_to_be64(le64_to_cpu(src->a));
dst->b = cpu_to_be64(le64_to_cpu(src->b));
}
static inline void be128_to_le128(le128 *dst, const be128 *src)
{
dst->a = cpu_to_le64(be64_to_cpu(src->a));
dst->b = cpu_to_le64(be64_to_cpu(src->b));
}
static inline void le128_inc(le128 *i)
{
u64 a = le64_to_cpu(i->a);
u64 b = le64_to_cpu(i->b);
b++;
if (!b)
a++;
i->a = cpu_to_le64(a);
i->b = cpu_to_le64(b);
}
static inline void le128_gf128mul_x_ble(le128 *dst, const le128 *src)
{
u64 a = le64_to_cpu(src->a);
u64 b = le64_to_cpu(src->b);
u64 _tt = ((s64)a >> 63) & 0x87;
dst->a = cpu_to_le64((a << 1) ^ (b >> 63));
dst->b = cpu_to_le64((b << 1) ^ _tt);
}
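/*
 * Illustrative sketch only (example_xts_tweak_walk is hypothetical): XTS
 * derives the tweak for block i+1 by multiplying the tweak of block i by x
 * in GF(2^128) with the x^128 + x^7 + x^2 + x + 1 polynomial (hence the
 * 0x87 constant above), which is exactly what le128_gf128mul_x_ble() does.
 */
static inline void example_xts_tweak_walk(le128 *tweak, unsigned int nblocks)
{
	unsigned int i;

	for (i = 0; i < nblocks; i++) {
		/* ... encrypt/decrypt block i using *tweak here ... */
		le128_gf128mul_x_ble(tweak, tweak);	/* tweak for block i+1 */
	}
}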
extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes);
extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
common_glue_func_t tweak_fn, void *tweak_ctx,
void *crypt_ctx);
extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
le128 *iv, common_glue_func_t fn);
#endif /* _CRYPTO_GLUE_HELPER_H */

View file

@@ -0,0 +1,48 @@
#ifndef ASM_X86_SERPENT_AVX_H
#define ASM_X86_SERPENT_AVX_H
#include <linux/crypto.h>
#include <crypto/serpent.h>
#define SERPENT_PARALLEL_BLOCKS 8
struct serpent_lrw_ctx {
struct lrw_table_ctx lrw_table;
struct serpent_ctx serpent_ctx;
};
struct serpent_xts_ctx {
struct serpent_ctx tweak_ctx;
struct serpent_ctx crypt_ctx;
};
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
#endif

View file

@@ -0,0 +1,63 @@
#ifndef ASM_X86_SERPENT_SSE2_H
#define ASM_X86_SERPENT_SSE2_H
#include <linux/crypto.h>
#include <crypto/serpent.h>
#ifdef CONFIG_X86_32
#define SERPENT_PARALLEL_BLOCKS 4
asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_4way(ctx, dst, src, false);
}
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_4way(ctx, dst, src, true);
}
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
serpent_dec_blk_4way(ctx, dst, src);
}
#else
#define SERPENT_PARALLEL_BLOCKS 8
asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_8way(ctx, dst, src, false);
}
static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
__serpent_enc_blk_8way(ctx, dst, src, true);
}
static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
const u8 *src)
{
serpent_dec_blk_8way(ctx, dst, src);
}
#endif
#endif

View file

@@ -0,0 +1,46 @@
#ifndef ASM_X86_TWOFISH_H
#define ASM_X86_TWOFISH_H
#include <linux/crypto.h>
#include <crypto/twofish.h>
#include <crypto/lrw.h>
#include <crypto/b128ops.h>
struct twofish_lrw_ctx {
struct lrw_table_ctx lrw_table;
struct twofish_ctx twofish_ctx;
};
struct twofish_xts_ctx {
struct twofish_ctx tweak_ctx;
struct twofish_ctx crypt_ctx;
};
/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
/* 3-way parallel cipher functions */
asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
/* helpers from twofish_x86_64-3way module */
extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
#endif /* ASM_X86_TWOFISH_H */

View file

@@ -0,0 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
#include <linux/compiler.h>
#include <asm/percpu.h>
#ifndef __ASSEMBLY__
struct task_struct;
DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
return this_cpu_read_stable(current_task);
}
#define current get_current()
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CURRENT_H */

View file

@@ -0,0 +1,118 @@
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H
#include <linux/bug.h>
#include <uapi/asm/debugreg.h>
DECLARE_PER_CPU(unsigned long, cpu_dr7);
#ifndef CONFIG_PARAVIRT
/*
* These special macros can be used to get or set a debugging register
*/
#define get_debugreg(var, register) \
(var) = native_get_debugreg(register)
#define set_debugreg(value, register) \
native_set_debugreg(register, value)
#endif
static inline unsigned long native_get_debugreg(int regno)
{
unsigned long val = 0; /* Damn you, gcc! */
switch (regno) {
case 0:
asm("mov %%db0, %0" :"=r" (val));
break;
case 1:
asm("mov %%db1, %0" :"=r" (val));
break;
case 2:
asm("mov %%db2, %0" :"=r" (val));
break;
case 3:
asm("mov %%db3, %0" :"=r" (val));
break;
case 6:
asm("mov %%db6, %0" :"=r" (val));
break;
case 7:
asm("mov %%db7, %0" :"=r" (val));
break;
default:
BUG();
}
return val;
}
static inline void native_set_debugreg(int regno, unsigned long value)
{
switch (regno) {
case 0:
asm("mov %0, %%db0" ::"r" (value));
break;
case 1:
asm("mov %0, %%db1" ::"r" (value));
break;
case 2:
asm("mov %0, %%db2" ::"r" (value));
break;
case 3:
asm("mov %0, %%db3" ::"r" (value));
break;
case 6:
asm("mov %0, %%db6" ::"r" (value));
break;
case 7:
asm("mov %0, %%db7" ::"r" (value));
break;
default:
BUG();
}
}
static inline void hw_breakpoint_disable(void)
{
/* Zero the control register for HW Breakpoint */
set_debugreg(0UL, 7);
/* Zero-out the individual HW breakpoint address registers */
set_debugreg(0UL, 0);
set_debugreg(0UL, 1);
set_debugreg(0UL, 2);
set_debugreg(0UL, 3);
}
static inline int hw_breakpoint_active(void)
{
return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}
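/*
 * Illustrative sketch only (example_which_bp_fired is hypothetical): a #DB
 * handler typically reads DR6 to see which breakpoint fired and clears it
 * afterwards, since the CPU never clears the status bits on its own.
 */
static inline unsigned long example_which_bp_fired(void)
{
	unsigned long dr6 = native_get_debugreg(6);

	native_set_debugreg(6, 0);
	return dr6 & (DR_TRAP0 | DR_TRAP1 | DR_TRAP2 | DR_TRAP3);
}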
extern void aout_dump_debugregs(struct user *dump);
extern void hw_breakpoint_restore(void);
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(int, debug_stack_usage);
static inline void debug_stack_usage_inc(void)
{
__this_cpu_inc(debug_stack_usage);
}
static inline void debug_stack_usage_dec(void)
{
__this_cpu_dec(debug_stack_usage);
}
int is_debug_stack(unsigned long addr);
void debug_stack_set_zero(void);
void debug_stack_reset(void);
#else /* !X86_64 */
static inline int is_debug_stack(unsigned long addr) { return 0; }
static inline void debug_stack_set_zero(void) { }
static inline void debug_stack_reset(void) { }
static inline void debug_stack_usage_inc(void) { }
static inline void debug_stack_usage_dec(void) { }
#endif /* X86_64 */
#endif /* _ASM_X86_DEBUGREG_H */

View file

@@ -0,0 +1,8 @@
#ifndef _ASM_X86_DELAY_H
#define _ASM_X86_DELAY_H
#include <asm-generic/delay.h>
void use_tsc_delay(void);
#endif /* _ASM_X86_DELAY_H */

514
arch/x86/include/asm/desc.h Normal file
View file

@@ -0,0 +1,514 @@
#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H
#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
desc->limit0 = info->limit & 0x0ffff;
desc->base0 = (info->base_addr & 0x0000ffff);
desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
desc->type = (info->read_exec_only ^ 1) << 1;
desc->type |= info->contents << 2;
desc->s = 1;
desc->dpl = 0x3;
desc->p = info->seg_not_present ^ 1;
desc->limit = (info->limit & 0xf0000) >> 16;
desc->avl = info->useable;
desc->d = info->seg_32bit;
desc->g = info->limit_in_pages;
desc->base2 = (info->base_addr & 0xff000000) >> 24;
/*
* Don't allow setting of the lm bit. It would confuse
* user_64bit_mode and would get overridden by sysret anyway.
*/
desc->l = 0;
}
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
extern struct desc_ptr debug_idt_descr;
extern gate_desc debug_idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
return per_cpu(gdt_page, cpu).gdt;
}
#ifdef CONFIG_X86_64
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
unsigned dpl, unsigned ist, unsigned seg)
{
gate->offset_low = PTR_LOW(func);
gate->segment = __KERNEL_CS;
gate->ist = ist;
gate->p = 1;
gate->dpl = dpl;
gate->zero0 = 0;
gate->zero1 = 0;
gate->type = type;
gate->offset_middle = PTR_MIDDLE(func);
gate->offset_high = PTR_HIGH(func);
}
#else
static inline void pack_gate(gate_desc *gate, unsigned char type,
unsigned long base, unsigned dpl, unsigned flags,
unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}
#endif
static inline int desc_empty(const void *ptr)
{
const u32 *desc = ptr;
return !(desc[0] | desc[1]);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc() native_load_tr_desc()
#define load_gdt(dtr) native_load_gdt(dtr)
#define load_idt(dtr) native_load_idt(dtr)
#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
#define store_gdt(dtr) native_store_gdt(dtr)
#define store_idt(dtr) native_store_idt(dtr)
#define store_tr(tr) (tr = native_store_tr())
#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt
#define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}
static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif /* CONFIG_PARAVIRT */
#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
memcpy(&idt[entry], gate, sizeof(*gate));
}
static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
memcpy(&ldt[entry], desc, 8);
}
static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
unsigned int size;
switch (type) {
case DESC_TSS: size = sizeof(tss_desc); break;
case DESC_LDT: size = sizeof(ldt_desc); break;
default: size = sizeof(*gdt); break;
}
memcpy(&gdt[entry], desc, size);
}
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
unsigned long limit, unsigned char type,
unsigned char flags)
{
desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
(limit & 0x000f0000) | ((type & 0xff) << 8) |
((flags & 0xf) << 20);
desc->p = 1;
}
static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
struct ldttss_desc64 *desc = d;
memset(desc, 0, sizeof(*desc));
desc->limit0 = size & 0xFFFF;
desc->base0 = PTR_LOW(addr);
desc->base1 = PTR_MIDDLE(addr) & 0xFF;
desc->type = type;
desc->p = 1;
desc->limit1 = (size >> 16) & 0xF;
desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
desc->base3 = PTR_HIGH(addr);
#else
pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
struct desc_struct *d = get_cpu_gdt_table(cpu);
tss_desc tss;
/*
* sizeof(unsigned long) coming from an extra "long" at the end
* of the iobitmap. See tss_struct definition in processor.h
*
* -1? seg base+limit should be pointing to the address of the
* last valid byte
*/
set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
sizeof(unsigned long) - 1);
write_gdt_entry(d, entry, &tss, DESC_TSS);
}
#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
if (likely(entries == 0))
asm volatile("lldt %w0"::"q" (0));
else {
unsigned cpu = smp_processor_id();
ldt_desc ldt;
set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
entries * LDT_ENTRY_SIZE - 1);
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
&ldt, DESC_LDT);
asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
}
}
static inline void native_load_tr_desc(void)
{
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
static inline void native_load_gdt(const struct desc_ptr *dtr)
{
asm volatile("lgdt %0"::"m" (*dtr));
}
static inline void native_load_idt(const struct desc_ptr *dtr)
{
asm volatile("lidt %0"::"m" (*dtr));
}
static inline void native_store_gdt(struct desc_ptr *dtr)
{
asm volatile("sgdt %0":"=m" (*dtr));
}
static inline void native_store_idt(struct desc_ptr *dtr)
{
asm volatile("sidt %0":"=m" (*dtr));
}
static inline unsigned long native_store_tr(void)
{
unsigned long tr;
asm volatile("str %0":"=r" (tr));
return tr;
}
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
unsigned int i;
for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
/* This intentionally ignores lm, since 32-bit apps don't have that field. */
#define LDT_empty(info) \
((info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0)
/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
static inline bool LDT_zero(const struct user_desc *info)
{
return (info->base_addr == 0 &&
info->limit == 0 &&
info->contents == 0 &&
info->read_exec_only == 0 &&
info->seg_32bit == 0 &&
info->limit_in_pages == 0 &&
info->seg_not_present == 0 &&
info->useable == 0);
}
static inline void clear_LDT(void)
{
set_ldt(NULL, 0);
}
/*
* load one particular LDT into the current CPU
*/
static inline void load_LDT_nolock(mm_context_t *pc)
{
set_ldt(pc->ldt, pc->size);
}
static inline void load_LDT(mm_context_t *pc)
{
preempt_disable();
load_LDT_nolock(pc);
preempt_enable();
}
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}
static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
desc->base0 = base & 0xffff;
desc->base1 = (base >> 16) & 0xff;
desc->base2 = (base >> 24) & 0xff;
}
static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
return desc->limit0 | (desc->limit << 16);
}
static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
desc->limit0 = limit & 0xffff;
desc->limit = (limit >> 16) & 0xf;
}
#ifdef CONFIG_X86_64
static inline void set_nmi_gate(int gate, void *addr)
{
gate_desc s;
pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
write_idt_entry(debug_idt_table, gate, &s);
}
#endif
#ifdef CONFIG_TRACING
extern struct desc_ptr trace_idt_descr;
extern gate_desc trace_idt_table[];
static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
{
write_idt_entry(trace_idt_table, entry, gate);
}
static inline void _trace_set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
/*
* does not need to be atomic because it is only done once at
* setup time
*/
write_trace_idt_entry(gate, &s);
}
#else
static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
{
}
#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
#endif
static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
/*
* does not need to be atomic because it is only done once at
* setup time
*/
write_idt_entry(idt_table, gate, &s);
write_trace_idt_entry(gate, &s);
}
/*
* This needs to use 'idt_table' rather than 'idt', and
* thus use the _nonmapped_ version of the IDT, as the
* Pentium F0 0F bugfix can have resulted in the mapped
* IDT being write-protected.
*/
#define set_intr_gate(n, addr) \
do { \
BUG_ON((unsigned)n > 0xFF); \
_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
__KERNEL_CS); \
_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
0, 0, __KERNEL_CS); \
} while (0)
extern int first_system_vector;
/* used_vectors is a bitmap of IRQ vectors that are not managed by the per-CPU vector_irq arrays */
extern unsigned long used_vectors[];
static inline void alloc_system_vector(int vector)
{
if (!test_bit(vector, used_vectors)) {
set_bit(vector, used_vectors);
if (first_system_vector > vector)
first_system_vector = vector;
} else {
BUG();
}
}
#define alloc_intr_gate(n, addr) \
do { \
alloc_system_vector(n); \
set_intr_gate(n, addr); \
} while (0)
/*
* This routine sets up an interrupt gate at descriptor privilege level 3.
*/
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}
static inline void set_system_trap_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
}
static inline void set_trap_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
}
static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}
static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}
static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void)
{
if (this_cpu_read(debug_idt_ctr))
return true;
return false;
}
static inline void load_debug_idt(void)
{
load_idt((const struct desc_ptr *)&debug_idt_descr);
}
#else
static inline bool is_debug_idt_enabled(void)
{
return false;
}
static inline void load_debug_idt(void)
{
}
#endif
#ifdef CONFIG_TRACING
extern atomic_t trace_idt_ctr;
static inline bool is_trace_idt_enabled(void)
{
if (atomic_read(&trace_idt_ctr))
return true;
return false;
}
static inline void load_trace_idt(void)
{
load_idt((const struct desc_ptr *)&trace_idt_descr);
}
#else
static inline bool is_trace_idt_enabled(void)
{
return false;
}
static inline void load_trace_idt(void)
{
}
#endif
/*
* The load_current_idt() must be called with interrupts disabled
* to avoid races. That way the IDT will always be set back to the expected
* descriptor. It's also called when a CPU is being initialized, and
* that doesn't need to disable interrupts, as nothing should be
* bothering the CPU then.
*/
static inline void load_current_idt(void)
{
if (is_debug_idt_enabled())
load_debug_idt();
else if (is_trace_idt_enabled())
load_trace_idt();
else
load_idt((const struct desc_ptr *)&idt_descr);
}
#endif /* _ASM_X86_DESC_H */

View file

@@ -0,0 +1,101 @@
/* Written 2000 by Andi Kleen */
#ifndef _ASM_X86_DESC_DEFS_H
#define _ASM_X86_DESC_DEFS_H
/*
* Segment descriptor structure definitions, usable from both x86_64 and i386
* archs.
*/
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* FIXME: Accessing the desc_struct through its fields is more elegant,
* and should be the one valid thing to do. However, a lot of open code
* still touches the a and b accessors, and doing this allows us to do it
* incrementally. We keep the signature as a struct, rather than a union,
* so we can get rid of it transparently in the future -- glommer
*/
/* 8 byte segment descriptor */
struct desc_struct {
union {
struct {
unsigned int a;
unsigned int b;
};
struct {
u16 limit0;
u16 base0;
unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
};
};
} __attribute__((packed));
#define GDT_ENTRY_INIT(flags, base, limit) { { { \
.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
((limit) & 0xf0000) | ((base) & 0xff000000), \
} } }
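/*
 * Worked example (illustrative only, example_flat_code is hypothetical):
 * GDT_ENTRY_INIT(0xc09a, 0, 0xfffff) packs a flat 4 GiB, 32-bit, DPL-0
 * code segment: access byte 0x9a (present, code, readable) in the low
 * byte of flags, G=1/D=1 in the high nibble, base 0, limit 0xfffff in
 * 4 KiB pages.
 */
static const struct desc_struct example_flat_code =
	GDT_ENTRY_INIT(0xc09a, 0, 0xfffff);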
enum {
GATE_INTERRUPT = 0xE,
GATE_TRAP = 0xF,
GATE_CALL = 0xC,
GATE_TASK = 0x5,
};
/* 16byte gate */
struct gate_struct64 {
u16 offset_low;
u16 segment;
unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
u16 offset_middle;
u32 offset_high;
u32 zero1;
} __attribute__((packed));
#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
DESCTYPE_S = 0x10, /* !system */
};
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct ldttss_desc64 {
u16 limit0;
u16 base0;
unsigned base1 : 8, type : 5, dpl : 2, p : 1;
unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
u32 base3;
u32 zero1;
} __attribute__((packed));
#ifdef CONFIG_X86_64
typedef struct gate_struct64 gate_desc;
typedef struct ldttss_desc64 ldt_desc;
typedef struct ldttss_desc64 tss_desc;
#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
#define gate_segment(g) ((g).segment)
#else
typedef struct desc_struct gate_desc;
typedef struct desc_struct ldt_desc;
typedef struct desc_struct tss_desc;
#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
#define gate_segment(g) ((g).a >> 16)
#endif
struct desc_ptr {
unsigned short size;
unsigned long address;
} __attribute__((packed)) ;
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_DESC_DEFS_H */

View file

@@ -0,0 +1,16 @@
#ifndef _ASM_X86_DEVICE_H
#define _ASM_X86_DEVICE_H
struct dev_archdata {
#ifdef CONFIG_X86_DEV_DMA_OPS
struct dma_map_ops *dma_ops;
#endif
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
#endif
};
struct pdev_archdata {
};
#endif /* _ASM_X86_DEVICE_H */

View file

@@ -0,0 +1,39 @@
#ifndef _ASM_X86_DISABLED_FEATURES_H
#define _ASM_X86_DISABLED_FEATURES_H
/* These features, although they might be available in a CPU,
* will not be used because the compile options to support
* them are not present.
*
* This code allows them to be checked and disabled at
* compile time without an explicit #ifdef. Use
* cpu_feature_enabled().
*/
#ifdef CONFIG_X86_64
# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
#endif /* CONFIG_X86_64 */
/*
* Make sure to add features to the correct mask
*/
#define DISABLED_MASK0 (DISABLE_VME)
#define DISABLED_MASK1 0
#define DISABLED_MASK2 0
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
#define DISABLED_MASK4 0
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 0
#define DISABLED_MASK8 0
#define DISABLED_MASK9 0
#endif /* _ASM_X86_DISABLED_FEATURES_H */

View file

@@ -0,0 +1,66 @@
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H
#ifdef CONFIG_X86_32
#include <linux/types.h>
#include <linux/log2.h>
/*
* do_div() is NOT a C function. It wants to return
* two values (the quotient and the remainder), but
* since that doesn't work very well in C, what it
* does is:
*
* - modifies the 64-bit dividend _in_place_
* - returns the 32-bit remainder
*
* This ends up being the most efficient "calling
* convention" on x86.
*/
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
__mod = n & (__base - 1); \
n >>= ilog2(__base); \
} else { \
asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2" : "=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("" : "=A" (n) : "a" (__low), "d" (__high)); \
} \
__mod; \
})
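/*
 * Usage sketch (illustrative only, example_ns_to_ms is hypothetical):
 * do_div() evaluates to the 32-bit remainder and truncates the 64-bit
 * dividend in place to the quotient, so the dividend must be an lvalue
 * of type u64.
 */
static inline u32 example_ns_to_ms(u64 ns, u32 *rem_ns)
{
	*rem_ns = do_div(ns, 1000000);	/* ns now holds the quotient */
	return (u32)ns;			/* nanoseconds -> milliseconds */
}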
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
union {
u64 v64;
u32 v32[2];
} d = { dividend };
u32 upper;
upper = d.v32[1];
d.v32[1] = 0;
if (upper >= divisor) {
d.v32[1] = upper / divisor;
upper %= divisor;
}
asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
"rm" (divisor), "0" (d.v32[0]), "1" (upper));
return d.v64;
}
#define div_u64_rem div_u64_rem
#else
# include <asm-generic/div64.h>
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_DIV64_H */

View file

@@ -0,0 +1,176 @@
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H
/*
* IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
* Documentation/DMA-API.txt for documentation.
*/
#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
#include <linux/dma-contiguous.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif
#define DMA_ERROR_CODE 0
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifndef CONFIG_X86_DEV_DMA_OPS
return dma_ops;
#else
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
else
return dev->archdata.dma_ops;
#endif
}
#include <asm-generic/dma-mapping-common.h>
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
return (dma_addr == DMA_ERROR_CODE);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
struct dma_attrs *attrs);
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#else
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
return 0;
return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr;
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
return daddr;
}
#endif /* CONFIG_X86_DMA_REMAP */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
flush_write_buffers();
}
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
unsigned long dma_mask = 0;
dma_mask = dev->coherent_dma_mask;
if (!dma_mask)
dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
return dma_mask;
}
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
if (dma_mask <= DMA_BIT_MASK(24))
gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
gfp |= GFP_DMA32;
#endif
return gfp;
}
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *
dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
if (!dev)
dev = &x86_dma_fallback_dev;
if (!is_device_dma_capable(dev))
return NULL;
if (!ops->alloc)
return NULL;
memory = ops->alloc(dev, size, dma_handle,
dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
WARN_ON(irqs_disabled()); /* for portability */
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
debug_dma_free_coherent(dev, size, vaddr, bus);
if (ops->free)
ops->free(dev, size, vaddr, bus, attrs);
}
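/*
 * Usage sketch (illustrative only, example_alloc_ring is hypothetical):
 * a driver allocates a coherent buffer, programs the returned dma_addr_t
 * into the device, and never hands the CPU virtual address to hardware.
 */
static inline void *example_alloc_ring(struct device *dev, size_t size,
				       dma_addr_t *ring_dma)
{
	void *ring = dma_alloc_coherent(dev, size, ring_dma, GFP_KERNEL);

	if (!ring)
		return NULL;
	/* ... write *ring_dma to the device's ring-base register ... */
	return ring;
}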
#endif

317
arch/x86/include/asm/dma.h Normal file
View file

@@ -0,0 +1,317 @@
/*
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
*/
#ifndef _ASM_X86_DMA_H
#define _ASM_X86_DMA_H
#include <linux/spinlock.h> /* And spinlocks */
#include <asm/io.h> /* need byte IO */
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
#define dma_outb outb
#endif
#define dma_inb inb
/*
* NOTES about DMA transfers:
*
* controller 1: channels 0-3, byte operations, ports 00-1F
* controller 2: channels 4-7, word operations, ports C0-DF
*
* - ALL registers are 8 bits only, regardless of transfer size
* - channel 4 is not used - cascades 1 into 2.
* - channels 0-3 are byte - addresses/counts are for physical bytes
* - channels 5-7 are word - addresses/counts are for physical words
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
* - transfer count loaded to registers is 1 less than actual count
* - controller 2 offsets are all even (2x offsets for controller 1)
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
* DMA transfers are limited to the lower 16MB of _physical_ memory.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
*
* Address mapping for channels 0-3:
*
* A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* P7 ... P0 A7 ... A0 A7 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
* A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
* | ... | \ \ ... \ \ \ ... \ \
* | ... | \ \ ... \ \ \ ... \ (not used)
* | ... | \ \ ... \ \ \ ... \
* P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
* the hardware level, so odd-byte transfers aren't possible).
*
* Transfer count (_not # bytes_) is limited to 64K, represented as actual
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
* and up to 128K bytes may be transferred on channels 5-7 in one operation.
*
*/
#define MAX_DMA_CHANNELS 8
/* 16MB ISA DMA zone */
#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
#ifdef CONFIG_X86_32
/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)
#else
/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA_ADDR_0 0x00 /* DMA address registers */
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC
#define DMA_CNT_0 0x01 /* DMA count registers */
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
#define DMA_PAGE_0 0x87 /* DMA page registers */
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_READ 0x44
/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48
/* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_MODE_CASCADE 0xC0
#define DMA_AUTOINIT 0x10
#ifdef CONFIG_ISA_DMA_API
extern spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
#endif /* CONFIG_ISA_DMA_API */
/* enable/disable a specific DMA channel */
static inline void enable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
static inline void disable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static inline void clear_dma_ff(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(0, DMA1_CLEAR_FF_REG);
else
dma_outb(0, DMA2_CLEAR_FF_REG);
}
/* set mode (above) for a specific DMA channel */
static inline void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr <= 3)
dma_outb(mode | dmanr, DMA1_MODE_REG);
else
dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
switch (dmanr) {
case 0:
dma_outb(pagenr, DMA_PAGE_0);
break;
case 1:
dma_outb(pagenr, DMA_PAGE_1);
break;
case 2:
dma_outb(pagenr, DMA_PAGE_2);
break;
case 3:
dma_outb(pagenr, DMA_PAGE_3);
break;
case 5:
dma_outb(pagenr & 0xfe, DMA_PAGE_5);
break;
case 6:
dma_outb(pagenr & 0xfe, DMA_PAGE_6);
break;
case 7:
dma_outb(pagenr & 0xfe, DMA_PAGE_7);
break;
}
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
if (dmanr <= 3) {
dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
} else {
dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
}
}
/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
count--;
if (dmanr <= 3) {
dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
dma_outb((count >> 8) & 0xff,
((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
} else {
dma_outb((count >> 1) & 0xff,
((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
dma_outb((count >> 9) & 0xff,
((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
}
}
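/*
 * Illustrative sketch only (example_program_isa_dma is hypothetical and
 * assumes CONFIG_ISA_DMA_API for the lock helpers): the canonical order
 * for (re)programming a channel is mask it, clear the flip-flop, then set
 * mode, address and count before unmasking it again.
 */
static inline void example_program_isa_dma(unsigned int chan,
					   unsigned int phys_addr,
					   unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_WRITE);	/* memory -> device */
	set_dma_addr(chan, phys_addr);		/* physical, below 16MB */
	set_dma_count(chan, len);
	enable_dma(chan);

	release_dma_lock(flags);
}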
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static inline int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port;
/* using short to get 16-bit wrap around */
unsigned short count;
io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr <= 3) ? count : (count << 1);
}
/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
#ifdef CONFIG_ISA_DMA_API
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_X86_DMA_H */

View file

@@ -0,0 +1,21 @@
#ifndef _ASM_X86_DMI_H
#define _ASM_X86_DMI_H
#include <linux/compiler.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/setup.h>
static __always_inline __init void *dmi_alloc(unsigned len)
{
return extend_brk(len, sizeof(int));
}
/* Use early IO mappings for DMI because it's initialized early */
#define dmi_early_remap early_ioremap
#define dmi_early_unmap early_iounmap
#define dmi_remap ioremap
#define dmi_unmap iounmap
#endif /* _ASM_X86_DMI_H */

View file

@ -0,0 +1,146 @@
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
* Macros for dwarf2 CFI unwind table entries.
* See "as.info" for details on these pseudo ops. Unfortunately
* they are only supported in very new binutils, so define them
* away for older versions.
*/
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_ESCAPE .cfi_escape
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif
#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* unwinding at runtime. So only the offline DWARF information is
* useful to anyone. Note we should not use this directive if this
* file is used in the vDSO assembly, or if vmlinux.lds.S gets
* changed so it doesn't discard .eh_frame.
*/
.cfi_sections .debug_frame
#endif
#else
/*
* Due to the structure of pre-existing code, don't use assembler line
* comment character # to ignore the arguments. Instead, use a dummy macro.
*/
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_ESCAPE cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#endif
/*
* An attempt to make CFI annotations more or less
* correct and shorter. It is implied that you know
* what you're doing if you use them.
*/
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
.macro pushq_cfi reg
pushq \reg
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro popq_cfi reg
popq \reg
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro popfq_cfi
popfq
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movq_cfi_restore offset reg
movq \offset(%rsp), %\reg
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
.macro pushl_cfi reg
pushl \reg
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro popl_cfi reg
popl \reg
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro popfl_cfi
popfl
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro movl_cfi reg offset=0
movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movl_cfi_restore offset reg
movl \offset(%esp), %\reg
CFI_RESTORE \reg
.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
#endif /* _ASM_X86_DWARF2_H */

View file

@@ -0,0 +1,77 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
#ifdef CONFIG_EFI
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
#else /* ! CONFIG_EFI */
#define E820_X_MAX E820MAX
#endif
#include <uapi/asm/e820.h>
#ifndef __ASSEMBLY__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
extern struct e820map e820_saved;
extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
extern int
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
int checktype);
extern void update_e820(void);
extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
extern void parse_e820_ext(u64 phys_addr, u32 data_len);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
extern void e820_mark_nosave_regions(unsigned long limit_pfn);
#else
static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
{
}
#endif
#ifdef CONFIG_MEMTEST
extern void early_memtest(unsigned long start, unsigned long end);
#else
static inline void early_memtest(unsigned long start, unsigned long end)
{
}
#endif
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 sizet, u64 align);
void memblock_x86_fill(void);
void memblock_find_dma_reserve(void);
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);
/*
* Returns true iff the specified range [s,e) is completely contained inside
* the ISA region.
*/
static inline bool is_ISA_range(u64 s, u64 e)
{
return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}
#endif /* __ASSEMBLY__ */
#include <linux/ioport.h>
#define HIGH_MEMORY (1024*1024)
#endif /* _ASM_X86_E820_H */

View file

@@ -0,0 +1,18 @@
#ifndef _ASM_X86_EDAC_H
#define _ASM_X86_EDAC_H
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
static inline void atomic_scrub(void *va, u32 size)
{
u32 i, *virt_addr = va;
/*
* Very carefully read and write to memory atomically so we
* are interrupt, DMA and SMP safe.
*/
for (i = 0; i < size / 4; i++, virt_addr++)
asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
}
#endif /* _ASM_X86_EDAC_H */

171
arch/x86/include/asm/efi.h Normal file
View file

@@ -0,0 +1,171 @@
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
#include <asm/i387.h>
/*
* We map the EFI regions needed for runtime services non-contiguously,
* with preserved alignment on virtual addresses starting from -4G down
* for a total max space of 64G. This way, we provide for stable runtime
* services addresses across kernels so that a kexec'd kernel can still
* use them.
*
* This is the main reason why we're doing stable VA mappings for RT
* services.
*
* This flag is used in conjunction with a chicken bit called
* "efi=old_map" which can be used as a fallback to the old runtime
* services mapping method in case there's some b0rkage with a
* particular EFI implementation (haha, it is hard to hold up the
* sarcasm here...).
*/
#define EFI_OLD_MEMMAP EFI_ARCH_1
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
#ifdef CONFIG_X86_32
extern unsigned long asmlinkage efi_call_phys(void *, ...);
/*
* Wrap all the virtual calls in a way that forces the parameters on the stack.
*/
/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...) \
({ \
efi_status_t __s; \
kernel_fpu_begin(); \
__s = ((efi_##f##_t __attribute__((regparm(0)))*) \
efi.systab->runtime->f)(args); \
kernel_fpu_end(); \
__s; \
})
/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...) \
({ \
kernel_fpu_begin(); \
((efi_##f##_t __attribute__((regparm(0)))*) \
efi.systab->runtime->f)(args); \
kernel_fpu_end(); \
})
#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
extern u64 asmlinkage efi_call(void *fp, ...);
#define efi_call_phys(f, args...) efi_call((f), args)
#define efi_call_virt(f, ...) \
({ \
efi_status_t __s; \
\
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
__kernel_fpu_end(); \
preempt_enable(); \
__s; \
})
/*
* All X86_64 virt calls return non-void values. Thus, use non-void call for
* virt calls that would be void on X86_32.
*/
#define __efi_call_virt(f, args...) efi_call_virt(f, args)
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
u32 type, u64 attribute);
#endif /* CONFIG_X86_32 */
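/*
 * Usage sketch (illustrative only, example_virt_efi_get_time is
 * hypothetical and assumes the declarations from linux/efi.h, such as
 * 'efi' and efi_time_t, are in scope as they are at the real call sites):
 * runtime wrappers invoke a service by the name of its member in
 * efi_runtime_services_t.
 */
static inline efi_status_t example_virt_efi_get_time(efi_time_t *tm,
						      efi_time_cap_t *tc)
{
	return efi_call_virt(get_time, tm, tc);
}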
extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(void);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
struct efi_setup_data {
u64 fw_vendor;
u64 runtime;
u64 tables;
u64 smbios;
u64 reserved[8];
};
extern u64 efi_setup;
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)
{
return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}
static inline bool efi_runtime_supported(void)
{
if (efi_is_native())
return true;
if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
return true;
return false;
}
extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
void *phys_set_virtual_address_map,
unsigned long memory_map_size,
unsigned long descriptor_size,
u32 descriptor_version,
efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
void *phys_set_virtual_address_map,
unsigned long memory_map_size,
unsigned long descriptor_size,
u32 descriptor_version,
efi_memory_desc_t *virtual_map)
{
return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */
extern bool efi_reboot_required(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
return false;
}
#endif /* CONFIG_EFI */
#endif /* _ASM_X86_EFI_H */

372
arch/x86/include/asm/elf.h Normal file
View file

@@ -0,0 +1,372 @@
#ifndef _ASM_X86_ELF_H
#define _ASM_X86_ELF_H
/*
* ELF register definitions..
*/
#include <linux/thread_info.h>
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
#ifdef __i386__
typedef struct user_fxsr_struct elf_fpxregset_t;
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
#define R_386_GOT32 3
#define R_386_PLT32 4
#define R_386_COPY 5
#define R_386_GLOB_DAT 6
#define R_386_JMP_SLOT 7
#define R_386_RELATIVE 8
#define R_386_GOTOFF 9
#define R_386_GOTPC 10
#define R_386_NUM 11
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
#else
/* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
#define R_X86_64_NUM 16
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_X86_64
#endif
#include <asm/vdso.h>
#ifdef CONFIG_X86_64
extern unsigned int vdso64_enabled;
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
extern unsigned int vdso32_enabled;
#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch_ia32(x) \
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
#include <asm/processor.h>
#ifdef CONFIG_X86_32
#include <asm/desc.h>
#define elf_check_arch(x) elf_check_arch_ia32(x)
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
contains a pointer to a function which might be registered using `atexit'.
This provides a means for the dynamic linker to call DT_FINI functions for
shared libraries that have been loaded before the code runs.
A value of 0 tells us that we have no such handler.
We might as well make sure everything else is cleared too (except for %esp),
just to make things more deterministic.
*/
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->bx = 0; _r->cx = 0; _r->dx = 0; \
_r->si = 0; _r->di = 0; _r->bp = 0; \
_r->ax = 0; \
} while (0)
/*
* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
* now struct_user_regs, they are different)
*/
#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
do { \
pr_reg[0] = regs->bx; \
pr_reg[1] = regs->cx; \
pr_reg[2] = regs->dx; \
pr_reg[3] = regs->si; \
pr_reg[4] = regs->di; \
pr_reg[5] = regs->bp; \
pr_reg[6] = regs->ax; \
pr_reg[7] = regs->ds & 0xffff; \
pr_reg[8] = regs->es & 0xffff; \
pr_reg[9] = regs->fs & 0xffff; \
pr_reg[11] = regs->orig_ax; \
pr_reg[12] = regs->ip; \
pr_reg[13] = regs->cs & 0xffff; \
pr_reg[14] = regs->flags; \
pr_reg[15] = regs->sp; \
pr_reg[16] = regs->ss & 0xffff; \
} while (0);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
pr_reg[10] = get_user_gs(regs); \
} while (0);
#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
do { \
ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
savesegment(gs, pr_reg[10]); \
} while (0);
#define ELF_PLATFORM (utsname()->machine)
#define set_personality_64bit() do { } while (0)
#else /* CONFIG_X86_32 */
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
#define compat_elf_check_arch(x) \
(elf_check_arch_ia32(x) || \
(IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
#if __USER32_DS != __USER_DS
# error "The following code assumes __USER32_DS == __USER_DS"
#endif
static inline void elf_common_init(struct thread_struct *t,
struct pt_regs *regs, const u16 ds)
{
regs->ax = regs->bx = regs->cx = regs->dx = 0;
regs->si = regs->di = regs->bp = 0;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
t->fs = t->gs = 0;
t->fsindex = t->gsindex = 0;
t->ds = t->es = ds;
}
#define ELF_PLAT_INIT(_r, load_addr) \
elf_common_init(&current->thread, _r, 0)
#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
#define compat_start_thread start_thread_ia32
void set_personality_ia32(bool);
#define COMPAT_SET_PERSONALITY(ex) \
set_personality_ia32((ex).e_machine == EM_X86_64)
#define COMPAT_ELF_PLATFORM ("i686")
/*
* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
* now struct_user_regs, they are different). Assumes current is the process
* getting dumped.
*/
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
(pr_reg)[2] = (regs)->r13; \
(pr_reg)[3] = (regs)->r12; \
(pr_reg)[4] = (regs)->bp; \
(pr_reg)[5] = (regs)->bx; \
(pr_reg)[6] = (regs)->r11; \
(pr_reg)[7] = (regs)->r10; \
(pr_reg)[8] = (regs)->r9; \
(pr_reg)[9] = (regs)->r8; \
(pr_reg)[10] = (regs)->ax; \
(pr_reg)[11] = (regs)->cx; \
(pr_reg)[12] = (regs)->dx; \
(pr_reg)[13] = (regs)->si; \
(pr_reg)[14] = (regs)->di; \
(pr_reg)[15] = (regs)->orig_ax; \
(pr_reg)[16] = (regs)->ip; \
(pr_reg)[17] = (regs)->cs; \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
(pr_reg)[21] = current->thread.fs; \
(pr_reg)[22] = current->thread.gs; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
} while (0);
/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM ("x86_64")
extern void set_personality_64bit(void);
extern unsigned int sysctl_vsyscall32;
extern int force_personality32;
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
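/* Worked example (32-bit, assuming the usual 3G/1G split): with
 * TASK_SIZE = 0xC0000000 this puts ET_DYN objects at
 * 0xC0000000 / 3 * 2 = 0x80000000 by default. */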
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define SET_PERSONALITY(ex) set_personality_64bit()
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
*/
#define elf_read_implies_exec(ex, executable_stack) \
(executable_stack != EXSTACK_DISABLE_X)
struct task_struct;
#define ARCH_DLINFO_IA32 \
do { \
if (vdso32_enabled) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
} while (0)
#ifdef CONFIG_X86_32
#define STACK_RND_MASK (0x7ff)
#define ARCH_DLINFO ARCH_DLINFO_IA32
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#else /* CONFIG_X86_32 */
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
#define ARCH_DLINFO \
do { \
if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long __force)current->mm->context.vdso); \
} while (0)
/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
#define ARCH_DLINFO_X32 \
do { \
if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long __force)current->mm->context.vdso); \
} while (0)
#define AT_SYSINFO 32
#define COMPAT_ARCH_DLINFO \
if (test_thread_flag(TIF_X32)) \
ARCH_DLINFO_X32; \
else \
ARCH_DLINFO_IA32
#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
#endif /* !CONFIG_X86_32 */
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
#define VDSO_ENTRY \
((unsigned long)current->mm->context.vdso + \
selected_vdso32->sym___kernel_vsyscall)
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
/*
* True on X86_32 or when emulating IA32 on X86_64
*/
static inline int mmap_is_ia32(void)
{
#ifdef CONFIG_X86_32
return 1;
#endif
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_ADDR32))
return 1;
#endif
return 0;
}
/* Do not change the values. See get_align_mask() */
enum align_flags {
ALIGN_VA_32 = BIT(0),
ALIGN_VA_64 = BIT(1),
};
struct va_alignment {
int flags;
unsigned long mask;
} ____cacheline_aligned;
extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */

View file

@ -0,0 +1,6 @@
#ifndef _ASM_X86_EMERGENCY_RESTART_H
#define _ASM_X86_EMERGENCY_RESTART_H
extern void machine_emergency_restart(void);
#endif /* _ASM_X86_EMERGENCY_RESTART_H */

View file

@ -0,0 +1,53 @@
/*
* This file is designed to contain the BUILD_INTERRUPT specifications for
* all of the extra named interrupt vectors used by the architecture.
* Usually these are the Inter-Processor Interrupts (IPIs)
*/
/*
* The following vectors are part of the Linux architecture; there
* is no hardware IRQ pin equivalent for them. They are triggered
* through the ICC by us (IPIs).
*/
#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR,
smp_irq_move_cleanup_interrupt)
BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt)
#endif
BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
#ifdef CONFIG_HAVE_KVM
BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
smp_kvm_posted_intr_ipi)
#endif
/*
* Every Pentium local APIC has two 'local interrupts', with a
* soft-definable vector attached to both interrupts; one of
* them is a timer interrupt, the other one is error counter
* overflow. Linux uses the local APIC timer interrupt to get
* a much simpler SMP time architecture:
*/
#ifdef CONFIG_X86_LOCAL_APIC
BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
#ifdef CONFIG_IRQ_WORK
BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
#endif
#endif

View file

@ -0,0 +1,16 @@
#ifndef _ASM_X86_ESPFIX_H
#define _ASM_X86_ESPFIX_H
#ifdef CONFIG_X86_64
#include <asm/percpu.h>
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
extern void init_espfix_bsp(void);
extern void init_espfix_ap(void);
#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_ESPFIX_H */

View file

@ -0,0 +1 @@
/* define arch_align_stack() here */

17
arch/x86/include/asm/fb.h Normal file
View file

@ -0,0 +1,17 @@
#ifndef _ASM_X86_FB_H
#define _ASM_X86_FB_H
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
if (boot_cpu_data.x86 > 3)
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
}
extern int fb_is_primary_device(struct fb_info *info);
#endif /* _ASM_X86_FB_H */

View file

@ -0,0 +1,170 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
* x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
*/
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/pvclock.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
#include <uapi/asm/vsyscall.h>
#endif
/*
* We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
* uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
* Because of this, FIXADDR_TOP x86 integration was left as later work.
*/
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap.
*/
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
#else
#define FIXADDR_TOP (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
PAGE_SIZE)
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process.
* for x86_32: We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also, this lets us do fail-safe vmalloc(): we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
#ifdef CONFIG_X86_32
FIX_HOLE,
#else
VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#ifdef CONFIG_PARAVIRT_CLOCK
PVCLOCK_FIXMAP_BEGIN,
PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
#endif
#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
FIX_RO_IDT, /* Virtual mapping for read-only IDT */
#ifdef CONFIG_X86_32
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef CONFIG_X86_INTEL_MID
FIX_LNW_VRTC,
#endif
__end_of_permanent_fixed_addresses,
/*
* 512 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*
* If necessary we round it up to the next 512-page boundary so
* that we can have a single pgd entry and a single pte table:
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_SLOTS 8
#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
FIX_BTMAP_END =
(__end_of_permanent_fixed_addresses ^
(__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
-PTRS_PER_PTE
? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
(__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
: __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
#ifdef CONFIG_X86_32
FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
FIX_TBOOT_BASE,
#endif
__end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve);
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
extern int fixmaps_set;
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
#ifndef CONFIG_PARAVIRT
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
native_set_fixmap(idx, phys, flags);
}
#endif
#include <asm-generic/fixmap.h>
#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))
void __early_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */
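A minimal usage sketch for the fixmap API above (not part of the header; the slot FIX_EARLYCON_MEM_BASE and the physical address `phys` are just assumptions for illustration). set_fixmap_nocache() and fix_to_virt() are supplied by asm-generic/fixmap.h, which this header includes:

static void __iomem *example_map_early_mmio(phys_addr_t phys)
{
	/* point an existing fixmap slot at the page containing phys ... */
	set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);

	/* ... and return the fixed virtual address plus the page offset */
	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(phys & ~PAGE_MASK));
}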

View file

@ -0,0 +1,281 @@
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef _ASM_X86_FLOPPY_H
#define _ASM_X86_FLOPPY_H
#include <linux/vmalloc.h>
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a, s, vdma) \
(!(vdma) && \
((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)
#define SW fd_routine[use_virtual_dma & 1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) inb_p(port)
#define fd_outb(value, port) outb_p(value, port)
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count;
static int virtual_dma_residue;
static char *virtual_dma_addr;
static int virtual_dma_mode;
static int doing_pdma;
static irqreturn_t floppy_hardint(int irq, void *dev_id)
{
unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
static int calls;
static int bytes;
static int dma_wait;
#endif
if (!doing_pdma)
return floppy_interrupt(irq, dev_id);
#ifdef TRACE_FLPY_INT
if (!calls)
bytes = virtual_dma_count;
#endif
{
int lcount;
char *lptr;
st = 1;
for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
lcount; lcount--, lptr++) {
st = inb(virtual_dma_port + 4) & 0xa0;
if (st != 0xa0)
break;
if (virtual_dma_mode)
outb_p(*lptr, virtual_dma_port + 5);
else
*lptr = inb_p(virtual_dma_port + 5);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
st = inb(virtual_dma_port + 4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
if (st == 0x20)
return IRQ_HANDLED;
if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
dma_wait = 0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id);
return IRQ_HANDLED;
}
#ifdef TRACE_FLPY_INT
if (!virtual_dma_count)
dma_wait++;
#endif
return IRQ_HANDLED;
}
static void fd_disable_dma(void)
{
if (!(can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
}
static int vdma_request_dma(unsigned int dmanr, const char *device_id)
{
return 0;
}
static void vdma_nop(unsigned int dummy)
{
}
static int vdma_get_dma_residue(unsigned int dummy)
{
return virtual_dma_count + virtual_dma_residue;
}
static int fd_request_irq(void)
{
if (can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
0, "floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
0, "floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
{
return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long)vmalloc(size);
}
#define nodma_mem_alloc(size) vdma_mem_alloc(size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
if ((unsigned long)addr >= (unsigned long)high_memory)
vfree((void *)addr);
else
free_pages(addr, get_order(size));
}
#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if (can_use_virtual_dma == 2) {
if ((unsigned long)addr >= (unsigned long)high_memory ||
isa_virt_to_bus(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
} else {
use_virtual_dma = can_use_virtual_dma & 1;
}
}
#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
return 0;
}
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
if (CROSS_64KB(addr, size)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
#endif
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA, mode);
set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
set_dma_count(FLOPPY_DMA, size);
enable_dma(FLOPPY_DMA);
return 0;
}
static struct fd_routine_l {
int (*_request_dma)(unsigned int dmanr, const char *device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
unsigned long (*_dma_mem_alloc)(unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
request_dma,
free_dma,
get_dma_residue,
dma_mem_alloc,
hard_dma_setup
},
{
vdma_request_dma,
vdma_nop,
vdma_get_dma_residue,
vdma_mem_alloc,
vdma_dma_setup
}
};
static int FDC1 = 0x3f0;
static int FDC2 = -1;
/*
* Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
* is needed to prevent corrupted CMOS RAM in case "insmod floppy"
* coincides with another rtc CMOS user. Paul G.
*/
#define FLOPPY0_TYPE \
({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
val = (CMOS_READ(0x10) >> 4) & 15; \
spin_unlock_irqrestore(&rtc_lock, flags); \
val; \
})
#define FLOPPY1_TYPE \
({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
val = CMOS_READ(0x10) & 15; \
spin_unlock_irqrestore(&rtc_lock, flags); \
val; \
})
#define N_FDC 2
#define N_DRIVE 8
#define EXTRA_FLOPPY_PARAMS
#endif /* _ASM_X86_FLOPPY_H */

View file

@ -0,0 +1,622 @@
/*
* Copyright (C) 1994 Linus Torvalds
*
* Pentium III FXSR, SSE support
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
* x86-64 work by Andi Kleen 2002
*/
#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
#include <asm/smap.h>
#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct user_i387_struct
# define user32_fxsr_struct user_fxsr_struct
# define ia32_setup_frame __setup_frame
# define ia32_setup_rt_frame __setup_rt_frame
#endif
extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);
extern void eager_fpu_init(void);
DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
xstateregs_set;
/*
* xstateregs_active == fpregs_active. Please refer to the comment
* at the definition of fpregs_active.
*/
#define xstateregs_active fpregs_active
#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
static inline int is_ia32_compat_frame(void)
{
return config_enabled(CONFIG_IA32_EMULATION) &&
test_thread_flag(TIF_IA32);
}
static inline int is_ia32_frame(void)
{
return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}
static inline int is_x32_frame(void)
{
return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}
#define X87_FSW_ES (1 << 7) /* Exception Summary */
static __always_inline __pure bool use_eager_fpu(void)
{
return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}
static __always_inline __pure bool use_xsaveopt(void)
{
return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}
static __always_inline __pure bool use_xsave(void)
{
return static_cpu_has_safe(X86_FEATURE_XSAVE);
}
static __always_inline __pure bool use_fxsr(void)
{
return static_cpu_has_safe(X86_FEATURE_FXSR);
}
static inline void fx_finit(struct i387_fxsave_struct *fx)
{
memset(fx, 0, xstate_size);
fx->cwd = 0x37f;
fx->mxcsr = MXCSR_DEFAULT;
}
extern void __sanitize_i387_state(struct task_struct *);
static inline void sanitize_i387_state(struct task_struct *tsk)
{
if (!use_xsaveopt())
return;
__sanitize_i387_state(tsk);
}
#define user_insn(insn, output, input...) \
({ \
int err; \
asm volatile(ASM_STAC "\n" \
"1:" #insn "\n\t" \
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \
})
#define check_insn(insn, output, input...) \
({ \
int err; \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \
})
static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
if (config_enabled(CONFIG_X86_32))
return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
/* See comment in fpu_fxsave() below. */
return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
if (config_enabled(CONFIG_X86_32))
return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
/* See comment in fpu_fxsave() below. */
return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
"m" (*fx));
}
static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
{
if (config_enabled(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
/* See comment in fpu_fxsave() below. */
return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
"m" (*fx));
}
static inline int frstor_checking(struct i387_fsave_struct *fx)
{
return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int frstor_user(struct i387_fsave_struct __user *fx)
{
return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void fpu_fxsave(struct fpu *fpu)
{
if (config_enabled(CONFIG_X86_32))
asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
* operand uses any extended registers for addressing, a second
* REX prefix will be generated (to the assembler, rex64
* followed by semicolon is a separate instruction), and hence
* the 64-bitness is lost.
*
* Using "fxsaveq %0" would be the ideal choice, but is only
* supported starting with gas 2.16.
*
* Using, as a workaround, the properly prefixed form below
* isn't accepted by any binutils version so far released,
* complaining that the same type of prefix is used twice if
* an extended register is needed for addressing (fix submitted
* to mainline 2005-11-21).
*
* asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
*
* This, however, we can work around by forcing the compiler to
* select an addressing mode that doesn't require extended
* registers.
*/
asm volatile( "rex64/fxsave (%[fx])"
: "=m" (fpu->state->fxsave)
: [fx] "R" (&fpu->state->fxsave));
}
}
/*
* These must be called with preempt disabled. They return
* 'true' if the FPU state is still intact.
*/
static inline int fpu_save_init(struct fpu *fpu)
{
if (use_xsave()) {
fpu_xsave(fpu);
/*
* xsave header may indicate the init state of the FP.
*/
if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
return 1;
} else if (use_fxsr()) {
fpu_fxsave(fpu);
} else {
asm volatile("fnsave %[fx]; fwait"
: [fx] "=m" (fpu->state->fsave));
return 0;
}
/*
* If exceptions are pending, we need to clear them so
* that we don't randomly get exceptions later.
*
* FIXME! Is this perhaps only true for the old-style
* irq13 case? Maybe we could leave the x87 state
* intact otherwise?
*/
if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
asm volatile("fnclex");
return 0;
}
return 1;
}
static inline int __save_init_fpu(struct task_struct *tsk)
{
return fpu_save_init(&tsk->thread.fpu);
}
static inline int fpu_restore_checking(struct fpu *fpu)
{
if (use_xsave())
return fpu_xrstor_checking(&fpu->state->xsave);
else if (use_fxsr())
return fxrstor_checking(&fpu->state->fxsave);
else
return frstor_checking(&fpu->state->fsave);
}
static inline int restore_fpu_checking(struct task_struct *tsk)
{
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. "m" is a random variable that should be in L1 */
if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
"emms\n\t"
"fildl %P[addr]" /* set F?P to defined value */
: : [addr] "m" (tsk->thread.fpu.has_fpu));
}
return fpu_restore_checking(&tsk->thread.fpu);
}
/*
* Software FPU state helpers. Careful: these need to
* be preemption protected *and* they need to be
* properly paired with the CR0.TS changes!
*/
static inline int __thread_has_fpu(struct task_struct *tsk)
{
return tsk->thread.fpu.has_fpu;
}
/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
tsk->thread.fpu.has_fpu = 0;
this_cpu_write(fpu_owner_task, NULL);
}
/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
tsk->thread.fpu.has_fpu = 1;
this_cpu_write(fpu_owner_task, tsk);
}
/*
* Encapsulate the CR0.TS handling together with the
* software flag.
*
* These generally need preemption protection to work,
* so do try to avoid using these on their own.
*/
static inline void __thread_fpu_end(struct task_struct *tsk)
{
__thread_clear_has_fpu(tsk);
if (!use_eager_fpu())
stts();
}
static inline void __thread_fpu_begin(struct task_struct *tsk)
{
if (!use_eager_fpu())
clts();
__thread_set_has_fpu(tsk);
}
static inline void __drop_fpu(struct task_struct *tsk)
{
if (__thread_has_fpu(tsk)) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
__thread_fpu_end(tsk);
}
}
static inline void drop_fpu(struct task_struct *tsk)
{
/*
* Forget coprocessor state..
*/
preempt_disable();
tsk->thread.fpu_counter = 0;
__drop_fpu(tsk);
clear_stopped_child_used_math(tsk);
preempt_enable();
}
static inline void drop_init_fpu(struct task_struct *tsk)
{
if (!use_eager_fpu())
drop_fpu(tsk);
else {
if (use_xsave())
xrstor_state(init_xstate_buf, -1);
else
fxrstor_checking(&init_xstate_buf->i387);
}
}
/*
* FPU state switching for scheduling.
*
* This is a two-stage process:
*
* - switch_fpu_prepare() saves the old state and
* sets the new state of the CR0.TS bit. This is
* done within the context of the old process.
*
* - switch_fpu_finish() restores the new state as
* necessary.
*/
typedef struct { int preload; } fpu_switch_t;
/*
* Must be run with preemption disabled: this clears fpu_owner_task
* on this CPU.
*
* This will disable any lazy FPU state restore of the current FPU state,
* but if the current thread owns the FPU, it will still be saved.
*/
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
per_cpu(fpu_owner_task, cpu) = NULL;
}
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
return new == this_cpu_read_stable(fpu_owner_task) &&
cpu == new->thread.fpu.last_cpu;
}
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
fpu_switch_t fpu;
/*
* If the task has used the FPU, pre-load its state on eager-FPU (xsave)
* processors, or when the past 5 consecutive context switches used it.
*/
fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
new->thread.fpu_counter > 5);
if (__thread_has_fpu(old)) {
if (!__save_init_fpu(old))
cpu = ~0;
old->thread.fpu.last_cpu = cpu;
old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
new->thread.fpu_counter++;
__thread_set_has_fpu(new);
prefetch(new->thread.fpu.state);
} else if (!use_eager_fpu())
stts();
} else {
old->thread.fpu_counter = 0;
old->thread.fpu.last_cpu = ~0;
if (fpu.preload) {
new->thread.fpu_counter++;
if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
fpu.preload = 0;
else
prefetch(new->thread.fpu.state);
__thread_fpu_begin(new);
}
}
return fpu;
}
/*
* By the time this gets called, we've already cleared CR0.TS and
* given the process the FPU if we are going to preload the FPU
* state - all we need to do is to conditionally restore the register
* state itself.
*/
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
if (fpu.preload) {
if (unlikely(restore_fpu_checking(new)))
drop_init_fpu(new);
}
}
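/*
 * Illustrative sketch only: roughly how the context-switch path
 * (__switch_to) is expected to drive the prepare/finish pair above.
 * prev_p, next_p and cpu are assumed parameters, and the real switch
 * code does far more in between.
 */
static inline void example_switch_fpu(struct task_struct *prev_p,
				      struct task_struct *next_p, int cpu)
{
	fpu_switch_t fpu;

	/* save prev's state and update CR0.TS, still running as prev */
	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/* ... stack, segment and TLS switching happens here ... */

	/* conditionally restore next's register state */
	switch_fpu_finish(next_p, fpu);
}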
/*
* Signal frame handlers...
*/
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
static inline int xstate_sigframe_size(void)
{
return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}
static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
void __user *buf_fx = buf;
int size = xstate_sigframe_size();
if (ia32_frame && use_fxsr()) {
buf_fx = buf + sizeof(struct i387_fsave_struct);
size += sizeof(struct i387_fsave_struct);
}
return __restore_xstate_sig(buf, buf_fx, size);
}
/*
* Need to be preemption-safe.
*
* NOTE! user_fpu_begin() must be used only immediately before restoring
* it. This function does not do any save/restore on its own.
*/
static inline void user_fpu_begin(void)
{
preempt_disable();
if (!user_has_fpu())
__thread_fpu_begin(current);
preempt_enable();
}
static inline void __save_fpu(struct task_struct *tsk)
{
if (use_xsave()) {
if (unlikely(system_state == SYSTEM_BOOTING))
xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
else
xsave_state(&tsk->thread.fpu.state->xsave, -1);
} else
fpu_fxsave(&tsk->thread.fpu);
}
/*
* These disable preemption on their own and are safe
*/
static inline void save_init_fpu(struct task_struct *tsk)
{
WARN_ON_ONCE(!__thread_has_fpu(tsk));
if (use_eager_fpu()) {
__save_fpu(tsk);
return;
}
preempt_disable();
__save_init_fpu(tsk);
__thread_fpu_end(tsk);
preempt_enable();
}
/*
* i387 state interaction
*/
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
return tsk->thread.fpu.state->fxsave.cwd;
} else {
return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
return tsk->thread.fpu.state->fxsave.swd;
} else {
return (unsigned short)tsk->thread.fpu.state->fsave.swd;
}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
return tsk->thread.fpu.state->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}
static bool fpu_allocated(struct fpu *fpu)
{
return fpu->state != NULL;
}
static inline int fpu_alloc(struct fpu *fpu)
{
if (fpu_allocated(fpu))
return 0;
fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
if (!fpu->state)
return -ENOMEM;
WARN_ON((unsigned long)fpu->state & 15);
return 0;
}
static inline void fpu_free(struct fpu *fpu)
{
if (fpu->state) {
kmem_cache_free(task_xstate_cachep, fpu->state);
fpu->state = NULL;
}
}
static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
if (use_eager_fpu()) {
memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
__save_fpu(dst);
} else {
struct fpu *dfpu = &dst->thread.fpu;
struct fpu *sfpu = &src->thread.fpu;
unlazy_fpu(src);
memcpy(dfpu->state, sfpu->state, xstate_size);
}
}
static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
unsigned long *size)
{
unsigned long frame_size = xstate_sigframe_size();
*buf_fx = sp = round_down(sp - frame_size, 64);
if (ia32_frame && use_fxsr()) {
frame_size += sizeof(struct i387_fsave_struct);
sp -= sizeof(struct i387_fsave_struct);
}
*size = frame_size;
return sp;
}
#endif

View file

@ -0,0 +1,26 @@
#ifdef __ASSEMBLY__
#include <asm/asm.h>
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look
like an ordinary ebp save/restore. This avoids some special cases for
frame pointer later */
#ifdef CONFIG_FRAME_POINTER
.macro FRAME
__ASM_SIZE(push,_cfi) %__ASM_REG(bp)
CFI_REL_OFFSET __ASM_REG(bp), 0
__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
.endm
.macro ENDFRAME
__ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
CFI_RESTORE __ASM_REG(bp)
.endm
#else
.macro FRAME
.endm
.macro ENDFRAME
.endm
#endif
#endif /* __ASSEMBLY__ */

View file

@ -0,0 +1,101 @@
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H
#ifdef __ASSEMBLY__
/* skip is set if the stack was already partially adjusted */
.macro MCOUNT_SAVE_FRAME skip=0
/*
* We add enough stack to save all regs.
*/
subq $(SS+8-\skip), %rsp
movq %rax, RAX(%rsp)
movq %rcx, RCX(%rsp)
movq %rdx, RDX(%rsp)
movq %rsi, RSI(%rsp)
movq %rdi, RDI(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
/* Move RIP to its proper location */
movq SS+8(%rsp), %rdx
movq %rdx, RIP(%rsp)
.endm
.macro MCOUNT_RESTORE_FRAME skip=0
movq R9(%rsp), %r9
movq R8(%rsp), %r8
movq RDI(%rsp), %rdi
movq RSI(%rsp), %rsi
movq RDX(%rsp), %rdx
movq RCX(%rsp), %rcx
movq RAX(%rsp), %rax
addq $(SS+8-\skip), %rsp
.endm
#endif
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR ((long)(__fentry__))
#else
# define MCOUNT_ADDR ((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
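/* i.e. the compiler-emitted "call mcount"/"call __fentry__" is a rel32
 * near call: one 0xe8 opcode byte plus a 4-byte displacement. */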
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#ifndef __ASSEMBLY__
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
* addr is the address of the mcount call instruction.
* recordmcount does the necessary offset calculation.
*/
return addr;
}
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
/* No extra data needed for x86 */
};
int ftrace_int3_handler(struct pt_regs *regs);
#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>
/*
* Because ia32 syscalls do not map to x86_64 syscall numbers,
* this screws up the trace output when tracing an ia32 task.
* Instead of reporting bogus syscalls, just do not trace them.
*
* If the user really wants these, then they should use the
* raw syscall tracepoints with filtering.
*/
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
if (is_compat_task())
return true;
return false;
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
#endif /* _ASM_X86_FTRACE_H */

View file

@ -0,0 +1,117 @@
#ifndef _ASM_X86_FUTEX_H
#define _ASM_X86_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/smap.h>
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
asm volatile("\t" ASM_STAC "\n" \
"1:\t" insn "\n" \
"2:\t" ASM_CLAC "\n" \
"\t.section .fixup,\"ax\"\n" \
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
asm volatile("\t" ASM_STAC "\n" \
"1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
"2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t1b\n" \
"3:\t" ASM_CLAC "\n" \
"\t.section .fixup,\"ax\"\n" \
"4:\tmov\t%5, %1\n" \
"\tjmp\t3b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
ret = (oldval == cmparg);
break;
case FUTEX_OP_CMP_NE:
ret = (oldval != cmparg);
break;
case FUTEX_OP_CMP_LT:
ret = (oldval < cmparg);
break;
case FUTEX_OP_CMP_GE:
ret = (oldval >= cmparg);
break;
case FUTEX_OP_CMP_LE:
ret = (oldval <= cmparg);
break;
case FUTEX_OP_CMP_GT:
ret = (oldval > cmparg);
break;
default:
ret = -ENOSYS;
}
}
return ret;
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
#endif
#endif /* _ASM_X86_FUTEX_H */
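A small usage sketch for futex_atomic_op_inuser() above (illustrative only; the helper name is made up here). FUTEX_OP() and the FUTEX_OP_* constants come from uapi/linux/futex.h, pulled in via linux/futex.h:

static inline int example_futex_add_and_test_gt(u32 __user *uaddr)
{
	/* atomically: old = *uaddr; *uaddr += 1; then evaluate old > 0 */
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	/* returns the comparison result, or -EFAULT on a bad user pointer */
	return futex_atomic_op_inuser(encoded_op, uaddr);
}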

113
arch/x86/include/asm/gart.h Normal file
View file

@ -0,0 +1,113 @@
#ifndef _ASM_X86_GART_H
#define _ASM_X86_GART_H
#include <asm/e820.h>
extern void set_up_gart_resume(u32, u32);
extern int fallback_aper_order;
extern int fallback_aper_force;
extern int fix_aperture;
/* PTE bits. */
#define GPTE_VALID 1
#define GPTE_COHERENT 2
/* Aperture control register bits. */
#define GARTEN (1<<0)
#define DISGARTCPU (1<<4)
#define DISGARTIO (1<<5)
#define DISTLBWALKPRB (1<<6)
/* GART cache control register bits. */
#define INVGART (1<<0)
#define GARTPTEERR (1<<1)
/* K8 On-cpu GART registers */
#define AMD64_GARTAPERTURECTL 0x90
#define AMD64_GARTAPERTUREBASE 0x94
#define AMD64_GARTTABLEBASE 0x98
#define AMD64_GARTCACHECTL 0x9c
#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
extern int gart_iommu_aperture_allowed;
extern int gart_iommu_aperture_disabled;
extern void early_gart_iommu_check(void);
extern int gart_iommu_init(void);
extern void __init gart_parse_options(char *);
extern int gart_iommu_hole_init(void);
#else
#define gart_iommu_aperture 0
#define gart_iommu_aperture_allowed 0
#define gart_iommu_aperture_disabled 1
static inline void early_gart_iommu_check(void)
{
}
static inline void gart_parse_options(char *options)
{
}
static inline int gart_iommu_hole_init(void)
{
return -ENODEV;
}
#endif
extern int agp_amd64_init(void);
static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
{
u32 ctl;
/*
* Don't enable translation but enable GART IO and CPU accesses.
* Also, set DISTLBWALKPRB since GART tables memory is UC.
*/
ctl = order << 1;
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
/* address of the mappings table */
addr >>= 12;
tmp = (u32) addr<<4;
tmp &= ~0xf;
pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
/* Enable GART translation for this hammer. */
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
ctl |= GARTEN | DISTLBWALKPRB;
ctl &= ~(DISGARTCPU | DISGARTIO);
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
{
if (!aper_base)
return 0;
if (aper_base + aper_size > 0x100000000ULL) {
printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
if (aper_size < min_size) {
printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
aper_size>>20, min_size>>20);
return 0;
}
return 1;
}
#endif /* _ASM_X86_GART_H */

View file

@ -0,0 +1 @@
#include <asm/apic.h>

View file

@ -0,0 +1,36 @@
/*
* AMD Geode definitions
* Copyright (C) 2006, Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#ifndef _ASM_X86_GEODE_H
#define _ASM_X86_GEODE_H
#include <asm/processor.h>
#include <linux/io.h>
#include <linux/cs5535.h>
static inline int is_geode_gx(void)
{
return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
(boot_cpu_data.x86 == 5) &&
(boot_cpu_data.x86_model == 5));
}
static inline int is_geode_lx(void)
{
return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
(boot_cpu_data.x86 == 5) &&
(boot_cpu_data.x86_model == 10));
}
static inline int is_geode(void)
{
return (is_geode_gx() || is_geode_lx());
}
#endif /* _ASM_X86_GEODE_H */

View file

@ -0,0 +1,4 @@
#ifndef __LINUX_GPIO_H
#warning Include linux/gpio.h instead of asm/gpio.h
#include <linux/gpio.h>
#endif

View file

@ -0,0 +1,63 @@
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
unsigned int icr_read_retry_count;
#endif
#ifdef CONFIG_HAVE_KVM
unsigned int kvm_posted_intr_ipis;
#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
/*
* irq_tlb_count is double-counted in irq_call_count, so it must be
* subtracted from irq_call_count when displaying irq_call_count
*/
unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
unsigned int irq_threshold_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
unsigned int irq_hv_callback_count;
#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) \
this_cpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
#endif /* _ASM_X86_HARDIRQ_H */

View file

@ -0,0 +1,7 @@
#ifndef _ASM_X86_HASH_H
#define _ASM_X86_HASH_H
struct fast_hash_ops;
extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
#endif /* _ASM_X86_HASH_H */

View file

@ -0,0 +1,77 @@
/*
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_X86_HIGHMEM_H
#define _ASM_X86_HIGHMEM_H
#ifdef __KERNEL__
#include <linux/interrupt.h>
#include <linux/threads.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/fixmap.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
/*
* Right now we initialize only a single pte table. It can be extended
* easily; subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
/*
* Ordering is:
*
* FIXADDR_TOP
* fixed_addresses
* FIXADDR_START
* temp fixed addresses
* FIXADDR_BOOT_START
* Persistent kmap area
* PKMAP_BASE
* VMALLOC_END
* Vmalloc area
* VMALLOC_START
* high_memory
*/
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
unsigned long end_pfn);
#endif /* __KERNEL__ */
#endif /* _ASM_X86_HIGHMEM_H */
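A minimal sketch of the kmap_atomic() pattern declared above (not from this header; memcpy() from linux/string.h and the kunmap_atomic() wrapper from linux/highmem.h are assumed):

static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* atomic section: no sleeping */

	memcpy(dst, vaddr, len);
	kunmap_atomic(vaddr);
}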

118
arch/x86/include/asm/hpet.h Normal file
View file

@ -0,0 +1,118 @@
#ifndef _ASM_X86_HPET_H
#define _ASM_X86_HPET_H
#include <linux/msi.h>
#ifdef CONFIG_HPET_TIMER
#define HPET_MMAP_SIZE 1024
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
#define HPET_Tn_CFG(n) (0x100 + 0x20 * n)
#define HPET_Tn_CMP(n) (0x108 + 0x20 * n)
#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n)
#define HPET_T0_CFG 0x100
#define HPET_T0_CMP 0x108
#define HPET_T0_ROUTE 0x110
#define HPET_T1_CFG 0x120
#define HPET_T1_CMP 0x128
#define HPET_T1_ROUTE 0x130
#define HPET_T2_CFG 0x140
#define HPET_T2_CMP 0x148
#define HPET_T2_ROUTE 0x150
#define HPET_ID_REV 0x000000ff
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_64BIT 0x00002000
#define HPET_ID_LEGSUP 0x00008000
#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_NUMBER_SHIFT 8
#define HPET_ID_VENDOR_SHIFT 16
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
#define HPET_TN_LEVEL 0x0002
#define HPET_TN_ENABLE 0x0004
#define HPET_TN_PERIODIC 0x0008
#define HPET_TN_PERIODIC_CAP 0x0010
#define HPET_TN_64BIT_CAP 0x0020
#define HPET_TN_SETVAL 0x0040
#define HPET_TN_32BIT 0x0100
#define HPET_TN_ROUTE 0x3e00
#define HPET_TN_FSB 0x4000
#define HPET_TN_FSB_CAP 0x8000
#define HPET_TN_ROUTE_SHIFT 9
/* Max HPET Period is 10^8 femto sec as in HPET spec */
#define HPET_MAX_PERIOD 100000000UL
/*
* Min HPET period is 10^5 femto sec just for safety. If it is less than this,
* then the 32-bit HPET counter wraps around in less than 0.5 sec.
*/
#define HPET_MIN_PERIOD 100000UL
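/*
 * Illustrative sketch only (do_div() from asm/div64.h assumed): HPET_PERIOD
 * reports femtoseconds per counter tick, so the tick frequency follows from
 * the 10^15 femtoseconds in a second, which is what the bounds above guard.
 */
static inline u64 example_hpet_freq_hz(u32 period_fs)
{
	u64 freq = 1000000000000000ULL;	/* fs per second */

	do_div(freq, period_fs);	/* freq = 10^15 / period */
	return freq;
}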
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
extern int boot_hpet_disable;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
extern unsigned int hpet_readl(unsigned int a);
extern void force_hpet_resume(void);
struct irq_data;
extern void hpet_msi_unmask(struct irq_data *data);
extern void hpet_msi_mask(struct irq_data *data);
struct hpet_dev;
extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
#ifdef CONFIG_PCI_MSI
extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
#else
static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
return -EINVAL;
}
#endif
#ifdef CONFIG_HPET_EMULATE_RTC
#include <linux/interrupt.h>
typedef irqreturn_t (*rtc_irq_handler)(int interrupt, void *cookie);
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
extern int hpet_register_irq_handler(rtc_irq_handler handler);
extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
#endif /* CONFIG_HPET_EMULATE_RTC */
#else /* CONFIG_HPET_TIMER */
static inline int hpet_enable(void) { return 0; }
static inline int is_hpet_enabled(void) { return 0; }
#define hpet_readl(a) 0
#define default_setup_hpet_msi NULL
#endif
#endif /* _ASM_X86_HPET_H */

View file

@ -0,0 +1,99 @@
#ifndef _ASM_X86_HUGETLB_H
#define _ASM_X86_HUGETLB_H
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
ptep_clear_flush(vma, addr, ptep);
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* _ASM_X86_HUGETLB_H */

View file

@ -0,0 +1,75 @@
#ifndef _I386_HW_BREAKPOINT_H
#define _I386_HW_BREAKPOINT_H
#include <uapi/asm/hw_breakpoint.h>
#define __ARCH_HW_BREAKPOINT_H
/*
* The name should probably be something dealt with at
* a higher level, while dealing with the user
* (display/resolving).
*/
struct arch_hw_breakpoint {
unsigned long address;
u8 len;
u8 type;
};
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/list.h>
/* Available HW breakpoint length encodings */
#define X86_BREAKPOINT_LEN_X 0x40
#define X86_BREAKPOINT_LEN_1 0x40
#define X86_BREAKPOINT_LEN_2 0x44
#define X86_BREAKPOINT_LEN_4 0x4c
#ifdef CONFIG_X86_64
#define X86_BREAKPOINT_LEN_8 0x48
#endif
/* Available HW breakpoint type encodings */
/* trigger on instruction execute */
#define X86_BREAKPOINT_EXECUTE 0x80
/* trigger on memory write */
#define X86_BREAKPOINT_WRITE 0x81
/* trigger on memory read or write */
#define X86_BREAKPOINT_RW 0x83
/* Total number of available HW breakpoint registers */
#define HBP_NUM 4
static inline int hw_breakpoint_slots(int type)
{
return HBP_NUM;
}
struct perf_event;
struct pmu;
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
extern void
arch_fill_perf_breakpoint(struct perf_event *bp);
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type);
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type);
extern int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type);
extern struct pmu perf_ops_bp;
#endif /* _I386_HW_BREAKPOINT_H */

View file

@ -0,0 +1,212 @@
#ifndef _ASM_X86_HW_IRQ_H
#define _ASM_X86_HW_IRQ_H
/*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
*
* moved some of the old arch/i386/kernel/irq.h to here. VY
*
* IRQ/IPI changes taken from work by Thomas Radke
* <tomsoft@informatik.tu-chemnitz.de>
*
* hacked by Andi Kleen for x86-64.
* unified by tglx
*/
#include <asm/irq_vectors.h>
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/sections.h>
/* Interrupt handlers registered during init_IRQ */
extern asmlinkage void apic_timer_interrupt(void);
extern asmlinkage void x86_platform_ipi(void);
extern asmlinkage void kvm_posted_intr_ipi(void);
extern asmlinkage void error_interrupt(void);
extern asmlinkage void irq_work_interrupt(void);
extern asmlinkage void spurious_interrupt(void);
extern asmlinkage void thermal_interrupt(void);
extern asmlinkage void reschedule_interrupt(void);
extern asmlinkage void invalidate_interrupt(void);
extern asmlinkage void invalidate_interrupt0(void);
extern asmlinkage void invalidate_interrupt1(void);
extern asmlinkage void invalidate_interrupt2(void);
extern asmlinkage void invalidate_interrupt3(void);
extern asmlinkage void invalidate_interrupt4(void);
extern asmlinkage void invalidate_interrupt5(void);
extern asmlinkage void invalidate_interrupt6(void);
extern asmlinkage void invalidate_interrupt7(void);
extern asmlinkage void invalidate_interrupt8(void);
extern asmlinkage void invalidate_interrupt9(void);
extern asmlinkage void invalidate_interrupt10(void);
extern asmlinkage void invalidate_interrupt11(void);
extern asmlinkage void invalidate_interrupt12(void);
extern asmlinkage void invalidate_interrupt13(void);
extern asmlinkage void invalidate_interrupt14(void);
extern asmlinkage void invalidate_interrupt15(void);
extern asmlinkage void invalidate_interrupt16(void);
extern asmlinkage void invalidate_interrupt17(void);
extern asmlinkage void invalidate_interrupt18(void);
extern asmlinkage void invalidate_interrupt19(void);
extern asmlinkage void invalidate_interrupt20(void);
extern asmlinkage void invalidate_interrupt21(void);
extern asmlinkage void invalidate_interrupt22(void);
extern asmlinkage void invalidate_interrupt23(void);
extern asmlinkage void invalidate_interrupt24(void);
extern asmlinkage void invalidate_interrupt25(void);
extern asmlinkage void invalidate_interrupt26(void);
extern asmlinkage void invalidate_interrupt27(void);
extern asmlinkage void invalidate_interrupt28(void);
extern asmlinkage void invalidate_interrupt29(void);
extern asmlinkage void invalidate_interrupt30(void);
extern asmlinkage void invalidate_interrupt31(void);
extern asmlinkage void irq_move_cleanup_interrupt(void);
extern asmlinkage void reboot_interrupt(void);
extern asmlinkage void threshold_interrupt(void);
extern asmlinkage void call_function_interrupt(void);
extern asmlinkage void call_function_single_interrupt(void);
#ifdef CONFIG_TRACING
/* Interrupt handlers registered during init_IRQ */
extern void trace_apic_timer_interrupt(void);
extern void trace_x86_platform_ipi(void);
extern void trace_error_interrupt(void);
extern void trace_irq_work_interrupt(void);
extern void trace_spurious_interrupt(void);
extern void trace_thermal_interrupt(void);
extern void trace_reschedule_interrupt(void);
extern void trace_threshold_interrupt(void);
extern void trace_call_function_interrupt(void);
extern void trace_call_function_single_interrupt(void);
#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
#define trace_reboot_interrupt reboot_interrupt
#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
#endif /* CONFIG_TRACING */
/* IOAPIC */
#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
struct io_apic_irq_attr {
int ioapic;
int ioapic_pin;
int trigger;
int polarity;
};
static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
int ioapic, int ioapic_pin,
int trigger, int polarity)
{
irq_attr->ioapic = ioapic;
irq_attr->ioapic_pin = ioapic_pin;
irq_attr->trigger = trigger;
irq_attr->polarity = polarity;
}
/* Intel specific interrupt remapping information */
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
u16 sub_handle;
u8 irte_mask;
};
/* AMD specific interrupt remapping information */
struct irq_2_irte {
u16 devid; /* Device ID for IRTE table */
u16 index; /* Index into IRTE table */
};
/*
* This is performance-critical; we want lookups here to be O(1).
*
* Most irqs are mapped 1:1 with pins.
*/
struct irq_cfg {
struct irq_pin_list *irq_2_pin;
cpumask_var_t domain;
cpumask_var_t old_domain;
u8 vector;
u8 move_in_progress : 1;
#ifdef CONFIG_IRQ_REMAP
u8 remapped : 1;
union {
struct irq_2_iommu irq_2_iommu;
struct irq_2_irte irq_2_irte;
};
#endif
};
extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
extern void send_cleanup_vector(struct irq_cfg *);
struct irq_data;
int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
unsigned int *dest_id);
extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
extern void setup_ioapic_dest(void);
extern void enable_IO_APIC(void);
/* Statistics */
extern atomic_t irq_err_count;
extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
/* SMP */
extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
extern __visible void smp_spurious_interrupt(struct pt_regs *);
extern __visible void smp_x86_platform_ipi(struct pt_regs *);
extern __visible void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_IO_APIC
extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
#endif
#ifdef CONFIG_SMP
extern __visible void smp_reschedule_interrupt(struct pt_regs *);
extern __visible void smp_call_function_interrupt(struct pt_regs *);
extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif
extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
#ifdef CONFIG_TRACING
#define trace_interrupt interrupt
#endif
#define VECTOR_UNDEFINED (-1)
#define VECTOR_RETRIGGERED (-2)
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
extern void setup_vector_irq(int cpu);
#ifdef CONFIG_X86_IO_APIC
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
extern void __setup_vector_irq(int cpu);
#else
static inline void lock_vector_lock(void) {}
static inline void unlock_vector_lock(void) {}
static inline void __setup_vector_irq(int cpu) {}
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_HW_IRQ_H */
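The per-CPU vector_irq table declared above is what keeps vector-to-IRQ translation O(1). A hedged sketch of how an interrupt entry path could consult it; demo_vector_to_irq() is a made-up helper, not part of this tree.

#include <linux/percpu.h>
#include <asm/hw_irq.h>

/* Illustrative only: map a CPU vector back to the IRQ bound to it on this CPU. */
static inline int demo_vector_to_irq(unsigned int vector)
{
	int irq = __this_cpu_read(vector_irq[vector]);

	if (irq == VECTOR_UNDEFINED || irq == VECTOR_RETRIGGERED)
		return -1;	/* nothing usable is bound to this vector right now */
	return irq;
}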


@ -0,0 +1,45 @@
#ifndef _ASM_X86_HYPERTRANSPORT_H
#define _ASM_X86_HYPERTRANSPORT_H
/*
* Constants for x86 HyperTransport interrupts.
*/
#define HT_IRQ_LOW_BASE 0xf8000000
#define HT_IRQ_LOW_VECTOR_SHIFT 16
#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
#define HT_IRQ_LOW_VECTOR(v) \
(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
#define HT_IRQ_LOW_DEST_ID_SHIFT 8
#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
#define HT_IRQ_LOW_DEST_ID(v) \
(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
#define HT_IRQ_LOW_MT_FIXED 0x0000000
#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
#define HT_IRQ_LOW_MT_SMI 0x0000008
#define HT_IRQ_LOW_MT_NMI 0x000000c
#define HT_IRQ_LOW_MT_INIT 0x0000010
#define HT_IRQ_LOW_MT_STARTUP 0x0000014
#define HT_IRQ_LOW_MT_EXTINT 0x0000018
#define HT_IRQ_LOW_MT_LINT1 0x000008c
#define HT_IRQ_LOW_MT_LINT0 0x0000098
#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
#define HT_IRQ_HIGH_DEST_ID(v) \
((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
#endif /* _ASM_X86_HYPERTRANSPORT_H */
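Putting the HT_IRQ_LOW_* masks together, a small sketch of how a low message word might be composed for a fixed, edge-triggered, physical-mode interrupt; demo_ht_irq_low_word() and the vector/APIC ID values are invented for the example.

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/hypertransport.h>

/* Illustrative only: build an HT interrupt low word from a vector and APIC ID. */
static void demo_ht_irq_low_word(void)
{
	unsigned int vector = 0x31, apic_id = 0x05;
	u32 low = HT_IRQ_LOW_BASE |
		  HT_IRQ_LOW_DEST_ID(apic_id) |
		  HT_IRQ_LOW_VECTOR(vector) |
		  HT_IRQ_LOW_DM_PHYSICAL |
		  HT_IRQ_LOW_RQEOI_EDGE |
		  HT_IRQ_LOW_MT_FIXED;

	pr_info("HT IRQ low word: %#010x\n", low);	/* 0xf8310500 for these values */
}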


@ -0,0 +1,64 @@
/*
* Copyright (C) 2008, VMware, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _ASM_X86_HYPERVISOR_H
#define _ASM_X86_HYPERVISOR_H
#ifdef CONFIG_HYPERVISOR_GUEST
#include <asm/kvm_para.h>
#include <asm/xen/hypervisor.h>
/*
* x86 hypervisor information
*/
struct hypervisor_x86 {
/* Hypervisor name */
const char *name;
/* Detection routine */
uint32_t (*detect)(void);
/* Adjust CPU feature bits (run once per CPU) */
void (*set_cpu_features)(struct cpuinfo_x86 *);
/* Platform setup (run once per boot) */
void (*init_platform)(void);
/* X2APIC detection (run once per boot) */
bool (*x2apic_available)(void);
};
extern const struct hypervisor_x86 *x86_hyper;
/* Recognized hypervisors */
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
extern bool hypervisor_x2apic_available(void);
#else
static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
static inline void init_hypervisor_platform(void) { }
static inline bool hypervisor_x2apic_available(void) { return false; }
#endif /* CONFIG_HYPERVISOR_GUEST */
#endif /* _ASM_X86_HYPERVISOR_H */
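To make the struct hypervisor_x86 contract concrete, here is how a hypothetical detector might fill one in; everything prefixed demo_ (and x86_hyper_demo itself) is invented for illustration, while the real detectors such as x86_hyper_kvm live under arch/x86/kernel/cpu/.

#include <linux/types.h>
#include <asm/hypervisor.h>

/* Hypothetical detector, for illustration only. */
static uint32_t demo_hyper_detect(void)
{
	/*
	 * A real detector would probe a CPUID hypervisor leaf and return a
	 * non-zero priority when its signature matches, 0 otherwise.
	 */
	return 0;
}

static void demo_hyper_init_platform(void)
{
	/* one-time platform setup, e.g. registering a paravirt clocksource */
}

static bool demo_hyper_x2apic_available(void)
{
	return false;
}

const struct hypervisor_x86 x86_hyper_demo = {
	.name			= "Demo Hypervisor",
	.detect			= demo_hyper_detect,
	.init_platform		= demo_hyper_init_platform,
	.x2apic_available	= demo_hyper_x2apic_available,
};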

104
arch/x86/include/asm/i387.h Normal file

@ -0,0 +1,104 @@
/*
* Copyright (C) 1994 Linus Torvalds
*
* Pentium III FXSR, SSE support
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
* x86-64 work by Andi Kleen 2002
*/
#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/hardirq.h>
struct pt_regs;
struct user_i387_struct;
extern int init_fpu(struct task_struct *child);
extern void fpu_finit(struct fpu *fpu);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern void math_state_restore(void);
extern bool irq_fpu_usable(void);
/*
* Careful: __kernel_fpu_begin()/__kernel_fpu_end() must be called with
* preemption disabled; they do not touch the preempt state themselves.
* If preemption is enabled after __kernel_fpu_begin(), a preempt notifier
* should call __kernel_fpu_end() to keep the kernel/user FPU state from
* being corrupted. KVM, for example, uses this model.
*
* All other cases should use kernel_fpu_begin()/kernel_fpu_end(), which
* disable preemption for the duration of kernel FPU usage.
*/
extern void __kernel_fpu_begin(void);
extern void __kernel_fpu_end(void);
static inline void kernel_fpu_begin(void)
{
WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
__kernel_fpu_begin();
}
static inline void kernel_fpu_end(void)
{
__kernel_fpu_end();
preempt_enable();
}
/*
* Some instructions, such as VIA's PadLock instructions, generate a spurious
* DNA fault but don't modify the SSE registers, and they may be used from
* interrupt context as well. To keep such kernel instructions in interrupt
* context from interacting badly with other user/kernel FPU usage, they
* should only be used bracketed by irq_ts_save()/irq_ts_restore().
*/
static inline int irq_ts_save(void)
{
/*
* If in process context and not atomic, we can take a spurious DNA fault.
* Otherwise, doing clts() in process context requires disabling preemption
* or some heavy lifting like kernel_fpu_begin()
*/
if (!in_atomic())
return 0;
if (read_cr0() & X86_CR0_TS) {
clts();
return 1;
}
return 0;
}
static inline void irq_ts_restore(int TS_state)
{
if (TS_state)
stts();
}
/*
* The question "does this thread have fpu access?"
* is slightly racy, since preemption could come in
* and revoke it immediately after the test.
*
* However, even in that very unlikely scenario,
* we can just assume we have FPU access - typically
* to save the FP state - we'll just take a #NM
* fault and get the FPU access back.
*/
static inline int user_has_fpu(void)
{
return current->thread.fpu.has_fpu;
}
extern void unlazy_fpu(struct task_struct *tsk);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_I387_H */
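To make the bracketing rules above concrete, a minimal sketch of kernel-mode SSE/FPU use as a caller might write it; demo_copy_with_sse() and its memcpy() body are placeholders, only the irq_fpu_usable()/kernel_fpu_begin()/kernel_fpu_end() pattern is the point.

#include <linux/string.h>
#include <asm/i387.h>

/* Illustrative caller: bracket any kernel SSE/FPU use and never sleep inside. */
static void demo_copy_with_sse(void *dst, const void *src, size_t len)
{
	if (!irq_fpu_usable()) {
		memcpy(dst, src, len);	/* fall back when FPU use is not safe here */
		return;
	}

	kernel_fpu_begin();		/* disables preemption, saves FPU state as needed */
	memcpy(dst, src, len);		/* placeholder for an SSE-accelerated body */
	kernel_fpu_end();		/* restores state, re-enables preemption */
}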


@ -0,0 +1,75 @@
#ifndef _ASM_X86_I8259_H
#define _ASM_X86_I8259_H
#include <linux/delay.h>
extern unsigned int cached_irq_mask;
#define __byte(x, y) (((unsigned char *)&(y))[x])
#define cached_master_mask (__byte(0, cached_irq_mask))
#define cached_slave_mask (__byte(1, cached_irq_mask))
/* i8259A PIC registers */
#define PIC_MASTER_CMD 0x20
#define PIC_MASTER_IMR 0x21
#define PIC_MASTER_ISR PIC_MASTER_CMD
#define PIC_MASTER_POLL PIC_MASTER_ISR
#define PIC_MASTER_OCW3 PIC_MASTER_ISR
#define PIC_SLAVE_CMD 0xa0
#define PIC_SLAVE_IMR 0xa1
/* i8259A PIC related value */
#define PIC_CASCADE_IR 2
#define MASTER_ICW4_DEFAULT 0x01
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
extern raw_spinlock_t i8259A_lock;
/* the PIC may need a careful delay on some platforms, hence specific calls */
static inline unsigned char inb_pic(unsigned int port)
{
unsigned char value = inb(port);
/*
* delay for some accesses to PIC on motherboard or in chipset
* must be at least one microsecond, so be safe here:
*/
udelay(2);
return value;
}
static inline void outb_pic(unsigned char value, unsigned int port)
{
outb(value, port);
/*
* delay for some accesses to PIC on motherboard or in chipset
* must be at least one microsecond, so be safe here:
*/
udelay(2);
}
extern struct irq_chip i8259A_chip;
struct legacy_pic {
int nr_legacy_irqs;
struct irq_chip *chip;
void (*mask)(unsigned int irq);
void (*unmask)(unsigned int irq);
void (*mask_all)(void);
void (*restore_mask)(void);
void (*init)(int auto_eoi);
int (*irq_pending)(unsigned int irq);
void (*make_irq)(unsigned int irq);
};
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
static inline int nr_legacy_irqs(void)
{
return legacy_pic->nr_legacy_irqs;
}
#endif /* _ASM_X86_I8259_H */
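A hedged sketch of what a legacy_pic ->mask() operation boils down to for an IRQ on the master 8259A, using the register definitions and delayed accessors above; demo_mask_master_irq() is invented, and the i8259A_lock/cached_irq_mask bookkeeping of the real driver is deliberately omitted.

#include <asm/i8259.h>

/* Illustrative only: set the IMR bit that masks IRQ 0..7 on the master PIC. */
static void demo_mask_master_irq(unsigned int irq)
{
	unsigned char imr;

	if (irq >= 8)
		return;			/* the slave PIC is not handled in this sketch */

	imr = inb_pic(PIC_MASTER_IMR);			/* read the current mask */
	outb_pic(imr | (1 << irq), PIC_MASTER_IMR);	/* set the bit to mask the line */
}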


@ -0,0 +1,81 @@
#ifndef _ASM_X86_IA32_H
#define _ASM_X86_IA32_H
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
/*
* 32 bit structures for IA32 support.
*/
#include <asm/sigcontext32.h>
/* signal.h */
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
compat_stack_t uc_stack;
struct sigcontext_ia32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct ucontext_x32 {
unsigned int uc_flags;
unsigned int uc_link;
compat_stack_t uc_stack;
unsigned int uc__pad0; /* needed for alignment */
struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
/* This matches struct stat64 in glibc2.2, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];
#define STAT64_HAS_BROKEN_ST_INO 1
unsigned int __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned char __pad3[4];
long long st_size;
unsigned int st_blksize;
long long st_blocks; /* Number of 512-byte blocks allocated */
unsigned st_atime;
unsigned st_atime_nsec;
unsigned st_mtime;
unsigned st_mtime_nsec;
unsigned st_ctime;
unsigned st_ctime_nsec;
unsigned long long st_ino;
} __attribute__((packed));
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__
struct linux_binprm;
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
struct mm_struct;
extern void ia32_pick_mmap_layout(struct mm_struct *mm);
#endif
#endif /* CONFIG_IA32_EMULATION */
#endif /* _ASM_X86_IA32_H */
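Given the packed layout and the deliberate padding noted in the comment above, this compat struct stat64 should come out at 96 bytes to match the i386 ABI. A hypothetical compile-time check, not present in this tree, could pin that down; the 96/88 figures are derived from the field widths listed above.

#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/ia32.h>

/* Hypothetical sanity check: keep the ia32 compat stat64 ABI-stable. */
static inline void demo_check_stat64_layout(void)
{
	BUILD_BUG_ON(sizeof(struct stat64) != 96);
	BUILD_BUG_ON(offsetof(struct stat64, st_ino) != 88);
}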


@ -0,0 +1,11 @@
#ifndef _ASM_X86_IA32_UNISTD_H
#define _ASM_X86_IA32_UNISTD_H
/*
* This file contains the system call numbers of the ia32 compat ABI;
* it is for kernel use only.
*/
#define __SYSCALL_ia32_NR(x) (x)
#include <asm/unistd_32_ia32.h>
#endif /* _ASM_X86_IA32_UNISTD_H */


@ -0,0 +1,15 @@
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);
#else /* !CONFIG_X86_64 */
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
static inline void __exit_idle(void) { }
#endif /* CONFIG_X86_64 */
void amd_e400_remove_cpu(int cpu);
#endif /* _ASM_X86_IDLE_H */

221
arch/x86/include/asm/inat.h Normal file

@ -0,0 +1,221 @@
#ifndef _ASM_X86_INAT_H
#define _ASM_X86_INAT_H
/*
* x86 instruction attributes
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <asm/inat_types.h>
/*
* Internal bits. Don't use bitmasks directly, because these bits are
* unstable. You should use checking functions.
*/
#define INAT_OPCODE_TABLE_SIZE 256
#define INAT_GROUP_TABLE_SIZE 8
/* Legacy last prefixes */
#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
/* Other Legacy prefixes */
#define INAT_PFX_LOCK 4 /* 0xF0 */
#define INAT_PFX_CS 5 /* 0x2E */
#define INAT_PFX_DS 6 /* 0x3E */
#define INAT_PFX_ES 7 /* 0x26 */
#define INAT_PFX_FS 8 /* 0x64 */
#define INAT_PFX_GS 9 /* 0x65 */
#define INAT_PFX_SS 10 /* 0x36 */
#define INAT_PFX_ADDRSZ 11 /* 0x67 */
/* x86-64 REX prefix */
#define INAT_PFX_REX 12 /* 0x4X */
/* AVX VEX prefixes */
#define INAT_PFX_VEX2 13 /* 2-byte VEX prefix */
#define INAT_PFX_VEX3 14 /* 3-byte VEX prefix */
#define INAT_LSTPFX_MAX 3
#define INAT_LGCPFX_MAX 11
/* Immediate size */
#define INAT_IMM_BYTE 1
#define INAT_IMM_WORD 2
#define INAT_IMM_DWORD 3
#define INAT_IMM_QWORD 4
#define INAT_IMM_PTR 5
#define INAT_IMM_VWORD32 6
#define INAT_IMM_VWORD 7
/* Legacy prefix */
#define INAT_PFX_OFFS 0
#define INAT_PFX_BITS 4
#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
/* Escape opcodes */
#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
#define INAT_ESC_BITS 2
#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
/* Group opcodes (1-16) */
#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
#define INAT_GRP_BITS 5
#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
/* Immediates */
#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
#define INAT_IMM_BITS 3
#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
/* Flags */
#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
/* Attribute making macros for attribute tables */
#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
/* Attribute search APIs */
extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
int lpfx_id,
insn_attr_t esc_attr);
extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
int lpfx_id,
insn_attr_t esc_attr);
extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
insn_byte_t vex_m,
insn_byte_t vex_pp);
/* Attribute checking functions */
static inline int inat_is_legacy_prefix(insn_attr_t attr)
{
attr &= INAT_PFX_MASK;
return attr && attr <= INAT_LGCPFX_MAX;
}
static inline int inat_is_address_size_prefix(insn_attr_t attr)
{
return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
}
static inline int inat_is_operand_size_prefix(insn_attr_t attr)
{
return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
}
static inline int inat_is_rex_prefix(insn_attr_t attr)
{
return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
}
static inline int inat_last_prefix_id(insn_attr_t attr)
{
if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
return 0;
else
return attr & INAT_PFX_MASK;
}
static inline int inat_is_vex_prefix(insn_attr_t attr)
{
attr &= INAT_PFX_MASK;
return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3;
}
static inline int inat_is_vex3_prefix(insn_attr_t attr)
{
return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
}
static inline int inat_is_escape(insn_attr_t attr)
{
return attr & INAT_ESC_MASK;
}
static inline int inat_escape_id(insn_attr_t attr)
{
return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
}
static inline int inat_is_group(insn_attr_t attr)
{
return attr & INAT_GRP_MASK;
}
static inline int inat_group_id(insn_attr_t attr)
{
return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
}
static inline int inat_group_common_attribute(insn_attr_t attr)
{
return attr & ~INAT_GRP_MASK;
}
static inline int inat_has_immediate(insn_attr_t attr)
{
return attr & INAT_IMM_MASK;
}
static inline int inat_immediate_size(insn_attr_t attr)
{
return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
}
static inline int inat_has_modrm(insn_attr_t attr)
{
return attr & INAT_MODRM;
}
static inline int inat_is_force64(insn_attr_t attr)
{
return attr & INAT_FORCE64;
}
static inline int inat_has_second_immediate(insn_attr_t attr)
{
return attr & INAT_SCNDIMM;
}
static inline int inat_has_moffset(insn_attr_t attr)
{
return attr & INAT_MOFFSET;
}
static inline int inat_has_variant(insn_attr_t attr)
{
return attr & INAT_VARIANT;
}
static inline int inat_accept_vex(insn_attr_t attr)
{
return attr & INAT_VEXOK;
}
static inline int inat_must_vex(insn_attr_t attr)
{
return attr & INAT_VEXONLY;
}
#endif
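To show how the attribute-making macros and the checking helpers above fit together, a small hypothetical table entry and the queries one might run against it; demo_attr, demo_query_attr() and the choice of group 5 with a byte immediate are invented for the example.

#include <linux/bug.h>
#include <asm/inat.h>

/* Illustrative attribute: a group-5 opcode taking a ModRM byte and a byte immediate. */
static const insn_attr_t demo_attr =
	INAT_MAKE_GROUP(5) | INAT_MAKE_IMM(INAT_IMM_BYTE);

static void demo_query_attr(void)
{
	/* INAT_MAKE_GROUP() already sets INAT_MODRM, so these all hold. */
	BUG_ON(!inat_has_modrm(demo_attr));
	BUG_ON(!inat_is_group(demo_attr));
	BUG_ON(inat_group_id(demo_attr) != 5);
	BUG_ON(inat_immediate_size(demo_attr) != INAT_IMM_BYTE);
}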


@ -0,0 +1,29 @@
#ifndef _ASM_X86_INAT_TYPES_H
#define _ASM_X86_INAT_TYPES_H
/*
* x86 instruction attributes
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
/* Instruction attributes */
typedef unsigned int insn_attr_t;
typedef unsigned char insn_byte_t;
typedef signed int insn_value_t;
#endif

Some files were not shown because too many files have changed in this diff.