Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-09 01:28:05 -04:00)
Fixed MTP to work with TWRP
Commit f6dfaef42e: 50820 changed files with 20846062 additions and 0 deletions
arch/ia64/include/asm/Kbuild (new file, 11 lines)
@@ -0,0 +1,11 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += trace_clock.h
generic-y += vtime.h
arch/ia64/include/asm/acenv.h (new file, 52 lines)
@@ -0,0 +1,52 @@
/*
 * IA64 specific ACPICA environments and implementation
 *
 * Copyright (C) 2014, Intel Corporation
 *   Author: Lv Zheng <lv.zheng@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_IA64_ACENV_H
#define _ASM_IA64_ACENV_H

#include <asm/intrinsics.h>

#define COMPILER_DEPENDENT_INT64	long
#define COMPILER_DEPENDENT_UINT64	unsigned long

/* Asm macros */

static inline int
ia64_acpi_acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}

static inline int
ia64_acpi_release_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return old & 0x1;
}

#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq)	\
	((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))

#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq)	\
	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))

#endif /* _ASM_IA64_ACENV_H */
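The lock word these helpers manipulate packs two flags: bit 0 marks a waiter pending, bit 1 marks the lock owned; acquire adds the owned bit and sets pending when the lock was already held, while release clears both bits and reports the old pending flag. A minimal caller sketch, assuming a facs pointer and a hypothetical signal_waiter() helper (real ACPICA blocks on an event rather than spinning):

	int glk;

	do {
		ACPI_ACQUIRE_GLOBAL_LOCK(facs, glk);	/* -1: owned elsewhere, we set pending */
	} while (glk);					/* 0 means the lock is now ours */

	/* ... touch hardware shared with the firmware ... */

	ACPI_RELEASE_GLOBAL_LOCK(facs, glk);
	if (glk)					/* old pending bit: someone is waiting */
		signal_waiter();			/* hypothetical helper, not in this header */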
arch/ia64/include/asm/acpi-ext.h (new file, 20 lines)
@@ -0,0 +1,20 @@
/*
 * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
 *	Alex Williamson <alex.williamson@hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vendor specific extensions to ACPI.
 */

#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H

#include <linux/types.h>

extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);

#endif /* _ASM_IA64_ACPI_EXT_H */
arch/ia64/include/asm/acpi.h (new file, 147 lines)
@@ -0,0 +1,147 @@
/*
 *  Copyright (C) 1999 VA Linux Systems
 *  Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 *  Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>

#ifdef CONFIG_ACPI
extern int acpi_lapic;
#define acpi_disabled 0	/* ACPI always enabled on IA64 */
#define acpi_noirq 0	/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */

static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }

#ifdef CONFIG_IA64_GENERIC
const char *acpi_get_sysname (void);
#else
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_SIM)
	return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
	return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
	return "uv";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# elif defined(CONFIG_IA64_DIG_VTD)
	return "dig_vtd";
# else
#	error Unknown platform.  Fix acpi.c.
# endif
}
#endif
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

extern unsigned long acpi_wakeup_address;

/*
 * Record the cpei override flag and current logical cpu. This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

static inline bool arch_has_acpi_pdc(void) { return true; }
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
	for_each_cpu_mask((cpu), early_cpu_possible_map)

static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	low_cpu = cpus_weight(early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpu_set(cpu, early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}
#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/
arch/ia64/include/asm/agp.h (new file, 26 lines)
@@ -0,0 +1,26 @@
#ifndef _ASM_IA64_AGP_H
#define _ASM_IA64_AGP_H

/*
 * IA-64 specific AGP definitions.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
 * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
 * (unlike x86, where it gets mapped "write-coalescing").
 */
#define map_page_into_agp(page)		/* nothing */
#define unmap_page_from_agp(page)	/* nothing */
#define flush_agp_cache()		mb()

/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order)		\
	((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order)	\
	free_pages((unsigned long)(table), (order))

#endif /* _ASM_IA64_AGP_H */
arch/ia64/include/asm/asm-offsets.h (new file, 1 line)
@@ -0,0 +1 @@
#include <generated/asm-offsets.h>
arch/ia64/include/asm/asmmacro.h (new file, 135 lines)
@@ -0,0 +1,135 @@
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H

/*
 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:

#define ENTRY_MIN_ALIGN(name)			\
	.align 16;				\
	.proc name;				\
name:

#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)

#define END(name)				\
	.endp name

/*
 * Helper macros to make unwind directives more readable:
 */

/* prologue_gr: */
#define ASM_UNW_PRLG_RP			0x8
#define ASM_UNW_PRLG_PFS		0x4
#define ASM_UNW_PRLG_PSP		0x2
#define ASM_UNW_PRLG_PR			0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))

/*
 * Helper macros for accessing user memory.
 *
 * When adding any new .section/.previous entries here, make sure to
 * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
 * unpleasant things will happen.
 */

	.section "__ex_table", "a"		// declare section & section attributes
	.previous

# define EX(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.;	\
  [99:]	x
# define EXCLR(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.+4;	\
  [99:]	x

/*
 * Tag MCA recoverable instruction ranges.
 */

	.section "__mca_table", "a"		// declare section & section attributes
	.previous

# define MCA_RECOVER_RANGE(y)			\
	.xdata4 "__mca_table", y-., 99f-.;	\
  [99:]

/*
 * Mark instructions that need a load of a virtual address patched to be
 * a load of a physical address.  We use this either in critical performance
 * path (ivt.S - TLB miss processing) or in places where it might not be
 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
 */
	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

#define	LOAD_PHYSICAL(pr, reg, obj)		\
[1:](pr)movl reg = obj;				\
	.xdata4 ".data..patch.vtop", 1b-.

/*
 * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
 * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
 */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN					\
	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
1:{ .mib;						\
	nop.m 0;					\
	mov r16=ar.pfs;					\
	br.call.sptk.many b7=2f;;			\
  };							\
2:{ .mib;						\
	nop.m 0;					\
	mov ar.pfs=r16;					\
	br.ret.sptk.many b6;;				\
  }
#else
# define FSYS_RETURN	br.ret.sptk.many b6
#endif

/*
 * If physical stack register size is different from DEF_NUM_STACK_REG,
 * dynamically patch the kernel for correct size.
 */
	.section ".data..patch.phys_stack_reg", "a"
	.previous
#define	LOAD_PHYS_STACK_REG_SIZE(reg)			\
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
	.xdata4 ".data..patch.phys_stack_reg", 1b-.

/*
 * Up until early 2004, use of .align within a function caused bad unwind info.
 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
 * otherwise.
 */
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n)	.align n
#else
# define TEXT_ALIGN(n)
#endif

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif

#endif /* _ASM_IA64_ASMMACRO_H */
arch/ia64/include/asm/atomic.h (new file, 196 lines)
@@ -0,0 +1,196 @@
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		ACCESS_ONCE((v)->counter)
#define atomic64_read(v)	ACCESS_ONCE((v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)

#undef ATOMIC_OP

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)

#undef ATOMIC64_OP

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */
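The constant checks in atomic_add_return()/atomic_sub_return() exist because ia64's fetchadd instruction only accepts the immediates +/-1, 4, 8 and 16; any other delta falls back to the ia64_atomic_add()/ia64_atomic_sub() cmpxchg loop. A small usage sketch (the refs counter is illustrative, not part of this header):

	static atomic_t refs = ATOMIC_INIT(1);

	void get_ref(void)
	{
		atomic_inc(&refs);			/* constant +1: a single fetchadd4.acq */
	}

	void add_refs(int n)
	{
		atomic_add(n, &refs);			/* non-constant delta: cmpxchg retry loop */
	}

	int put_ref(void)
	{
		return atomic_dec_and_test(&refs);	/* true when the count reaches zero */
	}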
arch/ia64/include/asm/barrier.h (new file, 94 lines)
@@ -0,0 +1,94 @@
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
# define smp_read_barrier_depends()	read_barrier_depends()

#else

# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
# define smp_read_barrier_depends()	do { } while(0)

#endif

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
 * need for asm trickery!
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

/*
 * XXX check on this ---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)

/*
 * The group barrier in front of the rsm & ssm are necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#endif /* _ASM_IA64_BARRIER_H */
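Because the compiler already emits st.rel for volatile stores and ld.acq for volatile loads on ia64, smp_store_release()/smp_load_acquire() need only a compiler barrier plus ACCESS_ONCE(). A sketch of the intended pairing (data and ready are made-up variables, not from this header):

	static int data;
	static int ready;

	void producer(void)
	{
		data = 42;
		smp_store_release(&ready, 1);	/* becomes st.rel: 'data' is visible first */
	}

	int consumer(void)
	{
		if (smp_load_acquire(&ready))	/* becomes ld.acq */
			return data;		/* guaranteed to observe 42 */
		return -1;
	}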
arch/ia64/include/asm/bitops.h (new file, 456 lines)
@@ -0,0 +1,456 @@
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 *	    O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are driver (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on ia64
 */
#define test_and_set_bit_lock test_and_set_bit

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */
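All the atomic variants above reduce to the same pattern: select the 32-bit word containing the bit, then retry a cmpxchg.acq (cmpxchg.rel for the unlock form) until the update lands unchanged. A brief usage sketch with an illustrative bitmap (note the "long" alignment requirement from the set_bit() comment):

	static unsigned long pending[2];		/* 128 bits, "long" aligned */

	void mark(int n)
	{
		set_bit(n, pending);			/* atomic, acquire semantics */
	}

	int consume(int n)
	{
		return test_and_clear_bit(n, pending);	/* atomically observe-and-clear */
	}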
arch/ia64/include/asm/bug.h (new file, 14 lines)
@@ -0,0 +1,14 @@
#ifndef _ASM_IA64_BUG_H
#define _ASM_IA64_BUG_H

#ifdef CONFIG_BUG
#define ia64_abort()	__builtin_trap()
#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)

/* should this BUG be made generic? */
#define HAVE_ARCH_BUG
#endif

#include <asm-generic/bug.h>

#endif
arch/ia64/include/asm/bugs.h (new file, 19 lines)
@@ -0,0 +1,19 @@
/*
 * This is included by init/main.c to check for architecture-dependent bugs.
 *
 * Needs:
 *	void check_bugs(void);
 *
 * Based on <asm-alpha/bugs.h>.
 *
 * Modified 1998, 1999, 2003
 *	David Mosberger-Tang <davidm@hpl.hp.com>,  Hewlett-Packard Co.
 */
#ifndef _ASM_IA64_BUGS_H
#define _ASM_IA64_BUGS_H

#include <asm/processor.h>

extern void check_bugs (void);

#endif /* _ASM_IA64_BUGS_H */
arch/ia64/include/asm/cache.h (new file, 29 lines)
@@ -0,0 +1,29 @@
#ifndef _ASM_IA64_CACHE_H
#define _ASM_IA64_CACHE_H


/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/* Bytes per L1 (data) cache line.  */
#define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
# define SMP_CACHE_BYTES	L1_CACHE_BYTES
#else
  /*
   * The "aligned" directive can only _increase_ alignment, so this is
   * safe and provides an easy way to avoid wasting space on a
   * uni-processor:
   */
# define SMP_CACHE_SHIFT	3
# define SMP_CACHE_BYTES	(1 << 3)
#endif

#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#endif /* _ASM_IA64_CACHE_H */
arch/ia64/include/asm/cacheflush.h (new file, 54 lines)
@@ -0,0 +1,54 @@
#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/page-flags.h>
#include <linux/bitops.h>

#include <asm/page.h>

/*
 * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
 * to avoid them whenever possible.
 */

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma,page)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page)			\
do {						\
	clear_bit(PG_arch_1, &(page)->flags);	\
} while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range (unsigned long start, unsigned long end);
extern void clflush_cache_range(void *addr, int size);


#define flush_icache_user_range(vma, page, user_addr, len)					\
do {												\
	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
	flush_icache_range(_addr, _addr + (len));						\
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* _ASM_IA64_CACHEFLUSH_H */
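The only real work in this file is keeping the instruction cache coherent after the kernel writes into a page some task may execute: flush_dcache_page() merely clears PG_arch_1 (deferring the flush), while copy_to_user_page() flushes eagerly. An illustrative sketch of the eager path, loosely modeled on what a ptrace-style write does (page, vaddr, insn and len are all made-up here):

	/* e.g. poking a breakpoint into user text */
	void *dst = kmap(page) + (vaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, vaddr, dst, insn, len);
	/* expands to memcpy() + flush_icache_user_range(), which flushes
	 * the kernel alias of the user range via flush_icache_range() */
	kunmap(page);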
arch/ia64/include/asm/checksum.h (new file, 79 lines)
@@ -0,0 +1,79 @@
#ifndef _ASM_IA64_CHECKSUM_H
#define _ASM_IA64_CHECKSUM_H

/*
 * Modified 1998, 1999
 *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
 */

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 */
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
 * checksum, already complemented
 */
extern __sum16 csum_tcpudp_magic (__be32 saddr, __be32 daddr,
				  unsigned short len,
				  unsigned short proto,
				  __wsum sum);

extern __wsum csum_tcpudp_nofold (__be32 saddr, __be32 daddr,
				  unsigned short len,
				  unsigned short proto,
				  __wsum sum);

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * Same as csum_partial, but copies from src while it checksums.
 *
 * Here it is even more important to align src and dst on a 32-bit (or
 * even better 64-bit) boundary.
 */
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
						 int len, __wsum sum,
						 int *errp);

extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
					int len, __wsum sum);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly in
 * icmp.c
 */
extern __sum16 ip_compute_csum(const void *buff, int len);

/*
 * Fold a partial checksum without adding pseudo headers.
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;
}

#define _HAVE_ARCH_IPV6_CSUM	1
struct in6_addr;
extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
	const struct in6_addr *daddr, __u32 len, unsigned short proto,
	__wsum csum);

#endif /* _ASM_IA64_CHECKSUM_H */
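csum_fold() folds the 32-bit running sum down to 16 bits end-around-carry style (the second pass absorbs any carry the first one produced) and complements the result. A worked example, assuming the illustrative partial sum 0x12345678:

	/* sum = 0x12345678
	 *   (sum & 0xffff) + (sum >> 16) = 0x5678 + 0x1234 = 0x68ac
	 *   second fold: 0x68ac + 0x0000 = 0x68ac   (no carry to absorb)
	 *   ~0x68ac, low 16 bits          = 0x9753  -> the returned __sum16
	 */
	__sum16 check = csum_fold((__force __wsum)0x12345678);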
arch/ia64/include/asm/clocksource.h (new file, 10 lines)
@@ -0,0 +1,10 @@
/* IA64-specific clocksource additions */

#ifndef _ASM_IA64_CLOCKSOURCE_H
#define _ASM_IA64_CLOCKSOURCE_H

struct arch_clocksource_data {
	void *fsys_mmio;	/* used by fsyscall asm code */
};

#endif /* _ASM_IA64_CLOCKSOURCE_H */
arch/ia64/include/asm/cpu.h (new file, 22 lines)
@@ -0,0 +1,22 @@
#ifndef _ASM_IA64_CPU_H_
#define _ASM_IA64_CPU_H_

#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/percpu.h>

struct ia64_cpu {
	struct cpu cpu;
};

DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);

DECLARE_PER_CPU(int, cpu_state);

#ifdef CONFIG_HOTPLUG_CPU
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
#endif

#endif /* _ASM_IA64_CPU_H_ */
arch/ia64/include/asm/cputime.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
 * Definitions for measuring cputime on ia64 machines.
 *
 * Based on <asm-powerpc/cputime.h>.
 *
 * Copyright (C) 2007 FUJITSU LIMITED
 * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
 * Otherwise we measure cpu time in jiffies using the generic definitions.
 */

#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
# include <asm-generic/cputime.h>
#else
# include <asm/processor.h>
# include <asm-generic/cputime_nsecs.h>
extern void arch_vtime_task_switch(struct task_struct *tsk);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#endif /* __IA64_CPUTIME_H */
arch/ia64/include/asm/current.h (new file, 17 lines)
@@ -0,0 +1,17 @@
#ifndef _ASM_IA64_CURRENT_H
#define _ASM_IA64_CURRENT_H

/*
 * Modified 1998-2000
 *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
 */

#include <asm/intrinsics.h>

/*
 * In kernel mode, thread pointer (r13) is used to point to the current task
 * structure.
 */
#define current	((struct task_struct *) ia64_getreg(_IA64_REG_TP))

#endif /* _ASM_IA64_CURRENT_H */
arch/ia64/include/asm/cyclone.h (new file, 15 lines)
@@ -0,0 +1,15 @@
#ifndef ASM_IA64_CYCLONE_H
#define ASM_IA64_CYCLONE_H

#ifdef	CONFIG_IA64_CYCLONE
extern int use_cyclone;
extern void __init cyclone_setup(void);
#else	/* CONFIG_IA64_CYCLONE */
#define use_cyclone 0
static inline void cyclone_setup(void)
{
	printk(KERN_ERR "Cyclone Counter: System not configured"
					" w/ CONFIG_IA64_CYCLONE.\n");
}
#endif	/* CONFIG_IA64_CYCLONE */
#endif	/* !ASM_IA64_CYCLONE_H */
arch/ia64/include/asm/delay.h (new file, 88 lines)
@@ -0,0 +1,88 @@
#ifndef _ASM_IA64_DELAY_H
#define _ASM_IA64_DELAY_H

/*
 * Delay routines using a pre-computed "cycles/usec" value.
 *
 * Copyright (C) 1998, 1999 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/compiler.h>

#include <asm/intrinsics.h>
#include <asm/processor.h>

static __inline__ void
ia64_set_itm (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_ITM, val);
	ia64_srlz_d();
}

static __inline__ unsigned long
ia64_get_itm (void)
{
	unsigned long result;

	result = ia64_getreg(_IA64_REG_CR_ITM);
	ia64_srlz_d();
	return result;
}

static __inline__ void
ia64_set_itv (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_ITV, val);
	ia64_srlz_d();
}

static __inline__ unsigned long
ia64_get_itv (void)
{
	return ia64_getreg(_IA64_REG_CR_ITV);
}

static __inline__ void
ia64_set_itc (unsigned long val)
{
	ia64_setreg(_IA64_REG_AR_ITC, val);
	ia64_srlz_d();
}

static __inline__ unsigned long
ia64_get_itc (void)
{
	unsigned long result;

	result = ia64_getreg(_IA64_REG_AR_ITC);
	ia64_barrier();
#ifdef CONFIG_ITANIUM
	while (unlikely((__s32) result == -1)) {
		result = ia64_getreg(_IA64_REG_AR_ITC);
		ia64_barrier();
	}
#endif
	return result;
}

extern void ia64_delay_loop (unsigned long loops);

static __inline__ void
__delay (unsigned long loops)
{
	if (unlikely(loops < 1))
		return;

	ia64_delay_loop (loops - 1);
}

extern void udelay (unsigned long usecs);

#endif /* _ASM_IA64_DELAY_H */
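ia64_get_itc() reads the interval time counter; the CONFIG_ITANIUM retry loop works around early Itanium silicon on which the ITC can transiently read as -1. A small timing sketch built on it (do_work() is a hypothetical stand-in, and converting cycles to time normally uses the pre-computed cycles/usec value this file's comment mentions):

	unsigned long start, cycles;

	start = ia64_get_itc();
	do_work();				/* hypothetical workload */
	cycles = ia64_get_itc() - start;	/* elapsed ITC cycles */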
arch/ia64/include/asm/device.h (new file, 18 lines)
@@ -0,0 +1,18 @@
/*
 * Arch specific extensions to struct device
 *
 * This file is released under the GPLv2
 */
#ifndef _ASM_IA64_DEVICE_H
#define _ASM_IA64_DEVICE_H

struct dev_archdata {
#ifdef CONFIG_INTEL_IOMMU
	void *iommu; /* hook for IOMMU specific extension */
#endif
};

struct pdev_archdata {
};

#endif /* _ASM_IA64_DEVICE_H */
arch/ia64/include/asm/div64.h (new file, 1 line)
@@ -0,0 +1 @@
#include <asm-generic/div64.h>
arch/ia64/include/asm/dma-mapping.h (new file, 109 lines)
@@ -0,0 +1,109 @@
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *daddr, gfp_t gfp,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	void *caddr;

	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
	return caddr;
}

#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *caddr, dma_addr_t daddr,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	debug_dma_free_coherent(dev, size, caddr, daddr);
	ops->free(dev, size, caddr, daddr, attrs);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define get_dma_ops(dev) platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	debug_dma_mapping_error(dev, daddr);
	return ops->mapping_error(dev, daddr);
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */
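Every operation here simply dispatches through the machine vector's dma_map_ops (platform_dma_get_ops()), so one kernel binary can route DMA through SWIOTLB, the HP zx1 IOMMU, or SGI platform code. A driver-side sketch of the coherent-allocation path (dev and the 4096-byte size are illustrative):

	dma_addr_t bus;
	void *cpu;

	cpu = dma_alloc_coherent(dev, 4096, &bus, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* program 'bus' into the device and touch the buffer via 'cpu';
	 * dma_cache_sync() is only an mb() because IA-64 is cache-coherent */
	dma_free_coherent(dev, 4096, cpu, bus);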
arch/ia64/include/asm/dma.h (new file, 24 lines)
@@ -0,0 +1,24 @@
#ifndef _ASM_IA64_DMA_H
#define _ASM_IA64_DMA_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <asm/io.h>		/* need byte IO */

extern unsigned long MAX_DMA_ADDRESS;

#ifdef CONFIG_PCI
  extern int isa_dma_bridge_buggy;
#else
# define isa_dma_bridge_buggy	(0)
#endif

#define free_dma(x)

void dma_mark_clean(void *addr, size_t size);

#endif /* _ASM_IA64_DMA_H */
arch/ia64/include/asm/dmi.h (new file, 14 lines)
@@ -0,0 +1,14 @@
#ifndef _ASM_DMI_H
#define _ASM_DMI_H 1

#include <linux/slab.h>
#include <asm/io.h>

/* Use normal IO mappings for DMI */
#define dmi_early_remap		ioremap
#define dmi_early_unmap(x, l)	iounmap(x)
#define dmi_remap		ioremap
#define dmi_unmap		iounmap
#define dmi_alloc(l)		kzalloc(l, GFP_ATOMIC)

#endif
234
arch/ia64/include/asm/elf.h
Normal file
@@ -0,0 +1,234 @@
#ifndef _ASM_IA64_ELF_H
#define _ASM_IA64_ELF_H

/*
 * ELF-specific definitions.
 *
 * Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/auxvec.h>

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_IA_64

#define CORE_DUMP_USE_REGSET

/* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
   interpreted as follows by Linux: */
#define EF_IA_64_LINUX_EXECUTABLE_STACK	0x1	/* is stack (& heap) executable by default? */

#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/*
 * This is the location that an ET_DYN program is loaded if exec'ed.
 * Typical use of this is to invoke "./ld.so someprog" to test out a
 * new version of the loader.  We need to make sure that it is out of
 * the way of the program that it will "exec", and that there is
 * sufficient room for the brk.
 */
#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000UL)

#define PT_IA_64_UNWIND		0x70000001

/* IA-64 relocations: */
#define R_IA64_NONE		0x00	/* none */
#define R_IA64_IMM14		0x21	/* symbol + addend, add imm14 */
#define R_IA64_IMM22		0x22	/* symbol + addend, add imm22 */
#define R_IA64_IMM64		0x23	/* symbol + addend, mov imm64 */
#define R_IA64_DIR32MSB		0x24	/* symbol + addend, data4 MSB */
#define R_IA64_DIR32LSB		0x25	/* symbol + addend, data4 LSB */
#define R_IA64_DIR64MSB		0x26	/* symbol + addend, data8 MSB */
#define R_IA64_DIR64LSB		0x27	/* symbol + addend, data8 LSB */
#define R_IA64_GPREL22		0x2a	/* @gprel(sym+add), add imm22 */
#define R_IA64_GPREL64I		0x2b	/* @gprel(sym+add), mov imm64 */
#define R_IA64_GPREL32MSB	0x2c	/* @gprel(sym+add), data4 MSB */
#define R_IA64_GPREL32LSB	0x2d	/* @gprel(sym+add), data4 LSB */
#define R_IA64_GPREL64MSB	0x2e	/* @gprel(sym+add), data8 MSB */
#define R_IA64_GPREL64LSB	0x2f	/* @gprel(sym+add), data8 LSB */
#define R_IA64_LTOFF22		0x32	/* @ltoff(sym+add), add imm22 */
#define R_IA64_LTOFF64I		0x33	/* @ltoff(sym+add), mov imm64 */
#define R_IA64_PLTOFF22		0x3a	/* @pltoff(sym+add), add imm22 */
#define R_IA64_PLTOFF64I	0x3b	/* @pltoff(sym+add), mov imm64 */
#define R_IA64_PLTOFF64MSB	0x3e	/* @pltoff(sym+add), data8 MSB */
#define R_IA64_PLTOFF64LSB	0x3f	/* @pltoff(sym+add), data8 LSB */
#define R_IA64_FPTR64I		0x43	/* @fptr(sym+add), mov imm64 */
#define R_IA64_FPTR32MSB	0x44	/* @fptr(sym+add), data4 MSB */
#define R_IA64_FPTR32LSB	0x45	/* @fptr(sym+add), data4 LSB */
#define R_IA64_FPTR64MSB	0x46	/* @fptr(sym+add), data8 MSB */
#define R_IA64_FPTR64LSB	0x47	/* @fptr(sym+add), data8 LSB */
#define R_IA64_PCREL60B		0x48	/* @pcrel(sym+add), brl */
#define R_IA64_PCREL21B		0x49	/* @pcrel(sym+add), ptb, call */
#define R_IA64_PCREL21M		0x4a	/* @pcrel(sym+add), chk.s */
#define R_IA64_PCREL21F		0x4b	/* @pcrel(sym+add), fchkf */
#define R_IA64_PCREL32MSB	0x4c	/* @pcrel(sym+add), data4 MSB */
#define R_IA64_PCREL32LSB	0x4d	/* @pcrel(sym+add), data4 LSB */
#define R_IA64_PCREL64MSB	0x4e	/* @pcrel(sym+add), data8 MSB */
#define R_IA64_PCREL64LSB	0x4f	/* @pcrel(sym+add), data8 LSB */
#define R_IA64_LTOFF_FPTR22	0x52	/* @ltoff(@fptr(s+a)), imm22 */
#define R_IA64_LTOFF_FPTR64I	0x53	/* @ltoff(@fptr(s+a)), imm64 */
#define R_IA64_LTOFF_FPTR32MSB	0x54	/* @ltoff(@fptr(s+a)), 4 MSB */
#define R_IA64_LTOFF_FPTR32LSB	0x55	/* @ltoff(@fptr(s+a)), 4 LSB */
#define R_IA64_LTOFF_FPTR64MSB	0x56	/* @ltoff(@fptr(s+a)), 8 MSB */
#define R_IA64_LTOFF_FPTR64LSB	0x57	/* @ltoff(@fptr(s+a)), 8 LSB */
#define R_IA64_SEGREL32MSB	0x5c	/* @segrel(sym+add), data4 MSB */
#define R_IA64_SEGREL32LSB	0x5d	/* @segrel(sym+add), data4 LSB */
#define R_IA64_SEGREL64MSB	0x5e	/* @segrel(sym+add), data8 MSB */
#define R_IA64_SEGREL64LSB	0x5f	/* @segrel(sym+add), data8 LSB */
#define R_IA64_SECREL32MSB	0x64	/* @secrel(sym+add), data4 MSB */
#define R_IA64_SECREL32LSB	0x65	/* @secrel(sym+add), data4 LSB */
#define R_IA64_SECREL64MSB	0x66	/* @secrel(sym+add), data8 MSB */
#define R_IA64_SECREL64LSB	0x67	/* @secrel(sym+add), data8 LSB */
#define R_IA64_REL32MSB		0x6c	/* data 4 + REL */
#define R_IA64_REL32LSB		0x6d	/* data 4 + REL */
#define R_IA64_REL64MSB		0x6e	/* data 8 + REL */
#define R_IA64_REL64LSB		0x6f	/* data 8 + REL */
#define R_IA64_LTV32MSB		0x74	/* symbol + addend, data4 MSB */
#define R_IA64_LTV32LSB		0x75	/* symbol + addend, data4 LSB */
#define R_IA64_LTV64MSB		0x76	/* symbol + addend, data8 MSB */
#define R_IA64_LTV64LSB		0x77	/* symbol + addend, data8 LSB */
#define R_IA64_PCREL21BI	0x79	/* @pcrel(sym+add), ptb, call */
#define R_IA64_PCREL22		0x7a	/* @pcrel(sym+add), imm22 */
#define R_IA64_PCREL64I		0x7b	/* @pcrel(sym+add), imm64 */
#define R_IA64_IPLTMSB		0x80	/* dynamic reloc, imported PLT, MSB */
#define R_IA64_IPLTLSB		0x81	/* dynamic reloc, imported PLT, LSB */
#define R_IA64_COPY		0x84	/* dynamic reloc, data copy */
#define R_IA64_SUB		0x85	/* -symbol + addend, add imm22 */
#define R_IA64_LTOFF22X		0x86	/* LTOFF22, relaxable.  */
#define R_IA64_LDXMOV		0x87	/* Use of LTOFF22X.  */
#define R_IA64_TPREL14		0x91	/* @tprel(sym+add), add imm14 */
#define R_IA64_TPREL22		0x92	/* @tprel(sym+add), add imm22 */
#define R_IA64_TPREL64I		0x93	/* @tprel(sym+add), add imm64 */
#define R_IA64_TPREL64MSB	0x96	/* @tprel(sym+add), data8 MSB */
#define R_IA64_TPREL64LSB	0x97	/* @tprel(sym+add), data8 LSB */
#define R_IA64_LTOFF_TPREL22	0x9a	/* @ltoff(@tprel(s+a)), add imm22 */
#define R_IA64_DTPMOD64MSB	0xa6	/* @dtpmod(sym+add), data8 MSB */
#define R_IA64_DTPMOD64LSB	0xa7	/* @dtpmod(sym+add), data8 LSB */
#define R_IA64_LTOFF_DTPMOD22	0xaa	/* @ltoff(@dtpmod(s+a)), imm22 */
#define R_IA64_DTPREL14		0xb1	/* @dtprel(sym+add), imm14 */
#define R_IA64_DTPREL22		0xb2	/* @dtprel(sym+add), imm22 */
#define R_IA64_DTPREL64I	0xb3	/* @dtprel(sym+add), imm64 */
#define R_IA64_DTPREL32MSB	0xb4	/* @dtprel(sym+add), data4 MSB */
#define R_IA64_DTPREL32LSB	0xb5	/* @dtprel(sym+add), data4 LSB */
#define R_IA64_DTPREL64MSB	0xb6	/* @dtprel(sym+add), data8 MSB */
#define R_IA64_DTPREL64LSB	0xb7	/* @dtprel(sym+add), data8 LSB */
#define R_IA64_LTOFF_DTPREL22	0xba	/* @ltoff(@dtprel(s+a)), imm22 */

/* IA-64 specific section flags: */
#define SHF_IA_64_SHORT		0x10000000	/* section near gp */

/*
 * We use (abuse?) this macro to insert the (empty) vm_area that is
 * used to map the register backing store.  I don't see any better
 * place to do this, but we should discuss this with Linus once we can
 * talk to him...
 */
extern void ia64_init_addr_space (void);
#define ELF_PLAT_INIT(_r, load_addr)	ia64_init_addr_space()

/* ELF register definitions.  This is needed for core dump support.  */

/*
 * elf_gregset_t contains the application-level state in the following order:
 *	r0-r31
 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
 *	predicate registers (p0-p63)
 *	b0-b7
 *	ip cfm psr
 *	ar.rsc ar.bsp ar.bspstore ar.rnat
 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
 */
#define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */

/* elf_gregset_t register offsets */
#define ELF_GR_0_OFFSET     0
#define ELF_NAT_OFFSET     (32 * sizeof(elf_greg_t))
#define ELF_PR_OFFSET      (33 * sizeof(elf_greg_t))
#define ELF_BR_0_OFFSET    (34 * sizeof(elf_greg_t))
#define ELF_CR_IIP_OFFSET  (42 * sizeof(elf_greg_t))
#define ELF_CFM_OFFSET     (43 * sizeof(elf_greg_t))
#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
#define ELF_GR_OFFSET(i)   (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
#define ELF_BR_OFFSET(i)   (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
#define ELF_AR_RSC_OFFSET  (45 * sizeof(elf_greg_t))
#define ELF_AR_BSP_OFFSET  (46 * sizeof(elf_greg_t))
#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
#define ELF_AR_CCV_OFFSET  (49 * sizeof(elf_greg_t))
#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
#define ELF_AR_PFS_OFFSET  (52 * sizeof(elf_greg_t))
#define ELF_AR_LC_OFFSET   (53 * sizeof(elf_greg_t))
#define ELF_AR_EC_OFFSET   (54 * sizeof(elf_greg_t))
#define ELF_AR_CSD_OFFSET  (55 * sizeof(elf_greg_t))
#define ELF_AR_SSD_OFFSET  (56 * sizeof(elf_greg_t))
#define ELF_AR_END_OFFSET  (57 * sizeof(elf_greg_t))

typedef unsigned long elf_fpxregset_t;

typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct ia64_fpreg elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];



struct pt_regs;	/* forward declaration... */
extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
#define ELF_CORE_COPY_REGS(_dest,_regs)	ia64_elf_core_copy_regs(_regs, _dest);

/* This macro yields a bitmask that programs can use to figure out
   what instruction set this CPU supports.  */
#define ELF_HWCAP	0

/* This macro yields a string that ld.so will use to load
   implementation specific libraries for optimization.  Not terribly
   relevant until we have real hardware to play with... */
#define ELF_PLATFORM	NULL

#define elf_read_implies_exec(ex, executable_stack)					\
	((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)

struct task_struct;

#define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO								\
do {										\
	extern char __kernel_syscall_via_epc[];					\
	NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc);	\
	NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR);		\
} while (0)

/*
 * format for entries in the Global Offset Table
 */
struct got_entry {
	uint64_t val;
};

/*
 * Layout of the Function Descriptor
 */
struct fdesc {
	uint64_t ip;
	uint64_t gp;
};

#endif /* _ASM_IA64_ELF_H */
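The offset macros above are plain slot arithmetic into elf_gregset_t, so a core-dump consumer can compute positions directly. A few worked numbers, illustrative only (elf_greg_t is 8 bytes here):

/* Illustration of the elf_gregset_t layout arithmetic above:
 *   ELF_BR_OFFSET(3)  == (34 + 3) * 8 == 296 bytes into the block,
 *   ELF_AR_SSD_OFFSET == 56 * 8      == 448,
 *   ELF_AR_END_OFFSET == 57 * 8      == 456
 * i.e. only 57 of the ELF_NGREG == 128 reserved slots are populated,
 * matching the "we really need just 72" headroom comment.
 */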
6
arch/ia64/include/asm/emergency-restart.h
Normal file
@@ -0,0 +1,6 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H

#include <asm-generic/emergency-restart.h>

#endif /* _ASM_EMERGENCY_RESTART_H */
29
arch/ia64/include/asm/esi.h
Normal file
@@ -0,0 +1,29 @@
/*
 * ESI service calls.
 *
 * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
 *	Alex Williamson <alex.williamson@hp.com>
 */
#ifndef esi_h
#define esi_h

#include <linux/efi.h>

#define ESI_QUERY			0x00000001
#define ESI_OPEN_HANDLE			0x02000000
#define ESI_CLOSE_HANDLE		0x02000001

enum esi_proc_type {
	ESI_PROC_SERIALIZED,	/* calls need to be serialized */
	ESI_PROC_MP_SAFE,	/* MP-safe, but not reentrant */
	ESI_PROC_REENTRANT	/* MP-safe and reentrant */
};

extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
			 enum esi_proc_type,
			 u64, u64, u64, u64, u64, u64, u64, u64);
extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
			      u64, u64, u64, u64, u64, u64);

#endif /* esi_h */
23
arch/ia64/include/asm/fb.h
Normal file
@@ -0,0 +1,23 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_

#include <linux/fb.h>
#include <linux/fs.h>
#include <linux/efi.h>
#include <asm/page.h>

static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
				unsigned long off)
{
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}

static inline int fb_is_primary_device(struct fb_info *info)
{
	return 0;
}

#endif /* _ASM_FB_H_ */
73
arch/ia64/include/asm/fpswa.h
Normal file
@@ -0,0 +1,73 @@
#ifndef _ASM_IA64_FPSWA_H
#define _ASM_IA64_FPSWA_H

/*
 * Floating-point Software Assist
 *
 * Copyright (C) 1999 Intel Corporation.
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
 */

typedef struct {
	/* 4 * 128 bits */
	unsigned long fp_lp[4*2];
} fp_state_low_preserved_t;

typedef struct {
	/* 10 * 128 bits */
	unsigned long fp_lv[10 * 2];
} fp_state_low_volatile_t;

typedef struct {
	/* 16 * 128 bits */
	unsigned long fp_hp[16 * 2];
} fp_state_high_preserved_t;

typedef struct {
	/* 96 * 128 bits */
	unsigned long fp_hv[96 * 2];
} fp_state_high_volatile_t;

/**
 * floating point state to be passed to the FP emulation library by
 * the trap/fault handler
 */
typedef struct {
	unsigned long	bitmask_low64;
	unsigned long	bitmask_high64;
	fp_state_low_preserved_t	*fp_state_low_preserved;
	fp_state_low_volatile_t		*fp_state_low_volatile;
	fp_state_high_preserved_t	*fp_state_high_preserved;
	fp_state_high_volatile_t	*fp_state_high_volatile;
} fp_state_t;

typedef struct {
	unsigned long status;
	unsigned long err0;
	unsigned long err1;
	unsigned long err2;
} fpswa_ret_t;

/**
 * function header for the Floating Point software assist
 * library.  This function is invoked by the Floating point software
 * assist trap/fault handler.
 */
typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
				    unsigned long *fsr, unsigned long *isr, unsigned long *preds,
				    unsigned long *ifs, fp_state_t *fp_state);

/**
 * This is the FPSWA library interface as defined by EFI.  We need to pass a
 * pointer to the interface itself on a call to the assist library
 */
typedef struct {
	unsigned int	 revision;
	unsigned int	 reserved;
	efi_fpswa_t	 fpswa;
} fpswa_interface_t;

extern fpswa_interface_t *fpswa_interface;

#endif /* _ASM_IA64_FPSWA_H */
27
arch/ia64/include/asm/ftrace.h
Normal file
@@ -0,0 +1,27 @@
#ifndef _ASM_IA64_FTRACE_H
#define _ASM_IA64_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_INSN_SIZE        32 /* sizeof mcount call */

#ifndef __ASSEMBLY__
extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
#define mcount _mcount

/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* second bundle, insn 2 */
	return addr - 0x12;
}

struct dyn_arch_ftrace {
};
#endif

#endif /* CONFIG_FUNCTION_TRACER */

#endif /* _ASM_IA64_FTRACE_H */
126
arch/ia64/include/asm/futex.h
Normal file
@@ -0,0 +1,126 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
do {									\
	register unsigned long r8 __asm ("r8") = 0;			\
	__asm__ __volatile__(						\
		"	mf;;					\n"	\
		"[1:] "	insn ";;				\n"	\
		"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"	\
		"[2:]"							\
		: "+r" (r8), "=r" (oldval)				\
		: "r" (uaddr), "r" (oparg)				\
		: "memory");						\
	ret = r8;							\
} while (0)

#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
do {									\
	register unsigned long r8 __asm ("r8") = 0;			\
	int val, newval;						\
	do {								\
		__asm__ __volatile__(					\
			"	mf;;				  \n"	\
			"[1:]	ld4 %3=[%4];;			  \n"	\
			"	mov %2=%3			  \n"	\
			insn	";;				  \n"	\
			"	mov ar.ccv=%2;;			  \n"	\
			"[2:]	cmpxchg4.acq %1=[%4],%3,ar.ccv;;  \n"	\
			"	.xdata4 \"__ex_table\", 1b-., 3f-.\n"	\
			"	.xdata4 \"__ex_table\", 2b-., 3f-.\n"	\
			"[3:]"						\
			: "+r" (r8), "=r" (val), "=&r" (oldval),	\
			  "=&r" (newval)				\
			: "r" (uaddr), "r" (oparg)			\
			: "memory");					\
		if (unlikely (r8))					\
			break;						\
	} while (unlikely (val != oldval));				\
	ret = r8;							\
} while (0)

static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	pagefault_disable();

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
				   oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
				   ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	{
		register unsigned long r8 __asm ("r8") = 0;
		unsigned long prev;
		__asm__ __volatile__(
			"	mf;;					\n"
			"	mov ar.ccv=%4;;				\n"
			"[1:]	cmpxchg4.acq %1=[%2],%3,ar.ccv		\n"
			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
			"[2:]"
			: "+r" (r8), "=&r" (prev)
			: "r" (uaddr), "r" (newval),
			  "rO" ((long) (unsigned) oldval)
			: "memory");
		*uval = prev;
		return r8;
	}
}

#endif /* _ASM_FUTEX_H */
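The first four lines of futex_atomic_op_inuser() unpack a single packed word: 4-bit op and cmp fields up top, two sign-extended 12-bit argument fields below. A standalone illustration of exactly those shifts (not kernel code; the sample value is made up):

/* Illustrative decode of encoded_op, mirroring the shifts above. */
#include <stdio.h>

int main(void)
{
	int encoded_op = (1 << 28) | (0 << 24) | (2 << 12) | 7;
				/* op=ADD(1), cmp=EQ(0), oparg=2, cmparg=7 */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extends the 12-bit field */
	int cmparg = (encoded_op << 20) >> 20;

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;	/* prints: op=1 cmp=0 oparg=2 cmparg=7 */
}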
12
arch/ia64/include/asm/gcc_intrin.h
Normal file
@@ -0,0 +1,12 @@
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */
#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H

#include <uapi/asm/gcc_intrin.h>

register unsigned long ia64_r13 asm ("r13") __used;
#endif /* _ASM_IA64_GCC_INTRIN_H */
4
arch/ia64/include/asm/gpio.h
Normal file
@@ -0,0 +1,4 @@
#ifndef __LINUX_GPIO_H
#warning Include linux/gpio.h instead of asm/gpio.h
#include <linux/gpio.h>
#endif
26
arch/ia64/include/asm/hardirq.h
Normal file
@@ -0,0 +1,26 @@
#ifndef _ASM_IA64_HARDIRQ_H
#define _ASM_IA64_HARDIRQ_H

/*
 * Modified 1998-2002, 2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
 */

#define __ARCH_IRQ_STAT	1

#define local_softirq_pending()		(local_cpu_data->softirq_pending)

#include <linux/threads.h>
#include <linux/irq.h>

#include <asm/processor.h>

extern void __iomem *ipi_base_addr;

void ack_bad_irq(unsigned int irq);

#endif /* _ASM_IA64_HARDIRQ_H */
16
arch/ia64/include/asm/hpsim.h
Normal file
@@ -0,0 +1,16 @@
#ifndef _ASMIA64_HPSIM_H
#define _ASMIA64_HPSIM_H

#ifndef CONFIG_HP_SIMSERIAL_CONSOLE
static inline int simcons_register(void) { return 1; }
#else
int simcons_register(void);
#endif

struct tty_driver;
extern struct tty_driver *hp_simserial_driver;

extern int hpsim_get_irq(int intr);
void ia64_ctl_trace(long on);

#endif
85
arch/ia64/include/asm/hugetlb.h
Normal file
@@ -0,0 +1,85 @@
#ifndef _ASM_IA64_HUGETLB_H
#define _ASM_IA64_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>


void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

int prepare_hugepage_range(struct file *file,
			   unsigned long addr, unsigned long len);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return (REGION_NUMBER(addr) == RGN_HPAGE ||
		REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* _ASM_IA64_HUGETLB_H */
194
arch/ia64/include/asm/hw_irq.h
Normal file
@@ -0,0 +1,194 @@
#ifndef _ASM_IA64_HW_IRQ_H
#define _ASM_IA64_HW_IRQ_H

/*
 * Copyright (C) 2001-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/profile.h>

#include <asm/machvec.h>
#include <asm/ptrace.h>
#include <asm/smp.h>

#ifndef CONFIG_PARAVIRT
typedef u8 ia64_vector;
#else
typedef u16 ia64_vector;
#endif

/*
 * 0 special
 *
 * 1,3-14 are reserved from firmware
 *
 * 16-255 (vectored external interrupts) are available
 *
 * 15 spurious interrupt (see IVR)
 *
 * 16 lowest priority, 255 highest priority
 *
 * 15 classes of 16 interrupts each.
 */
#define IA64_MIN_VECTORED_IRQ		 16
#define IA64_MAX_VECTORED_IRQ		255
#define IA64_NUM_VECTORS		256

#define AUTO_ASSIGN			-1

#define IA64_SPURIOUS_INT_VECTOR	0x0f

/*
 * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
 */
#define IA64_CPEP_VECTOR		0x1c	/* corrected platform error polling vector */
#define IA64_CMCP_VECTOR		0x1d	/* corrected machine-check polling vector */
#define IA64_CPE_VECTOR			0x1e	/* corrected platform error interrupt vector */
#define IA64_CMC_VECTOR			0x1f	/* corrected machine-check interrupt vector */
/*
 * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
 * Use vectors 0x30-0xe7 as the default device vector range for ia64.
 * Platforms may choose to reduce this range in platform_irq_setup, but the
 * platform range must fall within
 *	[IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR]
 */
extern int ia64_first_device_vector;
extern int ia64_last_device_vector;

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
#define IA64_IRQ_MOVE_VECTOR		0x30	/* "move IRQ" IPI */
#define IA64_DEF_FIRST_DEVICE_VECTOR	0x31
#else
#define IA64_DEF_FIRST_DEVICE_VECTOR	0x30
#endif
#define IA64_DEF_LAST_DEVICE_VECTOR	0xe7
#define IA64_FIRST_DEVICE_VECTOR	ia64_first_device_vector
#define IA64_LAST_DEVICE_VECTOR		ia64_last_device_vector
#define IA64_MAX_DEVICE_VECTORS		(IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1)
#define IA64_NUM_DEVICE_VECTORS		(IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)

#define IA64_MCA_RENDEZ_VECTOR		0xe8	/* MCA rendez interrupt */
#define IA64_PERFMON_VECTOR		0xee	/* performance monitor interrupt vector */
#define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
#define	IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
#define IA64_IPI_LOCAL_TLB_FLUSH	0xfc	/* SMP flush local TLB */
#define IA64_IPI_RESCHEDULE		0xfd	/* SMP reschedule */
#define IA64_IPI_VECTOR			0xfe	/* inter-processor interrupt vector */

/* Used for encoding redirected irqs */

#define IA64_IRQ_REDIRECTED		(1 << 31)

/* IA64 inter-cpu interrupt related definitions */

#define IA64_IPI_DEFAULT_BASE_ADDR	0xfee00000

/* Delivery modes for inter-cpu interrupts */
enum {
	IA64_IPI_DM_INT =	0x0,	/* pend an external interrupt */
	IA64_IPI_DM_PMI =	0x2,	/* pend a PMI */
	IA64_IPI_DM_NMI =	0x4,	/* pend an NMI (vector 2) */
	IA64_IPI_DM_INIT =	0x5,	/* pend an INIT interrupt */
	IA64_IPI_DM_EXTINT =	0x7,	/* pend an 8259-compatible interrupt. */
};

extern __u8 isa_irq_to_vector_map[16];
#define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]

struct irq_cfg {
	ia64_vector vector;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 move_in_progress : 1;
};
extern spinlock_t vector_lock;
extern struct irq_cfg irq_cfg[NR_IRQS];
#define irq_to_domain(x)	irq_cfg[(x)].domain
DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);

extern struct irq_chip irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */

#ifdef CONFIG_PARAVIRT_GUEST
#include <asm/paravirt.h>
#else
#define ia64_register_ipi	ia64_native_register_ipi
#define assign_irq_vector	ia64_native_assign_irq_vector
#define free_irq_vector		ia64_native_free_irq_vector
#define register_percpu_irq	ia64_native_register_percpu_irq
#define ia64_resend_irq		ia64_native_resend_irq
#endif

extern void ia64_native_register_ipi(void);
extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
extern int ia64_native_assign_irq_vector (int irq);	/* allocate a free vector */
extern void ia64_native_free_irq_vector (int vector);
extern int reserve_irq_vector (int vector);
extern void __setup_vector_irq(int cpu);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
extern void destroy_and_reserve_irq (unsigned int irq);

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
extern int irq_prepare_move(int irq, int cpu);
extern void irq_complete_move(unsigned int irq);
#else
static inline int irq_prepare_move(int irq, int cpu) { return 0; }
static inline void irq_complete_move(unsigned int irq) {}
#endif

static inline void ia64_native_resend_irq(unsigned int vector)
{
	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
}

/*
 * Default implementations for the irq-descriptor API:
 */
#ifndef CONFIG_IA64_GENERIC
static inline ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

static inline unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
	return __this_cpu_read(vector_irq[vec]);
}
#endif

/*
 * Next follows the irq descriptor interface.  On IA-64, each CPU supports 256 interrupt
 * vectors.  On smaller systems, there is a one-to-one correspondence between interrupt
 * vectors and the Linux irq numbers.  However, larger systems may have multiple interrupt
 * domains meaning that the translation from vector number to irq number depends on the
 * interrupt domain that a CPU belongs to.  This API abstracts such platform-dependent
 * differences and provides a uniform means to translate between vector and irq numbers
 * and to obtain the irq descriptor for a given irq number.
 */

/* Extract the IA-64 vector that corresponds to IRQ.  */
static inline ia64_vector
irq_to_vector (int irq)
{
	return platform_irq_to_vector(irq);
}

/*
 * Convert the local IA-64 vector to the corresponding irq number.  This translation is
 * done in the context of the interrupt domain that the currently executing CPU belongs
 * to.
 */
static inline unsigned int
local_vector_to_irq (ia64_vector vec)
{
	return platform_local_vector_to_irq(vec);
}

#endif /* _ASM_IA64_HW_IRQ_H */
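The two inline helpers at the bottom are the entire public surface of the vector/irq translation described in the block comment above. A hedged sketch of how they compose (example_roundtrip() is a hypothetical caller, not part of the header):

/* Sketch: on a single interrupt domain, irq -> vector -> irq on the
 * same CPU is the identity, per the one-to-one correspondence noted
 * in the comment above.
 */
static int example_roundtrip(int irq)
{
	ia64_vector vec = irq_to_vector(irq);

	return local_vector_to_irq(vec) == irq;
}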
7
arch/ia64/include/asm/idle.h
Normal file
@@ -0,0 +1,7 @@
#ifndef _ASM_IA64_IDLE_H
#define _ASM_IA64_IDLE_H

static inline void enter_idle(void) { }
static inline void exit_idle(void) { }

#endif /* _ASM_IA64_IDLE_H */
25
arch/ia64/include/asm/intrinsics.h
Normal file
@@ -0,0 +1,25 @@
/*
 * Compiler-dependent intrinsics.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H

#include <asm/paravirt_privop.h>
#include <uapi/asm/intrinsics.h>

#ifndef __ASSEMBLY__
#if defined(CONFIG_PARAVIRT)
# undef IA64_INTRINSIC_API
# undef IA64_INTRINSIC_MACRO
# ifdef ASM_SUPPORTED
#  define IA64_INTRINSIC_API(name)	paravirt_ ## name
# else
#  define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
# endif
#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_INTRINSICS_H */
445
arch/ia64/include/asm/io.h
Normal file
@@ -0,0 +1,445 @@
#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H

/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <asm/unaligned.h>

/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)

/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS		8
#define MAX_IO_SPACES			(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS			24
#define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)		((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)		((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;

# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE

#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>

/*
 * Change virtual addresses to physical addresses and vv.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);

/*
 * The following two macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys

# endif /* KERNEL */

/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()

/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}

static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}

#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncachable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		put_unaligned(platform_inw(port), dp++);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		put_unaligned(platform_inl(port), dp++);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(get_unaligned(sp++), port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(get_unaligned(sp++), port);
}

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()

/*
 * The address passed to these functions are ioremap()ped already.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}

#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq

#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif

# ifdef __KERNEL__

extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
#define early_memremap(phys_addr, size)		early_ioremap(phys_addr, size)
extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
#define early_memunmap(addr, size)		early_iounmap(addr, size)
static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
{
	return ioremap(phys_addr, size);
}


/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);

# endif /* __KERNEL__ */

#endif /* _ASM_IA64_IO_H */
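IO_SPACE_SPARSE_ENCODING() above shifts the upper port bits up by 12 so each 4-byte-aligned group of ports lands on its own 4 KB granule of the MMIO aperture. A worked example, illustrative only (the port number is arbitrary):

/* Sparse-encoding arithmetic for port 0x1f0:
 *   (0x1f0 >> 2) << 12 = 0x7c000
 *    0x1f0 & 0xfff     = 0x1f0
 *   encoded offset     = 0x7c000 | 0x1f0 = 0x7c1f0
 * which __ia64_mk_io_addr() then ORs into space->mmio_base when
 * space->sparse is set.
 */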
22
arch/ia64/include/asm/iommu.h
Normal file
@@ -0,0 +1,22 @@
#ifndef _ASM_IA64_IOMMU_H
#define _ASM_IA64_IOMMU_H 1

#define cpu_has_x2apic 0
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)

extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
#ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu;
extern int iommu_pass_through;
extern int iommu_detected;
#else
#define iommu_pass_through	(0)
#define no_iommu		(1)
#define iommu_detected		(0)
#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);

#endif
6
arch/ia64/include/asm/iommu_table.h
Normal file
@@ -0,0 +1,6 @@
#ifndef _ASM_IA64_IOMMU_TABLE_H
#define _ASM_IA64_IOMMU_TABLE_H

#define IOMMU_INIT_POST(_detect)

#endif /* _ASM_IA64_IOMMU_TABLE_H */
121
arch/ia64/include/asm/iosapic.h
Normal file
@@ -0,0 +1,121 @@
#ifndef __ASM_IA64_IOSAPIC_H
#define __ASM_IA64_IOSAPIC_H

#define	IOSAPIC_REG_SELECT	0x0
#define	IOSAPIC_WINDOW		0x10
#define	IOSAPIC_EOI		0x40

#define	IOSAPIC_VERSION		0x1

/*
 * Redirection table entry
 */
#define	IOSAPIC_RTE_LOW(i)	(0x10+i*2)
#define	IOSAPIC_RTE_HIGH(i)	(0x11+i*2)

#define	IOSAPIC_DEST_SHIFT	16

/*
 * Delivery mode
 */
#define	IOSAPIC_DELIVERY_SHIFT	8
#define	IOSAPIC_FIXED		0x0
#define	IOSAPIC_LOWEST_PRIORITY	0x1
#define	IOSAPIC_PMI		0x2
#define	IOSAPIC_NMI		0x4
#define	IOSAPIC_INIT		0x5
#define	IOSAPIC_EXTINT		0x7

/*
 * Interrupt polarity
 */
#define	IOSAPIC_POLARITY_SHIFT	13
#define	IOSAPIC_POL_HIGH	0
#define	IOSAPIC_POL_LOW		1

/*
 * Trigger mode
 */
#define	IOSAPIC_TRIGGER_SHIFT	15
#define	IOSAPIC_EDGE		0
#define	IOSAPIC_LEVEL		1

/*
 * Mask bit
 */

#define	IOSAPIC_MASK_SHIFT	16
#define	IOSAPIC_MASK		(1<<IOSAPIC_MASK_SHIFT)

#define IOSAPIC_VECTOR_MASK	0xffffff00

#ifndef __ASSEMBLY__

#ifdef CONFIG_IOSAPIC

#define NR_IOSAPICS		256

#ifdef CONFIG_PARAVIRT_GUEST
#include <asm/paravirt.h>
#else
#define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
#define __iosapic_read			__ia64_native_iosapic_read
#define __iosapic_write			__ia64_native_iosapic_write
#define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
#endif

extern void __init ia64_native_iosapic_pcat_compat_init(void);
extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);

static inline unsigned int
__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	writel(reg, iosapic + IOSAPIC_REG_SELECT);
	return readl(iosapic + IOSAPIC_WINDOW);
}

static inline void
__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	writel(reg, iosapic + IOSAPIC_REG_SELECT);
	writel(val, iosapic + IOSAPIC_WINDOW);
}

static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
{
	writel(vector, iosapic + IOSAPIC_EOI);
}

extern void __init iosapic_system_init (int pcat_compat);
extern int iosapic_init (unsigned long address, unsigned int gsi_base);
extern int iosapic_remove (unsigned int gsi_base);
extern int gsi_to_irq (unsigned int gsi);
extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
				  unsigned long trigger);
extern void iosapic_unregister_intr (unsigned int irq);
extern void iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
				      unsigned long polarity,
				      unsigned long trigger);
extern int __init iosapic_register_platform_intr (u32 int_type,
						  unsigned int gsi,
						  int pmi_vector,
						  u16 eid, u16 id,
						  unsigned long polarity,
						  unsigned long trigger);

#ifdef CONFIG_NUMA
extern void map_iosapic_to_node (unsigned int, int);
#endif
#else
#define iosapic_system_init(pcat_compat)			do { } while (0)
#define iosapic_init(address,gsi_base)				(-EINVAL)
#define iosapic_remove(gsi_base)				(-ENODEV)
#define iosapic_register_intr(gsi,polarity,trigger)		(gsi)
#define iosapic_unregister_intr(irq)				do { } while (0)
#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger)	do { } while (0)
#define iosapic_register_platform_intr(type,gsi,pmi,eid,id,	\
	polarity,trigger)					(gsi)
#endif

# endif /* !__ASSEMBLY__ */
#endif /* __ASM_IA64_IOSAPIC_H */
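__ia64_native_iosapic_read()/__ia64_native_iosapic_write() are a classic index/data register pair: the register number is written to REG_SELECT, and the payload moves through WINDOW. A hedged sketch of combining this with the RTE index macros (the helper name is hypothetical, and real callers also serialize access to the select/window pair):

/* Sketch: read the low word of redirection table entry 'i'.
 * 'iosapic' is assumed to be the ioremap()ped base of one IOSAPIC,
 * e.g. as set up by iosapic_init().
 */
static u32 example_read_rte_low(char __iomem *iosapic, unsigned int i)
{
	return __iosapic_read(iosapic, IOSAPIC_RTE_LOW(i));
}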
37
arch/ia64/include/asm/irq.h
Normal file
@@ -0,0 +1,37 @@
#ifndef _ASM_IA64_IRQ_H
#define _ASM_IA64_IRQ_H

/*
 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * 11/24/98	S.Eranian	updated TIMER_IRQ and irq_canonicalize
 * 01/20/99	S.Eranian	added keyboard interrupt
 * 02/29/00	D.Mosberger	moved most things into hw_irq.h
 */

#include <linux/types.h>
#include <linux/cpumask.h>
#include <generated/nr-irqs.h>

static __inline__ int
irq_canonicalize (int irq)
{
	/*
	 * We do the legacy thing here of pretending that irqs < 16
	 * are 8259 irqs.  This really shouldn't be necessary at all,
	 * but we keep it here as serial.c still uses it...
	 */
	return ((irq == 2) ? 9 : irq);
}

extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
bool is_affinity_mask_valid(const struct cpumask *cpumask);

#define is_affinity_mask_valid is_affinity_mask_valid

int create_irq(void);
void destroy_irq(unsigned int irq);

#endif /* _ASM_IA64_IRQ_H */
1
arch/ia64/include/asm/irq_regs.h
Normal file
@@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>
6
arch/ia64/include/asm/irq_remapping.h
Normal file
@@ -0,0 +1,6 @@
#ifndef __IA64_INTR_REMAPPING_H
#define __IA64_INTR_REMAPPING_H
#define irq_remapping_enabled 0
#define dmar_alloc_hwirq	create_irq
#define dmar_free_hwirq		destroy_irq
#endif
98
arch/ia64/include/asm/irqflags.h
Normal file
@@ -0,0 +1,98 @@
/*
 * IRQ flags defines.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#ifndef _ASM_IA64_IRQFLAGS_H
#define _ASM_IA64_IRQFLAGS_H

#include <asm/pal.h>
#include <asm/kregs.h>

#ifdef CONFIG_IA64_DEBUG_IRQ
extern unsigned long last_cli_ip;
static inline void arch_maybe_save_ip(unsigned long flags)
{
	if (flags & IA64_PSR_I)
		last_cli_ip = ia64_getreg(_IA64_REG_IP);
}
#else
#define arch_maybe_save_ip(flags) do {} while (0)
#endif

/*
 * - clearing psr.i is implicitly serialized (visible by next insn)
 * - setting psr.i requires data serialization
 * - we need a stop-bit before reading PSR because we sometimes
 *   write a floating-point register right before reading the PSR
 *   and that writes to PSR.mfl
 */

static inline unsigned long arch_local_save_flags(void)
{
	ia64_stop();
#ifdef CONFIG_PARAVIRT
	return ia64_get_psr_i();
#else
	return ia64_getreg(_IA64_REG_PSR);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	ia64_stop();
	ia64_rsm(IA64_PSR_I);
	arch_maybe_save_ip(flags);
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_IA64_DEBUG_IRQ
	arch_local_irq_save();
#else
	ia64_stop();
	ia64_rsm(IA64_PSR_I);
#endif
}

static inline void arch_local_irq_enable(void)
{
	ia64_stop();
	ia64_ssm(IA64_PSR_I);
	ia64_srlz_d();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#ifdef CONFIG_IA64_DEBUG_IRQ
	unsigned long old_psr = arch_local_save_flags();
#endif
	ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
	arch_maybe_save_ip(old_psr & ~flags);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & IA64_PSR_I) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void arch_safe_halt(void)
{
	arch_local_irq_enable();
	ia64_pal_halt_light();	/* PAL_HALT_LIGHT */
}

#endif /* _ASM_IA64_IRQFLAGS_H */
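The save/disable/restore trio above is the pattern generic kernel code relies on: save the current psr.i state, mask interrupts for the critical section, then restore exactly the prior state. A minimal userspace model of that protocol, assuming a fake PSR word in place of the real register and the ia64_rsm()/ia64_intrin_local_irq_restore() intrinsics:

#include <assert.h>

/* IA64_PSR_I is bit 14 per kregs.h; fake_psr stands in for the
 * processor status register in this sketch. */
#define IA64_PSR_I (1UL << 14)

static unsigned long fake_psr = IA64_PSR_I;	/* interrupts enabled */

static unsigned long demo_irq_save(void)
{
	unsigned long flags = fake_psr;		/* arch_local_save_flags() */
	fake_psr &= ~IA64_PSR_I;		/* ia64_rsm(IA64_PSR_I) */
	return flags;
}

static void demo_irq_restore(unsigned long flags)
{
	/* restore only the interrupt-enable bit, like the real helper */
	fake_psr = (fake_psr & ~IA64_PSR_I) | (flags & IA64_PSR_I);
}

int main(void)
{
	unsigned long flags = demo_irq_save();
	assert(!(fake_psr & IA64_PSR_I));	/* masked inside the section */
	/* ... critical section ... */
	demo_irq_restore(flags);
	assert(fake_psr & IA64_PSR_I);		/* prior state came back */
	return 0;
}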
57
arch/ia64/include/asm/kdebug.h
Normal file
@@ -0,0 +1,57 @@
#ifndef _IA64_KDEBUG_H
#define _IA64_KDEBUG_H 1
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adopted from
 *		include/asm-x86_64/kdebug.h
 *
 * 2005-Oct	Keith Owens <kaos@sgi.com>.  Expand notify_die to cover more
 *		events.
 */

enum die_val {
	DIE_BREAK = 1,
	DIE_FAULT,
	DIE_OOPS,
	DIE_MACHINE_HALT,
	DIE_MACHINE_RESTART,
	DIE_MCA_MONARCH_ENTER,
	DIE_MCA_MONARCH_PROCESS,
	DIE_MCA_MONARCH_LEAVE,
	DIE_MCA_SLAVE_ENTER,
	DIE_MCA_SLAVE_PROCESS,
	DIE_MCA_SLAVE_LEAVE,
	DIE_MCA_RENDZVOUS_ENTER,
	DIE_MCA_RENDZVOUS_PROCESS,
	DIE_MCA_RENDZVOUS_LEAVE,
	DIE_MCA_NEW_TIMEOUT,
	DIE_INIT_ENTER,
	DIE_INIT_MONARCH_ENTER,
	DIE_INIT_MONARCH_PROCESS,
	DIE_INIT_MONARCH_LEAVE,
	DIE_INIT_SLAVE_ENTER,
	DIE_INIT_SLAVE_PROCESS,
	DIE_INIT_SLAVE_LEAVE,
	DIE_KDEBUG_ENTER,
	DIE_KDEBUG_LEAVE,
	DIE_KDUMP_ENTER,
	DIE_KDUMP_LEAVE,
};

#endif
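The die_val values above are what ia64 passes through the kernel's die-notifier chain so debuggers and crash handlers can branch on the event type. A small standalone sketch of such a dispatch (the handler and the trimmed enum are invented for the demo, not from the header's consumers):

#include <stdio.h>

/* First few values copied from the enum above; the handler is a
 * hypothetical stand-in for a notifier callback. */
enum die_val { DIE_BREAK = 1, DIE_FAULT, DIE_OOPS };

static void demo_die_handler(enum die_val val)
{
	switch (val) {
	case DIE_BREAK:
		printf("break instruction fault\n");
		break;
	case DIE_OOPS:
		printf("kernel oops\n");
		break;
	default:
		printf("unhandled event %d\n", val);
		break;
	}
}

int main(void)
{
	demo_die_handler(DIE_BREAK);
	demo_die_handler(DIE_OOPS);
	return 0;
}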
45
arch/ia64/include/asm/kexec.h
Normal file
@@ -0,0 +1,45 @@
#ifndef _ASM_IA64_KEXEC_H
#define _ASM_IA64_KEXEC_H

#include <asm/setup.h>

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

#define KEXEC_CONTROL_PAGE_SIZE (8192 + 8192 + 4096)

/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_IA_64

#define kexec_flush_icache_page(page) do { \
		unsigned long page_addr = (unsigned long)page_address(page); \
		flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
	} while(0)

extern struct kimage *ia64_kimage;
extern const unsigned int relocate_new_kernel_size;
extern void relocate_new_kernel(unsigned long, unsigned long,
				struct ia64_boot_param *, unsigned long);
static inline void
crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
{
}
extern struct resource efi_memmap_res;
extern struct resource boot_param_res;
extern void kdump_smp_send_stop(void);
extern void kdump_smp_send_init(void);
extern void kexec_disable_iosapic(void);
extern void crash_save_this_cpu(void);
struct rsvd_region;
extern unsigned long kdump_find_rsvd_region(unsigned long size,
		struct rsvd_region *rsvd_regions, int n);
extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
extern int kdump_status[];
extern atomic_t kdump_cpu_freezed;
extern atomic_t kdump_in_progress;

#endif /* _ASM_IA64_KEXEC_H */
12
arch/ia64/include/asm/kmap_types.h
Normal file
@@ -0,0 +1,12 @@
#ifndef _ASM_IA64_KMAP_TYPES_H
#define _ASM_IA64_KMAP_TYPES_H

#ifdef CONFIG_DEBUG_HIGHMEM
#define __WITH_KM_FENCE
#endif

#include <asm-generic/kmap_types.h>

#undef __WITH_KM_FENCE

#endif /* _ASM_IA64_KMAP_TYPES_H */
127
arch/ia64/include/asm/kprobes.h
Normal file
@@ -0,0 +1,127 @@
#ifndef _ASM_KPROBES_H
#define _ASM_KPROBES_H
/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <asm/break.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE	2	/* last half is for kprobe-booster */
#define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
#define NOP_M_INST	(long)(1<<27)
#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
				(0x1L << 12) |	/* many */ \
				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */

typedef union cmp_inst {
	struct {
		unsigned long long qp : 6;
		unsigned long long p1 : 6;
		unsigned long long c  : 1;
		unsigned long long r2 : 7;
		unsigned long long r3 : 7;
		unsigned long long p2 : 6;
		unsigned long long ta : 1;
		unsigned long long x2 : 2;
		unsigned long long tb : 1;
		unsigned long long opcode : 4;
		unsigned long long reserved : 23;
	} f;
	unsigned long long l;
} cmp_inst_t;

struct kprobe;

typedef struct _bundle {
	struct {
		unsigned long long template : 5;
		unsigned long long slot0 : 41;
		unsigned long long slot1_p0 : 64-46;
	} quad0;
	struct {
		unsigned long long slot1_p1 : 41 - (64-46);
		unsigned long long slot2 : 41;
	} quad1;
} __attribute__((__aligned__(16))) bundle_t;

struct prev_kprobe {
	struct kprobe *kp;
	unsigned long status;
};

#define MAX_PARAM_RSE_SIZE	(0x60+0x60/0x3f)
/* per-cpu kprobe control block */
#define ARCH_PREV_KPROBE_SZ 2
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	struct pt_regs jprobe_saved_regs;
	unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
	unsigned long *bsp;
	unsigned long cfm;
	atomic_t prev_kprobe_index;
	struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
};

#define kretprobe_blacklist_size 0

#define SLOT0_OPCODE_SHIFT	(37)
#define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
#define SLOT2_OPCODE_SHIFT	(37)

#define INDIRECT_CALL_OPCODE		(1)
#define IP_RELATIVE_CALL_OPCODE		(5)
#define IP_RELATIVE_BRANCH_OPCODE	(4)
#define IP_RELATIVE_PREDICT_OPCODE	(7)
#define LONG_BRANCH_OPCODE		(0xC)
#define LONG_CALL_OPCODE		(0xD)
#define flush_insn_slot(p)		do { } while (0)

typedef struct kprobe_opcode {
	bundle_t bundle;
} kprobe_opcode_t;

/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
	/* copy of the instruction to be emulated */
	kprobe_opcode_t *insn;
#define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
#define INST_FLAG_FIX_BRANCH_REG		2
#define INST_FLAG_BREAK_INST			4
#define INST_FLAG_BOOSTABLE			8
	unsigned long inst_flag;
	unsigned short target_br_reg;
	unsigned short slot;
};

extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);

extern void invalidate_stacked_regs(void);
extern void flush_register_stack(void);
extern void arch_remove_kprobe(struct kprobe *p);

#endif /* _ASM_KPROBES_H */
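The bundle_t layout above packs a 128-bit IA-64 instruction bundle (a 5-bit template plus three 41-bit slots) into two 64-bit quads, so slot 1 necessarily straddles the quad boundary: 18 bits (64-46) live in quad0 as slot1_p0 and the remaining 23 bits in quad1 as slot1_p1. A standalone sketch checking that arithmetic with the struct copied out of kernel context:

#include <assert.h>
#include <stdio.h>

/* Same bit-field split as the header: 5 + 41 + 18 bits fill quad0,
 * 23 + 41 bits fill quad1. */
typedef struct _bundle {
	struct {
		unsigned long long template : 5;
		unsigned long long slot0 : 41;
		unsigned long long slot1_p0 : 64 - 46;
	} quad0;
	struct {
		unsigned long long slot1_p1 : 41 - (64 - 46);
		unsigned long long slot2 : 41;
	} quad1;
} __attribute__((__aligned__(16))) bundle_t;

int main(void)
{
	/* two 64-bit quads make a 16-byte bundle */
	assert(sizeof(bundle_t) == 16);
	/* the two slot-1 pieces sum back to one 41-bit slot */
	assert((64 - 46) + (41 - (64 - 46)) == 41);
	printf("bundle is %zu bytes\n", sizeof(bundle_t));
	return 0;
}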
165
arch/ia64/include/asm/kregs.h
Normal file
@@ -0,0 +1,165 @@
#ifndef _ASM_IA64_KREGS_H
#define _ASM_IA64_KREGS_H

/*
 * Copyright (C) 2001-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * This file defines the kernel register usage convention used by Linux/ia64.
 */

/*
 * Kernel registers:
 */
#define IA64_KR_IO_BASE		0	/* ar.k0: legacy I/O base address */
#define IA64_KR_TSSD		1	/* ar.k1: IVE uses this as the TSSD */
#define IA64_KR_PER_CPU_DATA	3	/* ar.k3: physical per-CPU base */
#define IA64_KR_CURRENT_STACK	4	/* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
#define IA64_KR_FPU_OWNER	5	/* ar.k5: fpu-owner (UP only, at the moment) */
#define IA64_KR_CURRENT		6	/* ar.k6: "current" task pointer */
#define IA64_KR_PT_BASE		7	/* ar.k7: page table base address (physical) */

#define _IA64_KR_PASTE(x,y)	x##y
#define _IA64_KR_PREFIX(n)	_IA64_KR_PASTE(ar.k, n)
#define IA64_KR(n)		_IA64_KR_PREFIX(IA64_KR_##n)

/*
 * Translation registers:
 */
#define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
#define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
#define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */

#define IA64_TR_ALLOC_BASE	2	/* itr&dtr: Base of dynamic TR resource*/
#define IA64_TR_ALLOC_MAX	64	/* Max number for dynamic use*/

/* Processor status register bits: */
#define IA64_PSR_BE_BIT		1
#define IA64_PSR_UP_BIT		2
#define IA64_PSR_AC_BIT		3
#define IA64_PSR_MFL_BIT	4
#define IA64_PSR_MFH_BIT	5
#define IA64_PSR_IC_BIT		13
#define IA64_PSR_I_BIT		14
#define IA64_PSR_PK_BIT		15
#define IA64_PSR_DT_BIT		17
#define IA64_PSR_DFL_BIT	18
#define IA64_PSR_DFH_BIT	19
#define IA64_PSR_SP_BIT		20
#define IA64_PSR_PP_BIT		21
#define IA64_PSR_DI_BIT		22
#define IA64_PSR_SI_BIT		23
#define IA64_PSR_DB_BIT		24
#define IA64_PSR_LP_BIT		25
#define IA64_PSR_TB_BIT		26
#define IA64_PSR_RT_BIT		27
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL0_BIT	32
#define IA64_PSR_CPL1_BIT	33
#define IA64_PSR_IS_BIT		34
#define IA64_PSR_MC_BIT		35
#define IA64_PSR_IT_BIT		36
#define IA64_PSR_ID_BIT		37
#define IA64_PSR_DA_BIT		38
#define IA64_PSR_DD_BIT		39
#define IA64_PSR_SS_BIT		40
#define IA64_PSR_RI_BIT		41
#define IA64_PSR_ED_BIT		43
#define IA64_PSR_BN_BIT		44
#define IA64_PSR_IA_BIT		45

/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
   execve().  */
#define IA64_PSR_BITS_TO_CLEAR	(IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
				 IA64_PSR_TB  | IA64_PSR_ID  | IA64_PSR_DA | IA64_PSR_DD | \
				 IA64_PSR_SS  | IA64_PSR_ED  | IA64_PSR_IA)
#define IA64_PSR_BITS_TO_SET	(IA64_PSR_DFH | IA64_PSR_SP)

#define IA64_PSR_BE	(__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP	(__IA64_UL(1) << IA64_PSR_UP_BIT)
#define IA64_PSR_AC	(__IA64_UL(1) << IA64_PSR_AC_BIT)
#define IA64_PSR_MFL	(__IA64_UL(1) << IA64_PSR_MFL_BIT)
#define IA64_PSR_MFH	(__IA64_UL(1) << IA64_PSR_MFH_BIT)
#define IA64_PSR_IC	(__IA64_UL(1) << IA64_PSR_IC_BIT)
#define IA64_PSR_I	(__IA64_UL(1) << IA64_PSR_I_BIT)
#define IA64_PSR_PK	(__IA64_UL(1) << IA64_PSR_PK_BIT)
#define IA64_PSR_DT	(__IA64_UL(1) << IA64_PSR_DT_BIT)
#define IA64_PSR_DFL	(__IA64_UL(1) << IA64_PSR_DFL_BIT)
#define IA64_PSR_DFH	(__IA64_UL(1) << IA64_PSR_DFH_BIT)
#define IA64_PSR_SP	(__IA64_UL(1) << IA64_PSR_SP_BIT)
#define IA64_PSR_PP	(__IA64_UL(1) << IA64_PSR_PP_BIT)
#define IA64_PSR_DI	(__IA64_UL(1) << IA64_PSR_DI_BIT)
#define IA64_PSR_SI	(__IA64_UL(1) << IA64_PSR_SI_BIT)
#define IA64_PSR_DB	(__IA64_UL(1) << IA64_PSR_DB_BIT)
#define IA64_PSR_LP	(__IA64_UL(1) << IA64_PSR_LP_BIT)
#define IA64_PSR_TB	(__IA64_UL(1) << IA64_PSR_TB_BIT)
#define IA64_PSR_RT	(__IA64_UL(1) << IA64_PSR_RT_BIT)
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL	(__IA64_UL(3) << IA64_PSR_CPL0_BIT)
#define IA64_PSR_IS	(__IA64_UL(1) << IA64_PSR_IS_BIT)
#define IA64_PSR_MC	(__IA64_UL(1) << IA64_PSR_MC_BIT)
#define IA64_PSR_IT	(__IA64_UL(1) << IA64_PSR_IT_BIT)
#define IA64_PSR_ID	(__IA64_UL(1) << IA64_PSR_ID_BIT)
#define IA64_PSR_DA	(__IA64_UL(1) << IA64_PSR_DA_BIT)
#define IA64_PSR_DD	(__IA64_UL(1) << IA64_PSR_DD_BIT)
#define IA64_PSR_SS	(__IA64_UL(1) << IA64_PSR_SS_BIT)
#define IA64_PSR_RI	(__IA64_UL(3) << IA64_PSR_RI_BIT)
#define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
#define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)
#define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)

/* User mask bits: */
#define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)

/* Default Control Register */
#define IA64_DCR_PP_BIT		0	/* privileged performance monitor default */
#define IA64_DCR_BE_BIT		1	/* big-endian default */
#define IA64_DCR_LC_BIT		2	/* ia32 lock-check enable */
#define IA64_DCR_DM_BIT		8	/* defer TLB miss faults */
#define IA64_DCR_DP_BIT		9	/* defer page-not-present faults */
#define IA64_DCR_DK_BIT		10	/* defer key miss faults */
#define IA64_DCR_DX_BIT		11	/* defer key permission faults */
#define IA64_DCR_DR_BIT		12	/* defer access right faults */
#define IA64_DCR_DA_BIT		13	/* defer access bit faults */
#define IA64_DCR_DD_BIT		14	/* defer debug faults */

#define IA64_DCR_PP	(__IA64_UL(1) << IA64_DCR_PP_BIT)
#define IA64_DCR_BE	(__IA64_UL(1) << IA64_DCR_BE_BIT)
#define IA64_DCR_LC	(__IA64_UL(1) << IA64_DCR_LC_BIT)
#define IA64_DCR_DM	(__IA64_UL(1) << IA64_DCR_DM_BIT)
#define IA64_DCR_DP	(__IA64_UL(1) << IA64_DCR_DP_BIT)
#define IA64_DCR_DK	(__IA64_UL(1) << IA64_DCR_DK_BIT)
#define IA64_DCR_DX	(__IA64_UL(1) << IA64_DCR_DX_BIT)
#define IA64_DCR_DR	(__IA64_UL(1) << IA64_DCR_DR_BIT)
#define IA64_DCR_DA	(__IA64_UL(1) << IA64_DCR_DA_BIT)
#define IA64_DCR_DD	(__IA64_UL(1) << IA64_DCR_DD_BIT)

/* Interrupt Status Register */
#define IA64_ISR_X_BIT		32	/* execute access */
#define IA64_ISR_W_BIT		33	/* write access */
#define IA64_ISR_R_BIT		34	/* read access */
#define IA64_ISR_NA_BIT		35	/* non-access */
#define IA64_ISR_SP_BIT		36	/* speculative load exception */
#define IA64_ISR_RS_BIT		37	/* mandatory register-stack exception */
#define IA64_ISR_IR_BIT		38	/* invalid register frame exception */
#define IA64_ISR_CODE_MASK	0xf

#define IA64_ISR_X	(__IA64_UL(1) << IA64_ISR_X_BIT)
#define IA64_ISR_W	(__IA64_UL(1) << IA64_ISR_W_BIT)
#define IA64_ISR_R	(__IA64_UL(1) << IA64_ISR_R_BIT)
#define IA64_ISR_NA	(__IA64_UL(1) << IA64_ISR_NA_BIT)
#define IA64_ISR_SP	(__IA64_UL(1) << IA64_ISR_SP_BIT)
#define IA64_ISR_RS	(__IA64_UL(1) << IA64_ISR_RS_BIT)
#define IA64_ISR_IR	(__IA64_UL(1) << IA64_ISR_IR_BIT)

/* ISR code field for non-access instructions */
#define IA64_ISR_CODE_TPA	0
#define IA64_ISR_CODE_FC	1
#define IA64_ISR_CODE_PROBE	2
#define IA64_ISR_CODE_TAK	3
#define IA64_ISR_CODE_LFETCH	4
#define IA64_ISR_CODE_PROBEF	5

#endif /* _ASM_IA64_KREGS_H */
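Each IA64_PSR_* mask above is simply its *_BIT number shifted into place; multi-bit fields like psr.ri (the 2-bit restart-instruction slot) use a 3 instead of a 1. A standalone sketch of that construction, assuming __IA64_UL() is a plain unsigned-long cast as in the real headers:

#include <assert.h>

#define __IA64_UL(x)	((unsigned long)(x))
#define IA64_PSR_I_BIT	14
#define IA64_PSR_RI_BIT	41
#define IA64_PSR_I	(__IA64_UL(1) << IA64_PSR_I_BIT)
#define IA64_PSR_RI	(__IA64_UL(3) << IA64_PSR_RI_BIT)	/* 2-bit field */

int main(void)
{
	/* compose a psr value: interrupts on, restart slot = 2 */
	unsigned long psr = IA64_PSR_I | (__IA64_UL(2) << IA64_PSR_RI_BIT);

	assert(psr & IA64_PSR_I);				/* psr.i set */
	assert(((psr & IA64_PSR_RI) >> IA64_PSR_RI_BIT) == 2);	/* extract ri */
	return 0;
}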
609
arch/ia64/include/asm/kvm_host.h
Normal file
@@ -0,0 +1,609 @@
/*
 * kvm_host.h: used for kvm module, and hold ia64-specific sections.
 *
 * Copyright (C) 2007, Intel Corporation.
 *
 * Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#ifndef __ASM_KVM_HOST_H
#define __ASM_KVM_HOST_H

#define KVM_USER_MEM_SLOTS 32

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* define exit reasons from vmm to kvm*/
#define EXIT_REASON_VM_PANIC		0
#define EXIT_REASON_MMIO_INSTRUCTION	1
#define EXIT_REASON_PAL_CALL		2
#define EXIT_REASON_SAL_CALL		3
#define EXIT_REASON_SWITCH_RR6		4
#define EXIT_REASON_VM_DESTROY		5
#define EXIT_REASON_EXTERNAL_INTERRUPT	6
#define EXIT_REASON_IPI			7
#define EXIT_REASON_PTC_G		8
#define EXIT_REASON_DEBUG		20

/*Define vmm address space and vm data space.*/
#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
#define KVM_VMM_SHIFT 24
#define KVM_VMM_BASE 0xD000000000000000
#define VMM_SIZE (__IA64_UL_CONST(8)<<20)

/*
 * Define vm_buffer, used by PAL Services, base address.
 * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
 */
#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)

/*
 * kvm guest's data area looks as follow:
 *
 *            +----------------------+	-------	KVM_VM_DATA_SIZE
 *	      |	    vcpu[n]'s data   |	 |     ___________________KVM_STK_OFFSET
 *     	      |			     |	 |    /			  |
 *     	      |	       ..........    |	 |   /vcpu's struct&stack |
 *     	      |	       ..........    |	 |  /---------------------|---- 0
 *	      |	    vcpu[5]'s data   |	 | /	   vpd		  |
 *	      |	    vcpu[4]'s data   |	 |/-----------------------|
 *	      |	    vcpu[3]'s data   |	 /	   vtlb		  |
 *	      |	    vcpu[2]'s data   |	/|------------------------|
 *	      |	    vcpu[1]'s data   |/  |	   vhpt		  |
 *	      |	    vcpu[0]'s data   |____________________________|
 *            +----------------------+	 |
 *	      |	   memory dirty log  |	 |
 *            +----------------------+	 |
 *	      |	   vm's data struct  |	 |
 *            +----------------------+	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |	  vm's p2m table     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 *	      |			     |	 |
 * vm's data->|			     |	 |
 *	      +----------------------+ ------- 0
 * To support large memory, needs to increase the size of p2m.
 * To support more vcpus, needs to ensure it has enough space to
 * hold vcpus' data.
 */

#define KVM_VM_DATA_SHIFT	26
#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_SIZE)

#define KVM_P2M_BASE		KVM_VM_DATA_BASE
#define KVM_P2M_SIZE		(__IA64_UL_CONST(24) << 20)

#define VHPT_SHIFT		16
#define VHPT_SIZE		(__IA64_UL_CONST(1) << VHPT_SHIFT)
#define VHPT_NUM_ENTRIES	(__IA64_UL_CONST(1) << (VHPT_SHIFT-5))

#define VTLB_SHIFT		16
#define VTLB_SIZE		(__IA64_UL_CONST(1) << VTLB_SHIFT)
#define VTLB_NUM_ENTRIES	(1UL << (VHPT_SHIFT-5))

#define VPD_SHIFT		16
#define VPD_SIZE		(__IA64_UL_CONST(1) << VPD_SHIFT)

#define VCPU_STRUCT_SHIFT	16
#define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)

/*
 * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
 */
#define KVM_STK_SHIFT		16
#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)

#define KVM_VM_STRUCT_SHIFT	19
#define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)

#define KVM_MEM_DIRY_LOG_SHIFT	19
#define KVM_MEM_DIRTY_LOG_SIZE	(__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)

#ifndef __ASSEMBLY__

/*Define the max vcpus and memory for Guests.*/
#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)

#define VMM_LOG_LEN 256

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>

struct kvm_vcpu_data {
	char vcpu_vhpt[VHPT_SIZE];
	char vcpu_vtlb[VTLB_SIZE];
	char vcpu_vpd[VPD_SIZE];
	char vcpu_struct[VCPU_STRUCT_SIZE];
};

struct kvm_vm_data {
	char kvm_p2m[KVM_P2M_SIZE];
	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};

#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
				offsetof(struct kvm_vm_data, vcpu_data[n]))
#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
				offsetof(struct kvm_vm_data, kvm_vm_struct))
#define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)

#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
#define VCPU_STRUCT_BASE(n)	(VCPU_BASE(n) + \
				offsetof(struct kvm_vcpu_data, vcpu_struct))

/*IO section definitions*/
#define IOREQ_READ	1
#define IOREQ_WRITE	0

#define STATE_IOREQ_NONE	0
#define STATE_IOREQ_READY	1
#define STATE_IOREQ_INPROCESS	2
#define STATE_IORESP_READY	3

/*Guest Physical address layout.*/
#define GPFN_MEM	(0UL << 60)	/* Guest pfn is normal mem */
#define GPFN_FRAME_BUFFER	(1UL << 60)	/* VGA framebuffer */
#define GPFN_LOW_MMIO	(2UL << 60)	/* Low MMIO range */
#define GPFN_PIB	(3UL << 60)	/* PIB base */
#define GPFN_IOSAPIC	(4UL << 60)	/* IOSAPIC base */
#define GPFN_LEGACY_IO	(5UL << 60)	/* Legacy I/O base */
#define GPFN_GFW	(6UL << 60)	/* Guest Firmware */
#define GPFN_PHYS_MMIO	(7UL << 60)	/* Directed MMIO Range */

#define GPFN_IO_MASK	(7UL << 60)	/* Guest pfn is I/O type */
#define GPFN_INV_MASK	(1UL << 63)	/* Guest pfn is invalid */
#define INVALID_MFN	(~0UL)
#define MEM_G		(1UL << 30)
#define MEM_M		(1UL << 20)
#define MMIO_START	(3 * MEM_G)
#define MMIO_SIZE	(512 * MEM_M)
#define VGA_IO_START	0xA0000UL
#define VGA_IO_SIZE	0x20000
#define LEGACY_IO_START	(MMIO_START + MMIO_SIZE)
#define LEGACY_IO_SIZE	(64 * MEM_M)
#define IO_SAPIC_START	0xfec00000UL
#define IO_SAPIC_SIZE	0x100000
#define PIB_START	0xfee00000UL
#define PIB_SIZE	0x200000
#define GFW_START	(4 * MEM_G - 16 * MEM_M)
#define GFW_SIZE	(16 * MEM_M)

/*Deliver mode, defined for ioapic.c*/
#define dest_Fixed IOSAPIC_FIXED
#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY

#define NMI_VECTOR			2
#define ExtINT_VECTOR			0
#define NULL_VECTOR			(-1)
#define IA64_SPURIOUS_INT_VECTOR	0x0f

#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)

/*
 *Delivery mode
 */
#define SAPIC_DELIV_SHIFT	8
#define SAPIC_FIXED		0x0
#define SAPIC_LOWEST_PRIORITY	0x1
#define SAPIC_PMI		0x2
#define SAPIC_NMI		0x4
#define SAPIC_INIT		0x5
#define SAPIC_EXTINT		0x7

/*
 * vcpu->requests bit members for arch
 */
#define KVM_REQ_PTC_G		32
#define KVM_REQ_RESUME		33

struct kvm_mmio_req {
	uint64_t addr;	/* physical address */
	uint64_t size;	/* size in bytes */
	uint64_t data;	/* data (or paddr of data) */
	uint8_t state:4;
	uint8_t dir:1;	/* 1=read, 0=write */
};

/*Pal data struct */
struct kvm_pal_call{
	/*In area*/
	uint64_t gr28;
	uint64_t gr29;
	uint64_t gr30;
	uint64_t gr31;
	/*Out area*/
	struct ia64_pal_retval ret;
};

/* Sal data structure */
struct kvm_sal_call{
	/*In area*/
	uint64_t in0;
	uint64_t in1;
	uint64_t in2;
	uint64_t in3;
	uint64_t in4;
	uint64_t in5;
	uint64_t in6;
	uint64_t in7;
	struct sal_ret_values ret;
};

/*Guest change rr6*/
struct kvm_switch_rr6 {
	uint64_t old_rr;
	uint64_t new_rr;
};

union ia64_ipi_a{
	unsigned long val;
	struct {
		unsigned long rv  : 3;
		unsigned long ir  : 1;
		unsigned long eid : 8;
		unsigned long id  : 8;
		unsigned long ib_base : 44;
	};
};

union ia64_ipi_d {
	unsigned long val;
	struct {
		unsigned long vector : 8;
		unsigned long dm  : 3;
		unsigned long ig  : 53;
	};
};

/*ipi check exit data*/
struct kvm_ipi_data{
	union ia64_ipi_a addr;
	union ia64_ipi_d data;
};

/*global purge data*/
struct kvm_ptc_g {
	unsigned long vaddr;
	unsigned long rr;
	unsigned long ps;
	struct kvm_vcpu *vcpu;
};

/*Exit control data */
struct exit_ctl_data{
	uint32_t exit_reason;
	uint32_t vm_status;
	union {
		struct kvm_mmio_req	ioreq;
		struct kvm_pal_call	pal_data;
		struct kvm_sal_call	sal_data;
		struct kvm_switch_rr6	rr_data;
		struct kvm_ipi_data	ipi_data;
		struct kvm_ptc_g	ptc_g_data;
	} u;
};

union pte_flags {
	unsigned long val;
	struct {
		unsigned long p    :  1; /*0      */
		unsigned long      :  1; /* 1     */
		unsigned long ma   :  3; /* 2-4   */
		unsigned long a    :  1; /* 5     */
		unsigned long d    :  1; /* 6     */
		unsigned long pl   :  2; /* 7-8   */
		unsigned long ar   :  3; /* 9-11  */
		unsigned long ppn  : 38; /* 12-49 */
		unsigned long      :  2; /* 50-51 */
		unsigned long ed   :  1; /* 52    */
	};
};

union ia64_pta {
	unsigned long val;
	struct {
		unsigned long ve : 1;
		unsigned long reserved0 : 1;
		unsigned long size : 6;
		unsigned long vf : 1;
		unsigned long reserved1 : 6;
		unsigned long base : 49;
	};
};

struct thash_cb {
	/* THASH base information */
	struct thash_data	*hash; /* hash table pointer */
	union ia64_pta		pta;
	int			num;
};

struct kvm_vcpu_stat {
	u32 halt_wakeup;
};

struct kvm_vcpu_arch {
	int launched;
	int last_exit;
	int last_run_cpu;
	int vmm_tr_slot;
	int vm_tr_slot;
	int sn_rtc_tr_slot;

#define KVM_MP_STATE_RUNNABLE		0
#define KVM_MP_STATE_UNINITIALIZED	1
#define KVM_MP_STATE_INIT_RECEIVED	2
#define KVM_MP_STATE_HALTED		3
	int mp_state;

#define MAX_PTC_G_NUM			3
	int ptc_g_count;
	struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];

	/*halt timer to wake up sleepy vcpus*/
	struct hrtimer hlt_timer;
	long ht_active;

	struct kvm_lapic *apic;    /* kernel irqchip context */
	struct vpd *vpd;

	/* Exit data for vmm_transition*/
	struct exit_ctl_data exit_data;

	cpumask_t cache_coherent_map;

	unsigned long vmm_rr;
	unsigned long host_rr6;
	unsigned long psbits[8];
	unsigned long cr_iipa;
	unsigned long cr_isr;
	unsigned long vsa_base;
	unsigned long dirty_log_lock_pa;
	unsigned long __gp;
	/* TR and TC.  */
	struct thash_data itrs[NITRS];
	struct thash_data dtrs[NDTRS];
	/* Bit is set if there is a tr/tc for the region.  */
	unsigned char itr_regions;
	unsigned char dtr_regions;
	unsigned char tc_regions;
	/* purge all */
	unsigned long ptce_base;
	unsigned long ptce_count[2];
	unsigned long ptce_stride[2];
	/* itc/itm */
	unsigned long last_itc;
	long itc_offset;
	unsigned long itc_check;
	unsigned long timer_check;
	unsigned int timer_pending;
	unsigned int timer_fired;

	unsigned long vrr[8];
	unsigned long ibr[8];
	unsigned long dbr[8];
	unsigned long insvc[4];		/* Interrupt in service.  */
	unsigned long xtp;

	unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
	unsigned long metaphysical_saved_rr0; /* from kvm_arch */
	unsigned long metaphysical_saved_rr4; /* from kvm_arch */
	unsigned long fp_psr;       /*used for lazy float register */
	unsigned long saved_gp;
	/* for physical emulation */
	int mode_flags;
	struct thash_cb vtlb;
	struct thash_cb vhpt;
	char irq_check;
	char irq_new_pending;

	unsigned long opcode;
	unsigned long cause;
	char log_buf[VMM_LOG_LEN];
	union context host;
	union context guest;

	char mmio_data[8];
};

struct kvm_vm_stat {
	u64 remote_tlb_flush;
};

struct kvm_sal_data {
	unsigned long boot_ip;
	unsigned long boot_gp;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	spinlock_t dirty_log_lock;

	unsigned long	vm_base;
	unsigned long	metaphysical_rr0;
	unsigned long	metaphysical_rr4;
	unsigned long	vmm_init_rr;

	int		is_sn2;

	struct kvm_ioapic *vioapic;
	struct kvm_vm_stat stat;
	struct kvm_sal_data rdv_sal_data;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;

	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
};

union cpuid3_t {
	u64 value;
	struct {
		u64 number : 8;
		u64 revision : 8;
		u64 model : 8;
		u64 family : 8;
		u64 archrev : 8;
		u64 rv : 24;
	};
};

struct kvm_pt_regs {
	/* The following registers are saved by SAVE_MIN: */
	unsigned long b6;  /* scratch */
	unsigned long b7;  /* scratch */

	unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
	unsigned long ar_ssd; /* reserved for future use (scratch) */

	unsigned long r8;  /* scratch (return value register 0) */
	unsigned long r9;  /* scratch (return value register 1) */
	unsigned long r10; /* scratch (return value register 2) */
	unsigned long r11; /* scratch (return value register 3) */

	unsigned long cr_ipsr; /* interrupted task's psr */
	unsigned long cr_iip;  /* interrupted task's instruction pointer */
	unsigned long cr_ifs;  /* interrupted task's function state */

	unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;  /* prev function state  */
	unsigned long ar_rsc;  /* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;  /* RSE NaT */
	unsigned long ar_bspstore; /* RSE bspstore */

	unsigned long pr;  /* 64 predicate registers (1 bit each) */
	unsigned long b0;  /* return pointer (bp) */
	unsigned long loadrs;  /* size of dirty partition << 16 */

	unsigned long r1;  /* the gp pointer */
	unsigned long r12; /* interrupted task's memory stack pointer */
	unsigned long r13; /* thread pointer */

	unsigned long ar_fpsr;  /* floating point status (preserved) */
	unsigned long r15;  /* scratch */

	/* The remaining registers are NOT saved for system calls.  */
	unsigned long r14;  /* scratch */
	unsigned long r2;  /* scratch */
	unsigned long r3;  /* scratch */
	unsigned long r16;  /* scratch */
	unsigned long r17;  /* scratch */
	unsigned long r18;  /* scratch */
	unsigned long r19;  /* scratch */
	unsigned long r20;  /* scratch */
	unsigned long r21;  /* scratch */
	unsigned long r22;  /* scratch */
	unsigned long r23;  /* scratch */
	unsigned long r24;  /* scratch */
	unsigned long r25;  /* scratch */
	unsigned long r26;  /* scratch */
	unsigned long r27;  /* scratch */
	unsigned long r28;  /* scratch */
	unsigned long r29;  /* scratch */
	unsigned long r30;  /* scratch */
	unsigned long r31;  /* scratch */
	unsigned long ar_ccv;  /* compare/exchange value (scratch) */

	/*
	 * Floating point registers that the kernel considers scratch:
	 */
	struct ia64_fpreg f6;  /* scratch */
	struct ia64_fpreg f7;  /* scratch */
	struct ia64_fpreg f8;  /* scratch */
	struct ia64_fpreg f9;  /* scratch */
	struct ia64_fpreg f10; /* scratch */
	struct ia64_fpreg f11; /* scratch */

	unsigned long r4;  /* preserved */
	unsigned long r5;  /* preserved */
	unsigned long r6;  /* preserved */
	unsigned long r7;  /* preserved */
	unsigned long eml_unat;  /* used for emulating instruction */
	unsigned long pad0;  /* alignment pad */
};

static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
{
	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
}

typedef int kvm_vmm_entry(void);
typedef void kvm_tramp_entry(union context *host, union context *guest);

struct kvm_vmm_info{
	struct module	*module;
	kvm_vmm_entry	*vmm_entry;
	kvm_tramp_entry *tramp_entry;
	unsigned long	vmm_ivt;
	unsigned long	patch_mov_ar;
	unsigned long	patch_mov_ar_sn2;
};

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);

#define __KVM_HAVE_ARCH_VM_ALLOC 1
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		const struct kvm_memory_slot *old,
		enum kvm_mr_change change) {}
static inline void kvm_arch_hardware_unsetup(void) {}

#endif /* __ASSEMBLY__ */

#endif
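The vcpu_regs() helper above relies on each vcpu struct sharing one KVM_STK_OFFSET-sized (64 KB) block with its stack and register save area: the kvm_pt_regs sits at the very top, so adding KVM_STK_OFFSET and stepping back one struct lands on it. A userspace model of that arithmetic, with stand-in types instead of the real kernel ones:

#include <assert.h>
#include <stdlib.h>

#define KVM_STK_OFFSET	(1UL << 16)	/* 64 KB vcpu block, as above */

/* simplified stand-ins for the real structs */
struct demo_pt_regs { unsigned long r[32]; };
struct demo_vcpu { int id; /* stack fills the rest of the block */ };

static struct demo_pt_regs *vcpu_regs(struct demo_vcpu *v)
{
	/* same expression as the header: top of block, minus one struct */
	return (struct demo_pt_regs *)((unsigned long)v + KVM_STK_OFFSET) - 1;
}

int main(void)
{
	void *block = malloc(KVM_STK_OFFSET);	/* one vcpu block */
	assert(block);

	struct demo_pt_regs *regs = vcpu_regs(block);
	/* the register area ends exactly at the top of the block */
	assert((char *)(regs + 1) == (char *)block + KVM_STK_OFFSET);

	free(block);
	return 0;
}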
12
arch/ia64/include/asm/libata-portmap.h
Normal file
@@ -0,0 +1,12 @@
#ifndef __ASM_IA64_LIBATA_PORTMAP_H
#define __ASM_IA64_LIBATA_PORTMAP_H

#define ATA_PRIMARY_CMD		0x1F0
#define ATA_PRIMARY_CTL		0x3F6
#define ATA_PRIMARY_IRQ(dev)	isa_irq_to_vector(14)

#define ATA_SECONDARY_CMD	0x170
#define ATA_SECONDARY_CTL	0x376
#define ATA_SECONDARY_IRQ(dev)	isa_irq_to_vector(15)

#endif
18
arch/ia64/include/asm/linkage.h
Normal file
@@ -0,0 +1,18 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H

#ifndef __ASSEMBLY__

#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))

#else

#include <asm/asmmacro.h>

#endif

#define cond_syscall(x) asm(".weak\t" #x "#\n" #x "#\t=\tsys_ni_syscall#")
#define SYSCALL_ALIAS(alias, name) \
	asm ( #alias "# = " #name "#\n\t.globl " #alias "#")

#endif
1
arch/ia64/include/asm/local.h
Normal file
@@ -0,0 +1 @@
#include <asm-generic/local.h>
1
arch/ia64/include/asm/local64.h
Normal file
@@ -0,0 +1 @@
#include <asm-generic/local64.h>
367
arch/ia64/include/asm/machvec.h
Normal file
@@ -0,0 +1,367 @@
/*
 * Machine vector for IA-64.
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#ifndef _ASM_IA64_MACHVEC_H
#define _ASM_IA64_MACHVEC_H

#include <linux/types.h>

/* forward declarations: */
struct device;
struct pt_regs;
struct scatterlist;
struct page;
struct mm_struct;
struct pci_bus;
struct task_struct;
struct pci_dev;
struct msi_desc;
struct dma_attrs;

typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef u8 ia64_mv_irq_to_vector (int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
				       u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
					u8 size);
typedef void ia64_mv_migrate_t(struct task_struct * task);
typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
typedef void ia64_mv_kernel_launch_event_t(void);

/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
typedef u64 ia64_mv_dma_get_required_mask (struct device *);
typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);

/*
 * WARNING: The legacy I/O space is _architected_.  Platforms are
 * expected to follow this architected model (see Section 10.7 in the
 * IA-64 Architecture Software Developer's Manual).  Unfortunately,
 * some broken machines do not follow that model, which is why we have
 * to make the inX/outX operations part of the machine vector.
 * Platform designers should follow the architected model whenever
 * possible.
 */
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);

typedef int ia64_mv_setup_msi_irq_t (struct pci_dev *pdev, struct msi_desc *);
typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);

static inline void
machvec_noop (void)
{
}

static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}

static inline void
machvec_noop_task (struct task_struct *task)
{
}

static inline void
machvec_noop_bus (struct pci_bus *bus)
{
}

extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
extern void machvec_tlb_migrate_finish (struct mm_struct *);

# if defined (CONFIG_IA64_HP_SIM)
#  include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
#  include <asm/machvec_dig.h>
# elif defined(CONFIG_IA64_DIG_VTD)
#  include <asm/machvec_dig_vtd.h>
# elif defined (CONFIG_IA64_HP_ZX1)
#  include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
#  include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
#  include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
#  include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_GENERIC)

# ifdef MACHVEC_PLATFORM_HEADER
#  include MACHVEC_PLATFORM_HEADER
# else
#  define ia64_platform_name	ia64_mv.name
#  define platform_setup	ia64_mv.setup
#  define platform_cpu_init	ia64_mv.cpu_init
#  define platform_irq_init	ia64_mv.irq_init
#  define platform_send_ipi	ia64_mv.send_ipi
#  define platform_timer_interrupt	ia64_mv.timer_interrupt
#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
#  define platform_dma_init		ia64_mv.dma_init
#  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
#  define platform_dma_get_ops		ia64_mv.dma_get_ops
#  define platform_irq_to_vector	ia64_mv.irq_to_vector
#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
#  define platform_inb		ia64_mv.inb
#  define platform_inw		ia64_mv.inw
#  define platform_inl		ia64_mv.inl
#  define platform_outb		ia64_mv.outb
#  define platform_outw		ia64_mv.outw
#  define platform_outl		ia64_mv.outl
#  define platform_mmiowb	ia64_mv.mmiowb
#  define platform_readb	ia64_mv.readb
#  define platform_readw	ia64_mv.readw
#  define platform_readl	ia64_mv.readl
#  define platform_readq	ia64_mv.readq
#  define platform_readb_relaxed	ia64_mv.readb_relaxed
#  define platform_readw_relaxed	ia64_mv.readw_relaxed
#  define platform_readl_relaxed	ia64_mv.readl_relaxed
#  define platform_readq_relaxed	ia64_mv.readq_relaxed
#  define platform_migrate		ia64_mv.migrate
#  define platform_setup_msi_irq	ia64_mv.setup_msi_irq
#  define platform_teardown_msi_irq	ia64_mv.teardown_msi_irq
#  define platform_pci_fixup_bus	ia64_mv.pci_fixup_bus
#  define platform_kernel_launch_event	ia64_mv.kernel_launch_event
# endif

/* __attribute__((__aligned__(16))) is required to make size of the
 * structure multiple of 16 bytes.
 * This will fillup the holes created because of section 3.3.1 in
 * Software Conventions guide.
 */
struct ia64_machine_vector {
	const char *name;
	ia64_mv_setup_t *setup;
	ia64_mv_cpu_init_t *cpu_init;
	ia64_mv_irq_init_t *irq_init;
	ia64_mv_send_ipi_t *send_ipi;
	ia64_mv_timer_interrupt_t *timer_interrupt;
	ia64_mv_global_tlb_purge_t *global_tlb_purge;
	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
	ia64_mv_dma_init *dma_init;
	ia64_mv_dma_get_required_mask *dma_get_required_mask;
	ia64_mv_dma_get_ops *dma_get_ops;
	ia64_mv_irq_to_vector *irq_to_vector;
	ia64_mv_local_vector_to_irq *local_vector_to_irq;
	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
	ia64_mv_pci_legacy_read_t *pci_legacy_read;
	ia64_mv_pci_legacy_write_t *pci_legacy_write;
	ia64_mv_inb_t *inb;
	ia64_mv_inw_t *inw;
	ia64_mv_inl_t *inl;
	ia64_mv_outb_t *outb;
	ia64_mv_outw_t *outw;
	ia64_mv_outl_t *outl;
	ia64_mv_mmiowb_t *mmiowb;
	ia64_mv_readb_t *readb;
	ia64_mv_readw_t *readw;
	ia64_mv_readl_t *readl;
	ia64_mv_readq_t *readq;
	ia64_mv_readb_relaxed_t *readb_relaxed;
	ia64_mv_readw_relaxed_t *readw_relaxed;
	ia64_mv_readl_relaxed_t *readl_relaxed;
	ia64_mv_readq_relaxed_t *readq_relaxed;
	ia64_mv_migrate_t *migrate;
	ia64_mv_setup_msi_irq_t *setup_msi_irq;
	ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
	ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
	ia64_mv_kernel_launch_event_t *kernel_launch_event;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */

#define MACHVEC_INIT(name)			\
{						\
	#name,					\
	platform_setup,				\
	platform_cpu_init,			\
	platform_irq_init,			\
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_get_required_mask,		\
	platform_dma_get_ops,			\
	platform_irq_to_vector,			\
	platform_local_vector_to_irq,		\
	platform_pci_get_legacy_mem,		\
	platform_pci_legacy_read,		\
	platform_pci_legacy_write,		\
	platform_inb,				\
	platform_inw,				\
	platform_inl,				\
	platform_outb,				\
	platform_outw,				\
	platform_outl,				\
	platform_mmiowb,			\
	platform_readb,				\
	platform_readw,				\
	platform_readl,				\
	platform_readq,				\
	platform_readb_relaxed,			\
	platform_readw_relaxed,			\
	platform_readl_relaxed,			\
	platform_readq_relaxed,			\
	platform_migrate,			\
	platform_setup_msi_irq,			\
	platform_teardown_msi_irq,		\
	platform_pci_fixup_bus,			\
	platform_kernel_launch_event		\
}

extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);
extern void machvec_init_from_cmdline(const char *cmdline);

# else
#  error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
# endif /* CONFIG_IA64_GENERIC */

extern void swiotlb_dma_init(void);
extern struct dma_map_ops *dma_get_ops(struct device *);

/*
 * Define default versions so we can extend machvec for new platforms without having
 * to update the machvec files for all existing platforms.
 */
#ifndef platform_setup
# define platform_setup			machvec_setup
#endif
#ifndef platform_cpu_init
# define platform_cpu_init		machvec_noop
#endif
#ifndef platform_irq_init
# define platform_irq_init		machvec_noop
#endif

#ifndef platform_send_ipi
# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
#endif
#ifndef platform_timer_interrupt
# define platform_timer_interrupt	machvec_timer_interrupt
#endif
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge	ia64_global_tlb_purge	/* default to architected version */
#endif
#ifndef platform_tlb_migrate_finish
# define platform_tlb_migrate_finish	machvec_noop_mm
#endif
#ifndef platform_kernel_launch_event
# define platform_kernel_launch_event	machvec_noop
#endif
#ifndef platform_dma_init
# define platform_dma_init		swiotlb_dma_init
#endif
#ifndef platform_dma_get_ops
# define platform_dma_get_ops		dma_get_ops
#endif
#ifndef platform_dma_get_required_mask
# define platform_dma_get_required_mask	ia64_dma_get_required_mask
#endif
#ifndef platform_irq_to_vector
# define platform_irq_to_vector		__ia64_irq_to_vector
#endif
#ifndef platform_local_vector_to_irq
# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
#endif
#ifndef platform_pci_get_legacy_mem
# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
#endif
#ifndef platform_pci_legacy_read
# define platform_pci_legacy_read	ia64_pci_legacy_read
extern int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
#endif
#ifndef platform_pci_legacy_write
# define platform_pci_legacy_write	ia64_pci_legacy_write
extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
#endif
#ifndef platform_inb
# define platform_inb		__ia64_inb
#endif
#ifndef platform_inw
# define platform_inw		__ia64_inw
#endif
#ifndef platform_inl
# define platform_inl		__ia64_inl
#endif
#ifndef platform_outb
# define platform_outb		__ia64_outb
#endif
#ifndef platform_outw
# define platform_outw		__ia64_outw
#endif
#ifndef platform_outl
# define platform_outl		__ia64_outl
#endif
#ifndef platform_mmiowb
# define platform_mmiowb	__ia64_mmiowb
#endif
#ifndef platform_readb
# define platform_readb		__ia64_readb
#endif
#ifndef platform_readw
# define platform_readw		__ia64_readw
#endif
#ifndef platform_readl
# define platform_readl		__ia64_readl
#endif
#ifndef platform_readq
# define platform_readq		__ia64_readq
#endif
#ifndef platform_readb_relaxed
# define platform_readb_relaxed	__ia64_readb_relaxed
#endif
#ifndef platform_readw_relaxed
# define platform_readw_relaxed	__ia64_readw_relaxed
#endif
#ifndef platform_readl_relaxed
# define platform_readl_relaxed	__ia64_readl_relaxed
#endif
#ifndef platform_readq_relaxed
# define platform_readq_relaxed	__ia64_readq_relaxed
#endif
#ifndef platform_migrate
# define platform_migrate		machvec_noop_task
#endif
#ifndef platform_setup_msi_irq
# define platform_setup_msi_irq		((ia64_mv_setup_msi_irq_t*)NULL)
#endif
#ifndef platform_teardown_msi_irq
# define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#ifndef platform_pci_fixup_bus
# define platform_pci_fixup_bus		machvec_noop_bus
#endif

#endif /* _ASM_IA64_MACHVEC_H */
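The machine-vector scheme above gives every platform_* operation two bindings: a generic kernel routes each call through the ia64_mv table of function pointers filled in at boot, while a single-platform kernel binds the macros straight to that platform's functions at compile time. A toy userspace model of the generic path (the struct, the two "platforms", and their inb handlers are all invented for the demo):

#include <stdio.h>

typedef unsigned int demo_inb_t(unsigned long);

struct demo_machine_vector {
	const char *name;
	demo_inb_t *inb;
};

static unsigned int dig_inb(unsigned long port)
{
	printf("dig: inb(0x%lx)\n", port);
	return 0;
}

static unsigned int sn2_inb(unsigned long port)
{
	printf("sn2: inb(0x%lx)\n", port);
	return 0;
}

static struct demo_machine_vector demo_mv;	/* plays the role of ia64_mv */
#define platform_inb demo_mv.inb		/* like the generic-kernel macros */

int main(void)
{
	/* machvec_init() would copy the chosen platform's vector at boot */
	demo_mv = (struct demo_machine_vector){ "dig", dig_inb };
	platform_inb(0x1f0);

	demo_mv = (struct demo_machine_vector){ "sn2", sn2_inb };
	platform_inb(0x1f0);
	return 0;
}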
16
arch/ia64/include/asm/machvec_dig.h
Normal file
@@ -0,0 +1,16 @@
#ifndef _ASM_IA64_MACHVEC_DIG_h
#define _ASM_IA64_MACHVEC_DIG_h

extern ia64_mv_setup_t dig_setup;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name	"dig"
#define platform_setup		dig_setup

#endif /* _ASM_IA64_MACHVEC_DIG_h */
18
arch/ia64/include/asm/machvec_dig_vtd.h
Normal file
@@ -0,0 +1,18 @@
#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
#define _ASM_IA64_MACHVEC_DIG_VTD_h

extern ia64_mv_setup_t dig_setup;
extern ia64_mv_dma_init pci_iommu_alloc;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name	"dig_vtd"
#define platform_setup		dig_setup
#define platform_dma_init	pci_iommu_alloc

#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
18
arch/ia64/include/asm/machvec_hpsim.h
Normal file

@ -0,0 +1,18 @@
#ifndef _ASM_IA64_MACHVEC_HPSIM_h
#define _ASM_IA64_MACHVEC_HPSIM_h

extern ia64_mv_setup_t hpsim_setup;
extern ia64_mv_irq_init_t hpsim_irq_init;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name	"hpsim"
#define platform_setup		hpsim_setup
#define platform_irq_init	hpsim_irq_init

#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
18
arch/ia64/include/asm/machvec_hpzx1.h
Normal file

@ -0,0 +1,18 @@
#ifndef _ASM_IA64_MACHVEC_HPZX1_h
#define _ASM_IA64_MACHVEC_HPZX1_h

extern ia64_mv_setup_t dig_setup;
extern ia64_mv_dma_init sba_dma_init;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name	"hpzx1"
#define platform_setup		dig_setup
#define platform_dma_init	sba_dma_init

#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
19
arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
Normal file

@ -0,0 +1,19 @@
#ifndef _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h

extern ia64_mv_setup_t dig_setup;
extern ia64_mv_dma_get_ops hwsw_dma_get_ops;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name	"hpzx1_swiotlb"
#define platform_setup		dig_setup
#define platform_dma_init	machvec_noop
#define platform_dma_get_ops	hwsw_dma_get_ops

#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
35
arch/ia64/include/asm/machvec_init.h
Normal file

@ -0,0 +1,35 @@
#include <asm/iommu.h>
#include <asm/machvec.h>

extern ia64_mv_send_ipi_t ia64_send_ipi;
extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;

extern ia64_mv_inb_t __ia64_inb;
extern ia64_mv_inw_t __ia64_inw;
extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiowb_t __ia64_mmiowb;
extern ia64_mv_readb_t __ia64_readb;
extern ia64_mv_readw_t __ia64_readw;
extern ia64_mv_readl_t __ia64_readl;
extern ia64_mv_readq_t __ia64_readq;
extern ia64_mv_readb_t __ia64_readb_relaxed;
extern ia64_mv_readw_t __ia64_readw_relaxed;
extern ia64_mv_readl_t __ia64_readl_relaxed;
extern ia64_mv_readq_t __ia64_readq_relaxed;

#define MACHVEC_HELPER(name)							\
 struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))	\
	= MACHVEC_INIT(name);

#define MACHVEC_DEFINE(name)	MACHVEC_HELPER(name)

MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
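MACHVEC_DEFINE goes through MACHVEC_HELPER so that MACHVEC_PLATFORM_NAME is expanded to its value before token pasting; without the extra level the struct would literally be named machvec_MACHVEC_PLATFORM_NAME. A minimal sketch of that two-level expansion follows, with hypothetical names and a string standing in for the machine-vector struct:

/* Sketch of the MACHVEC_DEFINE two-level macro expansion (hypothetical names). */
#include <stdio.h>

#define HELPER(name)	const char *machvec_##name = #name;
#define DEFINE(name)	HELPER(name)	/* forces expansion of name first */
#define PLATFORM_NAME	dig

DEFINE(PLATFORM_NAME)	/* emits: const char *machvec_dig = "dig"; */

int main(void)
{
	printf("%s\n", machvec_dig);
	return 0;
}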
118
arch/ia64/include/asm/machvec_sn2.h
Normal file

@ -0,0 +1,118 @@
/*
 * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
 */

#ifndef _ASM_IA64_MACHVEC_SN2_H
#define _ASM_IA64_MACHVEC_SN2_H

extern ia64_mv_setup_t sn_setup;
extern ia64_mv_cpu_init_t sn_cpu_init;
extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_mmiowb_t __sn_mmiowb;
extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_readb_t __sn_readb_relaxed;
extern ia64_mv_readw_t __sn_readw_relaxed;
extern ia64_mv_readl_t __sn_readl_relaxed;
extern ia64_mv_readq_t __sn_readq_relaxed;
extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
extern ia64_mv_dma_init sn_dma_init;
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;


/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name		"sn2"
#define platform_setup			sn_setup
#define platform_cpu_init		sn_cpu_init
#define platform_irq_init		sn_irq_init
#define platform_send_ipi		sn2_send_IPI
#define platform_timer_interrupt	sn_timer_interrupt
#define platform_global_tlb_purge	sn2_global_tlb_purge
#define platform_tlb_migrate_finish	sn_tlb_migrate_finish
#define platform_pci_fixup		sn_pci_fixup
#define platform_inb			__sn_inb
#define platform_inw			__sn_inw
#define platform_inl			__sn_inl
#define platform_outb			__sn_outb
#define platform_outw			__sn_outw
#define platform_outl			__sn_outl
#define platform_mmiowb			__sn_mmiowb
#define platform_readb			__sn_readb
#define platform_readw			__sn_readw
#define platform_readl			__sn_readl
#define platform_readq			__sn_readq
#define platform_readb_relaxed		__sn_readb_relaxed
#define platform_readw_relaxed		__sn_readw_relaxed
#define platform_readl_relaxed		__sn_readl_relaxed
#define platform_readq_relaxed		__sn_readq_relaxed
#define platform_irq_to_vector		sn_irq_to_vector
#define platform_local_vector_to_irq	sn_local_vector_to_irq
#define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
#define platform_pci_legacy_read	sn_pci_legacy_read
#define platform_pci_legacy_write	sn_pci_legacy_write
#define platform_dma_get_required_mask	sn_dma_get_required_mask
#define platform_dma_init		sn_dma_init
#define platform_migrate		sn_migrate
#define platform_kernel_launch_event	sn_kernel_launch_event
#ifdef CONFIG_PCI_MSI
#define platform_setup_msi_irq		sn_setup_msi_irq
#define platform_teardown_msi_irq	sn_teardown_msi_irq
#else
#define platform_setup_msi_irq		((ia64_mv_setup_msi_irq_t*)NULL)
#define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#define platform_pci_fixup_bus		sn_pci_fixup_bus

#include <asm/sn/io.h>

#endif /* _ASM_IA64_MACHVEC_SN2_H */
26
arch/ia64/include/asm/machvec_uv.h
Normal file

@ -0,0 +1,26 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV Core Functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_IA64_MACHVEC_UV_H
#define _ASM_IA64_MACHVEC_UV_H

extern ia64_mv_setup_t uv_setup;

/*
 * This stuff has dual use!
 *
 * For a generic kernel, the macros are used to initialize the
 * platform's machvec structure.  When compiling a non-generic kernel,
 * the macros are used directly.
 */
#define ia64_platform_name	"uv"
#define platform_setup		uv_setup

#endif /* _ASM_IA64_MACHVEC_UV_H */
10
arch/ia64/include/asm/mc146818rtc.h
Normal file

@ -0,0 +1,10 @@
#ifndef _ASM_IA64_MC146818RTC_H
#define _ASM_IA64_MC146818RTC_H

/*
 * Machine dependent access functions for RTC registers.
 */

/* empty include file to satisfy the include in genrtc.c */

#endif /* _ASM_IA64_MC146818RTC_H */
187
arch/ia64/include/asm/mca.h
Normal file

@ -0,0 +1,187 @@
/*
 * File:	mca.h
 * Purpose:	Machine check handling specific defines
 *
 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) Russ Anderson <rja@sgi.com>
 */

#ifndef _ASM_IA64_MCA_H
#define _ASM_IA64_MCA_H

#if !defined(__ASSEMBLY__)

#include <linux/interrupt.h>
#include <linux/types.h>

#include <asm/param.h>
#include <asm/sal.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>

#define IA64_MCA_RENDEZ_TIMEOUT	(20 * 1000)	/* value in milliseconds - 20 seconds */

typedef struct ia64_fptr {
	unsigned long fp;
	unsigned long gp;
} ia64_fptr_t;

typedef union cmcv_reg_u {
	u64	cmcv_regval;
	struct	{
		u64 cmcr_vector		: 8;
		u64 cmcr_reserved1	: 4;
		u64 cmcr_ignored1	: 1;
		u64 cmcr_reserved2	: 3;
		u64 cmcr_mask		: 1;
		u64 cmcr_ignored2	: 47;
	} cmcv_reg_s;

} cmcv_reg_t;

#define cmcv_mask	cmcv_reg_s.cmcr_mask
#define cmcv_vector	cmcv_reg_s.cmcr_vector

enum {
	IA64_MCA_RENDEZ_CHECKIN_NOTDONE		= 0x0,
	IA64_MCA_RENDEZ_CHECKIN_DONE		= 0x1,
	IA64_MCA_RENDEZ_CHECKIN_INIT		= 0x2,
	IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA	= 0x3,
};

/* Information maintained by the MC infrastructure */
typedef struct ia64_mc_info_s {
	u64		imi_mca_handler;
	size_t		imi_mca_handler_size;
	u64		imi_monarch_init_handler;
	size_t		imi_monarch_init_handler_size;
	u64		imi_slave_init_handler;
	size_t		imi_slave_init_handler_size;
	u8		imi_rendez_checkin[NR_CPUS];

} ia64_mc_info_t;

/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
 * Besides the handover state, it also contains some saved registers from the
 * time of the event.
 * Note: mca_asm.S depends on the precise layout of this structure.
 */

struct ia64_sal_os_state {

	/* SAL to OS */
	unsigned long		os_gp;			/* GP of the os registered with the SAL, physical */
	unsigned long		pal_proc;		/* PAL_PROC entry point, physical */
	unsigned long		sal_proc;		/* SAL_PROC entry point, physical */
	unsigned long		rv_rc;			/* MCA - Rendezvous state, INIT - reason code */
	unsigned long		proc_state_param;	/* from R18 */
	unsigned long		monarch;		/* 1 for a monarch event, 0 for a slave */

	/* common */
	unsigned long		sal_ra;			/* Return address in SAL, physical */
	unsigned long		sal_gp;			/* GP of the SAL - physical */
	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
	/* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
	 * Note: if the MCA/INIT recovery code wants to resume to a new context
	 * then it must change these values to reflect the new kernel stack.
	 */
	unsigned long		prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
	unsigned long		prev_IA64_KR_CURRENT_STACK;
	struct task_struct	*prev_task;		/* previous task, NULL if it is not useful */
	/* Some interrupt registers are not saved in minstate, pt_regs or
	 * switch_stack.  Because MCA/INIT can occur when interrupts are
	 * disabled, we need to save the additional interrupt registers over
	 * MCA/INIT and resume.
	 */
	unsigned long		isr;
	unsigned long		ifa;
	unsigned long		itir;
	unsigned long		iipa;
	unsigned long		iim;
	unsigned long		iha;

	/* OS to SAL */
	unsigned long		os_status;		/* OS status to SAL, enum below */
	unsigned long		context;		/* 0 if return to same context
							   1 if return to new context */

	/* I-resources */
	unsigned long		iip;
	unsigned long		ipsr;
	unsigned long		ifs;
};

enum {
	IA64_MCA_CORRECTED	= 0x0,	/* Error has been corrected by OS_MCA */
	IA64_MCA_WARM_BOOT	= -1,	/* Warm boot of the system needed from SAL */
	IA64_MCA_COLD_BOOT	= -2,	/* Cold boot of the system needed from SAL */
	IA64_MCA_HALT		= -3	/* System to be halted by SAL */
};

enum {
	IA64_INIT_RESUME	= 0x0,	/* Resume after return from INIT */
	IA64_INIT_WARM_BOOT	= -1,	/* Warm boot of the system needed from SAL */
};

enum {
	IA64_MCA_SAME_CONTEXT	= 0x0,	/* SAL to return to same context */
	IA64_MCA_NEW_CONTEXT	= -1	/* SAL to return to new context */
};

/* Per-CPU MCA state that is too big for normal per-CPU variables.  */

struct ia64_mca_cpu {
	u64 mca_stack[KERNEL_STACK_SIZE/8];
	u64 init_stack[KERNEL_STACK_SIZE/8];
};

/* Array of physical addresses of each CPU's MCA area.  */
extern unsigned long __per_cpu_mca[NR_CPUS];

extern int cpe_vector;
extern int ia64_cpe_irq;
extern void ia64_mca_init(void);
extern void ia64_mca_irq_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
extern void ia64_init_handler(struct pt_regs *,
			      struct switch_stack *,
			      struct ia64_sal_os_state *);
extern void ia64_os_init_on_kdump(void);
extern void ia64_monarch_init_handler(void);
extern void ia64_slave_init_handler(void);
extern void ia64_mca_cmc_vector_setup(void);
extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
extern void ia64_unreg_MCA_extension(void);
extern unsigned long ia64_get_rnat(unsigned long *);
extern void ia64_set_psr_mc(void);
extern void ia64_mca_printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

struct ia64_mca_notify_die {
	struct ia64_sal_os_state *sos;
	int *monarch_cpu;
	int *data;
};

DECLARE_PER_CPU(u64, ia64_mca_pal_base);

#else	/* __ASSEMBLY__ */

#define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system needed from SAL */
#define IA64_MCA_HALT		-3	/* System to be halted by SAL */

#define IA64_INIT_RESUME	0x0	/* Resume after return from INIT */
#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */

#define IA64_MCA_SAME_CONTEXT	0x0	/* SAL to return to same context */
#define IA64_MCA_NEW_CONTEXT	-1	/* SAL to return to new context */

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_MCA_H */
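The cmcv_reg_t union above overlays a bitfield view on the raw 64-bit CMC vector register, and the cmcv_vector/cmcv_mask macros name the two fields the handlers touch. A small stand-alone sketch of that access pattern (not kernel code; the vector value is illustrative):

/* Sketch of cmcv_reg_t field access; mirrors the union defined above. */
#include <stdio.h>

typedef unsigned long long u64;

typedef union cmcv_reg_u {
	u64 cmcv_regval;
	struct {
		u64 cmcr_vector    : 8;
		u64 cmcr_reserved1 : 4;
		u64 cmcr_ignored1  : 1;
		u64 cmcr_reserved2 : 3;
		u64 cmcr_mask      : 1;
		u64 cmcr_ignored2  : 47;
	} cmcv_reg_s;
} cmcv_reg_t;

int main(void)
{
	cmcv_reg_t cmcv = { .cmcv_regval = 0 };

	cmcv.cmcv_reg_s.cmcr_vector = 0x1e;	/* deliver CMC on vector 0x1e */
	cmcv.cmcv_reg_s.cmcr_mask = 1;		/* ...but leave it masked for now */
	printf("raw register value: %#llx\n", cmcv.cmcv_regval);
	return 0;
}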
244
arch/ia64/include/asm/mca_asm.h
Normal file

@ -0,0 +1,244 @@
/*
 * File:	mca_asm.h
 * Purpose:	Machine check handling specific defines
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
 * Copyright (C) 2005 Silicon Graphics, Inc
 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
 */
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H

#include <asm/percpu.h>

#define PSR_IC		13
#define PSR_I		14
#define PSR_DT		17
#define PSR_RT		27
#define PSR_MC		35
#define PSR_IT		36
#define PSR_BN		44

/*
 * This macro converts an instruction virtual address to a physical address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define INST_VA_TO_PA(addr)	\
	dep	addr = 0, addr, 61, 3
/*
 * This macro converts a data virtual address to a physical address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define DATA_VA_TO_PA(addr)	\
	tpa	addr = addr
/*
 * This macro converts a data physical address to a virtual address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Put 0x7 in bits 61 thru 63.
 */
#define DATA_PA_TO_VA(addr,temp)	\
	mov	temp = 0x7 ;;		\
	dep	addr = temp, addr, 61, 3

#define GET_THIS_PADDR(reg, var)		\
	mov	reg = IA64_KR(PER_CPU_DATA);;	\
	addl	reg = THIS_CPU(var), reg

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in physical mode with all the address
 * translations turned off.
 *	1. Save the current psr
 *	2. Make sure that all the upper 32 bits are off
 *
 *	3. Clear the interrupt enable and interrupt state collection bits
 *	   in the psr before updating the ipsr and iip.
 *
 *	4. Turn off the instruction, data and rse translation bits of the psr
 *	   and store the new value into ipsr
 *	   Also make sure that the interrupts are disabled.
 *	   Ensure that we are in little endian mode.
 *	   [psr.{rt, it, dt, i, be} = 0]
 *
 *	5. Get the physical address corresponding to the virtual address
 *	   of the next instruction bundle and put it in iip.
 *	   (Using magic numbers 24 and 40 in the deposit instruction since
 *	   the IA64_SDK code directly maps to the lower 24 bits as a physical
 *	   address from a virtual address).
 *
 *	6. Do an rfi to move the values from ipsr to psr and iip to ip.
 */
#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
	mov	old_psr = psr;		\
	;;				\
	dep	old_psr = 0, old_psr, 32, 32;	\
					\
	mov	ar.rsc = 0 ;		\
	;;				\
	srlz.d;				\
	mov	temp2 = ar.bspstore;	\
	;;				\
	DATA_VA_TO_PA(temp2);		\
	;;				\
	mov	temp1 = ar.rnat;	\
	;;				\
	mov	ar.bspstore = temp2;	\
	;;				\
	mov	ar.rnat = temp1;	\
	mov	temp1 = psr;		\
	mov	temp2 = psr;		\
	;;				\
					\
	dep	temp2 = 0, temp2, PSR_IC, 2;	\
	;;				\
	mov	psr.l = temp2;		\
	;;				\
	srlz.d;				\
	dep	temp1 = 0, temp1, 32, 32;	\
	;;				\
	dep	temp1 = 0, temp1, PSR_IT, 1;	\
	;;				\
	dep	temp1 = 0, temp1, PSR_DT, 1;	\
	;;				\
	dep	temp1 = 0, temp1, PSR_RT, 1;	\
	;;				\
	dep	temp1 = 0, temp1, PSR_I, 1;	\
	;;				\
	dep	temp1 = 0, temp1, PSR_IC, 1;	\
	;;				\
	dep	temp1 = -1, temp1, PSR_MC, 1;	\
	;;				\
	mov	cr.ipsr = temp1;	\
	;;				\
	LOAD_PHYSICAL(p0, temp2, start_addr);	\
	;;				\
	mov	cr.iip = temp2;		\
	mov	cr.ifs = r0;		\
	DATA_VA_TO_PA(sp);		\
	DATA_VA_TO_PA(gp);		\
	;;				\
	srlz.i;				\
	;;				\
	nop	1;			\
	nop	2;			\
	nop	1;			\
	nop	2;			\
	rfi;				\
	;;

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in virtual mode with all the address
 * translations turned on.
 *	1. Get the old saved psr
 *
 *	2. Clear the interrupt state collection bit in the current psr.
 *
 *	3. Set the instruction translation bit back in the old psr
 *	   Note we have to do this since we are right now saving only the
 *	   lower 32-bits of old psr.  (Also the old psr has the data and
 *	   rse translation bits on)
 *
 *	4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
 *
 *	5. Reset the current thread pointer (r13).
 *
 *	6. Set iip to the virtual address of the next instruction bundle.
 *
 *	7. Do an rfi to move ipsr to psr and iip to ip.
 */

#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
	mov	temp2 = psr;		\
	;;				\
	mov	old_psr = temp2;	\
	;;				\
	dep	temp2 = 0, temp2, PSR_IC, 2;	\
	;;				\
	mov	psr.l = temp2;		\
	mov	ar.rsc = 0;		\
	;;				\
	srlz.d;				\
	mov	r13 = ar.k6;		\
	mov	temp2 = ar.bspstore;	\
	;;				\
	DATA_PA_TO_VA(temp2,temp1);	\
	;;				\
	mov	temp1 = ar.rnat;	\
	;;				\
	mov	ar.bspstore = temp2;	\
	;;				\
	mov	ar.rnat = temp1;	\
	;;				\
	mov	temp1 = old_psr;	\
	;;				\
	mov	temp2 = 1;		\
	;;				\
	dep	temp1 = temp2, temp1, PSR_IC, 1;	\
	;;				\
	dep	temp1 = temp2, temp1, PSR_IT, 1;	\
	;;				\
	dep	temp1 = temp2, temp1, PSR_DT, 1;	\
	;;				\
	dep	temp1 = temp2, temp1, PSR_RT, 1;	\
	;;				\
	dep	temp1 = temp2, temp1, PSR_BN, 1;	\
	;;				\
					\
	mov	cr.ipsr = temp1;	\
	movl	temp2 = start_addr;	\
	;;				\
	mov	cr.iip = temp2;		\
	movl	gp = __gp		\
	;;				\
	DATA_PA_TO_VA(sp, temp1);	\
	srlz.i;				\
	;;				\
	nop	1;			\
	nop	2;			\
	nop	1;			\
	rfi				\
	;;

/*
 * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
 * stacks, except that the SAL/OS state and a switch_stack are stored near the
 * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
 * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
 * are 16 byte aligned.
 *
 *	+---------------------------+
 *	|          pt_regs          |
 *	+---------------------------+
 *	|        switch_stack       |
 *	+---------------------------+
 *	|        SAL/OS state       |
 *	+---------------------------+
 *	|    16 byte scratch area   |
 *	+---------------------------+ <-------- SP at start of C MCA handler
 *	|           .....           |
 *	+---------------------------+
 *	| RBS for MCA/INIT handler  |
 *	+---------------------------+
 *	| struct task for MCA/INIT  |
 *	+---------------------------+ <-------- Bottom of MCA/INIT stack
 */

#define ALIGN16(x)			((x)&~15)
#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)

#endif /* _ASM_IA64_MCA_ASM_H */
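The MCA_*_OFFSET macros above carve the stack-layout diagram out of a single kernel stack by walking down from the top, rounding each boundary to 16 bytes with ALIGN16. A worked sketch of that arithmetic follows; the structure sizes are hypothetical stand-ins (the real IA64_* constants come from asm-offsets generation):

/* Sketch of the MCA stack offset arithmetic, with assumed sizes. */
#include <stdio.h>

#define ALIGN16(x)	((x) & ~15UL)

int main(void)
{
	unsigned long kernel_stack_size = 32768;	/* assumed 32 KiB stack */
	unsigned long pt_regs_size = 400;		/* hypothetical */
	unsigned long switch_stack_size = 560;		/* hypothetical */

	unsigned long pt_regs_off = ALIGN16(kernel_stack_size - pt_regs_size);
	unsigned long switch_stack_off = ALIGN16(pt_regs_off - switch_stack_size);

	/* Each region starts on a 16-byte boundary, as the diagram requires. */
	printf("pt_regs at %#lx, switch_stack at %#lx\n",
	       pt_regs_off, switch_stack_off);
	return 0;
}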
74
arch/ia64/include/asm/meminit.h
Normal file

@ -0,0 +1,74 @@
#ifndef meminit_h
#define meminit_h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */


/*
 * Entries defined so far:
 *	- boot param structure itself
 *	- memory map
 *	- initrd (optional)
 *	- command line string
 *	- kernel code & data
 *	- crash dumping code reserved region
 *	- Kernel memory map built from EFI memory map
 *	- ELF core header
 *
 * More could be added if necessary
 */
#define IA64_MAX_RSVD_REGIONS	9

struct rsvd_region {
	u64 start;	/* virtual address of beginning of element */
	u64 end;	/* virtual address of end of element + 1 */
};

extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
extern int num_rsvd_regions;

extern void find_memory (void);
extern void reserve_memory (void);
extern void find_initrd (void);
extern int filter_rsvd_memory (u64 start, u64 end, void *arg);
extern int filter_memory (u64 start, u64 end, void *arg);
extern unsigned long efi_memmap_init(u64 *s, u64 *e);
extern int find_max_min_low_pfn (u64, u64, void *);

extern unsigned long vmcore_find_descriptor_size(unsigned long address);
extern int reserve_elfcorehdr(u64 *start, u64 *end);

/*
 * For rounding an address to the next IA64_GRANULE_SIZE or order
 */
#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))

#ifdef CONFIG_NUMA
  extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
#else
# define call_pernode_memory(start, len, func)	(*func)(start, len, 0)
#endif

#define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */

extern int register_active_ranges(u64 start, u64 len, int nid);

#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP	0x40000000	/* Use virtual mem map if hole is > than this */
  extern unsigned long VMALLOC_END;
  extern struct page *vmem_map;
  extern int find_largest_hole(u64 start, u64 end, void *arg);
  extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
  extern int vmemmap_find_next_valid_pfn(int, int);
#else
static inline int vmemmap_find_next_valid_pfn(int node, int i)
{
	return i + 1;
}
#endif
#endif /* meminit_h */
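GRANULEROUNDDOWN/GRANULEROUNDUP reduce to simple masking because the granule size is a power of two. A quick sketch, assuming a 16 MiB IA64_GRANULE_SIZE (the real value is Kconfig-dependent, so this constant is an assumption):

/* Sketch of granule rounding; GRANULE_SIZE here is an assumed value. */
#include <stdio.h>

#define GRANULE_SIZE		(16UL << 20)	/* assumed: 16 MiB */
#define GRANULEROUNDDOWN(n)	((n) & ~(GRANULE_SIZE - 1))
#define GRANULEROUNDUP(n)	(((n) + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1234567;

	/* down: previous 16 MiB boundary; up: next 16 MiB boundary */
	printf("down: %#lx, up: %#lx\n",
	       GRANULEROUNDDOWN(addr), GRANULEROUNDUP(addr));
	return 0;
}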
17
arch/ia64/include/asm/mman.h
Normal file

@ -0,0 +1,17 @@
/*
 * Based on <asm-i386/mman.h>.
 *
 * Modified 1998-2000, 2002
 *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
 */
#ifndef _ASM_IA64_MMAN_H
#define _ASM_IA64_MMAN_H

#include <uapi/asm/mman.h>

#ifndef __ASSEMBLY__
#define arch_mmap_check	ia64_mmap_check
int ia64_mmap_check(unsigned long addr, unsigned long len,
		unsigned long flags);
#endif
#endif /* _ASM_IA64_MMAN_H */
13
arch/ia64/include/asm/mmu.h
Normal file

@ -0,0 +1,13 @@
#ifndef __MMU_H
#define __MMU_H

/*
 * Type for a context number.  We declare it volatile to ensure proper
 * ordering when it's accessed outside of spinlock'd critical sections
 * (e.g., as done in activate_mm() and init_new_context()).
 */
typedef volatile unsigned long mm_context_t;

typedef unsigned long nv_mm_context_t;

#endif
198
arch/ia64/include/asm/mmu_context.h
Normal file

@ -0,0 +1,198 @@
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0	/* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below.  Called by activate_mm().  <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
#  error "reload_context assumes RGN_HPAGE is 4"
# endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */
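The ia64_rid() macro above packs the context number into the upper bits of the region id and uses virtual-address bits 61-63 (the region number) as the low three bits, so each of a process's eight regions gets a distinct rid. A small sketch of that packing, with an illustrative context number:

/* Sketch of the ia64_rid() packing; mirrors the macro defined above. */
#include <stdio.h>

#define ia64_rid(ctx, addr)	(((ctx) << 3) | ((addr) >> 61))

int main(void)
{
	unsigned long ctx = 42;				/* illustrative context */
	unsigned long uaddr = 0x2000000000000000UL;	/* an address in region 1 */

	printf("rid = %#lx\n", ia64_rid(ctx, uaddr));	/* (42 << 3) | 1 = 0x151 */
	return 0;
}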
42
arch/ia64/include/asm/mmzone.h
Normal file

@ -0,0 +1,42 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000,2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 */
#ifndef _ASM_IA64_MMZONE_H
#define _ASM_IA64_MMZONE_H

#include <linux/numa.h>
#include <asm/page.h>
#include <asm/meminit.h>

#ifdef CONFIG_NUMA

static inline int pfn_to_nid(unsigned long pfn)
{
	extern int paddr_to_nid(unsigned long);
	int nid = paddr_to_nid(pfn << PAGE_SHIFT);
	if (nid < 0)
		return 0;
	else
		return nid;
}

#ifdef CONFIG_IA64_DIG /* DIG systems are small */
# define MAX_PHYSNODE_ID	8
# define NR_NODE_MEMBLKS	(MAX_NUMNODES * 8)
#else /* sn2 is the biggest case, so we use that if !DIG */
# define MAX_PHYSNODE_ID	2048
# define NR_NODE_MEMBLKS	(MAX_NUMNODES * 4)
#endif

#else /* CONFIG_NUMA */
# define NR_NODE_MEMBLKS	(MAX_NUMNODES * 4)
#endif /* CONFIG_NUMA */

#endif /* _ASM_IA64_MMZONE_H */
40
arch/ia64/include/asm/module.h
Normal file

@ -0,0 +1,40 @@
#ifndef _ASM_IA64_MODULE_H
#define _ASM_IA64_MODULE_H

#include <asm-generic/module.h>

/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

struct elf64_shdr;			/* forward declaration */

struct mod_arch_specific {
	struct elf64_shdr *core_plt;	/* core PLT section */
	struct elf64_shdr *init_plt;	/* init PLT section */
	struct elf64_shdr *got;		/* global offset table */
	struct elf64_shdr *opd;		/* official procedure descriptors */
	struct elf64_shdr *unwind;	/* unwind-table section */
#ifdef CONFIG_PARAVIRT
	struct elf64_shdr *paravirt_bundles;
					/* paravirt_alt_bundle_patch table */
	struct elf64_shdr *paravirt_insts;
					/* paravirt_alt_inst_patch table */
#endif
	unsigned long gp;		/* global-pointer for module */

	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
	void *init_unw_table;		/* init unwind-table cookie returned by unwinder */
	unsigned int next_got_entry;	/* index of next available got entry */
};

#define MODULE_PROC_FAMILY	"ia64"
#define MODULE_ARCH_VERMAGIC	MODULE_PROC_FAMILY \
	"gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)

#define ARCH_SHF_SMALL	SHF_IA_64_SHORT

#endif /* _ASM_IA64_MODULE_H */
42
arch/ia64/include/asm/msidef.h
Normal file

@ -0,0 +1,42 @@
#ifndef _IA64_MSI_DEF_H
#define _IA64_MSI_DEF_H

/*
 * Shifts for APIC-based data
 */

#define MSI_DATA_VECTOR_SHIFT		0
#define MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK		0xffffff00

#define MSI_DATA_DELIVERY_MODE_SHIFT	8
#define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_MODE_SHIFT)

#define MSI_DATA_LEVEL_SHIFT		14
#define MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)

#define MSI_DATA_TRIGGER_SHIFT		15
#define MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
#define MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)

/*
 * Shift/mask fields for APIC-based bus address
 */

#define MSI_ADDR_DEST_ID_SHIFT		4
#define MSI_ADDR_HEADER			0xfee00000

#define MSI_ADDR_DEST_ID_MASK		0xfff0000f
#define MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)

#define MSI_ADDR_DEST_MODE_SHIFT	2
#define MSI_ADDR_DEST_MODE_PHYS		(0 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_DEST_MODE_LOGIC	(1 << MSI_ADDR_DEST_MODE_SHIFT)

#define MSI_ADDR_REDIRECTION_SHIFT	3
#define MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
#define MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)

#endif /* _IA64_MSI_DEF_H */
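These definitions compose into an MSI address/data pair by OR-ing one choice from each field group onto the fixed 0xfee00000 address header. A stand-alone sketch composing fixed delivery of an illustrative vector to an illustrative CPU id, edge triggered, physical destination mode (the macros are copied locally so the sketch compiles on its own):

/* Sketch composing an MSI address/data pair from the definitions above. */
#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

#define MSI_DATA_VECTOR_SHIFT		0
#define MSI_DATA_VECTOR(v)		(((u8)(v)) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_DELIVERY_MODE_SHIFT	8
#define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_TRIGGER_SHIFT		15
#define MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
#define MSI_ADDR_HEADER			0xfee00000u
#define MSI_ADDR_DEST_ID_SHIFT		4
#define MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)
#define MSI_ADDR_DEST_MODE_SHIFT	2
#define MSI_ADDR_DEST_MODE_PHYS		(0 << MSI_ADDR_DEST_MODE_SHIFT)

int main(void)
{
	u32 data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_DELIVERY_FIXED |
		   MSI_DATA_VECTOR(0x41);	/* illustrative vector */
	u32 addr = MSI_ADDR_HEADER | MSI_ADDR_DEST_MODE_PHYS |
		   MSI_ADDR_DEST_ID_CPU(3);	/* illustrative CPU id */

	printf("msi addr=%#x data=%#x\n", addr, data);	/* 0xfee00030, 0x41 */
	return 0;
}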
90
arch/ia64/include/asm/mutex.h
Normal file

@ -0,0 +1,90 @@
/*
 * ia64 implementation of the mutex fastpath.
 *
 * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
 *
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally.  This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
		fail_fn(count);
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1.  This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
		return -1;
	return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1.  If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value of lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
 * to return 0 otherwise.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int ret = ia64_fetchadd4_rel(count, 1);
	if (unlikely(ret < 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise.  This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it to 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (cmpxchg_acq(count, 1, 0) == 1)
		return 1;
	return 0;
}

#endif
arch/ia64/include/asm/native/inst.h
Normal file
194
arch/ia64/include/asm/native/inst.h
Normal file
|
@ -0,0 +1,194 @@
|
|||
/******************************************************************************
|
||||
* arch/ia64/include/asm/native/inst.h
|
||||
*
|
||||
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
|
||||
* VA Linux Systems Japan K.K.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*/
|
||||
|
||||
#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN
|
||||
|
||||
#define __paravirt_switch_to ia64_native_switch_to
|
||||
#define __paravirt_leave_syscall ia64_native_leave_syscall
|
||||
#define __paravirt_work_processed_syscall ia64_native_work_processed_syscall
|
||||
#define __paravirt_leave_kernel ia64_native_leave_kernel
|
||||
#define __paravirt_pending_syscall_end ia64_work_pending_syscall_end
|
||||
#define __paravirt_work_processed_syscall_target \
|
||||
ia64_work_processed_syscall
|
||||
|
||||
#define paravirt_fsyscall_table ia64_native_fsyscall_table
|
||||
#define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down
|
||||
|
||||
#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
|
||||
# define PARAVIRT_POISON 0xdeadbeefbaadf00d
|
||||
# define CLOBBER(clob) \
|
||||
;; \
|
||||
movl clob = PARAVIRT_POISON; \
|
||||
;;
|
||||
# define CLOBBER_PRED(pred_clob) \
|
||||
;; \
|
||||
cmp.eq pred_clob, p0 = r0, r0 \
|
||||
;;
|
||||
#else
|
||||
# define CLOBBER(clob) /* nothing */
|
||||
# define CLOBBER_PRED(pred_clob) /* nothing */
|
||||
#endif
|
||||
|
||||
#define MOV_FROM_IFA(reg) \
|
||||
mov reg = cr.ifa
|
||||
|
||||
#define MOV_FROM_ITIR(reg) \
|
||||
mov reg = cr.itir
|
||||
|
||||
#define MOV_FROM_ISR(reg) \
|
||||
mov reg = cr.isr
|
||||
|
||||
#define MOV_FROM_IHA(reg) \
|
||||
mov reg = cr.iha
|
||||
|
||||
#define MOV_FROM_IPSR(pred, reg) \
|
||||
(pred) mov reg = cr.ipsr
|
||||
|
||||
#define MOV_FROM_IIM(reg) \
|
||||
mov reg = cr.iim
|
||||
|
||||
#define MOV_FROM_IIP(reg) \
|
||||
mov reg = cr.iip
|
||||
|
||||
#define MOV_FROM_IVR(reg, clob) \
|
||||
mov reg = cr.ivr \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_FROM_PSR(pred, reg, clob) \
|
||||
(pred) mov reg = psr \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
|
||||
(pred) mov reg = ar.itc \
|
||||
CLOBBER(clob) \
|
||||
CLOBBER_PRED(pred_clob)
|
||||
|
||||
#define MOV_TO_IFA(reg, clob) \
|
||||
mov cr.ifa = reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_TO_ITIR(pred, reg, clob) \
|
||||
(pred) mov cr.itir = reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_TO_IHA(pred, reg, clob) \
|
||||
(pred) mov cr.iha = reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_TO_IPSR(pred, reg, clob) \
|
||||
(pred) mov cr.ipsr = reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_TO_IFS(pred, reg, clob) \
|
||||
(pred) mov cr.ifs = reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_TO_IIP(reg, clob) \
|
||||
mov cr.iip = reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define MOV_TO_KR(kr, reg, clob0, clob1) \
|
||||
mov IA64_KR(kr) = reg \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
|
||||
#define ITC_I(pred, reg, clob) \
|
||||
(pred) itc.i reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define ITC_D(pred, reg, clob) \
|
||||
(pred) itc.d reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
|
||||
(pred_i) itc.i reg; \
|
||||
(pred_d) itc.d reg \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define THASH(pred, reg0, reg1, clob) \
|
||||
(pred) thash reg0 = reg1 \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
|
||||
ssm psr.ic | PSR_DEFAULT_BITS \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
;; \
|
||||
srlz.i /* guarantee that interruption collectin is on */ \
|
||||
;;
|
||||
|
||||
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
|
||||
ssm psr.ic \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
;; \
|
||||
srlz.d
|
||||
|
||||
#define RSM_PSR_IC(clob) \
|
||||
rsm psr.ic \
|
||||
CLOBBER(clob)
|
||||
|
||||
#define SSM_PSR_I(pred, pred_clob, clob) \
|
||||
(pred) ssm psr.i \
|
||||
CLOBBER(clob) \
|
||||
CLOBBER_PRED(pred_clob)
|
||||
|
||||
#define RSM_PSR_I(pred, clob0, clob1) \
|
||||
(pred) rsm psr.i \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
|
||||
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
|
||||
rsm psr.i | psr.ic \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
CLOBBER(clob2)
|
||||
|
||||
#define RSM_PSR_DT \
|
||||
rsm psr.dt
|
||||
|
||||
#define RSM_PSR_BE_I(clob0, clob1) \
|
||||
rsm psr.be | psr.i \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
|
||||
#define SSM_PSR_DT_AND_SRLZ_I \
|
||||
ssm psr.dt \
|
||||
;; \
|
||||
srlz.i
|
||||
|
||||
#define BSW_0(clob0, clob1, clob2) \
|
||||
bsw.0 \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
CLOBBER(clob2)
|
||||
|
||||
#define BSW_1(clob0, clob1) \
|
||||
bsw.1 \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
|
||||
#define COVER \
|
||||
cover
|
||||
|
||||
#define RFI \
|
||||
rfi
|
33
arch/ia64/include/asm/native/irq.h
Normal file

@ -0,0 +1,33 @@
/******************************************************************************
 * arch/ia64/include/asm/native/irq.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_IA64_NATIVE_IRQ_H
#define _ASM_IA64_NATIVE_IRQ_H

#define NR_VECTORS	256

#if (NR_VECTORS + 32 * NR_CPUS) < 1024
#define IA64_NATIVE_NR_IRQS	(NR_VECTORS + 32 * NR_CPUS)
#else
#define IA64_NATIVE_NR_IRQS	1024
#endif

#endif /* _ASM_IA64_NATIVE_IRQ_H */
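The IA64_NATIVE_NR_IRQS sizing above is the vector space plus 32 IRQs per CPU, capped at 1024. A quick check of that rule for a few hypothetical NR_CPUS values:

/* Sketch of the IA64_NATIVE_NR_IRQS sizing rule; CPU counts are illustrative. */
#include <stdio.h>

#define NR_VECTORS 256

static int native_nr_irqs(int nr_cpus)
{
	int n = NR_VECTORS + 32 * nr_cpus;
	return n < 1024 ? n : 1024;	/* capped at 1024, as in the header */
}

int main(void)
{
	printf("%d %d %d\n",
	       native_nr_irqs(4),	/* 384 */
	       native_nr_irqs(16),	/* 768 */
	       native_nr_irqs(64));	/* 2304 -> capped to 1024 */
	return 0;
}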
38
arch/ia64/include/asm/native/patchlist.h
Normal file

@ -0,0 +1,38 @@
/******************************************************************************
 * arch/ia64/include/asm/native/patchlist.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#define __paravirt_start_gate_fsyscall_patchlist		\
	__ia64_native_start_gate_fsyscall_patchlist
#define __paravirt_end_gate_fsyscall_patchlist			\
	__ia64_native_end_gate_fsyscall_patchlist
#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist	\
	__ia64_native_start_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist	\
	__ia64_native_end_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_start_gate_vtop_patchlist			\
	__ia64_native_start_gate_vtop_patchlist
#define __paravirt_end_gate_vtop_patchlist			\
	__ia64_native_end_gate_vtop_patchlist
#define __paravirt_start_gate_mckinley_e9_patchlist		\
	__ia64_native_start_gate_mckinley_e9_patchlist
#define __paravirt_end_gate_mckinley_e9_patchlist		\
	__ia64_native_end_gate_mckinley_e9_patchlist
arch/ia64/include/asm/native/pvchk_inst.h
Normal file
271
arch/ia64/include/asm/native/pvchk_inst.h
Normal file
|
@ -0,0 +1,271 @@
|
|||
#ifndef _ASM_NATIVE_PVCHK_INST_H
|
||||
#define _ASM_NATIVE_PVCHK_INST_H
|
||||
|
||||
/******************************************************************************
|
||||
* arch/ia64/include/asm/native/pvchk_inst.h
|
||||
* Checker for paravirtualizations of privileged operations.
|
||||
*
|
||||
* Copyright (C) 2005 Hewlett-Packard Co
|
||||
* Dan Magenheimer <dan.magenheimer@hp.com>
|
||||
*
|
||||
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
|
||||
* VA Linux Systems Japan K.K.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*/
|
||||
|
||||
/**********************************************
|
||||
* Instructions paravirtualized for correctness
|
||||
**********************************************/
|
||||
|
||||
/* "fc" and "thash" are privilege-sensitive instructions, meaning they
|
||||
* may have different semantics depending on whether they are executed
|
||||
* at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
|
||||
* be allowed to execute directly, lest incorrect semantics result.
|
||||
*/
|
||||
|
||||
#define fc .error "fc should not be used directly."
|
||||
#define thash .error "thash should not be used directly."
|
||||
|
||||
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
|
||||
* is not currently used (though it may be in a long-format VHPT system!)
|
||||
* and the semantics of cover only change if psr.ic is off which is very
|
||||
* rare (and currently non-existent outside of assembly code
|
||||
*/
|
||||
#define ttag .error "ttag should not be used directly."
|
||||
#define cover .error "cover should not be used directly."
|
||||
|
||||
/* There are also privilege-sensitive registers. These registers are
|
||||
* readable at any privilege level but only writable at PL0.
|
||||
*/
|
||||
#define cpuid .error "cpuid should not be used directly."
|
||||
#define pmd .error "pmd should not be used directly."
|
||||
|
||||
/*
|
||||
* mov ar.eflag =
|
||||
* mov = ar.eflag
|
||||
*/
|
||||
|
||||
/**********************************************
|
||||
* Instructions paravirtualized for performance
|
||||
**********************************************/
|
||||
/*
|
||||
* Those instructions include '.' which can't be handled by cpp.
|
||||
* or can't be handled by cpp easily.
|
||||
* They are handled by sed instead of cpp.
|
||||
*/
|
||||
|
||||
/* for .S
|
||||
* itc.i
|
||||
* itc.d
|
||||
*
|
||||
* bsw.0
|
||||
* bsw.1
|
||||
*
|
||||
* ssm psr.ic | PSR_DEFAULT_BITS
|
||||
* ssm psr.ic
|
||||
* rsm psr.ic
|
||||
* ssm psr.i
|
||||
* rsm psr.i
|
||||
* rsm psr.i | psr.ic
|
||||
* rsm psr.dt
|
||||
* ssm psr.dt
|
||||
*
|
||||
* mov = cr.ifa
|
||||
* mov = cr.itir
|
||||
* mov = cr.isr
|
||||
* mov = cr.iha
|
||||
* mov = cr.ipsr
|
||||
* mov = cr.iim
|
||||
* mov = cr.iip
|
||||
* mov = cr.ivr
|
||||
* mov = psr
|
||||
*
|
||||
* mov cr.ifa =
|
||||
* mov cr.itir =
|
||||
* mov cr.iha =
|
||||
* mov cr.ipsr =
|
||||
* mov cr.ifs =
|
||||
* mov cr.iip =
|
||||
* mov cr.kr =
|
||||
*/
|
||||
|
||||
/* for intrinsics
|
||||
* ssm psr.i
|
||||
* rsm psr.i
|
||||
* mov = psr
|
||||
* mov = ivr
|
||||
* mov = tpr
|
||||
* mov cr.itm =
|
||||
* mov eoi =
|
||||
* mov rr[] =
|
||||
* mov = rr[]
|
||||
* mov = kr
|
||||
* mov kr =
|
||||
* ptc.ga
|
||||
*/
|
||||
|
||||
/*************************************************************
|
||||
* define paravirtualized instrcution macros as nop to ingore.
|
||||
* and check whether arguments are appropriate.
|
||||
*************************************************************/
|
||||
|
||||
/* check whether reg is a regular register */
|
||||
.macro is_rreg_in reg
|
||||
.ifc "\reg", "r0"
|
||||
nop 0
|
||||
.exitm
|
||||
.endif
|
||||
;;
|
||||
mov \reg = r0
|
||||
;;
|
||||
.endm
|
||||
#define IS_RREG_IN(reg) is_rreg_in reg ;
|
||||
|
||||
#define IS_RREG_OUT(reg) \
|
||||
;; \
|
||||
mov reg = r0 \
|
||||
;;
|
||||
|
||||
#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
|
||||
|
||||
/* check whether pred is a predicate register */
|
||||
#define IS_PRED_IN(pred) \
|
||||
;; \
|
||||
(pred) nop 0 \
|
||||
;;
|
||||
|
||||
#define IS_PRED_OUT(pred) \
|
||||
;; \
|
||||
cmp.eq pred, p0 = r0, r0 \
|
||||
;;
|
||||
|
||||
#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
|
||||
|
||||
|
||||
#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
|
||||
nop 0
|
||||
#define MOV_FROM_IFA(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_ITIR(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_ISR(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IHA(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IPSR(pred, reg) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IIM(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IIP(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IVR(reg, clob) \
|
||||
IS_RREG_OUT(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_FROM_PSR(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_OUT(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_PRED_CLOB(pred_clob) \
|
||||
IS_RREG_OUT(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IFA(reg, clob) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_ITIR(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IHA(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IPSR(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IFS(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IIP(reg, clob) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_KR(kr, reg, clob0, clob1) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define ITC_I(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define ITC_D(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
|
||||
IS_PRED_IN(pred_i) \
|
||||
IS_PRED_IN(pred_d) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define THASH(pred, reg0, reg1, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_OUT(reg0) \
|
||||
IS_RREG_IN(reg1) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define RSM_PSR_IC(clob) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define SSM_PSR_I(pred, pred_clob, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_PRED_CLOB(pred_clob) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define RSM_PSR_I(pred, clob0, clob1) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1) \
|
||||
IS_RREG_CLOB(clob2)
|
||||
#define RSM_PSR_DT \
|
||||
nop 0
|
||||
#define RSM_PSR_BE_I(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define SSM_PSR_DT_AND_SRLZ_I \
|
||||
nop 0
|
||||
#define BSW_0(clob0, clob1, clob2) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1) \
|
||||
IS_RREG_CLOB(clob2)
|
||||
#define BSW_1(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define COVER \
|
||||
nop 0
|
||||
#define RFI \
|
||||
br.ret.sptk.many rp /* defining nop causes dependency error */
|
||||
|
||||
#endif /* _ASM_NATIVE_PVCHK_INST_H */
|
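The .error redefinitions above turn any direct use of fc, thash, ttag, cover, cpuid or pmd into a hard assembly-time failure. C has an analogous build-time ban in GCC's poison pragma; the stand-alone sketch below (not kernel code, identifiers chosen for the demo) shows the same technique applied to a C identifier.

/* Stand-alone illustration, GCC-specific: after the pragma, any use of
 * the identifier "gets" is a hard compile-time error, just as "fc" or
 * "thash" would be a hard assembly-time error after the #defines above. */
#include <stdio.h>
#include <string.h>

#pragma GCC poison gets

static void read_line(char *buf, size_t len)
{
	/* gets(buf);   <- would fail: "attempt to use poisoned" identifier */
	if (fgets(buf, len, stdin))
		buf[strcspn(buf, "\n")] = '\0';	/* strip the newline */
}

int main(void)
{
	char line[64];

	read_line(line, sizeof(line));
	printf("%s\n", line);
	return 0;
}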
63
arch/ia64/include/asm/nodedata.h
Normal file
@@ -0,0 +1,63 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 */
#ifndef _ASM_IA64_NODEDATA_H
#define _ASM_IA64_NODEDATA_H

#include <linux/numa.h>

#include <asm/percpu.h>
#include <asm/mmzone.h>

#ifdef CONFIG_NUMA

/*
 * Node Data. One of these structures is located on each node of a NUMA system.
 */

struct pglist_data;
struct ia64_node_data {
	short	active_cpu_count;
	short	node;
	struct pglist_data *pg_data_ptrs[MAX_NUMNODES];
};


/*
 * Return a pointer to the node_data structure for the executing cpu.
 */
#define local_node_data		(local_cpu_data->node_data)

/*
 * Given a node id, return a pointer to the pg_data_t for the node.
 *
 * NODE_DATA 	- should be used in all code not related to system
 *		  initialization. It uses pernode data structures to minimize
 *		  offnode memory references. However, these structures are not
 *		  present during boot. This macro can be used once cpu_init
 *		  completes.
 */
#define NODE_DATA(nid)		(local_node_data->pg_data_ptrs[nid])

/*
 * LOCAL_DATA_ADDR - This is to calculate the address of another node's
 *		     "local_node_data" at the hot-plug phase. local_node_data
 *		     is pointed to by per_cpu_page, and the kernel usually
 *		     uses it only for the executing cpu. However, when a new
 *		     node is hot-added, the addresses of the local data for
 *		     the other nodes are necessary to update all of them.
 */
#define LOCAL_DATA_ADDR(pgdat)  			\
	((struct ia64_node_data *)((u64)(pgdat) + 	\
				   L1_CACHE_ALIGN(sizeof(struct pglist_data))))

#endif /* CONFIG_NUMA */

#endif /* _ASM_IA64_NODEDATA_H */
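LOCAL_DATA_ADDR() works because each node's ia64_node_data is laid out immediately behind its cache-aligned pglist_data. A minimal sketch of its use (hypothetical helper, kernel context assumed; pgdat_list[] comes from asm/numa.h):

/* Hypothetical hot-add helper: propagate a new node's pg_data pointer
 * into the per-node copies of pg_data_ptrs[] kept on every node. */
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/nodedata.h>
#include <asm/numa.h>

static void __init update_pgdat_ptr_everywhere(int new_nid,
					       struct pglist_data *new_pgdat)
{
	int nid;

	for_each_online_node(nid) {
		/* reach node nid's ia64_node_data behind its pglist_data */
		struct ia64_node_data *ndata =
			LOCAL_DATA_ADDR(pgdat_list[nid]);

		ndata->pg_data_ptrs[new_nid] = new_pgdat;
	}
}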
79
arch/ia64/include/asm/numa.h
Normal file
@@ -0,0 +1,79 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific prototypes and definitions.
 *
 * 2002/08/05 Erich Focht <efocht@ess.nec.de>
 *
 */
#ifndef _ASM_IA64_NUMA_H
#define _ASM_IA64_NUMA_H


#ifdef CONFIG_NUMA

#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/numa.h>
#include <linux/smp.h>
#include <linux/threads.h>

#include <asm/mmzone.h>

extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];

/* Stuff below this line could be architecture independent */

extern int num_node_memblks;		/* total number of memory chunks */

/*
 * List of node memory chunks. Filled when parsing the SRAT table to
 * obtain information about memory nodes.
 */

struct node_memblk_s {
	unsigned long start_paddr;
	unsigned long size;
	int nid;		/* which logical node contains this chunk? */
	int bank;		/* which mem bank on this node */
};

struct node_cpuid_s {
	u16	phys_id;	/* id << 8 | eid */
	int	nid;		/* logical node containing this CPU */
};

extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
extern struct node_cpuid_s node_cpuid[NR_CPUS];

/*
 * ACPI 2.0 SLIT (System Locality Information Table)
 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
 *
 * This is a matrix with "distances" between nodes; they should be
 * proportional to the memory access latency ratios.
 */

extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
#define node_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])

extern int paddr_to_nid(unsigned long paddr);

#define local_nodeid (cpu_to_node_map[smp_processor_id()])

extern void map_cpu_to_node(int cpu, int nid);
extern void unmap_cpu_from_node(int cpu, int nid);
extern void numa_clear_node(int cpu);

#else /* !CONFIG_NUMA */
#define map_cpu_to_node(cpu, nid)	do{}while(0)
#define unmap_cpu_from_node(cpu, nid)	do{}while(0)
#define paddr_to_nid(addr)	0
#define numa_clear_node(cpu)	do { } while (0)
#endif /* CONFIG_NUMA */

#endif /* _ASM_IA64_NUMA_H */
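node_distance() above is just a row-major lookup into the SLIT matrix. A minimal sketch of how such a distance matrix gets used (hypothetical helper, kernel context assumed):

/* Hypothetical helper: pick the online node closest to "from" by SLIT
 * distance; "from" itself wins when online, since d(n,n) is minimal. */
#include <linux/kernel.h>	/* INT_MAX */
#include <linux/nodemask.h>
#include <asm/numa.h>

static int nearest_online_node(int from)
{
	int nid, best = from, best_dist = INT_MAX;

	for_each_online_node(nid) {
		int d = node_distance(from, nid);	/* numa_slit[] lookup */

		if (d < best_dist) {
			best_dist = d;
			best = nid;
		}
	}
	return best;
}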
236
arch/ia64/include/asm/page.h
Normal file
@@ -0,0 +1,236 @@
#ifndef _ASM_IA64_PAGE_H
#define _ASM_IA64_PAGE_H
/*
 * Pagetable related stuff.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/intrinsics.h>
#include <asm/types.h>

/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT	(61)
#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
#define RGN_BITS	(RGN_BASE(-1))

#define RGN_KERNEL	7	/* Identity mapped region */
#define RGN_UNCACHED	6	/* Identity mapped I/O region */
#define RGN_GATE	5	/* Gate page, Kernel text, etc */
#define RGN_HPAGE	4	/* For Huge TLB pages */

/*
 * PAGE_SHIFT determines the actual kernel page size.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error Unsupported page size!
#endif

#define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define PERCPU_PAGE_SHIFT	18	/* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)


#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
# define HPAGE_SHIFT		hpage_shift
# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK		(~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef __ASSEMBLY__
# define __pa(x)		((x) - PAGE_OFFSET)
# define __va(x)		((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY__ */
# define STRICT_MM_TYPECHECKS

extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)	\
do {						\
	clear_page(addr);			\
	flush_dcache_page(page);		\
} while (0)

#define copy_user_page(to, from, vaddr, page)	\
do {						\
	copy_page((to), (from));		\
	flush_dcache_page(page);		\
} while (0)


#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
({									\
	struct page *page = alloc_page_vma(				\
		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
	if (page)							\
		flush_dcache_page(page);				\
	page;								\
})

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#else
# define ia64_pfn_valid(pfn) 1
#endif

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern struct page *vmem_map;
#ifdef CONFIG_DISCONTIGMEM
# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn)	(vmem_map + (pfn))
#else
# include <asm-generic/memory_model.h>
#endif
#else
# include <asm-generic/memory_model.h>
#endif

#ifdef CONFIG_FLATMEM
# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
#endif

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg :  3;		/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;

/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})

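The ia64_va bitfield trick is worth seeing run: __pa() is a single store of 0 into the 3-bit region field, __va() a store of all-ones (region 7, RGN_KERNEL, i.e. PAGE_OFFSET). A stand-alone user-space re-creation, not kernel code:

/* Stand-alone demo of the union above: clearing/setting the top three
 * region bits converts between the region-7 identity mapping and the
 * physical address.  GCC statement expressions, as in the kernel macros. */
#include <stdio.h>

typedef union demo_va {
	struct {
		unsigned long off : 61;	/* intra-region offset */
		unsigned long reg : 3;	/* region number */
	} f;
	unsigned long l;
} demo_va;

#define demo_pa(x)  ({ demo_va _v; _v.l = (x); _v.f.reg = 0; _v.l; })
#define demo_va_(x) ({ demo_va _v; _v.l = (x); _v.f.reg = -1; _v.l; })

int main(void)
{
	unsigned long kaddr = 0xe000000000123456UL;	/* region-7 address */

	printf("virt %#lx -> phys %#lx\n", kaddr, demo_pa(kaddr));
	printf("phys %#lx -> virt %#lx\n", 0x123456UL, demo_va_(0x123456UL));
	return 0;
}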
#ifdef CONFIG_HUGETLB_PAGE
# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)		\
				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
extern unsigned int hpage_shift;
#endif

static __inline__ int
get_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - PAGE_SHIFT - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}

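get_order() above leans on ia64's getf.exp: it converts (size - 1) to long double and reads out the biased exponent (hence the 0xffff bias subtraction), giving ceil(log2(size)) - PAGE_SHIFT. A stand-alone integer equivalent for comparison, not kernel code, with a 16KB page size assumed only for the demo:

/* Portable comparison version: same result as the getf.exp trick. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT	14		/* assume 16KB pages for the demo */

static int demo_get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> DEMO_PAGE_SHIFT;
	while (size) {			/* log2 of the page count, rounded up */
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_get_order(1),		 /* 0: fits in one page */
	       demo_get_order(1UL << 14),	 /* 0: exactly one page */
	       demo_get_order((1UL << 14) + 1)); /* 1: needs two pages */
	return 0;
}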
#endif /* !__ASSEMBLY__ */

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
#ifdef CONFIG_PGTABLE_4
typedef struct { unsigned long pud; } pud_t;
#endif
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

# define pte_val(x)	((x).pte)
# define pmd_val(x)	((x).pmd)
#ifdef CONFIG_PGTABLE_4
# define pud_val(x)	((x).pud)
#endif
# define pgd_val(x)	((x).pgd)
# define pgprot_val(x)	((x).pgprot)

# define __pte(x)	((pte_t) { (x) } )
# define __pmd(x)	((pmd_t) { (x) } )
# define __pgprot(x)	((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
/*
 * .. while these make it easier on the compiler
 */
# ifndef __ASSEMBLY__
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
typedef struct page *pgtable_t;
# endif

# define pte_val(x)	(x)
# define pmd_val(x)	(x)
# define pgd_val(x)	(x)
# define pgprot_val(x)	(x)

# define __pte(x)	(x)
# define __pgd(x)	(x)
# define __pgprot(x)	(x)
#endif /* !STRICT_MM_TYPECHECKS */

#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)

#define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
					  ? VM_EXEC : 0))

#define GATE_ADDR		RGN_BASE(RGN_GATE)

/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)

#define __HAVE_ARCH_GATE_AREA	1

#endif /* _ASM_IA64_PAGE_H */
1825
arch/ia64/include/asm/pal.h
Normal file
File diff suppressed because it is too large
17
arch/ia64/include/asm/param.h
Normal file
@@ -0,0 +1,17 @@
/*
 * Fundamental kernel parameters.
 *
 * Based on <asm-i386/param.h>.
 *
 * Modified 1998, 1999, 2002-2003
 *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
 */
#ifndef _ASM_IA64_PARAM_H
#define _ASM_IA64_PARAM_H

#include <uapi/asm/param.h>

# define HZ		CONFIG_HZ
# define USER_HZ	HZ
# define CLOCKS_PER_SEC	HZ	/* frequency at which times() counts */
#endif /* _ASM_IA64_PARAM_H */
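USER_HZ above is the tick rate user space sees: it is the unit of the clock_t values that times() reports, exposed to programs as sysconf(_SC_CLK_TCK). A runnable user-space counterpart, not kernel code:

/* Stand-alone demo: convert times() ticks to seconds using the
 * USER_HZ value the kernel advertises via sysconf(_SC_CLK_TCK). */
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	long user_hz = sysconf(_SC_CLK_TCK);
	struct tms t;

	times(&t);
	printf("USER_HZ = %ld ticks/second\n", user_hz);
	printf("user cpu time so far: %.3f s\n",
	       (double)t.tms_utime / user_hz);
	return 0;
}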
321
arch/ia64/include/asm/paravirt.h
Normal file
@@ -0,0 +1,321 @@
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */


#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H

#ifndef __ASSEMBLY__
/******************************************************************************
 * fsys related addresses
 */
struct pv_fsys_data {
	unsigned long *fsyscall_table;
	void *fsys_bubble_down;
};

extern struct pv_fsys_data pv_fsys_data;

unsigned long *paravirt_get_fsyscall_table(void);
char *paravirt_get_fsys_bubble_down(void);

/******************************************************************************
 * patchlist addresses for gate page
 */
enum pv_gate_patchlist {
	PV_GATE_START_FSYSCALL,
	PV_GATE_END_FSYSCALL,

	PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
	PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,

	PV_GATE_START_VTOP,
	PV_GATE_END_VTOP,

	PV_GATE_START_MCKINLEY_E9,
	PV_GATE_END_MCKINLEY_E9,
};

struct pv_patchdata {
	unsigned long start_fsyscall_patchlist;
	unsigned long end_fsyscall_patchlist;
	unsigned long start_brl_fsys_bubble_down_patchlist;
	unsigned long end_brl_fsys_bubble_down_patchlist;
	unsigned long start_vtop_patchlist;
	unsigned long end_vtop_patchlist;
	unsigned long start_mckinley_e9_patchlist;
	unsigned long end_mckinley_e9_patchlist;

	void *gate_section;
};

extern struct pv_patchdata pv_patchdata;

unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
void *paravirt_get_gate_section(void);
#endif

#ifdef CONFIG_PARAVIRT_GUEST

#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0

#ifndef __ASSEMBLY__

#include <asm/hw_irq.h>
#include <asm/meminit.h>

/******************************************************************************
 * general info
 */
struct pv_info {
	unsigned int kernel_rpl;
	int paravirt_enabled;
	const char *name;
};

extern struct pv_info pv_info;

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline unsigned int get_kernel_rpl(void)
{
	return pv_info.kernel_rpl;
}

/******************************************************************************
 * initialization hooks.
 */
struct rsvd_region;

struct pv_init_ops {
	void (*banner)(void);

	int (*reserve_memory)(struct rsvd_region *region);

	void (*arch_setup_early)(void);
	void (*arch_setup_console)(char **cmdline_p);
	int (*arch_setup_nomca)(void);

	void (*post_smp_prepare_boot_cpu)(void);

#ifdef ASM_SUPPORTED
	unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
				      unsigned long type);
	unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
				    unsigned long type);
#endif
	void (*patch_branch)(unsigned long tag, unsigned long type);
};

extern struct pv_init_ops pv_init_ops;

static inline void paravirt_banner(void)
{
	if (pv_init_ops.banner)
		pv_init_ops.banner();
}

static inline int paravirt_reserve_memory(struct rsvd_region *region)
{
	if (pv_init_ops.reserve_memory)
		return pv_init_ops.reserve_memory(region);
	return 0;
}

static inline void paravirt_arch_setup_early(void)
{
	if (pv_init_ops.arch_setup_early)
		pv_init_ops.arch_setup_early();
}

static inline void paravirt_arch_setup_console(char **cmdline_p)
{
	if (pv_init_ops.arch_setup_console)
		pv_init_ops.arch_setup_console(cmdline_p);
}

static inline int paravirt_arch_setup_nomca(void)
{
	if (pv_init_ops.arch_setup_nomca)
		return pv_init_ops.arch_setup_nomca();
	return 0;
}

static inline void paravirt_post_smp_prepare_boot_cpu(void)
{
	if (pv_init_ops.post_smp_prepare_boot_cpu)
		pv_init_ops.post_smp_prepare_boot_cpu();
}

/******************************************************************************
 * replacement of iosapic operations.
 */

struct pv_iosapic_ops {
	void (*pcat_compat_init)(void);

	struct irq_chip *(*__get_irq_chip)(unsigned long trigger);

	unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
	void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
};

extern struct pv_iosapic_ops pv_iosapic_ops;

static inline void
iosapic_pcat_compat_init(void)
{
	if (pv_iosapic_ops.pcat_compat_init)
		pv_iosapic_ops.pcat_compat_init();
}

static inline struct irq_chip*
iosapic_get_irq_chip(unsigned long trigger)
{
	return pv_iosapic_ops.__get_irq_chip(trigger);
}

static inline unsigned int
__iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	return pv_iosapic_ops.__read(iosapic, reg);
}

static inline void
__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	return pv_iosapic_ops.__write(iosapic, reg, val);
}

/******************************************************************************
 * replacement of irq operations.
 */

struct pv_irq_ops {
	void (*register_ipi)(void);

	int (*assign_irq_vector)(int irq);
	void (*free_irq_vector)(int vector);

	void (*register_percpu_irq)(ia64_vector vec,
				    struct irqaction *action);

	void (*resend_irq)(unsigned int vector);
};

extern struct pv_irq_ops pv_irq_ops;

static inline void
ia64_register_ipi(void)
{
	pv_irq_ops.register_ipi();
}

static inline int
assign_irq_vector(int irq)
{
	return pv_irq_ops.assign_irq_vector(irq);
}

static inline void
free_irq_vector(int vector)
{
	return pv_irq_ops.free_irq_vector(vector);
}

static inline void
register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
	pv_irq_ops.register_percpu_irq(vec, action);
}

static inline void
ia64_resend_irq(unsigned int vector)
{
	pv_irq_ops.resend_irq(vector);
}

/******************************************************************************
 * replacement of time operations.
 */

extern struct itc_jitter_data_t itc_jitter_data;
extern volatile int time_keeper_id;

struct pv_time_ops {
	void (*init_missing_ticks_accounting)(int cpu);
	int (*do_steal_accounting)(unsigned long *new_itm);

	void (*clocksource_resume)(void);

	unsigned long long (*sched_clock)(void);
};

extern struct pv_time_ops pv_time_ops;

static inline void
paravirt_init_missing_ticks_accounting(int cpu)
{
	if (pv_time_ops.init_missing_ticks_accounting)
		pv_time_ops.init_missing_ticks_accounting(cpu);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline int
paravirt_do_steal_accounting(unsigned long *new_itm)
{
	return pv_time_ops.do_steal_accounting(new_itm);
}

static inline unsigned long long paravirt_sched_clock(void)
{
	return pv_time_ops.sched_clock();
}

#endif /* !__ASSEMBLY__ */

#else
/* fallback for native case */

#ifndef __ASSEMBLY__

#define paravirt_banner()				do { } while (0)
#define paravirt_reserve_memory(region)			0

#define paravirt_arch_setup_early()			do { } while (0)
#define paravirt_arch_setup_console(cmdline_p)		do { } while (0)
#define paravirt_arch_setup_nomca()			0
#define paravirt_post_smp_prepare_boot_cpu()		do { } while (0)

#define paravirt_init_missing_ticks_accounting(cpu)	do { } while (0)
#define paravirt_do_steal_accounting(new_itm)		0

#endif /* __ASSEMBLY__ */


#endif /* CONFIG_PARAVIRT_GUEST */

#endif /* __ASM_PARAVIRT_H */
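Every hook in the header above follows one pattern: a global ops struct of function pointers, where NULL means "keep native behaviour", plus a thin static inline wrapper that checks before calling. A stand-alone sketch of that pattern, with hypothetical names, not kernel code:

/* Minimal ops-table sketch: NULL pointer = native default, a guest
 * installs its hook at boot -- the shape of pv_init_ops/pv_time_ops. */
#include <stdio.h>

struct demo_ops {
	void (*banner)(void);		/* NULL => print nothing */
	int  (*reserve)(long region);	/* NULL => native default: 0 */
};

static struct demo_ops demo_ops;	/* all-NULL: the native case */

static inline void demo_banner(void)
{
	if (demo_ops.banner)
		demo_ops.banner();
}

static inline int demo_reserve(long region)
{
	if (demo_ops.reserve)
		return demo_ops.reserve(region);
	return 0;			/* like paravirt_reserve_memory() */
}

static void guest_banner(void) { puts("running paravirtualized"); }

int main(void)
{
	demo_banner();			/* native: silent */
	demo_ops.banner = guest_banner;	/* guest installs its hook */
	demo_banner();			/* now prints */
	return demo_reserve(42);	/* 0: no hook installed */
}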
143
arch/ia64/include/asm/paravirt_patch.h
Normal file
@@ -0,0 +1,143 @@
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#ifndef __ASM_PARAVIRT_PATCH_H
#define __ASM_PARAVIRT_PATCH_H

#ifdef __ASSEMBLY__

	.section .paravirt_branches, "a"
	.previous
#define PARAVIRT_PATCH_SITE_BR(type)		\
	{					\
	[1:] ;					\
	br.cond.sptk.many 2f ;			\
	nop.b 0 ;				\
	nop.b 0;; ;				\
	} ;					\
	2:					\
	.xdata8 ".paravirt_branches", 1b, type

#else

#include <linux/stringify.h>
#include <asm/intrinsics.h>

/* for binary patch */
struct paravirt_patch_site_bundle {
	void		*sbundle;
	void		*ebundle;
	unsigned long	type;
};

/* the label marks the beginning of a new bundle */
#define paravirt_alt_bundle(instr, privop)			\
	"\t998:\n"						\
	"\t" instr "\n"						\
	"\t999:\n"						\
	"\t.pushsection .paravirt_bundles, \"a\"\n"		\
	"\t.popsection\n"					\
	"\t.xdata8 \".paravirt_bundles\", 998b, 999b, "		\
	__stringify(privop) "\n"


struct paravirt_patch_bundle_elem {
	const void	*sbundle;
	const void	*ebundle;
	unsigned long	type;
};


struct paravirt_patch_site_inst {
	unsigned long stag;
	unsigned long etag;
	unsigned long type;
};

#define paravirt_alt_inst(instr, privop)			\
	"\t[998:]\n"						\
	"\t" instr "\n"						\
	"\t[999:]\n"						\
	"\t.pushsection .paravirt_insts, \"a\"\n"		\
	"\t.popsection\n"					\
	"\t.xdata8 \".paravirt_insts\", 998b, 999b, "		\
	__stringify(privop) "\n"

struct paravirt_patch_site_branch {
	unsigned long tag;
	unsigned long type;
};

struct paravirt_patch_branch_target {
	const void	*entry;
	unsigned long	type;
};

void
__paravirt_patch_apply_branch(
	unsigned long tag, unsigned long type,
	const struct paravirt_patch_branch_target *entries,
	unsigned int nr_entries);

void
paravirt_patch_reloc_br(unsigned long tag, const void *target);

void
paravirt_patch_reloc_brl(unsigned long tag, const void *target);


#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
unsigned long
ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);

unsigned long
__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
			      const struct paravirt_patch_bundle_elem *elems,
			      unsigned long nelems,
			      const struct paravirt_patch_bundle_elem **found);

void
paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
			    const struct paravirt_patch_site_bundle *end);

void
paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
			  const struct paravirt_patch_site_inst *end);

void paravirt_patch_apply(void);
#else
#define paravirt_patch_apply_bundle(start, end)	do { } while (0)
#define paravirt_patch_apply_inst(start, end)	do { } while (0)
#define paravirt_patch_apply()			do { } while (0)
#endif

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PARAVIRT_PATCH_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "linux"
 * c-basic-offset: 8
 * tab-width: 8
 * indent-tabs-mode: t
 * End:
 */
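The paravirt_alt_bundle()/paravirt_alt_inst() macros above record each patch site as an entry in a dedicated ELF section (via .xdata8), which the boot-time patcher then walks from start to end. The same register-in-a-section idea can be shown stand-alone in C, using __attribute__((section)) and the __start_/__stop_ symbols GNU ld synthesizes for sections whose names are valid identifiers. An analogy only, not kernel code, with hypothetical names:

/* Stand-alone analogy: place fixup records in a custom section at
 * compile time, walk them at run time -- the patcher's traversal,
 * minus the actual instruction rewriting.  GCC/GNU ld specific. */
#include <stdio.h>

struct patch_site {
	const char *name;
	unsigned long type;
};

#define PATCH_SITE(n, t)						\
	static const struct patch_site site_##n				\
	__attribute__((section("demo_sites"), used)) = { #n, (t) }

PATCH_SITE(ssm_i, 9);		/* cf. PARAVIRT_PATCH_TYPE_SSM_I */
PATCH_SITE(rsm_i, 10);		/* cf. PARAVIRT_PATCH_TYPE_RSM_I */

/* provided automatically by GNU ld for the "demo_sites" section */
extern const struct patch_site __start_demo_sites[];
extern const struct patch_site __stop_demo_sites[];

int main(void)
{
	const struct patch_site *s;

	for (s = __start_demo_sites; s < __stop_demo_sites; s++)
		printf("would patch %s (type %lu)\n", s->name, s->type);
	return 0;
}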
479
arch/ia64/include/asm/paravirt_privop.h
Normal file
@@ -0,0 +1,479 @@
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
#define _ASM_IA64_PARAVIRT_PRIVOP_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/kregs.h> /* for IA64_PSR_I */

/******************************************************************************
 * replacement of intrinsics operations.
 */

struct pv_cpu_ops {
	void (*fc)(void *addr);
	unsigned long (*thash)(unsigned long addr);
	unsigned long (*get_cpuid)(int index);
	unsigned long (*get_pmd)(int index);
	unsigned long (*getreg)(int reg);
	void (*setreg)(int reg, unsigned long val);
	void (*ptcga)(unsigned long addr, unsigned long size);
	unsigned long (*get_rr)(unsigned long index);
	void (*set_rr)(unsigned long index, unsigned long val);
	void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
			       unsigned long val2, unsigned long val3,
			       unsigned long val4);
	void (*ssm_i)(void);
	void (*rsm_i)(void);
	unsigned long (*get_psr_i)(void);
	void (*intrin_local_irq_restore)(unsigned long flags);
};

extern struct pv_cpu_ops pv_cpu_ops;

extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);

/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/

#ifndef ASM_SUPPORTED
#define paravirt_ssm_i()	pv_cpu_ops.ssm_i()
#define paravirt_rsm_i()	pv_cpu_ops.rsm_i()
#define __paravirt_getreg()	pv_cpu_ops.getreg()
#endif

/* The mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
 * A static inline function doesn't satisfy that. */
#define paravirt_ssm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_ssm_i();	\
		else				\
			ia64_native_ssm(mask);	\
	} while (0)

#define paravirt_rsm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_rsm_i();	\
		else				\
			ia64_native_rsm(mask);	\
	} while (0)

/* The returned ip value should be the one in the caller,
 * not the one in __paravirt_getreg(). */
#define paravirt_getreg(reg)					\
	({							\
		unsigned long res;				\
		if ((reg) == _IA64_REG_IP)			\
			res = ia64_native_getreg(_IA64_REG_IP); \
		else						\
			res = __paravirt_getreg(reg);		\
		res;						\
	})

/******************************************************************************
 * replacement of hand written assembly codes.
 */
struct pv_cpu_asm_switch {
	unsigned long switch_to;
	unsigned long leave_syscall;
	unsigned long work_processed_syscall;
	unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);

#endif /* __ASSEMBLY__ */

#define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name

#else

/* fallback for native case */
#define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name

#endif /* CONFIG_PARAVIRT */

#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
#define paravirt_dv_serialize_data()	ia64_dv_serialize_data()
#else
#define paravirt_dv_serialize_data()	/* nothing */
#endif

/* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions so the code must be replaced with
 * paravirtualized versions */
#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall	\
	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)


#if defined(CONFIG_PARAVIRT)
/******************************************************************************
 * binary patching infrastructure
 */
#define PARAVIRT_PATCH_TYPE_FC				1
#define PARAVIRT_PATCH_TYPE_THASH			2
#define PARAVIRT_PATCH_TYPE_GET_CPUID			3
#define PARAVIRT_PATCH_TYPE_GET_PMD			4
#define PARAVIRT_PATCH_TYPE_PTCGA			5
#define PARAVIRT_PATCH_TYPE_GET_RR			6
#define PARAVIRT_PATCH_TYPE_SET_RR			7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4		8
#define PARAVIRT_PATCH_TYPE_SSM_I			9
#define PARAVIRT_PATCH_TYPE_RSM_I			10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I			11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE	12

/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
#define PARAVIRT_PATCH_TYPE_GETREG			0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG			0x20000000

/*
 * struct task_struct* (*ia64_switch_to)(void* next_task);
 * void *ia64_leave_syscall;
 * void *ia64_work_processed_syscall
 * void *ia64_leave_kernel;
 */

#define PARAVIRT_PATCH_TYPE_BR_START			0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO		\
	(PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL	\
	(PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 3)

#ifdef ASM_SUPPORTED
#include <asm/paravirt_patch.h>

/*
 * pv_cpu_ops calling stub.
 * The normal function call convention can't be written with gcc
 * inline assembly.
 *
 * From the caller's point of view,
 * the following registers will be clobbered:
 * r2, r3
 * r8-r15
 * r16, r17
 * b6, b7
 * p6-p15
 * ar.ccv
 *
 * From the callee's point of view,
 * the following registers can be used:
 * r2, r3: scratch
 * r8: scratch, input argument0 and return value
 * r0-r15: scratch, input argument1-5
 * b6: return pointer
 * b7: scratch
 * p6-p15: scratch
 * ar.ccv: scratch
 *
 * Other registers must not be changed. Especially
 * b0: rp: preserved. gcc ignores b0 in clobbered register.
 * r16: saved gp
 */
/* 5 bundles */
#define __PARAVIRT_BR							\
	";;\n"								\
	"{ .mlx\n"							\
	"nop 0\n"							\
	"movl r2 = %[op_addr]\n"/* get function pointer address */	\
	";;\n"								\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"ld8 r2 = [r2]\n"	/* load function descriptor address */	\
	"mov r17 = ip\n"	/* get ip to calc return address */	\
	"mov r16 = gp\n"	/* save gp */				\
	";;\n"								\
	"}\n"								\
	"{ .mii\n"							\
	"ld8 r3 = [r2], 8\n"	/* load entry address */		\
	"adds r17 =  1f - 1b, r17\n"	/* calculate return address */	\
	";;\n"								\
	"mov b7 = r3\n"		/* set entry address */			\
	"}\n"								\
	"{ .mib\n"							\
	"ld8 gp = [r2]\n"	/* load gp value */			\
	"mov b6 = r17\n"	/* set return address */		\
	"br.cond.sptk.few b7\n"	/* intrinsics are very short insns */	\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"mov gp = r16\n"	/* restore gp value */			\
	"nop 0\n"							\
	"nop 0\n"							\
	";;\n"								\
	"}\n"

#define PARAVIRT_OP(op)				\
	[op_addr] "i"(&pv_cpu_ops.op)

#define PARAVIRT_TYPE(type)			\
	PARAVIRT_PATCH_TYPE_ ## type

#define PARAVIRT_REG_CLOBBERS0					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
		"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS1					\
	"r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14",		\
		"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS2					\
	"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",	\
		"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS5					\
	"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/	\
		"r15", "r16", "r17"

#define PARAVIRT_BR_CLOBBERS			\
	"b6", "b7"

#define PARAVIRT_PR_CLOBBERS						\
	"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"

#define PARAVIRT_AR_CLOBBERS			\
	"ar.ccv"

#define PARAVIRT_CLOBBERS0			\
	PARAVIRT_REG_CLOBBERS0,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS1			\
	PARAVIRT_REG_CLOBBERS1,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS2			\
	PARAVIRT_REG_CLOBBERS2,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS5			\
	PARAVIRT_REG_CLOBBERS5,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_BR0(op, type)					\
	register unsigned long ia64_clobber asm ("r8");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_clobber)			\
		      : PARAVIRT_OP(op)				\
		      : PARAVIRT_CLOBBERS0)

#define PARAVIRT_BR0_RET(op, type)				\
	register unsigned long ia64_intri_res asm ("r8");	\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_intri_res)			\
		      : PARAVIRT_OP(op)				\
		      : PARAVIRT_CLOBBERS0)

#define PARAVIRT_BR1(op, type, arg1)				\
	register unsigned long __##arg1 asm ("r8") = arg1;	\
	register unsigned long ia64_clobber asm ("r8");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_clobber)			\
		      : PARAVIRT_OP(op), "0"(__##arg1)		\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR1_RET(op, type, arg1)			\
	register unsigned long ia64_intri_res asm ("r8");	\
	register unsigned long __##arg1 asm ("r8") = arg1;	\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_intri_res)			\
		      : PARAVIRT_OP(op), "0"(__##arg1)		\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR1_VOID(op, type, arg1)			\
	register void *__##arg1 asm ("r8") = arg1;		\
	register unsigned long ia64_clobber asm ("r8");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
					  PARAVIRT_TYPE(type))	\
		      : "=r"(ia64_clobber)			\
		      : PARAVIRT_OP(op), "0"(__##arg1)		\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR2(op, type, arg1, arg2)				\
	register unsigned long __##arg1 asm ("r8") = arg1;		\
	register unsigned long __##arg2 asm ("r9") = arg2;		\
	register unsigned long ia64_clobber1 asm ("r8");		\
	register unsigned long ia64_clobber2 asm ("r9");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_clobber1), "=r"(ia64_clobber2)	\
		      : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2)	\
		      : PARAVIRT_CLOBBERS2)


#define PARAVIRT_DEFINE_CPU_OP0(op, type)		\
	static inline void				\
	paravirt_ ## op (void)				\
	{						\
		PARAVIRT_BR0(op, type);			\
	}

#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type)		\
	static inline unsigned long			\
	paravirt_ ## op (void)				\
	{						\
		PARAVIRT_BR0_RET(op, type);		\
		return ia64_intri_res;			\
	}

#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type)		\
	static inline void				\
	paravirt_ ## op (void *arg1)			\
	{						\
		PARAVIRT_BR1_VOID(op, type, arg1);	\
	}

#define PARAVIRT_DEFINE_CPU_OP1(op, type)		\
	static inline void				\
	paravirt_ ## op (unsigned long arg1)		\
	{						\
		PARAVIRT_BR1(op, type, arg1);		\
	}

#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type)		\
	static inline unsigned long			\
	paravirt_ ## op (unsigned long arg1)		\
	{						\
		PARAVIRT_BR1_RET(op, type, arg1);	\
		return ia64_intri_res;			\
	}

#define PARAVIRT_DEFINE_CPU_OP2(op, type)		\
	static inline void				\
	paravirt_ ## op (unsigned long arg1,		\
			 unsigned long arg2)		\
	{						\
		PARAVIRT_BR2(op, type, arg1, arg2);	\
	}


PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)

static inline void
paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
			unsigned long val2, unsigned long val3,
			unsigned long val4)
{
	register unsigned long __val0 asm ("r8") = val0;
	register unsigned long __val1 asm ("r9") = val1;
	register unsigned long __val2 asm ("r10") = val2;
	register unsigned long __val3 asm ("r11") = val3;
	register unsigned long __val4 asm ("r14") = val4;

	register unsigned long ia64_clobber0 asm ("r8");
	register unsigned long ia64_clobber1 asm ("r9");
	register unsigned long ia64_clobber2 asm ("r10");
	register unsigned long ia64_clobber3 asm ("r11");
	register unsigned long ia64_clobber4 asm ("r14");

	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
					  PARAVIRT_TYPE(SET_RR0_TO_RR4))
		      : "=r"(ia64_clobber0),
			"=r"(ia64_clobber1),
			"=r"(ia64_clobber2),
			"=r"(ia64_clobber3),
			"=r"(ia64_clobber4)
		      : PARAVIRT_OP(set_rr0_to_rr4),
			"0"(__val0), "1"(__val1), "2"(__val2),
			"3"(__val3), "4"(__val4)
		      : PARAVIRT_CLOBBERS5);
}

/* unsigned long paravirt_getreg(int reg) */
#define __paravirt_getreg(reg)						\
	({								\
		register unsigned long ia64_intri_res asm ("r8");	\
		register unsigned long __reg asm ("r8") = (reg);	\
									\
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
						  PARAVIRT_TYPE(GETREG)	\
						  + (reg))		\
			      : "=r"(ia64_intri_res)			\
			      : PARAVIRT_OP(getreg), "0"(__reg)		\
			      : PARAVIRT_CLOBBERS1);			\
									\
		ia64_intri_res;						\
	})

/* void paravirt_setreg(int reg, unsigned long val) */
#define paravirt_setreg(reg, val)					\
	do {								\
		register unsigned long __val asm ("r8") = val;		\
		register unsigned long __reg asm ("r9") = reg;		\
		register unsigned long ia64_clobber1 asm ("r8");	\
		register unsigned long ia64_clobber2 asm ("r9");	\
									\
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
						  PARAVIRT_TYPE(SETREG)	\
						  + (reg))		\
			      : "=r"(ia64_clobber1),			\
				"=r"(ia64_clobber2)			\
			      : PARAVIRT_OP(setreg),			\
				"1"(__reg), "0"(__val)			\
			      : PARAVIRT_CLOBBERS2);			\
	} while (0)

#endif /* ASM_SUPPORTED */
#endif /* CONFIG_PARAVIRT && ASM_SUPPORTED */

#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
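paravirt_ssm()/paravirt_rsm() above are macros rather than inline functions so that the mask stays a compile-time constant: the if() against IA64_PSR_I folds away and each call site becomes either the patchable hook or the native instruction, never a runtime branch. A stand-alone sketch of that dispatch shape, hypothetical names, not kernel code:

/* Stand-alone demo: with a constant mask, the comparison below is
 * evaluated by the compiler and only one branch survives per call site. */
#include <stdio.h>

#define DEMO_PSR_I	(1UL << 14)	/* stand-in for IA64_PSR_I */

static void hook_ssm_i(void) { puts("hook: enable interrupts"); }
static void native_ssm(unsigned long m) { printf("native ssm %#lx\n", m); }

#define demo_ssm(mask)					\
do {							\
	if ((mask) == DEMO_PSR_I)	/* constant-folded */	\
		hook_ssm_i();				\
	else						\
		native_ssm(mask);			\
} while (0)

int main(void)
{
	demo_ssm(DEMO_PSR_I);	/* compiles down to the hook path only */
	demo_ssm(1UL << 3);	/* compiles down to the native path only */
	return 0;
}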
19
arch/ia64/include/asm/parport.h
Normal file
@@ -0,0 +1,19 @@
/*
 * parport.h: platform-specific PC-style parport initialisation
 *
 * Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk>
 *
 * This file should only be included by drivers/parport/parport_pc.c.
 */

#ifndef _ASM_IA64_PARPORT_H
#define _ASM_IA64_PARPORT_H 1

static int parport_pc_find_isa_ports(int autoirq, int autodma);

static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
	return parport_pc_find_isa_ports(autoirq, autodma);
}

#endif /* _ASM_IA64_PARPORT_H */
27
arch/ia64/include/asm/patch.h
Normal file
@@ -0,0 +1,27 @@
#ifndef _ASM_IA64_PATCH_H
#define _ASM_IA64_PATCH_H

/*
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * There are a number of reasons for patching instructions.  Rather than duplicating code
 * all over the place, we put the common stuff here.  Reasons for patching: in-kernel
 * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
 * shared library.  Undoubtedly, some of these reasons will disappear and others will
 * be added over time.
 */
#include <linux/elf.h>
#include <linux/types.h>

extern void ia64_patch (u64 insn_addr, u64 mask, u64 val);	/* patch any insn slot */
extern void ia64_patch_imm64 (u64 insn_addr, u64 val);		/* patch "movl" w/abs. value*/
extern void ia64_patch_imm60 (u64 insn_addr, u64 val);		/* patch "brl" w/ip-rel value */

extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
extern void ia64_patch_vtop (unsigned long start, unsigned long end);
extern void ia64_patch_phys_stack_reg(unsigned long val);
extern void ia64_patch_rse (unsigned long start, unsigned long end);
extern void ia64_patch_gate (void);

#endif /* _ASM_IA64_PATCH_H */
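The (insn_addr, mask, val) signature of ia64_patch() above describes a masked read-modify-write: mask selects the bits of the instruction slot to replace, val supplies them. The core operation, shown stand-alone (not kernel code; real patching additionally has to locate the slot inside a 128-bit bundle and flush the i-cache):

/* Stand-alone demo of a masked 64-bit read-modify-write. */
#include <stdio.h>
#include <stdint.h>

static void patch_word(uint64_t *word, uint64_t mask, uint64_t val)
{
	*word = (*word & ~mask) | (val & mask);	/* replace only masked bits */
}

int main(void)
{
	uint64_t insn = 0x0123456789abcdefULL;

	patch_word(&insn, 0xffff000000000000ULL, 0xdead000000000000ULL);
	printf("%#llx\n", (unsigned long long)insn);	/* 0xdead456789abcdef */
	return 0;
}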
133
arch/ia64/include/asm/pci.h
Normal file
133
arch/ia64/include/asm/pci.h
Normal file
|
@ -0,0 +1,133 @@
|
|||
#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/hw_irq.h>

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};

/*
 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
 * loader.
 */
#define pcibios_assign_all_busses()	0

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
 * correspondence between device bus addresses and CPU physical addresses.
 * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
 * bounce buffer handling code in the block and network device layers.
 * Platforms with separate bus address spaces _must_ turn this off and provide
 * a device DMA mapping implementation that takes care of the necessary
 * address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address
 * spaces all have I/O MMUs which support the merging of physically
 * discontiguous buffers, so we can use that as the sole factor to determine
 * the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)

#include <asm-generic/pci-dma-compat.h>

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif
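As a rough illustration of the helper above, here is a hedged sketch of how a driver might query the burst advice; my_probe and its use are hypothetical, not part of this header:

/* Hypothetical sketch: querying the burst advice from a driver's probe(). */
static int my_probe(struct pci_dev *pdev)
{
	enum pci_dma_burst_strategy strat;
	unsigned long param;

	pci_dma_burst_advice(pdev, &strat, &param);
	/* On ia64 this always yields PCI_DMA_BURST_MULTIPLE, with param set
	 * to the cache line size in bytes (1024 if the config byte reads 0). */
	return 0;
}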
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state);

#define pci_get_legacy_mem	platform_pci_get_legacy_mem
#define pci_legacy_read		platform_pci_legacy_read
#define pci_legacy_write	platform_pci_legacy_write

struct iospace_resource {
	struct list_head list;
	struct resource res;
};

struct pci_controller {
	struct acpi_device *companion;
	void *iommu;
	int segment;
	int node;	/* nearest node with memory or NUMA_NO_NODE for global allocation */

	void *platform_data;
};

#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define pci_domain_nr(busdev)	(PCI_CONTROLLER(busdev)->segment)

extern struct pci_ops pci_root_ops;

static inline int pci_proc_domain(struct pci_bus *bus)
{
	return (pci_domain_nr(bus) != 0);
}

static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}

#ifdef CONFIG_INTEL_IOMMU
extern void pci_iommu_alloc(void);
#endif
#endif /* _ASM_IA64_PCI_H */
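The controller/domain macros above hang the ia64 pci_controller off bus->sysdata; a minimal sketch (my_dump_bus is hypothetical) of how the pieces chain together:

/* Hypothetical sketch: how the macros above resolve a bus's segment. */
static void my_dump_bus(struct pci_bus *bus)
{
	struct pci_controller *ctrl = PCI_CONTROLLER(bus);

	/* pci_domain_nr(bus) expands to ctrl->segment, and pci_proc_domain()
	 * is true only for non-zero segments. */
	printk(KERN_INFO "bus %02x: segment %d node %d\n",
	       bus->number, ctrl->segment, ctrl->node);
}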
54
arch/ia64/include/asm/percpu.h
Normal file
@@ -0,0 +1,54 @@
#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H

/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
# define THIS_CPU(var)	(var)  /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */

#include <linux/threads.h>

#ifdef CONFIG_SMP

#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
#endif

#define __my_cpu_offset	__ia64_per_cpu_var(local_per_cpu_offset)

extern void *per_cpu_init(void);

#else /* ! SMP */

#define per_cpu_init()	(__phys_per_cpu_start)

#endif	/* SMP */

#define PER_CPU_BASE_SECTION ".data..percpu"

/*
 * Be extremely careful when taking the address of this variable!  Due to virtual
 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
#define __ia64_per_cpu_var(var) (*({					\
	__verify_pcpu_ptr(&(var));					\
	((typeof(var) __kernel __force *)&(var));			\
}))

#include <asm-generic/percpu.h>

/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PERCPU_H */
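To make the warning above concrete, a hedged sketch (my_counter and my_show_alias are illustrative): the value read through the remapped accessor is fine, but its address is not the canonical per-CPU pointer:

/* Hypothetical sketch of the aliasing caveat documented above. */
DEFINE_PER_CPU(unsigned long, my_counter);

static void my_show_alias(void)
{
	/* Fast path: reads through the virtually remapped per-CPU page. */
	unsigned long v = __ia64_per_cpu_var(my_counter);

	/* Taking the address of the remapped variable does NOT yield the
	 * canonical pointer that per_cpu() would give you, so never hand
	 * this pointer to another CPU. */
	unsigned long *alias = &__ia64_per_cpu_var(my_counter);

	printk(KERN_DEBUG "val=%lu alias=%p\n", v, alias);
}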
110
arch/ia64/include/asm/perfmon.h
Normal file
@@ -0,0 +1,110 @@
/*
 * Copyright (C) 2001-2003 Hewlett-Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 */
#ifndef _ASM_IA64_PERFMON_H
#define _ASM_IA64_PERFMON_H

#include <uapi/asm/perfmon.h>

extern long perfmonctl(int fd, int cmd, void *arg, int narg);

typedef struct {
	void (*handler)(int irq, void *arg, struct pt_regs *regs);
} pfm_intr_handler_desc_t;

extern void pfm_save_regs (struct task_struct *);
extern void pfm_load_regs (struct task_struct *);

extern void pfm_exit_thread(struct task_struct *);
extern int  pfm_use_debug_registers(struct task_struct *);
extern int  pfm_release_debug_registers(struct task_struct *);
extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
extern void pfm_init_percpu(void);
extern void pfm_handle_work(void);
extern int  pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
extern int  pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);

/*
 * Reset PMD register flags
 */
#define PFM_PMD_SHORT_RESET	0
#define PFM_PMD_LONG_RESET	1

typedef union {
	unsigned int val;
	struct {
		unsigned int notify_user:1;	/* notify user program of overflow */
		unsigned int reset_ovfl_pmds:1;	/* reset overflowed PMDs */
		unsigned int block_task:1;	/* block monitored task on kernel exit */
		unsigned int mask_monitoring:1;	/* mask monitors via PMCx.plm */
		unsigned int reserved:28;	/* for future use */
	} bits;
} pfm_ovfl_ctrl_t;

typedef struct {
	unsigned char ovfl_pmd;		/* index of overflowed PMD */
	unsigned char ovfl_notify;	/* =1 if monitor requested overflow notification */
	unsigned short active_set;	/* event set active at the time of the overflow */
	pfm_ovfl_ctrl_t ovfl_ctrl;	/* return: perfmon controls to set by handler */

	unsigned long pmd_last_reset;	/* last reset value of the PMD */
	unsigned long smpl_pmds[4];	/* bitmask of other PMD of interest on overflow */
	unsigned long smpl_pmds_values[PMU_MAX_PMDS];	/* values for the other PMDs of interest */
	unsigned long pmd_value;	/* current 64-bit value of the PMD */
	unsigned long pmd_eventid;	/* eventid associated with PMD */
} pfm_ovfl_arg_t;

typedef struct {
	char		*fmt_name;
	pfm_uuid_t	fmt_uuid;
	size_t		fmt_arg_size;
	unsigned long	fmt_flags;

	int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
	int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
	int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
	int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
	int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
	int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
	int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);

	struct list_head fmt_list;
} pfm_buffer_fmt_t;

extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);

/*
 * perfmon interface exported to modules
 */
extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);

/*
 * describe the content of the local_cpu_data->pfm_syst_info field
 */
#define PFM_CPUINFO_SYST_WIDE	0x1	/* if set a system wide session exists */
#define PFM_CPUINFO_DCR_PP	0x2	/* if set the system wide session has started */
#define PFM_CPUINFO_EXCL_IDLE	0x4	/* the system wide session excludes the idle task */

/*
 * sysctl control structure. visible to sampling formats
 */
typedef struct {
	int	debug;		/* turn on/off debugging via syslog */
	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
	int	expert_mode;	/* turn on/off value checking */
} pfm_sysctl_t;
extern pfm_sysctl_t pfm_sysctl;

#endif /* _ASM_IA64_PERFMON_H */
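A hedged sketch of how a sampling module would plug into the pfm_buffer_fmt_t hooks above; every my_* name is illustrative and the unset fields (UUID, flags, remaining hooks) would need real values in practice:

/* Hypothetical sampling-format module skeleton. */
static int my_fmt_handler(struct task_struct *task, void *buf,
			  pfm_ovfl_arg_t *arg, struct pt_regs *regs,
			  unsigned long stamp)
{
	/* Ask perfmon to reset the overflowed PMDs and keep monitoring. */
	arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
	return 0;
}

static pfm_buffer_fmt_t my_fmt = {
	.fmt_name    = "my-sampling-format",
	.fmt_handler = my_fmt_handler,
};

static int __init my_fmt_init(void)
{
	return pfm_register_buffer_fmt(&my_fmt);
}

static void __exit my_fmt_exit(void)
{
	pfm_unregister_buffer_fmt(my_fmt.fmt_uuid);
}

module_init(my_fmt_init);
module_exit(my_fmt_exit);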
125
arch/ia64/include/asm/pgalloc.h
Normal file
@@ -0,0 +1,125 @@
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>
#include <linux/quicklist.h>

#include <asm/mmu_context.h>

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, NULL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, NULL, pgd);
}

#ifdef CONFIG_PGTABLE_4
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return quicklist_alloc(0, GFP_KERNEL, NULL);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	quicklist_free(0, NULL, pud);
}
#define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */

static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return quicklist_alloc(0, GFP_KERNEL, NULL);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	quicklist_free(0, NULL, pmd);
}

#define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)

static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	void *pg;

	pg = quicklist_alloc(0, GFP_KERNEL, NULL);
	if (!pg)
		return NULL;
	page = virt_to_page(pg);
	if (!pgtable_page_ctor(page)) {
		quicklist_free(0, NULL, pg);
		return NULL;
	}
	return page;
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return quicklist_alloc(0, GFP_KERNEL, NULL);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	quicklist_free_page(0, NULL, pte);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	quicklist_free(0, NULL, pte);
}

static inline void check_pgt_cache(void)
{
	quicklist_trim(0, NULL, 25, 16);
}

#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)

#endif /* _ASM_IA64_PGALLOC_H */
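A hedged sketch of how the helpers above chain when a mapping is grown by hand; my_map_one is hypothetical, and real callers go through the generic pmd_alloc()/pte_alloc_map() paths instead:

/* Hypothetical sketch: wiring one kernel PTE through the levels above. */
static int my_map_one(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = pmd_alloc_one(mm, addr);		/* page from quicklist 0 */
	pte_t *pte = pte_alloc_one_kernel(mm, addr);

	if (!pmd || !pte) {
		if (pmd)
			pmd_free(mm, pmd);
		if (pte)
			pte_free_kernel(mm, pte);
		return -ENOMEM;
	}
	/* Each level simply stores the physical address of the level below. */
	pmd_populate_kernel(mm, pmd, pte);
	return 0;
}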
609
arch/ia64/include/asm/pgtable.h
Normal file
@@ -0,0 +1,609 @@
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPD long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE		(1 << 1)	/* see swap & file pte remarks below */

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
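To make the field layout above concrete, a small self-contained sketch (user-space; the constants are re-derived locally so it compiles outside the kernel) that assembles a user read-execute PTE and picks it apart again:

/* Standalone illustration of the PTE field layout documented above. */
#include <stdio.h>
#include <stdint.h>

#define PL_3	(3UL << 7)		/* privilege level 3 (user) */
#define AR_RX	(1UL << 9)		/* read & execute */
#define P_BIT	(1UL << 0)		/* present */
#define A_BIT	(1UL << 5)		/* accessed */
#define PPN(pa)	((pa) & ~0xfffUL)	/* page-number bits 12..49 */

int main(void)
{
	uint64_t pte = PPN(0x4000000UL) | PL_3 | AR_RX | P_BIT | A_BIT;

	printf("pte = %#018lx\n", (unsigned long)pte);
	printf("pfn = %#lx\n", (unsigned long)(pte >> 12));
	printf("pl  = %lu, ar = %lu\n",
	       (unsigned long)((pte >> 7) & 3), (unsigned long)((pte >> 9) & 7));
	return 0;
}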
/*
 * How many pointers will a page table level hold expressed in shift
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))

/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))

#ifdef CONFIG_PGTABLE_4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#ifdef CONFIG_PGTABLE_4
#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS	0

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
				 _PAGE_MA_UC)

# ifndef __ASSEMBLY__

#include <linux/sched.h>	/* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private shared memory segment, the _S
 * version gets used for a shared memory segment with MAP_SHARED on.
 * In a private shared memory segment, we do a copy-on-write if a task
 * attempts to write to the page.
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#ifdef CONFIG_PGTABLE_4
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */

/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)

/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap		((struct page *)VMALLOC_END)
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#endif
|
||||
/* fs/proc/kcore.c */
|
||||
#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
|
||||
#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
|
||||
|
||||
#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
|
||||
#define RGN_MAP_LIMIT ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE) /* per region addr limit */
|
||||
|
||||
/*
|
||||
* Conversion functions: convert page frame number (pfn) and a protection value to a page
|
||||
* table entry (pte).
|
||||
*/
|
||||
#define pfn_pte(pfn, pgprot) \
|
||||
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
|
||||
|
||||
/* Extract pfn from pte. */
|
||||
#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
|
||||
|
||||
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
||||
|
||||
/* This takes a physical page address that is used by the remapping functions */
|
||||
#define mk_pte_phys(physpage, pgprot) \
|
||||
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
|
||||
|
||||
#define pte_modify(_pte, newprot) \
|
||||
(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
|
||||
|
||||
#define pte_none(pte) (!pte_val(pte))
|
||||
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
|
||||
#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
|
||||
/* pte_page() returns the "struct page *" corresponding to the PTE: */
|
||||
#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
|
||||
|
||||
#define pmd_none(pmd) (!pmd_val(pmd))
|
||||
#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
|
||||
#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
|
||||
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
|
||||
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
|
||||
#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
|
||||
|
||||
#define pud_none(pud) (!pud_val(pud))
|
||||
#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
|
||||
#define pud_present(pud) (pud_val(pud) != 0UL)
|
||||
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
|
||||
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
|
||||
#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
|
||||
|
||||
#ifdef CONFIG_PGTABLE_4
|
||||
#define pgd_none(pgd) (!pgd_val(pgd))
|
||||
#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
|
||||
#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
|
||||
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
|
||||
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
|
||||
#define pgd_page(pgd) virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The following have defined behavior only work if pte_present() is true.
|
||||
*/
|
||||
#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
|
||||
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
|
||||
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
|
||||
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
|
||||
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
|
||||
#define pte_special(pte) 0
|
||||
|
||||
/*
|
||||
* Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
|
||||
* access rights:
|
||||
*/
|
||||
#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
|
||||
#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
|
||||
#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
|
||||
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
|
||||
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
|
||||
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
|
||||
#define pte_mkhuge(pte) (__pte(pte_val(pte)))
|
||||
#define pte_mkspecial(pte) (pte)
|
||||
|
||||
/*
|
||||
* Because ia64's Icache and Dcache is not coherent (on a cpu), we need to
|
||||
* sync icache and dcache when we insert *new* executable page.
|
||||
* __ia64_sync_icache_dcache() check Pg_arch_1 bit and flush icache
|
||||
* if necessary.
|
||||
*
|
||||
* set_pte() is also called by the kernel, but we can expect that the kernel
|
||||
* flushes icache explicitly if necessary.
|
||||
*/
|
||||
#define pte_present_exec_user(pte)\
|
||||
((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
|
||||
(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
|
||||
|
||||
extern void __ia64_sync_icache_dcache(pte_t pteval);
|
||||
static inline void set_pte(pte_t *ptep, pte_t pteval)
|
||||
{
|
||||
/* page is present && page is user && page is executable
|
||||
* && (page swapin or new page or page migraton
|
||||
* || copy_on_write with page copying.)
|
||||
*/
|
||||
if (pte_present_exec_user(pteval) &&
|
||||
(!pte_present(*ptep) ||
|
||||
pte_pfn(*ptep) != pte_pfn(pteval)))
|
||||
/* load_module() calles flush_icache_range() explicitly*/
|
||||
__ia64_sync_icache_dcache(pteval);
|
||||
*ptep = pteval;
|
||||
}
|
||||
|
||||
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
|
||||
|
||||
/*
|
||||
* Make page protection values cacheable, uncacheable, or write-
|
||||
* combining. Note that "protection" is really a misnomer here as the
|
||||
* protection value contains the memory attribute bits, dirty bits, and
|
||||
* various other bits as well.
|
||||
*/
|
||||
#define pgprot_cacheable(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
|
||||
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
|
||||
#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
|
||||
|
||||
struct file;
|
||||
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
|
||||
unsigned long size, pgprot_t vma_prot);
|
||||
#define __HAVE_PHYS_MEM_ACCESS_PROT
|
||||
|
||||
static inline unsigned long
|
||||
pgd_index (unsigned long address)
|
||||
{
|
||||
unsigned long region = address >> 61;
|
||||
unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
|
||||
|
||||
return (region << (PAGE_SHIFT - 6)) | l1index;
|
||||
}
|
||||
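To illustrate the index computation above, a self-contained sketch that evaluates the same arithmetic for 8KB pages (PAGE_SHIFT = 13, three-level tables); the sample address is arbitrary:

/* Standalone illustration of pgd_index() with PAGE_SHIFT = 13 (8KB pages). */
#include <stdio.h>

#define PAGE_SHIFT	13
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT - 3))		/* 1024 entries */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))	/* 33 in the 3-level case */

int main(void)
{
	unsigned long addr = 0x2000000600000000UL;	/* region 1 user address */
	unsigned long region = addr >> 61;
	unsigned long l1index = (addr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	/* The 3 region bits select one eighth of the pgd; the rest comes from
	 * the address bits just above PGDIR_SHIFT.  Here: region 1, index 3,
	 * so pgd_index = (1 << 7) | 3 = 131. */
	printf("region=%lu l1index=%lu pgd_index=%lu\n",
	       region, l1index, (region << (PAGE_SHIFT - 6)) | l1index);
	return 0;
}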
/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (const struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

#ifdef CONFIG_PGTABLE_4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
#endif

/* Find an entry in the third-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the third-level page table.  This looks more complicated than it
 * should be because some platforms place page tables in high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, ptep) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 2) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 10)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS		61
#define pte_to_pgoff(pte)		((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)		((pte_t) { ((off) << 2) | _PAGE_FILE })
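A self-contained round-trip of the swap-pte encoding documented above (type in bits 2-8, offset from bit 9 up), with the macros re-derived locally so it compiles in user space:

/* Standalone round-trip of the swap-pte layout documented above. */
#include <stdio.h>
#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 2) | ((long)(offset) << 9) })
#define __swp_type(entry)	(((entry).val >> 2) & 0x7f)
#define __swp_offset(entry)	(((entry).val << 1) >> 10)

int main(void)
{
	swp_entry_t e = __swp_entry(5UL, 0x1234UL);

	/* The shift-left-then-right in __swp_offset drops bit 63
	 * (_PAGE_PROTNONE) before extracting the offset field. */
	assert(__swp_type(e) == 5 && __swp_offset(e) == 0x1234);
	printf("entry=%#lx type=%lu offset=%#lx\n",
	       e.val, __swp_type(e), __swp_offset(e));
	return 0;
}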
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			 unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE

#ifndef CONFIG_PGTABLE_4
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */
711
arch/ia64/include/asm/processor.h
Normal file
@@ -0,0 +1,711 @@
#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define ARCH_HAS_PREFETCH_SWITCH_STACK

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is misnamed.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
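To see why a shift of 30 works, a quick self-contained check of the fixed-point conversion (nsec = cycles * nsec_per_cyc >> shift) for a representative ITC frequency; the 1 GHz figure is illustrative:

/* Standalone check of the scaled nsec-per-cycle arithmetic above. */
#include <stdio.h>
#include <stdint.h>

#define IA64_NSEC_PER_CYC_SHIFT	30

int main(void)
{
	uint64_t itc_freq = 1000000000ULL;	/* illustrative 1 GHz ITC */
	uint64_t nsec_per_cyc = (1000000000ULL << IA64_NSEC_PER_CYC_SHIFT) / itc_freq;
	uint64_t cycles = 2500;			/* 2.5 us at 1 GHz */

	/* Multiply then shift back down: integer math with ~ns accuracy. */
	printf("%llu cycles = %llu ns\n",
	       (unsigned long long)cycles,
	       (unsigned long long)((cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT));
	return 0;
}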
#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <linux/bitops.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

union ia64_isr {
	__u64 val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_lid {
	__u64 val;
	struct {
		__u64 rv  : 16;
		__u64 eid : 8;
		__u64 id  : 8;
		__u64 ig  : 32;
	};
};

union ia64_tpr {
	__u64 val;
	struct {
		__u64 ig0 : 4;
		__u64 mic : 4;
		__u64 rsv : 8;
		__u64 mmi : 1;
		__u64 ig1 : 47;
	};
};

union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3  :  2; /* 0-1 */
		__u64 ps   :  6; /* 2-7 */
		__u64 key  : 24; /* 8-31 */
		__u64 rv4  : 32; /* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64 ve	:  1;  /* enable hw walker */
		__u64 reserved0	:  1;  /* reserved */
		__u64 ps	:  6;  /* log page size */
		__u64 rid	: 24;  /* region id */
		__u64 reserved1	: 32;  /* reserved */
	};
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;
	unsigned int ptce_count[2];
	unsigned int ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;		/* physical processor socket id */
	unsigned short core_id;		/* core id */
	unsigned short thread_id;	/* thread id */
	unsigned short num_log;		/* Total number of logical processors on
					 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
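These macros back the generic prctl() unaligned-access controls; a hedged user-space sketch (per the prctl(2) interface) of flipping a task into SIGBUS-on-unaligned mode:

/* User-space sketch: prctl(PR_SET_UNALIGN, ...) lands in SET_UNALIGN_CTL. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int cur = 0;

	/* PR_UNALIGN_NOPRINT / PR_UNALIGN_SIGBUS map onto the
	 * IA64_THREAD_UAC_* flag bits defined above. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
		perror("prctl");

	prctl(PR_GET_UNALIGN, &cur);
	printf("unaligned-access control: %d\n", cur);
	return 0;
}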
struct thread_struct {
	__u32 flags;		/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;		/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;		/* kernel stack pointer */
	__u64 map_base;		/* base address for get_unmapped_area() */
	__u64 rbs_bot;		/* the base address for the RBS */
	int last_fph_cpu;	/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,	\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];
	unsigned long ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {				\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}
|
||||
/* save f32-f127 at FPH */
|
||||
static inline void
|
||||
ia64_save_fpu (struct ia64_fpreg *fph) {
|
||||
ia64_fph_enable();
|
||||
__ia64_save_fpu(fph);
|
||||
ia64_fph_disable();
|
||||
}
|
||||
|
||||
/* load f32-f127 from FPH */
|
||||
static inline void
|
||||
ia64_load_fpu (struct ia64_fpreg *fph) {
|
||||
ia64_fph_enable();
|
||||
__ia64_load_fpu(fph);
|
||||
ia64_fph_disable();
|
||||
}
|
||||
|
||||
static inline __u64
|
||||
ia64_clear_ic (void)
|
||||
{
|
||||
__u64 psr;
|
||||
psr = ia64_getreg(_IA64_REG_PSR);
|
||||
ia64_stop();
|
||||
ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
|
||||
ia64_srlz_i();
|
||||
return psr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Restore the psr.
|
||||
*/
|
||||
static inline void
|
||||
ia64_set_psr (__u64 psr)
|
||||
{
|
||||
ia64_stop();
|
||||
ia64_setreg(_IA64_REG_PSR_L, psr);
|
||||
ia64_srlz_i();
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a translation into an instruction and/or data translation
|
||||
* register.
|
||||
*/
|
||||
static inline void
|
||||
ia64_itr (__u64 target_mask, __u64 tr_num,
|
||||
__u64 vmaddr, __u64 pte,
|
||||
__u64 log_page_size)
|
||||
{
|
||||
ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
|
||||
ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
|
||||
ia64_stop();
|
||||
if (target_mask & 0x1)
|
||||
ia64_itri(tr_num, pte);
|
||||
if (target_mask & 0x2)
|
||||
ia64_itrd(tr_num, pte);
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a translation into the instruction and/or data translation
|
||||
* cache.
|
||||
*/
|
||||
static inline void
|
||||
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
|
||||
__u64 log_page_size)
|
||||
{
|
||||
ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
|
||||
ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
|
||||
ia64_stop();
|
||||
/* as per EAS2.6, itc must be the last instruction in an instruction group */
|
||||
if (target_mask & 0x1)
|
||||
ia64_itci(pte);
|
||||
if (target_mask & 0x2)
|
||||
ia64_itcd(pte);
|
||||
}
|
||||
|
||||
/*
|
||||
* Purge a range of addresses from instruction and/or data translation
|
||||
* register(s).
|
||||
*/
|
||||
static inline void
|
||||
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
|
||||
{
|
||||
if (target_mask & 0x1)
|
||||
ia64_ptri(vmaddr, (log_size << 2));
|
||||
if (target_mask & 0x2)
|
||||
ia64_ptrd(vmaddr, (log_size << 2));
|
||||
}
|
||||
|
||||
/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits. */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax() ia64_hint(ia64_hint_pause)
#define cpu_relax_lowlatency() cpu_relax()

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

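/*
 * Worked example (editor's note, not in the original source): the 256
 * interrupt vectors are spread across four 64-bit IRR registers, so
 * vector 73 maps to reg = 73 / 64 = 1 (IRR1) and bit = 73 % 64 = 9.
 */
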
static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR. UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}

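/*
 * Worked example (editor's note, not in the original source): the UNAT
 * bit number is bits 8:3 of the spill address, i.e. the double-word
 * index within its 512-byte aligned window, so a spill to an address
 * ending in 0x28 updates bit (0x28 >> 3) & 0x3f = 5 of the mask.
 */
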
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))

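/*
 * Worked example (editor's note, not in the original source):
 *
 *	ia64_rotr(0x0123456789abcdefUL, 8) == 0xef0123456789abcdUL
 *	ia64_rotl(0x0123456789abcdefUL, 8) == 0x23456789abcdef01UL
 *
 * Both expand to shifts by n and (64 - n), so n should be in 1..63.
 */
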
/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

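/*
 * Editor's note (not in the original source): ia64_tpa() yields the
 * physical address of the mapped page and __va() re-bases that into the
 * region 7 identity mapping, so the returned pointer reaches the same
 * memory through the 0xe000000000000000-based kernel window.
 */
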
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x) prefetchw(x)

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
			 IDLE_NOMWAIT, IDLE_POLL};

void default_idle(void);

#define ia64_platform_is(x) (strcmp(x, ia64_platform_name) == 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */
151
arch/ia64/include/asm/ptrace.h
Normal file

@@ -0,0 +1,151 @@
/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2003 Intel Co
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Arun Sharma <arun.sharma@intel.com>
 *
 * 12/07/98	S. Eranian	added pt_regs & switch_stack
 * 12/21/98	D. Mosberger	updated to match latest code
 *  6/17/99	D. Mosberger	added second unat member to "struct switch_stack"
 *
 */
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <uapi/asm/ptrace.h>

/*
 * Base-2 logarithm of number of pages to allocate per task structure
 * (including register backing store and memory stack):
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER 3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER 2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER 1
#else
# define KERNEL_STACK_SIZE_ORDER 0
#endif

#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)

#define KERNEL_STACK_SIZE IA64_STK_OFFSET

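/*
 * Worked arithmetic (editor's note, not in the original source): the
 * order keeps the per-task allocation at 32KB for the page sizes listed
 * above (4KB << 3 == 8KB << 2 == 16KB << 1 == 32KB); with 64KB pages a
 * single order-0 page is used. The block holds the task_struct and
 * thread_info at the bottom, the register backing store growing up from
 * IA64_RBS_OFFSET, and the memory stack growing down from IA64_STK_OFFSET.
 */
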
#ifndef __ASSEMBLY__

#include <asm/current.h>
#include <asm/page.h>

/*
 * We use the ia64_psr(regs)->ri to determine which of the three
 * instructions in bundle (16 bytes) took the sample. Generate
 * the canonical representation by adding to instruction pointer.
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)

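/*
 * Worked example (editor's note, not in the original source): cr_iip
 * holds the 16-byte aligned bundle address and psr.ri selects slot 0-2
 * within it, so cr_iip = 0xa000000000010040 with ri = 2 gives an
 * instruction_pointer() of 0xa000000000010042.
 */
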
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	/* FIXME: should this be bspstore + nr_dirty regs? */
	return regs->ar_bspstore;
}

static inline int is_syscall_success(struct pt_regs *regs)
{
	return regs->r10 != -1;
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (is_syscall_success(regs))
		return regs->r8;
	else
		return -regs->r8;
}

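/*
 * Editor's note (not in the original source): this reflects the ia64
 * syscall convention, where r10 acts as a separate error flag (-1 on
 * failure) and r8 then holds a positive errno value, hence the negation
 * above to produce the usual kernel-internal -errno form.
 */
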
/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs) \
({ \
	unsigned long __ip = instruction_pointer(regs); \
	(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})

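/*
 * Worked example (editor's note, not in the original source): the slot
 * number is moved from bits 1:0 up to bits 3:2, so an ip of
 * 0xa000000000010042 (bundle 0x...40, slot 2) is recorded as
 * 0xa000000000010048.
 */
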
/*
 * Why not default? Because user_stack_pointer() on ia64 gives register
 * stack backing store instead...
 */
#define current_user_stack_pointer() (current_pt_regs()->r12)

/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs) \
({ \
	struct task_struct *_task = (task); \
	struct pt_regs *_regs = (regs); \
	!user_mode(_regs) && user_stack(_task, _regs); \
})

/*
 * System call handlers that, upon successful completion, need to return a negative value
 * should call force_successful_syscall_return() right before returning. On architectures
 * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
 * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
 * flag will not get set. On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
 * or something along those lines).
 *
 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
 */
# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)

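/*
 * Usage sketch (editor's note; sys_example() is hypothetical, not in
 * the original source): a handler whose successful result is
 * legitimately negative clears the error indication right before
 * returning, so the value reaches userland as-is instead of being
 * treated as -errno:
 *
 *	long sys_example(void)
 *	{
 *		force_successful_syscall_return();
 *		return -17;	// delivered verbatim on success
 *	}
 */
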
struct task_struct;			/* forward decl */
struct unw_frame_info;			/* forward decl */

extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					    unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
		       unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern void ia64_sync_krbs(void);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				unsigned long, unsigned long);

/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);

extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);

extern void ia64_ptrace_stop(void);
#define arch_ptrace_stop(code, info) \
	ia64_ptrace_stop()
#define arch_ptrace_stop_needed(code, info) \
	(!test_thread_flag(TIF_RESTORE_RSE))

extern void ptrace_attach_sync_user_rbs (struct task_struct *);
#define arch_ptrace_attach(child) \
	ptrace_attach_sync_user_rbs(child)

#define arch_has_single_step() (1)
#define arch_has_block_step() (1)

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PTRACE_H */
Some files were not shown because too many files have changed in this diff.