Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,23 @@
# User exported sparc header files
generic-y += clkdev.h
generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += mutex.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h

View file

@ -0,0 +1,16 @@
#ifndef AGP_H
#define AGP_H 1
/* dummy for now */
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif
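For illustration: the two GATT macros above are thin wrappers around the page allocator, so the caller owns a physically contiguous block of 2^order pages and must release it with the same order. A hedged, hypothetical call-site sketch (the function name and error handling are illustrative, not part of this header):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/agp.h>

/* Hypothetical example: allocate, use and release a GATT of 2^order pages. */
static int sparc_agp_setup(unsigned int order)
{
    char *gatt = alloc_gatt_pages(order);   /* kernel virtual address */

    if (!gatt)
        return -ENOMEM;

    /* ... point the bridge at the table's physical address, populate it ... */

    free_gatt_pages(gatt, order);
    return 0;
}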

View file

@ -0,0 +1,36 @@
/*
* apb.h: Advanced PCI Bridge Configuration Registers and Bits
*
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*/
#ifndef _SPARC64_APB_H
#define _SPARC64_APB_H
#define APB_TICK_REGISTER 0xb0
#define APB_INT_ACK 0xb8
#define APB_PRIMARY_MASTER_RETRY_LIMIT 0xc0
#define APB_DMA_ASFR 0xc8
#define APB_DMA_AFAR 0xd0
#define APB_PIO_TARGET_RETRY_LIMIT 0xd8
#define APB_PIO_TARGET_LATENCY_TIMER 0xd9
#define APB_DMA_TARGET_RETRY_LIMIT 0xda
#define APB_DMA_TARGET_LATENCY_TIMER 0xdb
#define APB_SECONDARY_MASTER_RETRY_LIMIT 0xdc
#define APB_SECONDARY_CONTROL 0xdd
#define APB_IO_ADDRESS_MAP 0xde
#define APB_MEM_ADDRESS_MAP 0xdf
#define APB_PCI_CONTROL_LOW 0xe0
# define APB_PCI_CTL_LOW_ARB_PARK (1 << 21)
# define APB_PCI_CTL_LOW_ERRINT_EN (1 << 8)
#define APB_PCI_CONTROL_HIGH 0xe4
# define APB_PCI_CTL_HIGH_SERR (1 << 2)
# define APB_PCI_CTL_HIGH_ARBITER_EN (1 << 0)
#define APB_PIO_ASFR 0xe8
#define APB_PIO_AFAR 0xf0
#define APB_DIAG_REGISTER 0xf8
#endif /* !(_SPARC64_APB_H) */

View file

@ -0,0 +1 @@
#include <generated/asm-offsets.h>

View file

@ -0,0 +1,40 @@
#ifndef _SPARC_ASM_H
#define _SPARC_ASM_H
/* Macros to assist the sharing of assembler code between 32-bit and
* 64-bit sparc.
*/
#ifdef CONFIG_SPARC64
#define BRANCH32(TYPE, PREDICT, DEST) \
TYPE,PREDICT %icc, DEST
#define BRANCH32_ANNUL(TYPE, PREDICT, DEST) \
TYPE,a,PREDICT %icc, DEST
#define BRANCH_REG_ZERO(PREDICT, REG, DEST) \
brz,PREDICT REG, DEST
#define BRANCH_REG_ZERO_ANNUL(PREDICT, REG, DEST) \
brz,a,PREDICT REG, DEST
#define BRANCH_REG_NOT_ZERO(PREDICT, REG, DEST) \
brnz,PREDICT REG, DEST
#define BRANCH_REG_NOT_ZERO_ANNUL(PREDICT, REG, DEST) \
brnz,a,PREDICT REG, DEST
#else
#define BRANCH32(TYPE, PREDICT, DEST) \
TYPE DEST
#define BRANCH32_ANNUL(TYPE, PREDICT, DEST) \
TYPE,a DEST
#define BRANCH_REG_ZERO(PREDICT, REG, DEST) \
cmp REG, 0; \
be DEST
#define BRANCH_REG_ZERO_ANNUL(PREDICT, REG, DEST) \
cmp REG, 0; \
be,a DEST
#define BRANCH_REG_NOT_ZERO(PREDICT, REG, DEST) \
cmp REG, 0; \
bne DEST
#define BRANCH_REG_NOT_ZERO_ANNUL(PREDICT, REG, DEST) \
cmp REG, 0; \
bne,a DEST
#endif
#endif /* _SPARC_ASM_H */

View file

@ -0,0 +1,45 @@
/* asmmacro.h: Assembler macros.
*
* Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
*/
#ifndef _SPARC_ASMMACRO_H
#define _SPARC_ASMMACRO_H
/* All trap entry points _must_ begin with this macro or else you
* lose. It makes sure the kernel has a proper window so that
* c-code can be called.
*/
#define SAVE_ALL_HEAD \
sethi %hi(trap_setup), %l4; \
jmpl %l4 + %lo(trap_setup), %l6;
#define SAVE_ALL \
SAVE_ALL_HEAD \
nop;
/* All traps low-level code here must end with this macro. */
#define RESTORE_ALL b ret_trap_entry; clr %l6;
/* Support for run-time patching of single instructions.
* This is used to handle the differences in the ASI for
* MMUREGS for LEON and SUN.
*
* Sample:
* LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
* SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
* PI == Patch Instruction
*
* For LEON we will use the first variant,
* and for all others we will use the SUN variant.
* The order is important.
*/
#define LEON_PI(...) \
662: __VA_ARGS__
#define SUN_PI_(...) \
.section .leon_1insn_patch, "ax"; \
.word 662b; \
__VA_ARGS__; \
.previous
#endif /* !(_SPARC_ASMMACRO_H) */

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/atomic_64.h>
#else
#include <asm/atomic_32.h>
#endif
#endif

View file

@ -0,0 +1,55 @@
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
#define ATOMIC_INIT(i) { (i) }
int atomic_add_return(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#endif /* !(__ARCH_SPARC_ATOMIC__) */
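The inc/dec-and-test macros above exist mostly for reference-count idioms: whichever caller drops the count to zero is the one that tears the object down. A small user-space model of that contract, with a GCC __atomic builtin standing in for atomic_dec_return() (the struct and function names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refcount;          /* stands in for the atomic_t counter */
    char payload[32];
};

/* Models atomic_dec_and_test(): decrement, report whether we hit zero. */
static int obj_put(struct obj *o)
{
    if (__atomic_sub_fetch(&o->refcount, 1, __ATOMIC_ACQ_REL) == 0) {
        free(o);           /* last reference: tear the object down */
        return 1;
    }
    return 0;
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));

    o->refcount = 2;                               /* two holders */
    printf("first put frees? %d\n", obj_put(o));   /* 0 */
    printf("second put frees? %d\n", obj_put(o));  /* 1 */
    return 0;
}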

View file

@ -0,0 +1,113 @@
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
* Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
#define __ARCH_SPARC64_ATOMIC__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
#define ATOMIC_OP(op) \
void atomic_##op(int, atomic_t *); \
void atomic64_##op(long, atomic64_t *);
#define ATOMIC_OP_RETURN(op) \
int atomic_##op##_return(int, atomic_t *); \
long atomic64_##op##_return(long, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_dec_return(v) atomic_sub_return(1, v)
#define atomic64_dec_return(v) atomic64_sub_return(1, v)
#define atomic_inc_return(v) atomic_add_return(1, v)
#define atomic64_inc_return(v) atomic64_add_return(1, v)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic64_inc(v) atomic64_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec(v) atomic64_sub(1, v)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c;
}
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
long atomic64_dec_if_positive(atomic64_t *v);
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
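__atomic_add_unless() above is the standard compare-and-swap retry loop: read the counter, give up if it equals the forbidden value, otherwise try to install the incremented value and retry on contention. A standalone model of the same loop using GCC __atomic builtins (names and memory orders are illustrative, not the kernel's):

#include <stdio.h>

/* add_unless(v, a, u): add a to *v unless *v == u; return the old value. */
static int add_unless(int *v, int a, int u)
{
    int c = __atomic_load_n(v, __ATOMIC_RELAXED);

    for (;;) {
        if (c == u)
            break;
        /* On failure the builtin reloads the current value into c. */
        if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
            break;
    }
    return c;
}

int main(void)
{
    int counter = 0;

    printf("%d\n", add_unless(&counter, 1, 0));  /* 0: blocked, unchanged */
    counter = 3;
    printf("%d\n", add_unless(&counter, 1, 0));  /* 3: old value, now 4 */
    printf("%d\n", counter);                     /* 4 */
    return 0;
}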

View file

@ -0,0 +1,15 @@
#ifndef ___ASM_SPARC_AUXIO_H
#define ___ASM_SPARC_AUXIO_H
#ifndef __ASSEMBLY__
extern void __iomem *auxio_register;
#endif /* ifndef __ASSEMBLY__ */
#if defined(__sparc__) && defined(__arch64__)
#include <asm/auxio_64.h>
#else
#include <asm/auxio_32.h>
#endif
#endif

View file

@ -0,0 +1,88 @@
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's).
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
void set_auxio(unsigned char bits_on, unsigned char bits_off);
unsigned char get_auxio(void); /* .../asm/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern volatile u8 __iomem *auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */
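The set/clear pairing in auxio_set_led() and auxio_set_lte() means drivers never touch AUXIO_ORMEIN themselves; set_auxio() takes care of the must-set bits. A hypothetical call-site sketch (the heartbeat function is illustrative, not from this commit):

#include <asm/auxio.h>

/* Hypothetical example: toggle the front-panel LED as a crude heartbeat. */
static void heartbeat_tick(void)
{
    static int led_on;

    led_on = !led_on;
    auxio_set_led(led_on ? AUXIO_LED_ON : AUXIO_LED_OFF);
}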

View file

@ -0,0 +1,98 @@
/*
* auxio.h: Definitions and code for the Auxiliary I/O registers.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#ifndef _SPARC64_AUXIO_H
#define _SPARC64_AUXIO_H
/* AUXIO implementations:
* sbus-based NCR89C105 "Slavio"
* LED/Floppy (AUX1) register
* Power (AUX2) register
*
* ebus-based auxio on PCIO
* LED Auxio Register
* Power Auxio Register
*
* Register definitions from NCR _NCR89C105 Chip Specification_
*
* SLAVIO AUX1 @ 0x1900000
* -------------------------------------------------
* | (R) | (R) | D | (R) | E | M | T | L |
* -------------------------------------------------
* (R) - bits 7:6,4 are reserved and should be masked in s/w
* D - Floppy Density Sense (1=high density) R/O
* E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
* M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
* T - Terminal Count: sends TC pulse to 82077 floppy controller
* L - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count */
#define AUXIO_AUX1_LED 0x01 /* System LED */
/* SLAVIO AUX2 @ 0x1910000
* -------------------------------------------------
* | (R) | (R) | D | (R) | (R) | (R) | C | F |
* -------------------------------------------------
* (R) - bits 7:6,4:2 are reserved and should be masked in s/w
* D - Power Failure Detect (1=power fail)
* C - Clear Power Failure Detect Int (1=clear)
* F - Power Off (1=power off)
*/
#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
*
* PCIO LED Auxio @ 0x726000
* -------------------------------------------------
* | 31:1 Unused | LED |
* -------------------------------------------------
* Bits 31:1 unused
* LED - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_PCIO_LED 0x01 /* System LED */
/* PCIO Power Auxio @ 0x724000
* -------------------------------------------------
* | 31:2 Unused | CPO | SPO |
* -------------------------------------------------
* Bits 31:2 unused
* CPO - Courtesy Power Off (1=off)
* SPO - System Power Off (1=off)
*/
#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
#ifndef __ASSEMBLY__
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
void auxio_set_lte(int on);
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
void auxio_set_led(int on);
#endif /* ifndef __ASSEMBLY__ */
#endif /* !(_SPARC64_AUXIO_H) */

View file

@ -0,0 +1,85 @@
#ifndef _SPARC64_BACKOFF_H
#define _SPARC64_BACKOFF_H
/* The macros in this file implement an exponential backoff facility
* for atomic operations.
*
* When multiple threads compete on an atomic operation, it is
* possible for one thread to be continually denied a successful
* completion of the compare-and-swap instruction. Heavily
* threaded cpu implementations like Niagara can compound this
* problem even further.
*
* When an atomic operation fails and needs to be retried, we spin a
* certain number of times. At each subsequent failure of the same
* operation we double the spin count, realizing an exponential
* backoff.
*
* When we spin, we try to use an operation that will cause the
* current cpu strand to block, and therefore make the core fully
* available to any other runnable strands. There are two
* options, based upon cpu capabilities.
*
* On all cpus prior to SPARC-T4 we do three dummy reads of the
* condition code register. Each read blocks the strand for something
* between 40 and 50 cpu cycles.
*
* For SPARC-T4 and later we have a special "pause" instruction
* available. This is implemented using writes to register %asr27.
* The cpu will block the number of cycles written into the register,
* unless a disrupting trap happens first. SPARC-T4 specifically
* implements pause with a granularity of 8 cycles. Each strand has
* an internal pause counter which decrements every 8 cycles. So the
* chip shifts the %asr27 value down by 3 bits, and writes the result
* into the pause counter. If a value smaller than 8 is written, the
* chip blocks for 1 cycle.
*
* To achieve the same amount of backoff as the three %ccr reads give
* on earlier chips, we shift the backoff value up by 7 bits. (Three
* %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the
* whole amount we want to block into the pause register, rather than
* loop writing 128 each time.
*/
#define BACKOFF_LIMIT (4 * 1024)
#ifdef CONFIG_SMP
#define BACKOFF_SETUP(reg) \
mov 1, reg
#define BACKOFF_LABEL(spin_label, continue_label) \
spin_label
#define BACKOFF_SPIN(reg, tmp, label) \
mov reg, tmp; \
88: rd %ccr, %g0; \
rd %ccr, %g0; \
rd %ccr, %g0; \
.section .pause_3insn_patch,"ax";\
.word 88b; \
sllx tmp, 7, tmp; \
wr tmp, 0, %asr27; \
clr tmp; \
.previous; \
brnz,pt tmp, 88b; \
sub tmp, 1, tmp; \
set BACKOFF_LIMIT, tmp; \
cmp reg, tmp; \
bg,pn %xcc, label; \
nop; \
ba,pt %xcc, label; \
sllx reg, 1, reg;
#else
#define BACKOFF_SETUP(reg)
#define BACKOFF_LABEL(spin_label, continue_label) \
continue_label
#define BACKOFF_SPIN(reg, tmp, label)
#endif
#endif /* _SPARC64_BACKOFF_H */
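In C terms the macros above say: start with a spin budget of one, and each time the compare-and-swap loses, spin the budget down (reading %ccr or pausing via %asr27) and then double it, capping at BACKOFF_LIMIT. A standalone sketch of that shape around a CAS loop, with a GCC builtin in place of casx and a no-op standing in for the pause (all names are illustrative):

#include <stdio.h>

#define BACKOFF_LIMIT (4 * 1024)

static void cpu_relax_once(void)
{
    /* Stands in for the %ccr reads / wr %asr27 pause in BACKOFF_SPIN. */
    __asm__ __volatile__("" ::: "memory");
}

/* Atomically add 'inc' to *p, backing off exponentially on CAS failure. */
static int add_with_backoff(int *p, int inc)
{
    int backoff = 1;                                  /* BACKOFF_SETUP */
    int old = __atomic_load_n(p, __ATOMIC_RELAXED);

    while (!__atomic_compare_exchange_n(p, &old, old + inc, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
        for (int i = 0; i < backoff; i++)             /* BACKOFF_SPIN body */
            cpu_relax_once();
        if (backoff < BACKOFF_LIMIT)
            backoff <<= 1;                            /* sllx reg, 1, reg */
    }
    return old + inc;
}

int main(void)
{
    int v = 41;

    printf("%d\n", add_with_backoff(&v, 1));          /* 42 */
    return 0;
}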

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_BARRIER_H
#define ___ASM_SPARC_BARRIER_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/barrier_64.h>
#else
#include <asm/barrier_32.h>
#endif
#endif

View file

@ -0,0 +1,7 @@
#ifndef __SPARC_BARRIER_H
#define __SPARC_BARRIER_H
#include <asm/processor.h> /* for nop() */
#include <asm-generic/barrier.h>
#endif /* !(__SPARC_BARRIER_H) */

View file

@ -0,0 +1,74 @@
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H
/* These are here in an effort to more fully work around Spitfire Errata
* #51. Essentially, if a memory barrier occurs soon after a mispredicted
* branch, the chip can stop executing instructions until a trap occurs.
* Therefore, if interrupts are disabled, the chip can hang forever.
*
* It used to be believed that the memory barrier had to be right in the
* delay slot, but a case has been traced recently wherein the memory barrier
* was one instruction after the branch delay slot and the chip still hung.
* The offending sequence was the following in sym_wakeup_done() of the
* sym53c8xx_2 driver:
*
* call sym_ccb_from_dsa, 0
* movge %icc, 0, %l0
* brz,pn %o0, .LL1303
* mov %o0, %l2
* membar #LoadLoad
*
* The branch has to be mispredicted for the bug to occur. Therefore, we put
* the memory barrier explicitly into a "branch always, predicted taken"
* delay slot to avoid the problem case.
*/
#define membar_safe(type) \
do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
" membar " type "\n" \
"1:\n" \
: : : "memory"); \
} while (0)
/* The kernel always executes in TSO memory model these days,
* and furthermore most sparc64 chips implement more stringent
* memory ordering than required by the specifications.
*/
#define mb() membar_safe("#StoreLoad")
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) \
do { __var = __value; membar_safe("#StoreLoad"); } while(0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() __asm__ __volatile__("":::"memory")
#define smp_rmb() __asm__ __volatile__("":::"memory")
#define smp_wmb() __asm__ __volatile__("":::"memory")
#endif
#define smp_read_barrier_depends() do { } while(0)
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
})
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
#endif /* !(__SPARC64_BARRIER_H) */
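smp_store_release()/smp_load_acquire() above form the usual publish/consume pair: the writer fills in its data and then release-stores a flag, and a reader that acquire-loads the flag set is guaranteed to observe that data. A user-space model of the pairing with C11 atomics (names are illustrative; the kernel macros instead rely on sparc64's TSO ordering plus barrier()):

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void publish(int value)
{
    payload = value;                                   /* plain store */
    atomic_store_explicit(&ready, 1,
                          memory_order_release);       /* smp_store_release() */
}

static int consume(int *out)
{
    if (!atomic_load_explicit(&ready,
                              memory_order_acquire))   /* smp_load_acquire() */
        return 0;
    *out = payload;        /* ordered after the acquire load */
    return 1;
}

int main(void)
{
    int v;

    publish(123);
    if (consume(&v))
        printf("%d\n", v);  /* 123 */
    return 0;
}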

View file

@ -0,0 +1,225 @@
/*
* bbc.h: Defines for BootBus Controller found on UltraSPARC-III
* systems.
*
* Copyright (C) 2000 David S. Miller (davem@redhat.com)
*/
#ifndef _SPARC64_BBC_H
#define _SPARC64_BBC_H
/* Register sizes are indicated by "B" (Byte, 1-byte),
* "H" (Half-word, 2 bytes), "W" (Word, 4 bytes) or
* "Q" (Quad, 8 bytes) inside brackets.
*/
#define BBC_AID 0x00 /* [B] Agent ID */
#define BBC_DEVP 0x01 /* [B] Device Present */
#define BBC_ARB 0x02 /* [B] Arbitration */
#define BBC_QUIESCE 0x03 /* [B] Quiesce */
#define BBC_WDACTION 0x04 /* [B] Watchdog Action */
#define BBC_SPG 0x06 /* [B] Soft POR Gen */
#define BBC_SXG 0x07 /* [B] Soft XIR Gen */
#define BBC_PSRC 0x08 /* [W] POR Source */
#define BBC_XSRC 0x0c /* [B] XIR Source */
#define BBC_CSC 0x0d /* [B] Clock Synthesizers Control*/
#define BBC_ES_CTRL 0x0e /* [H] Energy Star Control */
#define BBC_ES_ACT 0x10 /* [W] E* Assert Change Time */
#define BBC_ES_DACT 0x14 /* [B] E* De-Assert Change Time */
#define BBC_ES_DABT 0x15 /* [B] E* De-Assert Bypass Time */
#define BBC_ES_ABT 0x16 /* [H] E* Assert Bypass Time */
#define BBC_ES_PST 0x18 /* [W] E* PLL Settle Time */
#define BBC_ES_FSL 0x1c /* [W] E* Frequency Switch Latency*/
#define BBC_EBUST 0x20 /* [Q] EBUS Timing */
#define BBC_JTAG_CMD 0x28 /* [W] JTAG+ Command */
#define BBC_JTAG_CTRL 0x2c /* [B] JTAG+ Control */
#define BBC_I2C_SEL 0x2d /* [B] I2C Selection */
#define BBC_I2C_0_S1 0x2e /* [B] I2C ctrlr-0 reg S1 */
#define BBC_I2C_0_S0 0x2f /* [B] I2C ctrlr-0 regs S0,S0',S2,S3*/
#define BBC_I2C_1_S1 0x30 /* [B] I2C ctrlr-1 reg S1 */
#define BBC_I2C_1_S0 0x31 /* [B] I2C ctrlr-1 regs S0,S0',S2,S3*/
#define BBC_KBD_BEEP 0x32 /* [B] Keyboard Beep */
#define BBC_KBD_BCNT 0x34 /* [W] Keyboard Beep Counter */
#define BBC_REGS_SIZE 0x40
/* There is a 2K scratch ram area at offset 0x80000 but I doubt
* we will use it for anything.
*/
/* Agent ID register. This register shows the Safari Agent ID
* for the processors. The value returned depends upon which
* cpu is reading the register.
*/
#define BBC_AID_ID 0x07 /* Safari ID */
#define BBC_AID_RESV 0xf8 /* Reserved */
/* Device Present register. One can determine which cpus are actually
* present in the machine by interrogating this register.
*/
#define BBC_DEVP_CPU0 0x01 /* Processor 0 present */
#define BBC_DEVP_CPU1 0x02 /* Processor 1 present */
#define BBC_DEVP_CPU2 0x04 /* Processor 2 present */
#define BBC_DEVP_CPU3 0x08 /* Processor 3 present */
#define BBC_DEVP_RESV 0xf0 /* Reserved */
/* Arbitration register. This register is used to block access to
* the BBC from a particular cpu.
*/
#define BBC_ARB_CPU0 0x01 /* Enable cpu 0 BBC arbitration */
#define BBC_ARB_CPU1 0x02 /* Enable cpu 1 BBC arbitration */
#define BBC_ARB_CPU2 0x04 /* Enable cpu 2 BBC arbitration */
#define BBC_ARB_CPU3 0x08 /* Enable cpu 3 BBC arbitration */
#define BBC_ARB_RESV 0xf0 /* Reserved */
/* Quiesce register. Bus and BBC segments for cpus can be disabled
* with this register, ie. for hot plugging.
*/
#define BBC_QUIESCE_S02 0x01 /* Quiesce Safari segment for cpu 0 and 2 */
#define BBC_QUIESCE_S13 0x02 /* Quiesce Safari segment for cpu 1 and 3 */
#define BBC_QUIESCE_B02 0x04 /* Quiesce BBC segment for cpu 0 and 2 */
#define BBC_QUIESCE_B13 0x08 /* Quiesce BBC segment for cpu 1 and 3 */
#define BBC_QUIESCE_FD0 0x10 /* Disable Fatal_Error[0] reporting */
#define BBC_QUIESCE_FD1 0x20 /* Disable Fatal_Error[1] reporting */
#define BBC_QUIESCE_FD2 0x40 /* Disable Fatal_Error[2] reporting */
#define BBC_QUIESCE_FD3 0x80 /* Disable Fatal_Error[3] reporting */
/* Watchdog Action register. When the watchdog device timer expires
* a line is enabled to the BBC. The action BBC takes when this line
* is asserted can be controlled by this register.
*/
#define BBC_WDACTION_RST 0x01 /* When set, watchdog causes system reset.
* When clear, BBC ignores watchdog signal.
*/
#define BBC_WDACTION_RESV 0xfe /* Reserved */
/* Soft_POR_GEN register. The POR (Power On Reset) signal may be asserted
* for specific processors or all processors via this register.
*/
#define BBC_SPG_CPU0 0x01 /* Assert POR for processor 0 */
#define BBC_SPG_CPU1 0x02 /* Assert POR for processor 1 */
#define BBC_SPG_CPU2 0x04 /* Assert POR for processor 2 */
#define BBC_SPG_CPU3 0x08 /* Assert POR for processor 3 */
#define BBC_SPG_CPUALL 0x10 /* Reset all processors and reset
* the entire system.
*/
#define BBC_SPG_RESV 0xe0 /* Reserved */
/* Soft_XIR_GEN register. The XIR (eXternally Initiated Reset) signal
* may be asserted to specific processors via this register.
*/
#define BBC_SXG_CPU0 0x01 /* Assert XIR for processor 0 */
#define BBC_SXG_CPU1 0x02 /* Assert XIR for processor 1 */
#define BBC_SXG_CPU2 0x04 /* Assert XIR for processor 2 */
#define BBC_SXG_CPU3 0x08 /* Assert XIR for processor 3 */
#define BBC_SXG_RESV 0xf0 /* Reserved */
/* POR Source register. One may identify the cause of the most recent
* reset by reading this register.
*/
#define BBC_PSRC_SPG0 0x0001 /* CPU 0 reset via BBC_SPG register */
#define BBC_PSRC_SPG1 0x0002 /* CPU 1 reset via BBC_SPG register */
#define BBC_PSRC_SPG2 0x0004 /* CPU 2 reset via BBC_SPG register */
#define BBC_PSRC_SPG3 0x0008 /* CPU 3 reset via BBC_SPG register */
#define BBC_PSRC_SPGSYS 0x0010 /* System reset via BBC_SPG register */
#define BBC_PSRC_JTAG 0x0020 /* System reset via JTAG+ */
#define BBC_PSRC_BUTTON 0x0040 /* System reset via push-button dongle */
#define BBC_PSRC_PWRUP 0x0080 /* System reset via power-up */
#define BBC_PSRC_FE0 0x0100 /* CPU 0 reported Fatal_Error */
#define BBC_PSRC_FE1 0x0200 /* CPU 1 reported Fatal_Error */
#define BBC_PSRC_FE2 0x0400 /* CPU 2 reported Fatal_Error */
#define BBC_PSRC_FE3 0x0800 /* CPU 3 reported Fatal_Error */
#define BBC_PSRC_FE4 0x1000 /* Schizo reported Fatal_Error */
#define BBC_PSRC_FE5 0x2000 /* Safari device 5 reported Fatal_Error */
#define BBC_PSRC_FE6 0x4000 /* CPMS reported Fatal_Error */
#define BBC_PSRC_SYNTH 0x8000 /* System reset when on-board clock synthesizers
* were updated.
*/
#define BBC_PSRC_WDT 0x10000 /* System reset via Super I/O watchdog */
#define BBC_PSRC_RSC 0x20000 /* System reset via RSC remote monitoring
* device
*/
/* XIR Source register. The source of an XIR event sent to a processor may
* be determined via this register.
*/
#define BBC_XSRC_SXG0 0x01 /* CPU 0 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_SXG1 0x02 /* CPU 1 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_SXG2 0x04 /* CPU 2 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_SXG3 0x08 /* CPU 3 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_JTAG 0x10 /* All CPUs received XIR via JTAG+ */
#define BBC_XSRC_W_OR_B 0x20 /* All CPUs received XIR either because:
* a) Super I/O watchdog fired, or
* b) XIR push button was activated
*/
#define BBC_XSRC_RESV 0xc0 /* Reserved */
/* Clock Synthesizers Control register. This register provides the big-bang
* programming interface to the two clock synthesizers of the machine.
*/
#define BBC_CSC_SLOAD 0x01 /* Directly connected to S_LOAD pins */
#define BBC_CSC_SDATA 0x02 /* Directly connected to S_DATA pins */
#define BBC_CSC_SCLOCK 0x04 /* Directly connected to S_CLOCK pins */
#define BBC_CSC_RESV 0x78 /* Reserved */
#define BBC_CSC_RST 0x80 /* Generate system reset when S_LOAD==1 */
/* Energy Star Control register. This register is used to generate the
* clock frequency change trigger to the main system devices (Schizo and
* the processors). The transition occurs when bits in this register
* go from 0 to 1, only one bit must be set at once else no action
* occurs. Basically the sequence of events is:
* a) Choose new frequency: full, 1/2 or 1/32
* b) Program this desired frequency into the cpus and Schizo.
* c) Set the same value in this register.
* d) 16 system clocks later, clear this register.
*/
#define BBC_ES_CTRL_1_1 0x01 /* Full frequency */
#define BBC_ES_CTRL_1_2 0x02 /* 1/2 frequency */
#define BBC_ES_CTRL_1_32 0x20 /* 1/32 frequency */
#define BBC_ES_RESV 0xdc /* Reserved */
/* Energy Star Assert Change Time register. This determines the number
* of BBC clock cycles (which is half the system frequency) between
* the detection of FREEZE_ACK being asserted and the assertion of
* the CLK_CHANGE_L[2:0] signals.
*/
#define BBC_ES_ACT_VAL 0xff
/* Energy Star Assert Bypass Time register. This determines the number
* of BBC clock cycles (which is half the system frequency) between
* the assertion of the CLK_CHANGE_L[2:0] signals and the assertion of
* the ESTAR_PLL_BYPASS signal.
*/
#define BBC_ES_ABT_VAL 0xffff
/* Energy Star PLL Settle Time register. This determines the number of
* BBC clock cycles (which is half the system frequency) between the
* de-assertion of CLK_CHANGE_L[2:0] and the de-assertion of the FREEZE_L
* signal.
*/
#define BBC_ES_PST_VAL 0xffffffff
/* Energy Star Frequency Switch Latency register. This is the number of
* BBC clocks between the de-assertion of CLK_CHANGE_L[2:0] and the first
* edge of the Safari clock at the new frequency.
*/
#define BBC_ES_FSL_VAL 0xffffffff
/* Keyboard Beep control register. This is a simple enabler for the audio
* beep sound.
*/
#define BBC_KBD_BEEP_ENABLE 0x01 /* Enable beep */
#define BBC_KBD_BEEP_RESV 0xfe /* Reserved */
/* Keyboard Beep Counter register. There is a free-running counter inside
* the BBC which runs at half the system clock. The bit set in this register
* determines when the audio sound is generated. So for example if bit
* 10 is set, the audio beep will oscillate at 1/(2**12). The keyboard beep
* generator automatically selects a different bit to use if the system clock
* is changed via Energy Star.
*/
#define BBC_KBD_BCNT_BITS 0x0007fc00
#define BBC_KBC_BCNT_RESV 0xfff803ff
#endif /* _SPARC64_BBC_H */
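The Energy Star description above is a short procedure: program the new frequency into the cpus and Schizo, write the matching divisor bit into BBC_ES_CTRL, wait at least 16 system clocks, then clear the register. A hedged sketch of the last two steps, assuming a hypothetical ioremapped bbc_regs base and the generic writew()/udelay() helpers (none of this is taken from the commit):

#include <linux/io.h>
#include <linux/delay.h>
#include <asm/bbc.h>

/* Hypothetical: trigger a switch to 1/2 frequency via the E* control reg. */
static void bbc_estar_switch_half(void __iomem *bbc_regs)
{
    /* Steps (a)/(b): cpus and Schizo already programmed for the new speed. */
    writew(BBC_ES_CTRL_1_2, bbc_regs + BBC_ES_CTRL);   /* step (c) */
    udelay(1);                     /* comfortably > 16 system clocks */
    writew(0, bbc_regs + BBC_ES_CTRL);                 /* step (d): clear */
}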

View file

@ -0,0 +1,27 @@
/*
* bitext.h: Bit string operations on the sparc, specific to architecture.
*
* Copyright 2002 Pete Zaitcev <zaitcev@yahoo.com>
*/
#ifndef _SPARC_BITEXT_H
#define _SPARC_BITEXT_H
#include <linux/spinlock.h>
struct bit_map {
spinlock_t lock;
unsigned long *map;
int size;
int used;
int last_off;
int last_size;
int first_free;
int num_colors;
};
int bit_map_string_get(struct bit_map *t, int len, int align);
void bit_map_clear(struct bit_map *t, int offset, int len);
void bit_map_init(struct bit_map *t, unsigned long *map, int size);
#endif /* defined(_SPARC_BITEXT_H) */
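The three functions above implement a lock-protected bitmap allocator: bit_map_string_get() reserves a run of len bits at the requested alignment, bit_map_clear() releases it, and bit_map_init() wires a caller-supplied word array into the struct. A hypothetical caller sketch (the map size, names, and the assumption that a negative return means "no space left" are mine, not from this header):

#include <linux/bitops.h>
#include <asm/bitext.h>

#define MY_MAP_BITS 1024

static unsigned long my_map_words[MY_MAP_BITS / BITS_PER_LONG];
static struct bit_map my_map;

static void my_map_setup(void)
{
    bit_map_init(&my_map, my_map_words, MY_MAP_BITS);
}

/* Reserve 'len' bits aligned to 'align'; assumed to return -1 when full. */
static int my_map_alloc(int len, int align)
{
    return bit_map_string_get(&my_map, len, align);
}

static void my_map_free(int off, int len)
{
    bit_map_clear(&my_map, off, len);
}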

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_BITOPS_H
#define ___ASM_SPARC_BITOPS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/bitops_64.h>
#else
#include <asm/bitops_32.h>
#endif
#endif

View file

@ -0,0 +1,108 @@
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */
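The comment block above fixes two points of the contract: bit numbering is big-endian within each 32-bit word, and every atomic op returns the bit's previous state. That return value is what makes test_and_set_bit() usable as a one-shot flag; here is a user-space model of that usage with __atomic_fetch_or() standing in for ___set_bit() (names are illustrative):

#include <stdio.h>

static unsigned long flags;    /* bit 0 = "init already done" */

/* Models test_and_set_bit(0, &flags): nonzero if the bit was already set. */
static int init_once(void)
{
    if (__atomic_fetch_or(&flags, 1UL << 0, __ATOMIC_ACQ_REL) & (1UL << 0))
        return 0;              /* someone else got there first */

    /* ... one-time initialisation runs exactly once ... */
    return 1;
}

int main(void)
{
    printf("%d %d\n", init_once(), init_once());   /* 1 0 */
    return 0;
}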

View file

@ -0,0 +1,63 @@
/*
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC64_BITOPS_H
#define _SPARC64_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
void set_bit(unsigned long nr, volatile unsigned long *addr);
void clear_bit(unsigned long nr, volatile unsigned long *addr);
void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
int ffs(int x);
unsigned long __ffs(unsigned long);
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/sched.h>
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
unsigned long __arch_hweight64(__u64 w);
unsigned int __arch_hweight32(unsigned int w);
unsigned int __arch_hweight16(unsigned int w);
unsigned int __arch_hweight8(unsigned int w);
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC64_BITOPS_H) */

View file

@ -0,0 +1,6 @@
#ifndef _SPARC_BTEXT_H
#define _SPARC_BTEXT_H
int btext_find_display(void);
#endif /* _SPARC_BTEXT_H */

View file

@ -0,0 +1,25 @@
#ifndef _SPARC_BUG_H
#define _SPARC_BUG_H
#ifdef CONFIG_BUG
#include <linux/compiler.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line);
#define BUG() do { \
do_BUG(__FILE__, __LINE__); \
__builtin_trap(); \
} while (0)
#else
#define BUG() __builtin_trap()
#endif
#define HAVE_ARCH_BUG
#endif
#include <asm-generic/bug.h>
struct pt_regs;
void __noreturn die_if_kernel(char *str, struct pt_regs *regs);
#endif
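BUG() above either reports the file and line through do_BUG() and then executes __builtin_trap(), or (without CONFIG_DEBUG_BUGVERBOSE) traps directly. A hypothetical call site, just to show the intended "this can never happen" usage:

#include <linux/bug.h>

/* Hypothetical example: guard an invariant that must never be violated. */
static unsigned int scale(unsigned int value, unsigned int divisor)
{
    if (divisor == 0)
        BUG();      /* reports file:line when verbose, then traps */
    return value / divisor;
}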

View file

@ -0,0 +1,17 @@
/* include/asm/bugs.h: Sparc probes for various bugs.
*
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef CONFIG_SPARC32
#include <asm/cpudata.h>
#endif
extern unsigned long loops_per_jiffy;
static void __init check_bugs(void)
{
#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
cpu_data(0).udelay_val = loops_per_jiffy;
#endif
}

View file

@ -0,0 +1,25 @@
/* cache.h: Cache specific code for the Sparc. These include flushing
* and direct tag/data line access.
*
* Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC_CACHE_H
#define _SPARC_CACHE_H
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32
#ifdef CONFIG_SPARC32
#define SMP_CACHE_BYTES_SHIFT 5
#else
#define SMP_CACHE_BYTES_SHIFT 6
#endif
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* !(_SPARC_CACHE_H) */

View file

@ -0,0 +1,12 @@
#ifndef ___ASM_SPARC_CACHEFLUSH_H
#define ___ASM_SPARC_CACHEFLUSH_H
/* flush addr - to allow use of self-modifying code */
#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
#if defined(__sparc__) && defined(__arch64__)
#include <asm/cacheflush_64.h>
#else
#include <asm/cacheflush_32.h>
#endif
#endif

View file

@ -0,0 +1,58 @@
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <asm/cachetlb_32.h>
#define flush_cache_all() \
sparc32_cachetlb_ops->cache_all()
#define flush_cache_mm(mm) \
sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_dup_mm(mm) \
sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_range(vma,start,end) \
sparc32_cachetlb_ops->cache_range(vma, start, end)
#define flush_cache_page(vma,addr,pfn) \
sparc32_cachetlb_ops->cache_page(vma, addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define __flush_page_to_ram(addr) \
sparc32_cachetlb_ops->page_to_ram(addr)
#define flush_sig_insns(mm,insn_addr) \
sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
#define flush_page_for_dma(addr) \
sparc32_cachetlb_ops->page_for_dma(addr)
void sparc_flush_page_to_ram(struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
/* When a context switch happens we must flush all user windows so that
* the windows of the current process are flushed onto its stack. This
* way the windows are all clean for the next process and the stack
* frames are up to date.
*/
void flush_user_windows(void);
void kill_user_windows(void);
void flushw_all(void);
#endif /* _SPARC_CACHEFLUSH_H */

View file

@ -0,0 +1,84 @@
#ifndef _SPARC64_CACHEFLUSH_H
#define _SPARC64_CACHEFLUSH_H
#include <asm/page.h>
#ifndef __ASSEMBLY__
#include <linux/mm.h>
/* Cache flush operations. */
#define flushw_all() __asm__ __volatile__("flushw")
void __flushw_user(void);
#define flushw_user() __flushw_user()
#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page, pfn) \
flush_cache_mm((vma)->vm_mm)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
void flush_icache_range(unsigned long start, unsigned long end);
void __flush_icache_page(unsigned long);
void __flush_dcache_page(void *addr, int flush_icache);
void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
void smp_flush_dcache_page_impl(struct page *page, int cpu);
void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
void __flush_dcache_range(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
unsigned long len, int write);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, src, len, 0); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_CACHEFLUSH_H */

View file

@ -0,0 +1,29 @@
#ifndef _SPARC_CACHETLB_H
#define _SPARC_CACHETLB_H
struct mm_struct;
struct vm_area_struct;
struct sparc32_cachetlb_ops {
void (*cache_all)(void);
void (*cache_mm)(struct mm_struct *);
void (*cache_range)(struct vm_area_struct *, unsigned long,
unsigned long);
void (*cache_page)(struct vm_area_struct *, unsigned long);
void (*tlb_all)(void);
void (*tlb_mm)(struct mm_struct *);
void (*tlb_range)(struct vm_area_struct *, unsigned long,
unsigned long);
void (*tlb_page)(struct vm_area_struct *, unsigned long);
void (*page_to_ram)(unsigned long);
void (*sig_insns)(struct mm_struct *, unsigned long);
void (*page_for_dma)(unsigned long);
};
extern const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
#ifdef CONFIG_SMP
extern const struct sparc32_cachetlb_ops *local_ops;
#endif
#endif /* SPARC_CACHETLB_H */

View file

@ -0,0 +1,241 @@
#ifndef _SPARC64_CHAFSR_H
#define _SPARC64_CHAFSR_H
/* Cheetah Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
/* Comments indicate which processor variants on which the bit definition
* is valid. Codes are:
* ch --> cheetah
* ch+ --> cheetah plus
* jp --> jalapeno
*/
/* All bits of this register except M_SYNDROME and E_SYNDROME are
* read, write 1 to clear. M_SYNDROME and E_SYNDROME are read-only.
*/
/* Software bit set by linux trap handlers to indicate that the trap was
* signalled at %tl >= 1.
*/
#define CHAFSR_TL1 (1UL << 63UL) /* n/a */
/* Unmapped error from system bus for prefetch queue or
* store queue read operation
*/
#define CHPAFSR_DTO (1UL << 59UL) /* ch+ */
/* Bus error from system bus for prefetch queue or store queue
* read operation
*/
#define CHPAFSR_DBERR (1UL << 58UL) /* ch+ */
/* Hardware corrected E-cache Tag ECC error */
#define CHPAFSR_THCE (1UL << 57UL) /* ch+ */
/* System interface protocol error, hw timeout caused */
#define JPAFSR_JETO (1UL << 57UL) /* jp */
/* SW handled correctable E-cache Tag ECC error */
#define CHPAFSR_TSCE (1UL << 56UL) /* ch+ */
/* Parity error on system snoop results */
#define JPAFSR_SCE (1UL << 56UL) /* jp */
/* Uncorrectable E-cache Tag ECC error */
#define CHPAFSR_TUE (1UL << 55UL) /* ch+ */
/* System interface protocol error, illegal command detected */
#define JPAFSR_JEIC (1UL << 55UL) /* jp */
/* Uncorrectable system bus data ECC error due to prefetch
* or store fill request
*/
#define CHPAFSR_DUE (1UL << 54UL) /* ch+ */
/* System interface protocol error, illegal ADTYPE detected */
#define JPAFSR_JEIT (1UL << 54UL) /* jp */
/* Multiple errors of the same type have occurred. This bit is set when
* an uncorrectable error or a SW correctable error occurs and the status
* bit to report that error is already set. Multiple errors of
* different types are indicated by setting multiple status bits.
*
* This bit is not set if multiple HW corrected errors with the same
* status bit occur, only uncorrectable and SW correctable ones have
* this behavior.
*
* This bit is not set when multiple ECC errors happen within a single
* 64-byte system bus transaction. Only the first ECC error in a 16-byte
* subunit will be logged. All errors in subsequent 16-byte subunits
* from the same 64-byte transaction are ignored.
*/
#define CHAFSR_ME (1UL << 53UL) /* ch,ch+,jp */
/* Privileged state error has occurred. This is a capture of PSTATE.PRIV
* at the time the error is detected.
*/
#define CHAFSR_PRIV (1UL << 52UL) /* ch,ch+,jp */
/* The following bits 51 (CHAFSR_PERR) to 33 (CHAFSR_CE) are sticky error
* bits and record the most recently detected errors. Bits accumulate
* errors that have been detected since the last write to clear the bit.
*/
/* System interface protocol error. The processor asserts its ERROR
* pin when this event occurs and it also logs a specific cause code
* into a JTAG scannable flop.
*/
#define CHAFSR_PERR (1UL << 51UL) /* ch,ch+,jp */
/* Internal processor error. The processor asserts its ERROR
* pin when this event occurs and it also logs a specific cause code
* into a JTAG scannable flop.
*/
#define CHAFSR_IERR (1UL << 50UL) /* ch,ch+,jp */
/* System request parity error on incoming address */
#define CHAFSR_ISAP (1UL << 49UL) /* ch,ch+,jp */
/* HW Corrected system bus MTAG ECC error */
#define CHAFSR_EMC (1UL << 48UL) /* ch,ch+ */
/* Parity error on L2 cache tag SRAM */
#define JPAFSR_ETP (1UL << 48UL) /* jp */
/* Uncorrectable system bus MTAG ECC error */
#define CHAFSR_EMU (1UL << 47UL) /* ch,ch+ */
/* Out of range memory error has occurred */
#define JPAFSR_OM (1UL << 47UL) /* jp */
/* HW Corrected system bus data ECC error for read of interrupt vector */
#define CHAFSR_IVC (1UL << 46UL) /* ch,ch+ */
/* Error due to unsupported store */
#define JPAFSR_UMS (1UL << 46UL) /* jp */
/* Uncorrectable system bus data ECC error for read of interrupt vector */
#define CHAFSR_IVU (1UL << 45UL) /* ch,ch+,jp */
/* Unmapped error from system bus */
#define CHAFSR_TO (1UL << 44UL) /* ch,ch+,jp */
/* Bus error response from system bus */
#define CHAFSR_BERR (1UL << 43UL) /* ch,ch+,jp */
/* SW Correctable E-cache ECC error for instruction fetch or data access
* other than block load.
*/
#define CHAFSR_UCC (1UL << 42UL) /* ch,ch+,jp */
/* Uncorrectable E-cache ECC error for instruction fetch or data access
* other than block load.
*/
#define CHAFSR_UCU (1UL << 41UL) /* ch,ch+,jp */
/* Copyout HW Corrected ECC error */
#define CHAFSR_CPC (1UL << 40UL) /* ch,ch+,jp */
/* Copyout Uncorrectable ECC error */
#define CHAFSR_CPU (1UL << 39UL) /* ch,ch+,jp */
/* HW Corrected ECC error from E-cache for writeback */
#define CHAFSR_WDC (1UL << 38UL) /* ch,ch+,jp */
/* Uncorrectable ECC error from E-cache for writeback */
#define CHAFSR_WDU (1UL << 37UL) /* ch,ch+,jp */
/* HW Corrected ECC error from E-cache for store merge or block load */
#define CHAFSR_EDC (1UL << 36UL) /* ch,ch+,jp */
/* Uncorrectable ECC error from E-cache for store merge or block load */
#define CHAFSR_EDU (1UL << 35UL) /* ch,ch+,jp */
/* Uncorrectable system bus data ECC error for read of memory or I/O */
#define CHAFSR_UE (1UL << 34UL) /* ch,ch+,jp */
/* HW Corrected system bus data ECC error for read of memory or I/O */
#define CHAFSR_CE (1UL << 33UL) /* ch,ch+,jp */
/* Uncorrectable ECC error from remote cache/memory */
#define JPAFSR_RUE (1UL << 32UL) /* jp */
/* Correctable ECC error from remote cache/memory */
#define JPAFSR_RCE (1UL << 31UL) /* jp */
/* JBUS parity error on returned read data */
#define JPAFSR_BP (1UL << 30UL) /* jp */
/* JBUS parity error on data for writeback or block store */
#define JPAFSR_WBP (1UL << 29UL) /* jp */
/* Foreign read to DRAM incurring correctable ECC error */
#define JPAFSR_FRC (1UL << 28UL) /* jp */
/* Foreign read to DRAM incurring uncorrectable ECC error */
#define JPAFSR_FRU (1UL << 27UL) /* jp */
#define CHAFSR_ERRORS (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP | CHAFSR_EMC | \
CHAFSR_EMU | CHAFSR_IVC | CHAFSR_IVU | CHAFSR_TO | \
CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | CHAFSR_CPC | \
CHAFSR_CPU | CHAFSR_WDC | CHAFSR_WDU | CHAFSR_EDC | \
CHAFSR_EDU | CHAFSR_UE | CHAFSR_CE)
#define CHPAFSR_ERRORS (CHPAFSR_DTO | CHPAFSR_DBERR | CHPAFSR_THCE | \
CHPAFSR_TSCE | CHPAFSR_TUE | CHPAFSR_DUE | \
CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP | CHAFSR_EMC | \
CHAFSR_EMU | CHAFSR_IVC | CHAFSR_IVU | CHAFSR_TO | \
CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | CHAFSR_CPC | \
CHAFSR_CPU | CHAFSR_WDC | CHAFSR_WDU | CHAFSR_EDC | \
CHAFSR_EDU | CHAFSR_UE | CHAFSR_CE)
#define JPAFSR_ERRORS (JPAFSR_JETO | JPAFSR_SCE | JPAFSR_JEIC | \
JPAFSR_JEIT | CHAFSR_PERR | CHAFSR_IERR | \
CHAFSR_ISAP | JPAFSR_ETP | JPAFSR_OM | \
JPAFSR_UMS | CHAFSR_IVU | CHAFSR_TO | \
CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | \
CHAFSR_CPC | CHAFSR_CPU | CHAFSR_WDC | \
CHAFSR_WDU | CHAFSR_EDC | CHAFSR_EDU | \
CHAFSR_UE | CHAFSR_CE | JPAFSR_RUE | \
JPAFSR_RCE | JPAFSR_BP | JPAFSR_WBP | \
JPAFSR_FRC | JPAFSR_FRU)
/* Active JBUS request signal when error occurred */
#define JPAFSR_JBREQ (0x7UL << 24UL) /* jp */
#define JPAFSR_JBREQ_SHIFT 24UL
/* L2 cache way information */
#define JPAFSR_ETW (0x3UL << 22UL) /* jp */
#define JPAFSR_ETW_SHIFT 22UL
/* System bus MTAG ECC syndrome. This field captures the status of the
* first occurrence of the highest-priority error according to the M_SYND
* overwrite policy. After the AFSR sticky bit, corresponding to the error
* for which the M_SYND is reported, is cleared, the contents of the M_SYND
* field will be unchanged but will be unfrozen for further error capture.
*/
#define CHAFSR_M_SYNDROME (0xfUL << 16UL) /* ch,ch+,jp */
#define CHAFSR_M_SYNDROME_SHIFT 16UL
/* Agent ID of the foreign device causing the UE/CE errors */
#define JPAFSR_AID (0x1fUL << 9UL) /* jp */
#define JPAFSR_AID_SHIFT 9UL
/* System bus or E-cache data ECC syndrome. This field captures the status
* of the first occurrence of the highest-priority error according to the
* E_SYND overwrite policy. After the AFSR sticky bit, corresponding to the
* error for which the E_SYND is reported, is cleared, the contents of the E_SYND
* field will be unchanged but will be unfrozen for further error capture.
*/
#define CHAFSR_E_SYNDROME (0x1ffUL << 0UL) /* ch,ch+,jp */
#define CHAFSR_E_SYNDROME_SHIFT 0UL
/* The AFSR must be explicitly cleared by software; it is not cleared automatically
* by a read. Writes to bits <51:33> with bits set will clear the corresponding
* bits in the AFSR. Bits associated with disrupting traps must be cleared before
* interrupts are re-enabled to prevent multiple traps for the same error. I.e.
* PSTATE.IE and AFSR bits control delivery of disrupting traps.
*
* Since there is only one AFAR, when multiple events have been logged by the
* bits in the AFSR, at most one of these events will have its status captured
* in the AFAR. The highest priority of those event bits will get AFAR logging.
* The AFAR will be unlocked and available to capture the address of another event
* as soon as the one bit in AFSR that corresponds to the event logged in AFAR is
* cleared. For example, if AFSR.CE is detected, then AFSR.UE (which overwrites
* the AFAR), and AFSR.UE is cleared but not AFSR.CE, then the AFAR will be unlocked
* and ready for another event, even though AFSR.CE is still set. The same rules
* also apply to the M_SYNDROME and E_SYNDROME fields of the AFSR.
*/
#endif /* _SPARC64_CHAFSR_H */

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/checksum_64.h>
#else
#include <asm/checksum_32.h>
#endif
#endif

View file

@ -0,0 +1,253 @@
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
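/* Editor's illustration, not part of this header: a portable C sketch of the
 * fold done by the inline assembly above.  The high and low 16-bit halves are
 * added with end-around carry and the result is complemented.  The function
 * name is hypothetical.
 */
static inline unsigned short csum_fold_sketch(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold high half into low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb a possible carry */
        return (unsigned short)~sum;            /* one's complement of the result */
}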
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
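/* Editor's illustration, not part of this header: a standalone user-space
 * sketch of what the two helpers above accumulate for the IPv4 TCP/UDP
 * pseudo-header.  The same terms the assembly adds -- saddr, daddr and
 * proto + len -- are summed with end-around carry, then folded and
 * complemented.  All names below are hypothetical.
 */
#include <stdint.h>

static uint32_t add_carry32(uint32_t a, uint32_t b)
{
        uint64_t s = (uint64_t)a + b;
        return (uint32_t)((s & 0xffffffff) + (s >> 32));    /* end-around carry */
}

static uint16_t pseudo_hdr_csum(uint32_t saddr, uint32_t daddr,
                                uint16_t len, uint8_t proto, uint32_t sum)
{
        sum = add_carry32(sum, saddr);
        sum = add_carry32(sum, daddr);
        sum = add_carry32(sum, (uint32_t)proto + len);
        sum = (sum & 0xffff) + (sum >> 16);                 /* fold to 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}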
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
__asm__ __volatile__(
"addcc %0, %1, %0\n"
"addx %0, %%g0, %0"
: "=r" (csum)
: "r" (addend), "0" (csum));
return csum;
}
#endif /* !(__SPARC_CHECKSUM_H) */

View file

@ -0,0 +1,179 @@
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the V9.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void * buff, int len, __wsum sum);
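/* Editor's illustration, not part of this header: a portable sketch of the
 * accumulation described in the comment above -- 16-bit one's complement
 * accumulation over a buffer, producing a 32-bit value suitable for further
 * accumulation or for csum_fold().  Names are hypothetical; the real
 * implementation is sparc64 assembly.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t csum_partial_sketch(const void *buff, size_t len, uint32_t sum)
{
        const uint8_t *p = buff;
        uint64_t acc = sum;

        while (len > 1) {
                acc += (uint32_t)((p[0] << 8) | p[1]);   /* big-endian 16-bit words */
                p += 2;
                len -= 2;
        }
        if (len)
                acc += (uint32_t)p[0] << 8;              /* trailing odd byte */
        while (acc >> 32)
                acc = (acc & 0xffffffff) + (acc >> 32);  /* fold carries back in */
        return (uint32_t)acc;
}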
/* the same as csum_partial, but copies from user space while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
long __csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum);
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_from_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
long __csum_partial_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum);
static inline __wsum
csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_to_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__(
" addcc %0, %1, %1\n"
" srl %1, 16, %1\n"
" addc %1, %%g0, %1\n"
" xnor %%g0, %1, %0\n"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned int len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__(
" addcc %1, %0, %0\n"
" addccc %2, %0, %0\n"
" addccc %3, %0, %0\n"
" addc %0, %%g0, %0\n"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
" addcc %3, %4, %%g7\n"
" addccc %5, %%g7, %%g7\n"
" lduw [%2 + 0x0c], %%g2\n"
" lduw [%2 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%2 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%2 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x0c], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" addccc %%g3, %%g7, %0\n"
" addc 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr), "r"(htonl(len)),
"r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g7", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
__asm__ __volatile__(
"addcc %0, %1, %0\n"
"addx %0, %%g0, %0"
: "=r" (csum)
: "r" (addend), "0" (csum));
return csum;
}
#endif /* !(__SPARC64_CHECKSUM_H) */

View file

@ -0,0 +1,183 @@
#ifndef _SPARC64_CHMCTRL_H
#define _SPARC64_CHMCTRL_H
/* Cheetah memory controller programmable registers. */
#define CHMCTRL_TCTRL1 0x00 /* Memory Timing Control I */
#define CHMCTRL_TCTRL2 0x08 /* Memory Timing Control II */
#define CHMCTRL_TCTRL3 0x38 /* Memory Timing Control III */
#define CHMCTRL_TCTRL4 0x40 /* Memory Timing Control IV */
#define CHMCTRL_DECODE1 0x10 /* Memory Address Decode I */
#define CHMCTRL_DECODE2 0x18 /* Memory Address Decode II */
#define CHMCTRL_DECODE3 0x20 /* Memory Address Decode III */
#define CHMCTRL_DECODE4 0x28 /* Memory Address Decode IV */
#define CHMCTRL_MACTRL 0x30 /* Memory Address Control */
/* Memory Timing Control I */
#define TCTRL1_SDRAMCTL_DLY 0xf000000000000000UL
#define TCTRL1_SDRAMCTL_DLY_SHIFT 60
#define TCTRL1_SDRAMCLK_DLY 0x0e00000000000000UL
#define TCTRL1_SDRAMCLK_DLY_SHIFT 57
#define TCTRL1_R 0x0100000000000000UL
#define TCTRL1_R_SHIFT 56
#define TCTRL1_AUTORFR_CYCLE 0x00fe000000000000UL
#define TCTRL1_AUTORFR_CYCLE_SHIFT 49
#define TCTRL1_RD_WAIT 0x0001f00000000000UL
#define TCTRL1_RD_WAIT_SHIFT 44
#define TCTRL1_PC_CYCLE 0x00000fc000000000UL
#define TCTRL1_PC_CYCLE_SHIFT 38
#define TCTRL1_WR_MORE_RAS_PW 0x0000003f00000000UL
#define TCTRL1_WR_MORE_RAS_PW_SHIFT 32
#define TCTRL1_RD_MORE_RAW_PW 0x00000000fc000000UL
#define TCTRL1_RD_MORE_RAS_PW_SHIFT 26
#define TCTRL1_ACT_WR_DLY 0x0000000003f00000UL
#define TCTRL1_ACT_WR_DLY_SHIFT 20
#define TCTRL1_ACT_RD_DLY 0x00000000000fc000UL
#define TCTRL1_ACT_RD_DLY_SHIFT 14
#define TCTRL1_BANK_PRESENT 0x0000000000003000UL
#define TCTRL1_BANK_PRESENT_SHIFT 12
#define TCTRL1_RFR_INT 0x0000000000000ff8UL
#define TCTRL1_RFR_INT_SHIFT 3
#define TCTRL1_SET_MODE_REG 0x0000000000000004UL
#define TCTRL1_SET_MODE_REG_SHIFT 2
#define TCTRL1_RFR_ENABLE 0x0000000000000002UL
#define TCTRL1_RFR_ENABLE_SHIFT 1
#define TCTRL1_PRECHG_ALL 0x0000000000000001UL
#define TCTRL1_PRECHG_ALL_SHIFT 0
/* Memory Timing Control II */
#define TCTRL2_WR_MSEL_DLY 0xfc00000000000000UL
#define TCTRL2_WR_MSEL_DLY_SHIFT 58
#define TCTRL2_RD_MSEL_DLY 0x03f0000000000000UL
#define TCTRL2_RD_MSEL_DLY_SHIFT 52
#define TCTRL2_WRDATA_THLD 0x000c000000000000UL
#define TCTRL2_WRDATA_THLD_SHIFT 50
#define TCTRL2_RDWR_RD_TI_DLY 0x0003f00000000000UL
#define TCTRL2_RDWR_RD_TI_DLY_SHIFT 44
#define TCTRL2_AUTOPRECHG_ENBL 0x0000080000000000UL
#define TCTRL2_AUTOPRECHG_ENBL_SHIFT 43
#define TCTRL2_RDWR_PI_MORE_DLY 0x000007c000000000UL
#define TCTRL2_RDWR_PI_MORE_DLY_SHIFT 38
#define TCTRL2_RDWR_1_DLY 0x0000003f00000000UL
#define TCTRL2_RDWR_1_DLY_SHIFT 32
#define TCTRL2_WRWR_PI_MORE_DLY 0x00000000f8000000UL
#define TCTRL2_WRWR_PI_MORE_DLY_SHIFT 27
#define TCTRL2_WRWR_1_DLY 0x0000000007e00000UL
#define TCTRL2_WRWR_1_DLY_SHIFT 21
#define TCTRL2_RDWR_RD_PI_MORE_DLY 0x00000000001f0000UL
#define TCTRL2_RDWR_RD_PI_MORE_DLY_SHIFT 16
#define TCTRL2_R 0x0000000000008000UL
#define TCTRL2_R_SHIFT 15
#define TCTRL2_SDRAM_MODE_REG_DATA 0x0000000000007fffUL
#define TCTRL2_SDRAM_MODE_REG_DATA_SHIFT 0
/* Memory Timing Control III */
#define TCTRL3_SDRAM_CTL_DLY 0xf000000000000000UL
#define TCTRL3_SDRAM_CTL_DLY_SHIFT 60
#define TCTRL3_SDRAM_CLK_DLY 0x0e00000000000000UL
#define TCTRL3_SDRAM_CLK_DLY_SHIFT 57
#define TCTRL3_R 0x0100000000000000UL
#define TCTRL3_R_SHIFT 56
#define TCTRL3_AUTO_RFR_CYCLE 0x00fe000000000000UL
#define TCTRL3_AUTO_RFR_CYCLE_SHIFT 49
#define TCTRL3_RD_WAIT 0x0001f00000000000UL
#define TCTRL3_RD_WAIT_SHIFT 44
#define TCTRL3_PC_CYCLE 0x00000fc000000000UL
#define TCTRL3_PC_CYCLE_SHIFT 38
#define TCTRL3_WR_MORE_RAW_PW 0x0000003f00000000UL
#define TCTRL3_WR_MORE_RAW_PW_SHIFT 32
#define TCTRL3_RD_MORE_RAW_PW 0x00000000fc000000UL
#define TCTRL3_RD_MORE_RAW_PW_SHIFT 26
#define TCTRL3_ACT_WR_DLY 0x0000000003f00000UL
#define TCTRL3_ACT_WR_DLY_SHIFT 20
#define TCTRL3_ACT_RD_DLY 0x00000000000fc000UL
#define TCTRL3_ACT_RD_DLY_SHIFT 14
#define TCTRL3_BANK_PRESENT 0x0000000000003000UL
#define TCTRL3_BANK_PRESENT_SHIFT 12
#define TCTRL3_RFR_INT 0x0000000000000ff8UL
#define TCTRL3_RFR_INT_SHIFT 3
#define TCTRL3_SET_MODE_REG 0x0000000000000004UL
#define TCTRL3_SET_MODE_REG_SHIFT 2
#define TCTRL3_RFR_ENABLE 0x0000000000000002UL
#define TCTRL3_RFR_ENABLE_SHIFT 1
#define TCTRL3_PRECHG_ALL 0x0000000000000001UL
#define TCTRL3_PRECHG_ALL_SHIFT 0
/* Memory Timing Control IV */
#define TCTRL4_WR_MSEL_DLY 0xfc00000000000000UL
#define TCTRL4_WR_MSEL_DLY_SHIFT 58
#define TCTRL4_RD_MSEL_DLY 0x03f0000000000000UL
#define TCTRL4_RD_MSEL_DLY_SHIFT 52
#define TCTRL4_WRDATA_THLD 0x000c000000000000UL
#define TCTRL4_WRDATA_THLD_SHIFT 50
#define TCTRL4_RDWR_RD_RI_DLY 0x0003f00000000000UL
#define TCTRL4_RDWR_RD_RI_DLY_SHIFT 44
#define TCTRL4_AUTO_PRECHG_ENBL 0x0000080000000000UL
#define TCTRL4_AUTO_PRECHG_ENBL_SHIFT 43
#define TCTRL4_RD_WR_PI_MORE_DLY 0x000007c000000000UL
#define TCTRL4_RD_WR_PI_MORE_DLY_SHIFT 38
#define TCTRL4_RD_WR_TI_DLY 0x0000003f00000000UL
#define TCTRL4_RD_WR_TI_DLY_SHIFT 32
#define TCTRL4_WR_WR_PI_MORE_DLY 0x00000000f8000000UL
#define TCTRL4_WR_WR_PI_MORE_DLY_SHIFT 27
#define TCTRL4_WR_WR_TI_DLY 0x0000000007e00000UL
#define TCTRL4_WR_WR_TI_DLY_SHIFT 21
#define TCTRL4_RDWR_RD_PI_MORE_DLY 0x00000000001f0000UL
#define TCTRL4_RDWR_RD_PI_MORE_DLY_SHIFT 16
#define TCTRL4_R 0x0000000000008000UL
#define TCTRL4_R_SHIFT 15
#define TCTRL4_SDRAM_MODE_REG_DATA 0x0000000000007fffUL
#define TCTRL4_SDRAM_MODE_REG_DATA_SHIFT 0
/* All 4 memory address decoding registers have the
* same layout.
*/
#define MEM_DECODE_VALID 0x8000000000000000UL /* Valid */
#define MEM_DECODE_VALID_SHIFT 63
#define MEM_DECODE_UK 0x001ffe0000000000UL /* Upper mask */
#define MEM_DECODE_UK_SHIFT 41
#define MEM_DECODE_UM 0x0000001ffff00000UL /* Upper match */
#define MEM_DECODE_UM_SHIFT 20
#define MEM_DECODE_LK 0x000000000003c000UL /* Lower mask */
#define MEM_DECODE_LK_SHIFT 14
#define MEM_DECODE_LM 0x0000000000000f00UL /* Lower match */
#define MEM_DECODE_LM_SHIFT 8
#define PA_UPPER_BITS 0x000007fffc000000UL
#define PA_UPPER_BITS_SHIFT 26
#define PA_LOWER_BITS 0x00000000000003c0UL
#define PA_LOWER_BITS_SHIFT 6
#define MACTRL_R0 0x8000000000000000UL
#define MACTRL_R0_SHIFT 63
#define MACTRL_ADDR_LE_PW 0x7000000000000000UL
#define MACTRL_ADDR_LE_PW_SHIFT 60
#define MACTRL_CMD_PW 0x0f00000000000000UL
#define MACTRL_CMD_PW_SHIFT 56
#define MACTRL_HALF_MODE_WR_MSEL_DLY 0x00fc000000000000UL
#define MACTRL_HALF_MODE_WR_MSEL_DLY_SHIFT 50
#define MACTRL_HALF_MODE_RD_MSEL_DLY 0x0003f00000000000UL
#define MACTRL_HALF_MODE_RD_MSEL_DLY_SHIFT 44
#define MACTRL_HALF_MODE_SDRAM_CTL_DLY 0x00000f0000000000UL
#define MACTRL_HALF_MODE_SDRAM_CTL_DLY_SHIFT 40
#define MACTRL_HALF_MODE_SDRAM_CLK_DLY 0x000000e000000000UL
#define MACTRL_HALF_MODE_SDRAM_CLK_DLY_SHIFT 37
#define MACTRL_R1 0x0000001000000000UL
#define MACTRL_R1_SHIFT 36
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B3 0x0000000f00000000UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B3_SHIFT 32
#define MACTRL_ENC_INTLV_B3 0x00000000f8000000UL
#define MACTRL_ENC_INTLV_B3_SHIFT 27
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B2 0x0000000007800000UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B2_SHIFT 23
#define MACTRL_ENC_INTLV_B2 0x00000000007c0000UL
#define MACTRL_ENC_INTLV_B2_SHIFT 18
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B1 0x000000000003c000UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B1_SHIFT 14
#define MACTRL_ENC_INTLV_B1 0x0000000000003e00UL
#define MACTRL_ENC_INTLV_B1_SHIFT 9
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B0 0x00000000000001e0UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B0_SHIFT 5
#define MACTRL_ENC_INTLV_B0 0x000000000000001fUL
#define MACTRL_ENC_INTLV_B0_SHIFT 0
#endif /* _SPARC64_CHMCTRL_H */

View file

@ -0,0 +1,11 @@
/*
* clock.h: Definitions for clock operations on the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_CLOCK_H
#define _SPARC_CLOCK_H
/* Foo for now. */
#endif /* !(_SPARC_CLOCK_H) */

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_CMPXCHG_H
#define ___ASM_SPARC_CMPXCHG_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/cmpxchg_64.h>
#else
#include <asm/cmpxchg_32.h>
#endif
#endif

View file

@ -0,0 +1,77 @@
/* 32-bit atomic xchg() and cmpxchg() definitions.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_CMPXCHG__
#define __ARCH_SPARC_CMPXCHG__
unsigned long __xchg_u32(volatile u32 *m, u32 new);
void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
switch (size) {
case 4:
return __xchg_u32(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
* of spinlocks to get a bit of performance...
*
* See arch/sparc/lib/atomic32.c for implementation.
*
* Cribbed from <asm-parisc/atomic.h>
*/
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
default:
__cmpxchg_called_with_bad_pointer();
break;
}
return old;
}
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
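/* Editor's illustration, not part of this header: a standalone user-space
 * sketch of the hashed-spinlock emulation mentioned in the comment above.
 * A lock is picked by hashing the object address, and the compare-and-store
 * runs under that lock; pthread mutexes stand in for the kernel spinlocks
 * and all names are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>

#define SKETCH_NR_LOCKS 16

static pthread_mutex_t sketch_locks[SKETCH_NR_LOCKS] = {
        [0 ... SKETCH_NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER  /* GNU range init */
};

static uint32_t cmpxchg_u32_sketch(volatile uint32_t *m, uint32_t old, uint32_t new_)
{
        pthread_mutex_t *lock = &sketch_locks[((uintptr_t)m >> 2) % SKETCH_NR_LOCKS];
        uint32_t prev;

        pthread_mutex_lock(lock);
        prev = *m;
        if (prev == old)
                *m = new_;              /* store only while the old value still matches */
        pthread_mutex_unlock(lock);
        return prev;                    /* caller compares against 'old' to detect success */
}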
#include <asm-generic/cmpxchg-local.h>
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* __ARCH_SPARC_CMPXCHG__ */

View file

@ -0,0 +1,146 @@
/* 64-bit atomic xchg() and cmpxchg() definitions.
*
* Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
" mov %0, %1\n"
"1: lduw [%4], %2\n"
" cas [%4], %2, %0\n"
" cmp %2, %0\n"
" bne,a,pn %%icc, 1b\n"
" mov %1, %0\n"
: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
: "0" (val), "r" (m)
: "cc", "memory");
return val;
}
static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
" mov %0, %1\n"
"1: ldx [%4], %2\n"
" casx [%4], %2, %0\n"
" cmp %2, %0\n"
" bne,a,pn %%xcc, 1b\n"
" mov %1, %0\n"
: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
: "0" (val), "r" (m)
: "cc", "memory");
return val;
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch (size) {
case 4:
return xchg32(ptr, x);
case 8:
return xchg64(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
}
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#include <asm-generic/cmpxchg-local.h>
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
__asm__ __volatile__("cas [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
return new;
}
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
__asm__ __volatile__("casx [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
return new;
}
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
void __cmpxchg_called_with_bad_pointer(void);
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
return __cmpxchg_u64(ptr, old, new);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
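/* Editor's illustration, not part of this header: the classic retry-loop
 * usage of the contract documented above -- compare OLD with MEM, store NEW
 * only if they match, and detect success by comparing the return value with
 * OLD.  Sketched with C11 atomics so it stands alone; kernel code would use
 * the cmpxchg() macro directly.  Names are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>

static void atomic_add_sketch(_Atomic uint64_t *ctr, uint64_t delta)
{
        uint64_t old = atomic_load(ctr);

        /* retry until the value we read is still current when we try to replace it */
        while (!atomic_compare_exchange_weak(ctr, &old, old + delta))
                ;       /* on failure, 'old' is refreshed with the current value */
}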
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 4:
case 8: return __cmpxchg(ptr, old, new, size);
default:
return __cmpxchg_local_generic(ptr, old, new, size);
}
return old;
}
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */

View file

@ -0,0 +1,310 @@
#ifndef _ASM_SPARC64_COMPAT_H
#define _ASM_SPARC64_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "sparc\0\0"
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_key_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
compat_off_t st_size;
compat_time_t st_atime;
compat_ulong_t st_atime_nsec;
compat_time_t st_mtime;
compat_ulong_t st_mtime_nsec;
compat_time_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_off_t st_blksize;
compat_off_t st_blocks;
u32 __unused4[2];
};
struct compat_stat64 {
unsigned long long st_dev;
unsigned long long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned char __pad3[8];
long long st_size;
unsigned int st_blksize;
unsigned char __pad4[8];
unsigned int st_blocks;
unsigned int st_atime;
unsigned int st_atime_nsec;
unsigned int st_mtime;
unsigned int st_mtime_nsec;
unsigned int st_ctime;
unsigned int st_ctime_nsec;
unsigned int __unused4;
unsigned int __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
short __unused;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
short __unused;
};
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_flags;
int f_spare[4];
};
#define COMPAT_RLIM_INFINITY 0x7fffffff
typedef u32 compat_old_sigset_t;
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
} compat_sigval_t;
#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[SI_PAD_SIZE32];
/* kill() */
struct {
compat_pid_t _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
compat_pid_t _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
compat_pid_t _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
struct {
u32 _addr; /* faulting insn/memory ref. */
int _trapno;
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
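/* Editor's illustration, not part of this header: a standalone sketch of the
 * round trip performed by the two helpers above.  A 32-bit compat pointer is
 * widened through unsigned long on the way in and narrows back to the same
 * 32-bit value on the way out.  The sample value is hypothetical.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t compat_val = 0x7f00beefu;                        /* hypothetical 32-bit user pointer */
        void *kernel_view = (void *)(unsigned long)compat_val;    /* compat_ptr() direction */
        uint32_t back = (uint32_t)(unsigned long)kernel_view;     /* ptr_to_compat() direction */

        assert(back == compat_val);
        return 0;
}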
static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
if (test_thread_64bit_stack(usp))
usp += STACK_BIAS;
if (test_thread_flag(TIF_32BIT))
usp &= 0xffffffffUL;
usp -= len;
usp &= ~0x7UL;
return (void __user *) usp;
}
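/* Editor's illustration, not part of this header: the pointer arithmetic used
 * above to carve a scratch area below the user stack -- subtract the requested
 * length, then round down to an 8-byte boundary with ~0x7UL.  The starting
 * stack value is hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned long usp = 0xffffe123UL;   /* hypothetical 32-bit user stack pointer */
        unsigned long len = 44;

        usp -= len;
        usp &= ~0x7UL;                      /* 8-byte align, rounding down */
        printf("carved area starts at %#lx\n", usp);
        return 0;
}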
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short __pad1;
compat_mode_t mode;
unsigned short __pad2;
unsigned short seq;
unsigned long __unused1; /* yes they really are 64bit pads */
unsigned long __unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
unsigned int __pad1;
compat_time_t sem_otime;
unsigned int __pad2;
compat_time_t sem_ctime;
u32 sem_nsems;
u32 __unused1;
u32 __unused2;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
unsigned int __pad1;
compat_time_t msg_stime;
unsigned int __pad2;
compat_time_t msg_rtime;
unsigned int __pad3;
compat_time_t msg_ctime;
unsigned int msg_cbytes;
unsigned int msg_qnum;
unsigned int msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
unsigned int __unused1;
unsigned int __unused2;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
unsigned int __pad1;
compat_time_t shm_atime;
unsigned int __pad2;
compat_time_t shm_dtime;
unsigned int __pad3;
compat_time_t shm_ctime;
compat_size_t shm_segsz;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
unsigned int shm_nattch;
unsigned int __unused1;
unsigned int __unused2;
};
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
}
#endif /* _ASM_SPARC64_COMPAT_H */

View file

@ -0,0 +1,23 @@
#ifndef _COMPAT_SIGNAL_H
#define _COMPAT_SIGNAL_H
#include <linux/compat.h>
#include <asm/signal.h>
#ifdef CONFIG_COMPAT
struct __new_sigaction32 {
unsigned sa_handler;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
compat_sigset_t sa_mask;
};
struct __old_sigaction32 {
unsigned sa_handler;
compat_old_sigset_t sa_mask;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
};
#endif
#endif /* !(_COMPAT_SIGNAL_H) */

View file

@ -0,0 +1,31 @@
#ifndef _SPARC_CONTREGS_H
#define _SPARC_CONTREGS_H
/* contregs.h: Addresses of registers in the ASI_CONTROL alternate address
* space. These are for the mmu's context register, etc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
/* s=Swift, h=Ross_HyperSPARC, v=TI_Viking, t=Tsunami, r=Ross_Cypress */
#define AC_M_PCR 0x0000 /* shv Processor Control Reg */
#define AC_M_CTPR 0x0100 /* shv Context Table Pointer Reg */
#define AC_M_CXR 0x0200 /* shv Context Register */
#define AC_M_SFSR 0x0300 /* shv Synchronous Fault Status Reg */
#define AC_M_SFAR 0x0400 /* shv Synchronous Fault Address Reg */
#define AC_M_AFSR 0x0500 /* hv Asynchronous Fault Status Reg */
#define AC_M_AFAR 0x0600 /* hv Asynchronous Fault Address Reg */
#define AC_M_RESET 0x0700 /* hv Reset Reg */
#define AC_M_RPR 0x1000 /* hv Root Pointer Reg */
#define AC_M_TSUTRCR 0x1000 /* s TLB Replacement Ctrl Reg */
#define AC_M_IAPTP 0x1100 /* hv Instruction Access PTP */
#define AC_M_DAPTP 0x1200 /* hv Data Access PTP */
#define AC_M_ITR 0x1300 /* hv Index Tag Register */
#define AC_M_TRCR 0x1400 /* hv TLB Replacement Control Reg */
#define AC_M_SFSRX 0x1300 /* s Synch Fault Status Reg prim */
#define AC_M_SFARX 0x1400 /* s Synch Fault Address Reg prim */
#define AC_M_RPR1 0x1500 /* h Root Pointer Reg (entry 2) */
#define AC_M_IAPTP1 0x1600 /* h Instruction Access PTP (entry 2) */
#define AC_M_DAPTP1 0x1700 /* h Data Access PTP (entry 2) */
#endif /* _SPARC_CONTREGS_H */

View file

@ -0,0 +1,28 @@
#ifndef __ASM_CPU_TYPE_H
#define __ASM_CPU_TYPE_H
/*
* Sparc (general) CPU types
*/
enum sparc_cpu {
sun4m = 0x00,
sun4d = 0x01,
sun4e = 0x02,
sun4u = 0x03, /* V8 ploos ploos */
sun_unknown = 0x04,
ap1000 = 0x05, /* almost a sun4m */
sparc_leon = 0x06, /* Leon SoC */
};
#ifdef CONFIG_SPARC32
extern enum sparc_cpu sparc_cpu_model;
#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
#else
#define sparc_cpu_model sun4u
#endif
#endif /* __ASM_CPU_TYPE_H */

View file

@ -0,0 +1,18 @@
#ifndef ___ASM_SPARC_CPUDATA_H
#define ___ASM_SPARC_CPUDATA_H
#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/percpu.h>
extern const struct seq_operations cpuinfo_op;
#endif /* !(__ASSEMBLY__) */
#if defined(__sparc__) && defined(__arch64__)
#include <asm/cpudata_64.h>
#else
#include <asm/cpudata_32.h>
#endif
#endif

View file

@ -0,0 +1,31 @@
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
*
* Based on include/asm/cpudata.h and Linux 2.4 smp.h
* both (C) David S. Miller.
*/
#ifndef _SPARC_CPUDATA_H
#define _SPARC_CPUDATA_H
#include <linux/percpu.h>
typedef struct {
unsigned long udelay_val;
unsigned long clock_tick;
unsigned int counter;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
#endif
int prom_node;
int mid;
int next;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
#endif /* _SPARC_CPUDATA_H */

View file

@ -0,0 +1,39 @@
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
#ifndef __ASSEMBLY__
typedef struct {
/* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
unsigned int __nmi_count;
unsigned long clock_tick; /* %tick's per second */
unsigned long __pad;
unsigned int irq0_irqs;
unsigned int __pad2;
/* Dcache line 2, rarely used */
unsigned int dcache_size;
unsigned int dcache_line_size;
unsigned int icache_size;
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
int core_id;
int proc_id;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
#endif /* !(__ASSEMBLY__) */
#include <asm/trap_block.h>
#endif /* _SPARC64_CPUDATA_H */

View file

@ -0,0 +1,34 @@
/* include/asm/current.h
*
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright (C) 2002 Pete Zaitcev (zaitcev@yahoo.com)
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*
* Derived from "include/asm-s390/current.h" by
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Derived from "include/asm-i386/current.h"
*/
#ifndef _SPARC_CURRENT_H
#define _SPARC_CURRENT_H
#include <linux/thread_info.h>
#ifdef CONFIG_SPARC64
register struct task_struct *current asm("g4");
#endif
#ifdef CONFIG_SPARC32
/* We might want to consider using %g4 like sparc64 to shave a few cycles.
*
* Two stage process (inline + #define) for type-checking.
* We also obfuscate get_current() to check if anyone used that by mistake.
*/
struct task_struct;
static inline struct task_struct *__get_current(void)
{
return current_thread_info()->task;
}
#define current __get_current()
#endif
#endif /* !(_SPARC_CURRENT_H) */

View file

@ -0,0 +1,14 @@
#ifndef _SPARC64_DCR_H
#define _SPARC64_DCR_H
/* UltraSparc-III/III+ Dispatch Control Register, ASR 0x12 */
#define DCR_DPE 0x0000000000001000 /* III+: D$ Parity Error Enable */
#define DCR_OBS 0x0000000000000fc0 /* Observability Bus Controls */
#define DCR_BPE 0x0000000000000020 /* Branch Predict Enable */
#define DCR_RPE 0x0000000000000010 /* Return Address Prediction Enable */
#define DCR_SI 0x0000000000000008 /* Single Instruction Disable */
#define DCR_IPE 0x0000000000000004 /* III+: I$ Parity Error Enable */
#define DCR_IFPOE 0x0000000000000002 /* IRQ FP Operation Enable */
#define DCR_MS 0x0000000000000001 /* Multi-Scalar dispatch */
#endif /* _SPARC64_DCR_H */

View file

@ -0,0 +1,27 @@
#ifndef _SPARC64_DCU_H
#define _SPARC64_DCU_H
#include <linux/const.h>
/* UltraSparc-III Data Cache Unit Control Register */
#define DCU_CP _AC(0x0002000000000000,UL) /* Phys Cache Enable w/o mmu */
#define DCU_CV _AC(0x0001000000000000,UL) /* Virt Cache Enable w/o mmu */
#define DCU_ME _AC(0x0000800000000000,UL) /* NC-store Merging Enable */
#define DCU_RE _AC(0x0000400000000000,UL) /* RAW bypass Enable */
#define DCU_PE _AC(0x0000200000000000,UL) /* PCache Enable */
#define DCU_HPE _AC(0x0000100000000000,UL) /* HW prefetch Enable */
#define DCU_SPE _AC(0x0000080000000000,UL) /* SW prefetch Enable */
#define DCU_SL _AC(0x0000040000000000,UL) /* Secondary ld-steering Enab*/
#define DCU_WE _AC(0x0000020000000000,UL) /* WCache enable */
#define DCU_PM _AC(0x000001fe00000000,UL) /* PA Watchpoint Byte Mask */
#define DCU_VM _AC(0x00000001fe000000,UL) /* VA Watchpoint Byte Mask */
#define DCU_PR _AC(0x0000000001000000,UL) /* PA Watchpoint Read Enable */
#define DCU_PW _AC(0x0000000000800000,UL) /* PA Watchpoint Write Enable*/
#define DCU_VR _AC(0x0000000000400000,UL) /* VA Watchpoint Read Enable */
#define DCU_VW _AC(0x0000000000200000,UL) /* VA Watchpoint Write Enable*/
#define DCU_DM _AC(0x0000000000000008,UL) /* DMMU Enable */
#define DCU_IM _AC(0x0000000000000004,UL) /* IMMU Enable */
#define DCU_DC _AC(0x0000000000000002,UL) /* Data Cache Enable */
#define DCU_IC _AC(0x0000000000000001,UL) /* Instruction Cache Enable */
#endif /* _SPARC64_DCU_H */

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_DELAY_H
#define ___ASM_SPARC_DELAY_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/delay_64.h>
#else
#include <asm/delay_32.h>
#endif
#endif

View file

@ -0,0 +1,34 @@
/*
* delay.h: Linux delay routines on the Sparc.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
*/
#ifndef __SPARC_DELAY_H
#define __SPARC_DELAY_H
#include <asm/cpudata.h>
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__("cmp %0, 0\n\t"
"1: bne 1b\n\t"
"subcc %0, 1, %0\n" :
"=&r" (loops) :
"0" (loops) :
"cc");
}
/* This is too messy with inline asm on the Sparc. */
void __udelay(unsigned long usecs, unsigned long lpj);
void __ndelay(unsigned long nsecs, unsigned long lpj);
#ifdef CONFIG_SMP
#define __udelay_val cpu_data(smp_processor_id()).udelay_val
#else /* SMP */
#define __udelay_val loops_per_jiffy
#endif /* SMP */
#define udelay(__usecs) __udelay(__usecs, __udelay_val)
#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
#endif /* defined(__SPARC_DELAY_H) */

View file

@ -0,0 +1,17 @@
/* delay.h: Linux delay routines on sparc64.
*
* Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
*/
#ifndef _SPARC64_DELAY_H
#define _SPARC64_DELAY_H
#ifndef __ASSEMBLY__
void __delay(unsigned long loops);
void udelay(unsigned long usecs);
#define mdelay(n) udelay((n) * 1000)
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_DELAY_H */

View file

@ -0,0 +1,30 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_SPARC_DEVICE_H
#define _ASM_SPARC_DEVICE_H
#include <asm/openprom.h>
struct device_node;
struct platform_device;
struct dev_archdata {
void *iommu;
void *stc;
void *host_controller;
struct platform_device *op;
int numa_node;
};
void of_propagate_archdata(struct platform_device *bus);
struct pdev_archdata {
struct resource resource[PROMREG_MAX];
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
#endif /* _ASM_SPARC_DEVICE_H */

View file

@ -0,0 +1,89 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
int dma_supported(struct device *dev, u64 mask);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
/* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
* routine can be a nop.
*/
}
extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops *leon_dma_ops;
extern struct dma_map_ops pci32_dma_ops;
extern struct bus_type pci_bus_type;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
return leon_dma_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (dev->bus == &pci_bus_type)
return &pci32_dma_ops;
#endif
return dma_ops;
}
#include <asm-generic/dma-mapping-common.h>
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);
return (dma_addr == DMA_ERROR_CODE);
}
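/* Editor's sketch, not part of this header: typical driver-side use of the
 * helpers above.  It assumes a valid struct device *dev (e.g. from a driver's
 * probe routine) and a kernel buffer 'buf' of 'len' bytes; the function name
 * and error choices are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_buffer_for_device(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))       /* always check the handle */
                return -EIO;

        *out = addr;                            /* undo later with dma_unmap_single() */
        return 0;
}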
static inline int dma_set_mask(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EINVAL;
*dev->dma_mask = mask;
return 0;
}
#endif
return -EINVAL;
}
#endif

View file

@ -0,0 +1,143 @@
#ifndef _ASM_SPARC_DMA_H
#define _ASM_SPARC_DMA_H
/* These are irrelevant for Sparc DMA, but we leave them in so that
* things can compile.
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
#define MAX_DMA_ADDRESS (~0UL)
/* Useful constants */
#define SIZE_16MB (16*1024*1024)
#define SIZE_64K (64*1024)
/* SBUS DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
/* Fields in the cond_reg register */
/* First, the version identification bits */
#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
#define DMA_VERS0 0x00000000 /* Sunray DMA version */
#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
#define DMA_VERS1 0x80000000 /* DMA rev 1 */
#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
/* Values describing the burst-size property from the PROM */
#define DMA_BURST1 0x01
#define DMA_BURST2 0x02
#define DMA_BURST4 0x04
#define DMA_BURST8 0x08
#define DMA_BURST16 0x10
#define DMA_BURST32 0x20
#define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#ifdef CONFIG_SPARC32
/* Routines for data transfer buffers. */
struct device;
struct scatterlist;
struct sparc32_dma_ops {
__u32 (*get_scsi_one)(struct device *, char *, unsigned long);
void (*get_scsi_sgl)(struct device *, struct scatterlist *, int);
void (*release_scsi_one)(struct device *, __u32, unsigned long);
void (*release_scsi_sgl)(struct device *, struct scatterlist *,int);
#ifdef CONFIG_SBUS
int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int);
void (*unmap_dma_area)(struct device *, unsigned long, int);
#endif
};
extern const struct sparc32_dma_ops *sparc32_dma_ops;
#define mmu_get_scsi_one(dev,vaddr,len) \
sparc32_dma_ops->get_scsi_one(dev, vaddr, len)
#define mmu_get_scsi_sgl(dev,sg,sz) \
sparc32_dma_ops->get_scsi_sgl(dev, sg, sz)
#define mmu_release_scsi_one(dev,vaddr,len) \
sparc32_dma_ops->release_scsi_one(dev, vaddr,len)
#define mmu_release_scsi_sgl(dev,sg,sz) \
sparc32_dma_ops->release_scsi_sgl(dev, sg, sz)
#ifdef CONFIG_SBUS
/*
* mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
*
* The mmu_map_dma_area establishes two mappings in one go.
* These mappings point to pages normally mapped at 'va' (linear address).
* First mapping is for CPU visible address at 'a', uncached.
* This is an alias, but it works because it is an uncached mapping.
* Second mapping is for device visible address, or "bus" address.
* The bus address is returned at '*pba'.
*
* These functions seem distinct, but are hard to split.
* On sun4m, page attributes depend on the CPU type, so we have to
* know if we are mapping RAM or I/O, so it has to be an additional argument
* to a separate mapping function for CPU visible mappings.
*/
#define sbus_map_dma_area(dev,pba,va,a,len) \
sparc32_dma_ops->map_dma_area(dev, pba, va, a, len)
#define sbus_unmap_dma_area(dev,ba,len) \
sparc32_dma_ops->unmap_dma_area(dev, ba, len)
#endif /* CONFIG_SBUS */
#endif
#endif /* !(_ASM_SPARC_DMA_H) */

View file

@ -0,0 +1,35 @@
#ifndef __ASM_SPARC_EBUS_DMA_H
#define __ASM_SPARC_EBUS_DMA_H
struct ebus_dma_info {
spinlock_t lock;
void __iomem *regs;
unsigned int flags;
#define EBUS_DMA_FLAG_USE_EBDMA_HANDLER 0x00000001
#define EBUS_DMA_FLAG_TCI_DISABLE 0x00000002
/* These are only valid if EBUS_DMA_FLAG_USE_EBDMA_HANDLER is
* set.
*/
void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
void *client_cookie;
unsigned int irq;
#define EBUS_DMA_EVENT_ERROR 1
#define EBUS_DMA_EVENT_DMA 2
#define EBUS_DMA_EVENT_DEVICE 4
unsigned char name[64];
};
int ebus_dma_register(struct ebus_dma_info *p);
int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
void ebus_dma_unregister(struct ebus_dma_info *p);
int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
size_t len);
void ebus_dma_prepare(struct ebus_dma_info *p, int write);
unsigned int ebus_dma_residue(struct ebus_dma_info *p);
unsigned int ebus_dma_addr(struct ebus_dma_info *p);
void ebus_dma_enable(struct ebus_dma_info *p, int on);
#endif /* __ASM_SPARC_EBUS_DMA_H */

View file

@ -0,0 +1,122 @@
/*
* ecc.h: Definitions and defines for the external cache/memory
* controller on the sun4m.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_ECC_H
#define _SPARC_ECC_H
/* These registers are accessed through the SRMMU passthrough ASI 0x20 */
#define ECC_ENABLE 0x00000000 /* ECC enable register */
#define ECC_FSTATUS 0x00000008 /* ECC fault status register */
#define ECC_FADDR 0x00000010 /* ECC fault address register */
#define ECC_DIGNOSTIC 0x00000018 /* ECC diagnostics register */
#define ECC_MBAENAB 0x00000020 /* MBus arbiter enable register */
#define ECC_DMESG 0x00001000 /* Diagnostic message passing area */
/* ECC MBus Arbiter Enable register:
*
* ----------------------------------------
* | |SBUS|MOD3|MOD2|MOD1|RSV|
* ----------------------------------------
* 31 5 4 3 2 1 0
*
* SBUS: Enable MBus Arbiter on the SBus 0=off 1=on
* MOD3: Enable MBus Arbiter on MBus module 3 0=off 1=on
* MOD2: Enable MBus Arbiter on MBus module 2 0=off 1=on
* MOD1: Enable MBus Arbiter on MBus module 1 0=off 1=on
*/
#define ECC_MBAE_SBUS 0x00000010
#define ECC_MBAE_MOD3 0x00000008
#define ECC_MBAE_MOD2 0x00000004
#define ECC_MBAE_MOD1 0x00000002
/* ECC Fault Control Register layout:
*
* -----------------------------
* | RESV | ECHECK | EINT |
* -----------------------------
* 31 2 1 0
*
* ECHECK: Enable ECC checking. 0=off 1=on
* EINT: Enable Interrupts for correctable errors. 0=off 1=on
*/
#define ECC_FCR_CHECK 0x00000002
#define ECC_FCR_INTENAB 0x00000001
/* ECC Fault Address Register Zero layout:
*
* -----------------------------------------------------
* | MID | S | RSV | VA | BM |AT| C| SZ |TYP| PADDR |
* -----------------------------------------------------
* 31-28 27 26-22 21-14 13 12 11 10-8 7-4 3-0
*
* MID: ModuleID of the faulting processor. ie. who did it?
* S: Supervisor/Privileged access? 0=no 1=yes
* VA: Bits 19-12 of the virtual faulting address, these are the
* superset bits in the virtual cache and can be used for
* a flush operation if necessary.
* BM: Boot mode? 0=no 1=yes This is just like the SRMMU boot
* mode bit.
* AT: Did this fault happen during an atomic instruction? 0=no
* 1=yes. This means either an 'ldstub' or 'swap' instruction
* was in progress (but not finished) when this fault happened.
* This indicated whether the bus was locked when the fault
* occurred.
* C: Did the pte for this access indicate that it was cacheable?
* 0=no 1=yes
* SZ: The size of the transaction.
* TYP: The transaction type.
* PADDR: Bits 35-32 of the physical address for the fault.
*/
#define ECC_FADDR0_MIDMASK 0xf0000000
#define ECC_FADDR0_S 0x08000000
#define ECC_FADDR0_VADDR 0x003fc000
#define ECC_FADDR0_BMODE 0x00002000
#define ECC_FADDR0_ATOMIC 0x00001000
#define ECC_FADDR0_CACHE 0x00000800
#define ECC_FADDR0_SIZE 0x00000700
#define ECC_FADDR0_TYPE 0x000000f0
#define ECC_FADDR0_PADDR 0x0000000f
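/* Editor's illustration, not part of this header: extracting one of the
 * fields documented above from a raw register value with the usual
 * mask-and-shift idiom.  The sample value is hypothetical, and the shift
 * count (28) follows from the MID field occupying bits 31-28.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t faddr0 = 0x3a0049c2u;                   /* hypothetical ECC_FADDR0 value */
        uint32_t mid = (faddr0 & 0xf0000000u) >> 28;     /* ECC_FADDR0_MIDMASK */

        printf("faulting module id: %u\n", mid);
        return 0;
}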
/* ECC Fault Address Register One layout:
*
* -------------------------------------
* | Physical Address 31-0 |
* -------------------------------------
* 31 0
*
* You get the upper 4 bits of the physical address from the
* PADDR field in ECC Fault Address Zero register.
*/
/* ECC Fault Status Register layout:
*
* ----------------------------------------------
* | RESV|C2E|MULT|SYNDROME|DWORD|UNC|TIMEO|BS|C|
* ----------------------------------------------
* 31-18 17 16 15-8 7-4 3 2 1 0
*
* C2E: A C2 graphics error occurred. 0=no 1=yes (SS10 only)
* MULT: Multiple errors occurred ;-O 0=no 1=prom_panic(yes)
* SYNDROME: Controller is mentally unstable.
* DWORD:
* UNC: Uncorrectable error. 0=no 1=yes
* TIMEO: Timeout occurred. 0=no 1=yes
* BS: C2 graphics bad slot access. 0=no 1=yes (SS10 only)
* C: Correctable error? 0=no 1=yes
*/
#define ECC_FSR_C2ERR 0x00020000
#define ECC_FSR_MULT 0x00010000
#define ECC_FSR_SYND 0x0000ff00
#define ECC_FSR_DWORD 0x000000f0
#define ECC_FSR_UNC 0x00000008
#define ECC_FSR_TIMEO 0x00000004
#define ECC_FSR_BADSLOT 0x00000002
#define ECC_FSR_C 0x00000001
#endif /* !(_SPARC_ECC_H) */

View file

@ -0,0 +1,9 @@
/*
* eeprom.h: Definitions for the Sun eeprom.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
/* The EEPROM and the Mostek Mk48t02 use the same IO address space
* for their registers/data areas. The IDPROM lives here too.
*/

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_ELF_H
#define ___ASM_SPARC_ELF_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/elf_64.h>
#else
#include <asm/elf_32.h>
#endif
#endif

View file

@ -0,0 +1,131 @@
#ifndef __ASMSPARC_ELF_H
#define __ASMSPARC_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
/*
* Sparc section types
*/
#define STT_REGISTER 13
/*
* Sparc ELF relocation types
*/
#define R_SPARC_NONE 0
#define R_SPARC_8 1
#define R_SPARC_16 2
#define R_SPARC_32 3
#define R_SPARC_DISP8 4
#define R_SPARC_DISP16 5
#define R_SPARC_DISP32 6
#define R_SPARC_WDISP30 7
#define R_SPARC_WDISP22 8
#define R_SPARC_HI22 9
#define R_SPARC_22 10
#define R_SPARC_13 11
#define R_SPARC_LO10 12
#define R_SPARC_GOT10 13
#define R_SPARC_GOT13 14
#define R_SPARC_GOT22 15
#define R_SPARC_PC10 16
#define R_SPARC_PC22 17
#define R_SPARC_WPLT30 18
#define R_SPARC_COPY 19
#define R_SPARC_GLOB_DAT 20
#define R_SPARC_JMP_SLOT 21
#define R_SPARC_RELATIVE 22
#define R_SPARC_UA32 23
#define R_SPARC_PLT32 24
#define R_SPARC_HIPLT22 25
#define R_SPARC_LOPLT10 26
#define R_SPARC_PCPLT32 27
#define R_SPARC_PCPLT22 28
#define R_SPARC_PCPLT10 29
#define R_SPARC_10 30
#define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_OLO10 33
#define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41
#define R_SPARC_7 43
#define R_SPARC_5 44
#define R_SPARC_6 45
/* Bits present in AT_HWCAP, primarily for Sparc32. */
#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
#define HWCAP_SPARC_STBAR 2
#define HWCAP_SPARC_SWAP 4
#define HWCAP_SPARC_MULDIV 8
#define HWCAP_SPARC_V9 16
#define HWCAP_SPARC_ULTRA3 32
#define CORE_DUMP_USE_REGSET
/* Format is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG 38
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
union {
unsigned long pr_regs[32];
double pr_dregs[16];
} pr_fr;
unsigned long __unused;
unsigned long pr_fsr;
unsigned char pr_qcnt;
unsigned char pr_q_entrysize;
unsigned char pr_en;
unsigned int pr_q[64];
} elf_fpregset_t;
#include <asm/mbus.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_ARCH EM_SPARC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This can NOT be done in userspace
on Sparc. */
/* Most sun4m's have them all. */
#define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#endif /* !(__ASMSPARC_ELF_H) */

View file

@ -0,0 +1,212 @@
#ifndef __ASM_SPARC64_ELF_H
#define __ASM_SPARC64_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/spitfire.h>
/*
* Sparc section types
*/
#define STT_REGISTER 13
/*
* Sparc ELF relocation types
*/
#define R_SPARC_NONE 0
#define R_SPARC_8 1
#define R_SPARC_16 2
#define R_SPARC_32 3
#define R_SPARC_DISP8 4
#define R_SPARC_DISP16 5
#define R_SPARC_DISP32 6
#define R_SPARC_WDISP30 7
#define R_SPARC_WDISP22 8
#define R_SPARC_HI22 9
#define R_SPARC_22 10
#define R_SPARC_13 11
#define R_SPARC_LO10 12
#define R_SPARC_GOT10 13
#define R_SPARC_GOT13 14
#define R_SPARC_GOT22 15
#define R_SPARC_PC10 16
#define R_SPARC_PC22 17
#define R_SPARC_WPLT30 18
#define R_SPARC_COPY 19
#define R_SPARC_GLOB_DAT 20
#define R_SPARC_JMP_SLOT 21
#define R_SPARC_RELATIVE 22
#define R_SPARC_UA32 23
#define R_SPARC_PLT32 24
#define R_SPARC_HIPLT22 25
#define R_SPARC_LOPLT10 26
#define R_SPARC_PCPLT32 27
#define R_SPARC_PCPLT22 28
#define R_SPARC_PCPLT10 29
#define R_SPARC_10 30
#define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_OLO10 33
#define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41
#define R_SPARC_7 43
#define R_SPARC_5 44
#define R_SPARC_6 45
/* Bits present in AT_HWCAP, primarily for Sparc32. */
#define HWCAP_SPARC_FLUSH 0x00000001
#define HWCAP_SPARC_STBAR 0x00000002
#define HWCAP_SPARC_SWAP 0x00000004
#define HWCAP_SPARC_MULDIV 0x00000008
#define HWCAP_SPARC_V9 0x00000010
#define HWCAP_SPARC_ULTRA3 0x00000020
#define HWCAP_SPARC_BLKINIT 0x00000040
#define HWCAP_SPARC_N2 0x00000080
/* Solaris compatible AT_HWCAP bits. */
#define AV_SPARC_MUL32 0x00000100 /* 32x32 multiply is efficient */
#define AV_SPARC_DIV32 0x00000200 /* 32x32 divide is efficient */
#define AV_SPARC_FSMULD 0x00000400 /* 'fsmuld' is efficient */
#define AV_SPARC_V8PLUS 0x00000800 /* v9 insn available to 32bit */
#define AV_SPARC_POPC 0x00001000 /* 'popc' is efficient */
#define AV_SPARC_VIS 0x00002000 /* VIS insns available */
#define AV_SPARC_VIS2 0x00004000 /* VIS2 insns available */
#define AV_SPARC_ASI_BLK_INIT 0x00008000 /* block init ASIs available */
#define AV_SPARC_FMAF 0x00010000 /* fused multiply-add */
#define AV_SPARC_VIS3 0x00020000 /* VIS3 insns available */
#define AV_SPARC_HPC 0x00040000 /* HPC insns available */
#define AV_SPARC_RANDOM 0x00080000 /* 'random' insn available */
#define AV_SPARC_TRANS 0x00100000 /* transaction insns available */
#define AV_SPARC_FJFMAU 0x00200000 /* unfused multiply-add */
#define AV_SPARC_IMA 0x00400000 /* integer multiply-add */
#define AV_SPARC_ASI_CACHE_SPARING \
0x00800000 /* cache sparing ASIs available */
#define AV_SPARC_PAUSE 0x01000000 /* PAUSE available */
#define AV_SPARC_CBCOND 0x02000000 /* CBCOND insns available */
/* Solaris decided to enumerate every single crypto instruction type
* in the AT_HWCAP bits. This is wasteful, since if crypto is present,
* you still need to look in the CFR register to see if the opcode is
* really available. So we simply advertise only "crypto" support.
*/
#define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */
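/* Editor's illustration, not part of this header: user space can read the
 * AT_HWCAP word exported through ELF_HWCAP with getauxval(3) and test the
 * capability bits defined above.  Standalone sketch; the constants are
 * copied from the definitions above.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("VIS:    %s\n", (hwcap & 0x00002000) ? "yes" : "no");   /* AV_SPARC_VIS */
        printf("VIS2:   %s\n", (hwcap & 0x00004000) ? "yes" : "no");   /* AV_SPARC_VIS2 */
        printf("crypto: %s\n", (hwcap & 0x04000000) ? "yes" : "no");   /* HWCAP_SPARC_CRYPTO */
        return 0;
}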
#define CORE_DUMP_USE_REGSET
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_ARCH EM_SPARCV9
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
/* Format of 64-bit elf_gregset_t is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* TSTATE
* TPC
* TNPC
* Y
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG 36
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
unsigned long pr_regs[32];
unsigned long pr_fsr;
unsigned long pr_gsr;
unsigned long pr_fprs;
} elf_fpregset_t;
/* Format of 32-bit elf_gregset_t is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
typedef unsigned int compat_elf_greg_t;
#define COMPAT_ELF_NGREG 38
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
typedef struct {
union {
unsigned int pr_regs[32];
unsigned long pr_dregs[16];
} pr_fr;
unsigned int __unused;
unsigned int pr_fsr;
unsigned char pr_qcnt;
unsigned char pr_q_entrysize;
unsigned char pr_en;
unsigned int pr_q[64];
} compat_elf_fpregset_t;
/* UltraSparc extensions. Still unused, but will be eventually. */
typedef struct {
unsigned int pr_type;
unsigned int pr_align;
union {
struct {
union {
unsigned int pr_regs[32];
unsigned long pr_dregs[16];
long double pr_qregs[8];
} pr_xfr;
} pr_v8p;
unsigned int pr_xfsr;
unsigned int pr_fprs;
unsigned int pr_xg[8];
unsigned int pr_xo[8];
unsigned long pr_tstate;
unsigned int pr_filler[8];
} pr_un;
} elf_xregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_SPARC || \
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE 0x0000010000000000UL
#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
extern unsigned long sparc64_elf_hwcap;
#define ELF_HWCAP sparc64_elf_hwcap
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) \
do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_32BIT); \
else \
clear_thread_flag(TIF_32BIT); \
/* flush_thread will update pgd cache */ \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
} while (0)
#endif /* !(__ASM_SPARC64_ELF_H) */

View file

@@ -0,0 +1,49 @@
#ifndef _SPARC64_ESTATE_H
#define _SPARC64_ESTATE_H
/* UltraSPARC-III E-cache Error Enable */
#define ESTATE_ERROR_FMT 0x0000000000040000 /* Force MTAG ECC */
#define ESTATE_ERROR_FMESS 0x000000000003c000 /* Forced MTAG ECC val */
#define ESTATE_ERROR_FMD 0x0000000000002000 /* Force DATA ECC */
#define ESTATE_ERROR_FDECC 0x0000000000001ff0 /* Forced DATA ECC val */
#define ESTATE_ERROR_UCEEN 0x0000000000000008 /* See below */
#define ESTATE_ERROR_NCEEN 0x0000000000000002 /* See below */
#define ESTATE_ERROR_CEEN 0x0000000000000001 /* See below */
/* UCEEN enables the fast_ECC_error trap for: 1) software correctable E-cache
* errors 2) uncorrectable E-cache errors. Such events only occur on reads
* of the E-cache by the local processor for: 1) data loads 2) instruction
* fetches 3) atomic operations. Such events _cannot_ occur for: 1) merge
 * 2) writeback 3) copyout. The AFSR bits associated with these traps are
* UCC and UCU.
*/
/* NCEEN enables instruction_access_error, data_access_error, and ECC_error traps
* for uncorrectable ECC errors and system errors.
*
* Uncorrectable system bus data error or MTAG ECC error, system bus TimeOUT,
* or system bus BusERR:
* 1) As the result of an instruction fetch, will generate instruction_access_error
* 2) As the result of a load etc. will generate data_access_error.
* 3) As the result of store merge completion, writeback, or copyout will
* generate a disrupting ECC_error trap.
 * 4) As the result of an instruction vector fetch, such errors can generate any
* of the 3 trap types.
*
* The AFSR bits associated with these traps are EMU, EDU, WDU, CPU, IVU, UE,
* BERR, and TO.
*/
/* CEEN enables the ECC_error trap for hardware corrected ECC errors. System bus
* reads resulting in a hardware corrected data or MTAG ECC error will generate an
* ECC_error disrupting trap with this bit enabled.
*
* This same trap will also be generated when a hardware corrected ECC error results
* during store merge, writeback, and copyout operations.
*/
/* In general, if the trap enable bits above are disabled the AFSR bits will still
* log the events even though the trap will not be generated by the processor.
*/
#endif /* _SPARC64_ESTATE_H */

View file

@@ -0,0 +1,33 @@
#ifndef _SPARC_FB_H_
#define _SPARC_FB_H_
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
#include <asm/prom.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
#ifdef CONFIG_SPARC64
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
}
static inline int fb_is_primary_device(struct fb_info *info)
{
struct device *dev = info->device;
struct device_node *node;
if (console_set_on_cmdline)
return 0;
node = dev->of_node;
if (node &&
node == of_console_device)
return 1;
return 0;
}
#endif /* _SPARC_FB_H_ */

View file

@@ -0,0 +1,72 @@
#ifndef __LINUX_FBIO_H
#define __LINUX_FBIO_H
#include <uapi/asm/fbio.h>
#define FBIOPUTCMAP_SPARC _IOW('F', 3, struct fbcmap)
#define FBIOGETCMAP_SPARC _IOW('F', 4, struct fbcmap)
/* Addresses on the fd of a cgsix that are mappable */
#define CG6_FBC 0x70000000
#define CG6_TEC 0x70001000
#define CG6_BTREGS 0x70002000
#define CG6_FHC 0x70004000
#define CG6_THC 0x70005000
#define CG6_ROM 0x70006000
#define CG6_RAM 0x70016000
#define CG6_DHC 0x80000000
#define CG3_MMAP_OFFSET 0x4000000
/* Addresses on the fd of a tcx that are mappable */
#define TCX_RAM8BIT 0x00000000
#define TCX_RAM24BIT 0x01000000
#define TCX_UNK3 0x10000000
#define TCX_UNK4 0x20000000
#define TCX_CONTROLPLANE 0x28000000
#define TCX_UNK6 0x30000000
#define TCX_UNK7 0x38000000
#define TCX_TEC 0x70000000
#define TCX_BTREGS 0x70002000
#define TCX_THC 0x70004000
#define TCX_DHC 0x70008000
#define TCX_ALT 0x7000a000
#define TCX_SYNC 0x7000e000
#define TCX_UNK2 0x70010000
/* CG14 definitions */
/* Offsets into the OBIO space: */
#define CG14_REGS 0 /* registers */
#define CG14_CURSORREGS 0x1000 /* cursor registers */
#define CG14_DACREGS 0x2000 /* DAC registers */
#define CG14_XLUT 0x3000 /* X Look Up Table -- ??? */
#define CG14_CLUT1 0x4000 /* Color Look Up Table */
#define CG14_CLUT2 0x5000 /* Color Look Up Table */
#define CG14_CLUT3 0x6000 /* Color Look Up Table */
#define CG14_AUTO 0xf000
struct fbcmap32 {
int index; /* first element (0 origin) */
int count;
u32 red;
u32 green;
u32 blue;
};
#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
struct fbcursor32 {
short set; /* what to set, choose from the list above */
short enable; /* cursor on/off */
struct fbcurpos pos; /* cursor position */
struct fbcurpos hot; /* cursor hot spot */
struct fbcmap32 cmap; /* color map info */
struct fbcurpos size; /* cursor bit map size */
u32 image; /* cursor image bits */
u32 mask; /* cursor mask bits */
};
#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
#endif /* __LINUX_FBIO_H */

View file

@@ -0,0 +1,80 @@
/* fhc.h: FHC and Clock board register definitions.
*
* Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
*/
#ifndef _SPARC64_FHC_H
#define _SPARC64_FHC_H
/* Clock board register offsets. */
#define CLOCK_CTRL 0x00UL /* Main control */
#define CLOCK_STAT1 0x10UL /* Status one */
#define CLOCK_STAT2 0x20UL /* Status two */
#define CLOCK_PWRSTAT 0x30UL /* Power status */
#define CLOCK_PWRPRES 0x40UL /* Power presence */
#define CLOCK_TEMP 0x50UL /* Temperature */
#define CLOCK_IRQDIAG 0x60UL /* IRQ diagnostics */
#define CLOCK_PWRSTAT2 0x70UL /* Power status two */
#define CLOCK_CTRL_LLED 0x04 /* Left LED, 0 == on */
#define CLOCK_CTRL_MLED 0x02 /* Mid LED, 1 == on */
#define CLOCK_CTRL_RLED 0x01 /* Right LED, 1 == on */
/* Firehose controller register offsets */
#define FHC_PREGS_ID 0x00UL /* FHC ID */
#define FHC_ID_VERS 0xf0000000 /* Version of this FHC */
#define FHC_ID_PARTID 0x0ffff000 /* Part ID code (0x0f9f == FHC) */
#define FHC_ID_MANUF 0x0000007e /* Manufacturer (0x3e == SUN's JEDEC)*/
#define FHC_ID_RESV 0x00000001 /* Read as one */
#define FHC_PREGS_RCS 0x10UL /* FHC Reset Control/Status Register */
#define FHC_RCS_POR 0x80000000 /* Last reset was a power cycle */
#define FHC_RCS_SPOR 0x40000000 /* Last reset was sw power on reset */
#define FHC_RCS_SXIR 0x20000000 /* Last reset was sw XIR reset */
#define FHC_RCS_BPOR 0x10000000 /* Last reset was due to POR button */
#define FHC_RCS_BXIR 0x08000000 /* Last reset was due to XIR button */
#define FHC_RCS_WEVENT 0x04000000 /* CPU reset was due to wakeup event */
#define FHC_RCS_CFATAL 0x02000000 /* Centerplane Fatal Error signalled */
#define FHC_RCS_FENAB 0x01000000 /* Fatal errors elicit system reset */
#define FHC_PREGS_CTRL 0x20UL /* FHC Control Register */
#define FHC_CONTROL_ICS 0x00100000 /* Ignore Centerplane Signals */
#define FHC_CONTROL_FRST 0x00080000 /* Fatal Error Reset Enable */
#define FHC_CONTROL_LFAT 0x00040000 /* AC/DC signalled a local error */
#define FHC_CONTROL_SLINE 0x00010000 /* Firmware Synchronization Line */
#define FHC_CONTROL_DCD 0x00008000 /* DC-->DC Converter Disable */
#define FHC_CONTROL_POFF 0x00004000 /* AC/DC Controller PLL Disable */
#define FHC_CONTROL_FOFF 0x00002000 /* FHC Controller PLL Disable */
#define FHC_CONTROL_AOFF 0x00001000 /* CPU A SRAM/SBD Low Power Mode */
#define FHC_CONTROL_BOFF 0x00000800 /* CPU B SRAM/SBD Low Power Mode */
#define FHC_CONTROL_PSOFF 0x00000400 /* Turns off this FHC's power supply */
#define FHC_CONTROL_IXIST 0x00000200 /* 0=FHC tells clock board it exists */
#define FHC_CONTROL_XMSTR 0x00000100 /* 1=Causes this FHC to be XIR master*/
#define FHC_CONTROL_LLED 0x00000040 /* 0=Left LED ON */
#define FHC_CONTROL_MLED 0x00000020 /* 1=Middle LED ON */
#define FHC_CONTROL_RLED 0x00000010 /* 1=Right LED */
#define FHC_CONTROL_BPINS 0x00000003 /* Spare Bidirectional Pins */
#define FHC_PREGS_BSR 0x30UL /* FHC Board Status Register */
#define FHC_BSR_DA64 0x00040000 /* Port A: 0=128bit 1=64bit data path */
#define FHC_BSR_DB64 0x00020000 /* Port B: 0=128bit 1=64bit data path */
#define FHC_BSR_BID 0x0001e000 /* Board ID */
#define FHC_BSR_SA 0x00001c00 /* Port A UPA Speed (from the pins) */
#define FHC_BSR_SB 0x00000380 /* Port B UPA Speed (from the pins) */
#define FHC_BSR_NDIAG 0x00000040 /* Not in Diag Mode */
#define FHC_BSR_NTBED 0x00000020 /* Not in TestBED Mode */
#define FHC_BSR_NIA 0x0000001c /* Jumper, bit 18 in PROM space */
#define FHC_BSR_SI 0x00000001 /* Spare input pin value */
#define FHC_PREGS_ECC 0x40UL /* FHC ECC Control Register (16 bits) */
#define FHC_PREGS_JCTRL 0xf0UL /* FHC JTAG Control Register */
#define FHC_JTAG_CTRL_MENAB 0x80000000 /* Indicates this is JTAG Master */
#define FHC_JTAG_CTRL_MNONE 0x40000000 /* Indicates no JTAG Master present */
#define FHC_PREGS_JCMD 0x100UL /* FHC JTAG Command Register */
#define FHC_IREG_IGN 0x00UL /* This FHC's IGN */
#define FHC_FFREGS_IMAP 0x00UL /* FHC Fanfail IMAP */
#define FHC_FFREGS_ICLR 0x10UL /* FHC Fanfail ICLR */
#define FHC_SREGS_IMAP 0x00UL /* FHC System IMAP */
#define FHC_SREGS_ICLR 0x10UL /* FHC System ICLR */
#define FHC_UREGS_IMAP 0x00UL /* FHC Uart IMAP */
#define FHC_UREGS_ICLR 0x10UL /* FHC Uart ICLR */
#define FHC_TREGS_IMAP 0x00UL /* FHC TOD IMAP */
#define FHC_TREGS_ICLR 0x10UL /* FHC TOD ICLR */
#endif /* !(_SPARC64_FHC_H) */

View file

@@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_FLOPPY_H
#define ___ASM_SPARC_FLOPPY_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/floppy_64.h>
#else
#include <asm/floppy_32.h>
#endif
#endif

View file

@@ -0,0 +1,393 @@
/* asm/floppy.h: Sparc specific parts of the Floppy driver.
*
* Copyright (C) 1995 David S. Miller (davem@davemloft.net)
*/
#ifndef __ASM_SPARC_FLOPPY_H
#define __ASM_SPARC_FLOPPY_H
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
/* We don't need no stinkin' I/O port allocation crap. */
#undef release_region
#undef request_region
#define release_region(X, Y) do { } while(0)
#define request_region(X, Y, Z) (1)
/* References:
* 1) Netbsd Sun floppy driver.
* 2) NCR 82077 controller manual
* 3) Intel 82077 controller manual
*/
struct sun_flpy_controller {
volatile unsigned char status_82072; /* Main Status reg. */
#define dcr_82072 status_82072 /* Digital Control reg. */
#define status1_82077 status_82072 /* Auxiliary Status reg. 1 */
volatile unsigned char data_82072; /* Data fifo. */
#define status2_82077 data_82072 /* Auxiliary Status reg. 2 */
volatile unsigned char dor_82077; /* Digital Output reg. */
volatile unsigned char tapectl_82077; /* What the? Tape control reg? */
volatile unsigned char status_82077; /* Main Status Register. */
#define drs_82077 status_82077 /* Digital Rate Select reg. */
volatile unsigned char data_82077; /* Data fifo. */
volatile unsigned char ___unused;
volatile unsigned char dir_82077; /* Digital Input reg. */
#define dcr_82077 dir_82077 /* Config Control reg. */
};
/* You'll only ever find one controller on a SparcStation anyways. */
static struct sun_flpy_controller *sun_fdc = NULL;
struct sun_floppy_ops {
unsigned char (*fd_inb)(int port);
void (*fd_outb)(unsigned char value, int port);
};
static struct sun_floppy_ops sun_fdops;
#define fd_inb(port) sun_fdops.fd_inb(port)
#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
#define fd_enable_dma() sun_fd_enable_dma()
#define fd_disable_dma() sun_fd_disable_dma()
#define fd_request_dma() (0) /* nothing... */
#define fd_free_dma() /* nothing... */
#define fd_clear_dma_ff() /* nothing... */
#define fd_set_dma_mode(mode) sun_fd_set_dma_mode(mode)
#define fd_set_dma_addr(addr) sun_fd_set_dma_addr(addr)
#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
#define fd_enable_irq() /* nothing... */
#define fd_disable_irq() /* nothing... */
#define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fd_request_irq()
#define fd_free_irq() /* nothing... */
#if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
#define fd_dma_mem_alloc(size) ((unsigned long) vmalloc(size))
#define fd_dma_mem_free(addr,size) (vfree((void *)(addr)))
#endif
/* XXX This isn't really correct. XXX */
#define get_dma_residue(x) (0)
#define FLOPPY0_TYPE 4
#define FLOPPY1_TYPE 0
/* Super paranoid... */
#undef HAVE_DISABLE_HLT
/* Here is where we catch the floppy driver trying to initialize,
* therefore this is where we call the PROM device tree probing
* routine etc. on the Sparc.
*/
#define FDC1 sun_floppy_init()
#define N_FDC 1
#define N_DRIVE 8
/* No 64k boundary crossing problems on the Sparc. */
#define CROSS_64KB(a,s) (0)
/* Routines unique to each controller type on a Sun. */
static void sun_set_dor(unsigned char value, int fdc_82077)
{
if (fdc_82077)
sun_fdc->dor_82077 = value;
}
static unsigned char sun_read_dir(void)
{
return sun_fdc->dir_82077;
}
static unsigned char sun_82072_fd_inb(int port)
{
udelay(5);
switch(port & 7) {
default:
printk("floppy: Asked to read unknown port %d\n", port);
panic("floppy: Port bolixed.");
case 4: /* FD_STATUS */
return sun_fdc->status_82072 & ~STATUS_DMA;
case 5: /* FD_DATA */
return sun_fdc->data_82072;
case 7: /* FD_DIR */
return sun_read_dir();
}
panic("sun_82072_fd_inb: How did I get here?");
}
static void sun_82072_fd_outb(unsigned char value, int port)
{
udelay(5);
switch(port & 7) {
default:
printk("floppy: Asked to write to unknown port %d\n", port);
panic("floppy: Port bolixed.");
case 2: /* FD_DOR */
sun_set_dor(value, 0);
break;
case 5: /* FD_DATA */
sun_fdc->data_82072 = value;
break;
case 7: /* FD_DCR */
sun_fdc->dcr_82072 = value;
break;
case 4: /* FD_STATUS */
sun_fdc->status_82072 = value;
break;
}
return;
}
static unsigned char sun_82077_fd_inb(int port)
{
udelay(5);
switch(port & 7) {
default:
printk("floppy: Asked to read unknown port %d\n", port);
panic("floppy: Port bolixed.");
case 0: /* FD_STATUS_0 */
return sun_fdc->status1_82077;
case 1: /* FD_STATUS_1 */
return sun_fdc->status2_82077;
case 2: /* FD_DOR */
return sun_fdc->dor_82077;
case 3: /* FD_TDR */
return sun_fdc->tapectl_82077;
case 4: /* FD_STATUS */
return sun_fdc->status_82077 & ~STATUS_DMA;
case 5: /* FD_DATA */
return sun_fdc->data_82077;
case 7: /* FD_DIR */
return sun_read_dir();
}
panic("sun_82077_fd_inb: How did I get here?");
}
static void sun_82077_fd_outb(unsigned char value, int port)
{
udelay(5);
switch(port & 7) {
default:
printk("floppy: Asked to write to unknown port %d\n", port);
panic("floppy: Port bolixed.");
case 2: /* FD_DOR */
sun_set_dor(value, 1);
break;
case 5: /* FD_DATA */
sun_fdc->data_82077 = value;
break;
case 7: /* FD_DCR */
sun_fdc->dcr_82077 = value;
break;
case 4: /* FD_STATUS */
sun_fdc->status_82077 = value;
break;
case 3: /* FD_TDR */
sun_fdc->tapectl_82077 = value;
break;
}
return;
}
/* For pseudo-dma (Sun floppy drives have no real DMA available to
* them so we must eat the data fifo bytes directly ourselves) we have
* three state variables. doing_pdma tells our inline low-level
* assembly floppy interrupt entry point whether it should sit and eat
* bytes from the fifo or just transfer control up to the higher level
* floppy interrupt c-code. I tried very hard but I could not get the
* pseudo-dma to work in c-code without getting many overruns and
* underruns. If non-zero, doing_pdma encodes the direction of
* the transfer for debugging. 1=read 2=write
*/
/* Common routines to all controller types on the Sparc. */
static inline void virtual_dma_init(void)
{
/* nothing... */
}
static inline void sun_fd_disable_dma(void)
{
doing_pdma = 0;
pdma_base = NULL;
}
static inline void sun_fd_set_dma_mode(int mode)
{
switch(mode) {
case DMA_MODE_READ:
doing_pdma = 1;
break;
case DMA_MODE_WRITE:
doing_pdma = 2;
break;
default:
printk("Unknown dma mode %d\n", mode);
panic("floppy: Giving up...");
}
}
static inline void sun_fd_set_dma_addr(char *buffer)
{
pdma_vaddr = buffer;
}
static inline void sun_fd_set_dma_count(int length)
{
pdma_size = length;
}
static inline void sun_fd_enable_dma(void)
{
pdma_base = pdma_vaddr;
pdma_areasize = pdma_size;
}
int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
static int sun_fd_request_irq(void)
{
static int once = 0;
if (!once) {
once = 1;
return sparc_floppy_request_irq(FLOPPY_IRQ, floppy_interrupt);
} else {
return 0;
}
}
static struct linux_prom_registers fd_regs[2];
static int sun_floppy_init(void)
{
struct platform_device *op;
struct device_node *dp;
struct resource r;
char state[128];
phandle fd_node;
phandle tnode;
int num_regs;
use_virtual_dma = 1;
/* Forget it if we aren't on a machine that could possibly
* ever have a floppy drive.
*/
if (sparc_cpu_model != sun4m) {
/* We certainly don't have a floppy controller. */
goto no_sun_fdc;
}
/* Well, try to find one. */
tnode = prom_getchild(prom_root_node);
fd_node = prom_searchsiblings(tnode, "obio");
if (fd_node != 0) {
tnode = prom_getchild(fd_node);
fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
} else {
fd_node = prom_searchsiblings(tnode, "fd");
}
if (fd_node == 0) {
goto no_sun_fdc;
}
/* The sun4m lets us know if the controller is actually usable. */
if (prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
if(!strcmp(state, "disabled")) {
goto no_sun_fdc;
}
}
num_regs = prom_getproperty(fd_node, "reg", (char *) fd_regs, sizeof(fd_regs));
num_regs = (num_regs / sizeof(fd_regs[0]));
prom_apply_obio_ranges(fd_regs, num_regs);
memset(&r, 0, sizeof(r));
r.flags = fd_regs[0].which_io;
r.start = fd_regs[0].phys_addr;
sun_fdc = of_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
/* Look up irq in platform_device.
* We try "SUNW,fdtwo" and "fd"
*/
op = NULL;
for_each_node_by_name(dp, "SUNW,fdtwo") {
op = of_find_device_by_node(dp);
if (op)
break;
}
if (!op) {
for_each_node_by_name(dp, "fd") {
op = of_find_device_by_node(dp);
if (op)
break;
}
}
if (!op)
goto no_sun_fdc;
FLOPPY_IRQ = op->archdata.irqs[0];
/* Last minute sanity check... */
if (sun_fdc->status_82072 == 0xff) {
sun_fdc = NULL;
goto no_sun_fdc;
}
sun_fdops.fd_inb = sun_82077_fd_inb;
sun_fdops.fd_outb = sun_82077_fd_outb;
fdc_status = &sun_fdc->status_82077;
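/* Probe for a working DOR register: if the value written below does not
 * stick (the register still reads back 0x80), assume an 82072 and fall
 * back to the 82072 access routines.
 */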
if (sun_fdc->dor_82077 == 0x80) {
sun_fdc->dor_82077 = 0x02;
if (sun_fdc->dor_82077 == 0x80) {
sun_fdops.fd_inb = sun_82072_fd_inb;
sun_fdops.fd_outb = sun_82072_fd_outb;
fdc_status = &sun_fdc->status_82072;
}
}
/* Success... */
allowed_drive_mask = 0x01;
return (int) sun_fdc;
no_sun_fdc:
return -1;
}
static int sparc_eject(void)
{
set_dor(0x00, 0xff, 0x90);
udelay(500);
set_dor(0x00, 0x6f, 0x00);
udelay(500);
return 0;
}
#define fd_eject(drive) sparc_eject()
#define EXTRA_FLOPPY_PARAMS
static DEFINE_SPINLOCK(dma_spin_lock);
#define claim_dma_lock() \
({ unsigned long flags; \
spin_lock_irqsave(&dma_spin_lock, flags); \
flags; \
})
#define release_dma_lock(__flags) \
spin_unlock_irqrestore(&dma_spin_lock, __flags);
#endif /* !(__ASM_SPARC_FLOPPY_H) */

View file

@@ -0,0 +1,774 @@
/* floppy.h: Sparc specific parts of the Floppy driver.
*
* Copyright (C) 1996, 2007, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
* Ultra/PCI support added: Sep 1997 Eddie C. Dost (ecd@skynet.be)
*/
#ifndef __ASM_SPARC64_FLOPPY_H
#define __ASM_SPARC64_FLOPPY_H
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <asm/auxio.h>
/*
* Define this to enable exchanging drive 0 and 1 if only drive 1 is
* probed on PCI machines.
*/
#undef PCI_FDC_SWAP_DRIVES
/* References:
* 1) Netbsd Sun floppy driver.
* 2) NCR 82077 controller manual
* 3) Intel 82077 controller manual
*/
struct sun_flpy_controller {
volatile unsigned char status1_82077; /* Auxiliary Status reg. 1 */
volatile unsigned char status2_82077; /* Auxiliary Status reg. 2 */
volatile unsigned char dor_82077; /* Digital Output reg. */
volatile unsigned char tapectl_82077; /* Tape Control reg */
volatile unsigned char status_82077; /* Main Status Register. */
#define drs_82077 status_82077 /* Digital Rate Select reg. */
volatile unsigned char data_82077; /* Data fifo. */
volatile unsigned char ___unused;
volatile unsigned char dir_82077; /* Digital Input reg. */
#define dcr_82077 dir_82077 /* Config Control reg. */
};
/* You'll only ever find one controller on an Ultra anyways. */
static struct sun_flpy_controller *sun_fdc = (struct sun_flpy_controller *)-1;
unsigned long fdc_status;
static struct platform_device *floppy_op = NULL;
struct sun_floppy_ops {
unsigned char (*fd_inb) (unsigned long port);
void (*fd_outb) (unsigned char value, unsigned long port);
void (*fd_enable_dma) (void);
void (*fd_disable_dma) (void);
void (*fd_set_dma_mode) (int);
void (*fd_set_dma_addr) (char *);
void (*fd_set_dma_count) (int);
unsigned int (*get_dma_residue) (void);
int (*fd_request_irq) (void);
void (*fd_free_irq) (void);
int (*fd_eject) (int);
};
static struct sun_floppy_ops sun_fdops;
#define fd_inb(port) sun_fdops.fd_inb(port)
#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
#define fd_enable_dma() sun_fdops.fd_enable_dma()
#define fd_disable_dma() sun_fdops.fd_disable_dma()
#define fd_request_dma() (0) /* nothing... */
#define fd_free_dma() /* nothing... */
#define fd_clear_dma_ff() /* nothing... */
#define fd_set_dma_mode(mode) sun_fdops.fd_set_dma_mode(mode)
#define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr)
#define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count)
#define get_dma_residue(x) sun_fdops.get_dma_residue()
#define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fdops.fd_request_irq()
#define fd_free_irq() sun_fdops.fd_free_irq()
#define fd_eject(drive) sun_fdops.fd_eject(drive)
/* Super paranoid... */
#undef HAVE_DISABLE_HLT
static int sun_floppy_types[2] = { 0, 0 };
/* Here is where we catch the floppy driver trying to initialize,
* therefore this is where we call the PROM device tree probing
* routine etc. on the Sparc.
*/
#define FLOPPY0_TYPE sun_floppy_init()
#define FLOPPY1_TYPE sun_floppy_types[1]
#define FDC1 ((unsigned long)sun_fdc)
#define N_FDC 1
#define N_DRIVE 8
/* No 64k boundary crossing problems on the Sparc. */
#define CROSS_64KB(a,s) (0)
static unsigned char sun_82077_fd_inb(unsigned long port)
{
udelay(5);
switch(port & 7) {
default:
printk("floppy: Asked to read unknown port %lx\n", port);
panic("floppy: Port bolixed.");
case 4: /* FD_STATUS */
return sbus_readb(&sun_fdc->status_82077) & ~STATUS_DMA;
case 5: /* FD_DATA */
return sbus_readb(&sun_fdc->data_82077);
case 7: /* FD_DIR */
/* XXX: Is DCL on 0x80 in sun4m? */
return sbus_readb(&sun_fdc->dir_82077);
}
panic("sun_82072_fd_inb: How did I get here?");
}
static void sun_82077_fd_outb(unsigned char value, unsigned long port)
{
udelay(5);
switch(port & 7) {
default:
printk("floppy: Asked to write to unknown port %lx\n", port);
panic("floppy: Port bolixed.");
case 2: /* FD_DOR */
/* Happily, the 82077 has a real DOR register. */
sbus_writeb(value, &sun_fdc->dor_82077);
break;
case 5: /* FD_DATA */
sbus_writeb(value, &sun_fdc->data_82077);
break;
case 7: /* FD_DCR */
sbus_writeb(value, &sun_fdc->dcr_82077);
break;
case 4: /* FD_STATUS */
sbus_writeb(value, &sun_fdc->status_82077);
break;
}
return;
}
/* For pseudo-dma (Sun floppy drives have no real DMA available to
* them so we must eat the data fifo bytes directly ourselves) we have
* three state variables. doing_pdma tells our inline low-level
* assembly floppy interrupt entry point whether it should sit and eat
* bytes from the fifo or just transfer control up to the higher level
* floppy interrupt c-code. I tried very hard but I could not get the
* pseudo-dma to work in c-code without getting many overruns and
* underruns. If non-zero, doing_pdma encodes the direction of
* the transfer for debugging. 1=read 2=write
*/
unsigned char *pdma_vaddr;
unsigned long pdma_size;
volatile int doing_pdma = 0;
/* This is software state */
char *pdma_base = NULL;
unsigned long pdma_areasize;
/* Common routines to all controller types on the Sparc. */
static void sun_fd_disable_dma(void)
{
doing_pdma = 0;
pdma_base = NULL;
}
static void sun_fd_set_dma_mode(int mode)
{
switch(mode) {
case DMA_MODE_READ:
doing_pdma = 1;
break;
case DMA_MODE_WRITE:
doing_pdma = 2;
break;
default:
printk("Unknown dma mode %d\n", mode);
panic("floppy: Giving up...");
}
}
static void sun_fd_set_dma_addr(char *buffer)
{
pdma_vaddr = buffer;
}
static void sun_fd_set_dma_count(int length)
{
pdma_size = length;
}
static void sun_fd_enable_dma(void)
{
pdma_base = pdma_vaddr;
pdma_areasize = pdma_size;
}
irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie)
{
if (likely(doing_pdma)) {
void __iomem *stat = (void __iomem *) fdc_status;
unsigned char *vaddr = pdma_vaddr;
unsigned long size = pdma_size;
u8 val;
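/* Main status register bits tested below: 0x80 = RQM (FIFO ready for a
 * transfer), 0x40 = DIO (data direction, set when the controller has a
 * byte for us), 0x20 = NDMA (execution phase still in progress).
 */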
while (size) {
val = readb(stat);
if (unlikely(!(val & 0x80))) {
pdma_vaddr = vaddr;
pdma_size = size;
return IRQ_HANDLED;
}
if (unlikely(!(val & 0x20))) {
pdma_vaddr = vaddr;
pdma_size = size;
doing_pdma = 0;
goto main_interrupt;
}
if (val & 0x40) {
/* read */
*vaddr++ = readb(stat + 1);
} else {
unsigned char data = *vaddr++;
/* write */
writeb(data, stat + 1);
}
size--;
}
pdma_vaddr = vaddr;
pdma_size = size;
/* Send Terminal Count pulse to floppy controller. */
val = readb(auxio_register);
val |= AUXIO_AUX1_FTCNT;
writeb(val, auxio_register);
val &= ~AUXIO_AUX1_FTCNT;
writeb(val, auxio_register);
doing_pdma = 0;
}
main_interrupt:
return floppy_interrupt(irq, dev_cookie);
}
static int sun_fd_request_irq(void)
{
static int once = 0;
int error;
if(!once) {
once = 1;
error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
0, "floppy", NULL);
return ((error == 0) ? 0 : -1);
}
return 0;
}
static void sun_fd_free_irq(void)
{
}
static unsigned int sun_get_dma_residue(void)
{
/* XXX This isn't really correct. XXX */
return 0;
}
static int sun_fd_eject(int drive)
{
set_dor(0x00, 0xff, 0x90);
udelay(500);
set_dor(0x00, 0x6f, 0x00);
udelay(500);
return 0;
}
#include <asm/ebus_dma.h>
#include <asm/ns87303.h>
static struct ebus_dma_info sun_pci_fd_ebus_dma;
static struct device *sun_floppy_dev;
static int sun_pci_broken_drive = -1;
struct sun_pci_dma_op {
unsigned int addr;
int len;
int direction;
char *buf;
};
static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static unsigned char sun_pci_fd_inb(unsigned long port)
{
udelay(5);
return inb(port);
}
static void sun_pci_fd_outb(unsigned char val, unsigned long port)
{
udelay(5);
outb(val, port);
}
static void sun_pci_fd_broken_outb(unsigned char val, unsigned long port)
{
udelay(5);
/*
* XXX: Due to SUN's broken floppy connector on AX and AXi
* we need to turn on MOTOR_0 also, if the floppy is
* jumpered to DS1 (like most PC floppies are). I hope
* this does not hurt correct hardware like the AXmp.
* (Eddie, Sep 12 1998).
*/
if (port == ((unsigned long)sun_fdc) + 2) {
if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x20)) {
val |= 0x10;
}
}
outb(val, port);
}
#ifdef PCI_FDC_SWAP_DRIVES
static void sun_pci_fd_lde_broken_outb(unsigned char val, unsigned long port)
{
udelay(5);
/*
* XXX: Due to SUN's broken floppy connector on AX and AXi
* we need to turn on MOTOR_0 also, if the floppy is
* jumpered to DS1 (like most PC floppies are). I hope
* this does not hurt correct hardware like the AXmp.
* (Eddie, Sep 12 1998).
*/
if (port == ((unsigned long)sun_fdc) + 2) {
if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x10)) {
val &= ~(0x03);
val |= 0x21;
}
}
outb(val, port);
}
#endif /* PCI_FDC_SWAP_DRIVES */
static void sun_pci_fd_enable_dma(void)
{
BUG_ON((NULL == sun_pci_dma_pending.buf) ||
(0 == sun_pci_dma_pending.len) ||
(0 == sun_pci_dma_pending.direction));
sun_pci_dma_current.buf = sun_pci_dma_pending.buf;
sun_pci_dma_current.len = sun_pci_dma_pending.len;
sun_pci_dma_current.direction = sun_pci_dma_pending.direction;
sun_pci_dma_pending.buf = NULL;
sun_pci_dma_pending.len = 0;
sun_pci_dma_pending.direction = 0;
sun_pci_dma_pending.addr = -1U;
sun_pci_dma_current.addr =
dma_map_single(sun_floppy_dev,
sun_pci_dma_current.buf,
sun_pci_dma_current.len,
sun_pci_dma_current.direction);
ebus_dma_enable(&sun_pci_fd_ebus_dma, 1);
if (ebus_dma_request(&sun_pci_fd_ebus_dma,
sun_pci_dma_current.addr,
sun_pci_dma_current.len))
BUG();
}
static void sun_pci_fd_disable_dma(void)
{
ebus_dma_enable(&sun_pci_fd_ebus_dma, 0);
if (sun_pci_dma_current.addr != -1U)
dma_unmap_single(sun_floppy_dev,
sun_pci_dma_current.addr,
sun_pci_dma_current.len,
sun_pci_dma_current.direction);
sun_pci_dma_current.addr = -1U;
}
static void sun_pci_fd_set_dma_mode(int mode)
{
if (mode == DMA_MODE_WRITE)
sun_pci_dma_pending.direction = DMA_TO_DEVICE;
else
sun_pci_dma_pending.direction = DMA_FROM_DEVICE;
ebus_dma_prepare(&sun_pci_fd_ebus_dma, mode != DMA_MODE_WRITE);
}
static void sun_pci_fd_set_dma_count(int length)
{
sun_pci_dma_pending.len = length;
}
static void sun_pci_fd_set_dma_addr(char *buffer)
{
sun_pci_dma_pending.buf = buffer;
}
static unsigned int sun_pci_get_dma_residue(void)
{
return ebus_dma_residue(&sun_pci_fd_ebus_dma);
}
static int sun_pci_fd_request_irq(void)
{
return ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 1);
}
static void sun_pci_fd_free_irq(void)
{
ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 0);
}
static int sun_pci_fd_eject(int drive)
{
return -EINVAL;
}
void sun_pci_fd_dma_callback(struct ebus_dma_info *p, int event, void *cookie)
{
floppy_interrupt(0, NULL);
}
/*
* Floppy probing, we'd like to use /dev/fd0 for a single Floppy on PCI,
* even if this is configured using DS1, thus looks like /dev/fd1 with
* the cabling used in Ultras.
*/
#define DOR (port + 2)
#define MSR (port + 4)
#define FIFO (port + 5)
static void sun_pci_fd_out_byte(unsigned long port, unsigned char val,
unsigned long reg)
{
unsigned char status;
int timeout = 1000;
while (!((status = inb(MSR)) & 0x80) && --timeout)
udelay(100);
outb(val, reg);
}
static unsigned char sun_pci_fd_sensei(unsigned long port)
{
unsigned char result[2] = { 0x70, 0x00 };
unsigned char status;
int i = 0;
sun_pci_fd_out_byte(port, 0x08, FIFO);
do {
int timeout = 1000;
while (!((status = inb(MSR)) & 0x80) && --timeout)
udelay(100);
if (!timeout)
break;
if ((status & 0xf0) == 0xd0)
result[i++] = inb(FIFO);
else
break;
} while (i < 2);
return result[0];
}
static void sun_pci_fd_reset(unsigned long port)
{
unsigned char mask = 0x00;
unsigned char status;
int timeout = 10000;
outb(0x80, MSR);
do {
status = sun_pci_fd_sensei(port);
if ((status & 0xc0) == 0xc0)
mask |= 1 << (status & 0x03);
else
udelay(100);
} while ((mask != 0x0f) && --timeout);
}
static int sun_pci_fd_test_drive(unsigned long port, int drive)
{
unsigned char status, data;
int timeout = 1000;
int ready;
sun_pci_fd_reset(port);
data = (0x10 << drive) | 0x0c | drive;
sun_pci_fd_out_byte(port, data, DOR);
sun_pci_fd_out_byte(port, 0x07, FIFO);
sun_pci_fd_out_byte(port, drive & 0x03, FIFO);
do {
udelay(100);
status = sun_pci_fd_sensei(port);
} while (((status & 0xc0) == 0x80) && --timeout);
if (!timeout)
ready = 0;
else
ready = (status & 0x10) ? 0 : 1;
sun_pci_fd_reset(port);
return ready;
}
#undef FIFO
#undef MSR
#undef DOR
static int __init ebus_fdthree_p(struct device_node *dp)
{
if (!strcmp(dp->name, "fdthree"))
return 1;
if (!strcmp(dp->name, "floppy")) {
const char *compat;
compat = of_get_property(dp, "compatible", NULL);
if (compat && !strcmp(compat, "fdthree"))
return 1;
}
return 0;
}
static unsigned long __init sun_floppy_init(void)
{
static int initialized = 0;
struct device_node *dp;
struct platform_device *op;
const char *prop;
if (initialized)
return sun_floppy_types[0];
initialized = 1;
op = NULL;
for_each_node_by_name(dp, "SUNW,fdtwo") {
if (strcmp(dp->parent->name, "sbus"))
continue;
op = of_find_device_by_node(dp);
if (op)
break;
}
if (op) {
floppy_op = op;
FLOPPY_IRQ = op->archdata.irqs[0];
} else {
struct device_node *ebus_dp;
void __iomem *auxio_reg;
const char *state_prop;
unsigned long config;
dp = NULL;
for_each_node_by_name(ebus_dp, "ebus") {
for (dp = ebus_dp->child; dp; dp = dp->sibling) {
if (ebus_fdthree_p(dp))
goto found_fdthree;
}
}
found_fdthree:
if (!dp)
return 0;
op = of_find_device_by_node(dp);
if (!op)
return 0;
state_prop = of_get_property(op->dev.of_node, "status", NULL);
if (state_prop && !strncmp(state_prop, "disabled", 8))
return 0;
FLOPPY_IRQ = op->archdata.irqs[0];
/* Make sure the high density bit is set, some systems
* (most notably Ultra5/Ultra10) come up with it clear.
*/
auxio_reg = (void __iomem *) op->resource[2].start;
writel(readl(auxio_reg)|0x2, auxio_reg);
sun_floppy_dev = &op->dev;
spin_lock_init(&sun_pci_fd_ebus_dma.lock);
/* XXX ioremap */
sun_pci_fd_ebus_dma.regs = (void __iomem *)
op->resource[1].start;
if (!sun_pci_fd_ebus_dma.regs)
return 0;
sun_pci_fd_ebus_dma.flags = (EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
EBUS_DMA_FLAG_TCI_DISABLE);
sun_pci_fd_ebus_dma.callback = sun_pci_fd_dma_callback;
sun_pci_fd_ebus_dma.client_cookie = NULL;
sun_pci_fd_ebus_dma.irq = FLOPPY_IRQ;
strcpy(sun_pci_fd_ebus_dma.name, "floppy");
if (ebus_dma_register(&sun_pci_fd_ebus_dma))
return 0;
/* XXX ioremap */
sun_fdc = (struct sun_flpy_controller *) op->resource[0].start;
sun_fdops.fd_inb = sun_pci_fd_inb;
sun_fdops.fd_outb = sun_pci_fd_outb;
can_use_virtual_dma = use_virtual_dma = 0;
sun_fdops.fd_enable_dma = sun_pci_fd_enable_dma;
sun_fdops.fd_disable_dma = sun_pci_fd_disable_dma;
sun_fdops.fd_set_dma_mode = sun_pci_fd_set_dma_mode;
sun_fdops.fd_set_dma_addr = sun_pci_fd_set_dma_addr;
sun_fdops.fd_set_dma_count = sun_pci_fd_set_dma_count;
sun_fdops.get_dma_residue = sun_pci_get_dma_residue;
sun_fdops.fd_request_irq = sun_pci_fd_request_irq;
sun_fdops.fd_free_irq = sun_pci_fd_free_irq;
sun_fdops.fd_eject = sun_pci_fd_eject;
fdc_status = (unsigned long) &sun_fdc->status_82077;
/*
* XXX: Find out on which machines this is really needed.
*/
if (1) {
sun_pci_broken_drive = 1;
sun_fdops.fd_outb = sun_pci_fd_broken_outb;
}
allowed_drive_mask = 0;
if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 0))
sun_floppy_types[0] = 4;
if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 1))
sun_floppy_types[1] = 4;
/*
* Find NS87303 SuperIO config registers (through ecpp).
*/
config = 0;
for (dp = ebus_dp->child; dp; dp = dp->sibling) {
if (!strcmp(dp->name, "ecpp")) {
struct platform_device *ecpp_op;
ecpp_op = of_find_device_by_node(dp);
if (ecpp_op)
config = ecpp_op->resource[1].start;
goto config_done;
}
}
config_done:
/*
* Sanity check, is this really the NS87303?
*/
switch (config & 0x3ff) {
case 0x02e:
case 0x15c:
case 0x26e:
case 0x398:
break;
default:
config = 0;
}
if (!config)
return sun_floppy_types[0];
/* Enable PC-AT mode. */
ns87303_modify(config, ASC, 0, 0xc0);
#ifdef PCI_FDC_SWAP_DRIVES
/*
* If only Floppy 1 is present, swap drives.
*/
if (!sun_floppy_types[0] && sun_floppy_types[1]) {
/*
* Set the drive exchange bit in FCR on NS87303,
* make sure other bits are sane before doing so.
*/
ns87303_modify(config, FER, FER_EDM, 0);
ns87303_modify(config, ASC, ASC_DRV2_SEL, 0);
ns87303_modify(config, FCR, 0, FCR_LDE);
config = sun_floppy_types[0];
sun_floppy_types[0] = sun_floppy_types[1];
sun_floppy_types[1] = config;
if (sun_pci_broken_drive != -1) {
sun_pci_broken_drive = 1 - sun_pci_broken_drive;
sun_fdops.fd_outb = sun_pci_fd_lde_broken_outb;
}
}
#endif /* PCI_FDC_SWAP_DRIVES */
return sun_floppy_types[0];
}
prop = of_get_property(op->dev.of_node, "status", NULL);
if (prop && !strncmp(prop, "disabled", 8))
return 0;
/*
* We cannot do of_ioremap here: it does request_region,
* which the generic floppy driver tries to do once again.
* But we must use the sdev resource values as they have
* had parent ranges applied.
*/
sun_fdc = (struct sun_flpy_controller *)
(op->resource[0].start +
((op->resource[0].flags & 0x1ffUL) << 32UL));
/* Last minute sanity check... */
if (sbus_readb(&sun_fdc->status1_82077) == 0xff) {
sun_fdc = (struct sun_flpy_controller *)-1;
return 0;
}
sun_fdops.fd_inb = sun_82077_fd_inb;
sun_fdops.fd_outb = sun_82077_fd_outb;
can_use_virtual_dma = use_virtual_dma = 1;
sun_fdops.fd_enable_dma = sun_fd_enable_dma;
sun_fdops.fd_disable_dma = sun_fd_disable_dma;
sun_fdops.fd_set_dma_mode = sun_fd_set_dma_mode;
sun_fdops.fd_set_dma_addr = sun_fd_set_dma_addr;
sun_fdops.fd_set_dma_count = sun_fd_set_dma_count;
sun_fdops.get_dma_residue = sun_get_dma_residue;
sun_fdops.fd_request_irq = sun_fd_request_irq;
sun_fdops.fd_free_irq = sun_fd_free_irq;
sun_fdops.fd_eject = sun_fd_eject;
fdc_status = (unsigned long) &sun_fdc->status_82077;
/* Success... */
allowed_drive_mask = 0x01;
sun_floppy_types[0] = 4;
sun_floppy_types[1] = 0;
return sun_floppy_types[0];
}
#define EXTRA_FLOPPY_PARAMS
static DEFINE_SPINLOCK(dma_spin_lock);
#define claim_dma_lock() \
({ unsigned long flags; \
spin_lock_irqsave(&dma_spin_lock, flags); \
flags; \
})
#define release_dma_lock(__flags) \
spin_unlock_irqrestore(&dma_spin_lock, __flags);
#endif /* !(__ASM_SPARC64_FLOPPY_H) */

View file

@@ -0,0 +1,33 @@
/* fpumacro.h: FPU related macros.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC64_FPUMACRO_H
#define _SPARC64_FPUMACRO_H
#include <asm/asi.h>
#include <asm/visasm.h>
struct fpustate {
u32 regs[64];
};
#define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs)
static inline unsigned long fprs_read(void)
{
unsigned long retval;
__asm__ __volatile__("rd %%fprs, %0" : "=r" (retval));
return retval;
}
static inline void fprs_write(unsigned long val)
{
__asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val));
}
#endif /* !(_SPARC64_FPUMACRO_H) */

View file

@@ -0,0 +1,29 @@
#ifndef _ASM_SPARC64_FTRACE
#define _ASM_SPARC64_FTRACE
#ifdef CONFIG_MCOUNT
#define MCOUNT_ADDR ((long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
void _mcount(void);
#endif
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/* relocation of mcount call site is the same as the address */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */
unsigned long prepare_ftrace_return(unsigned long parent,
unsigned long self_addr,
unsigned long frame_pointer);
#endif /* _ASM_SPARC64_FTRACE */

View file

@@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_FUTEX_H
#define ___ASM_SPARC_FUTEX_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/futex_64.h>
#else
#include <asm/futex_32.h>
#endif
#endif

View file

@@ -0,0 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#endif

View file

@@ -0,0 +1,113 @@
#ifndef _SPARC64_FUTEX_H
#define _SPARC64_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
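/* The macro below implements op-and-fetch on a user word with a casa retry
 * loop: load the old value, apply "insn" to compute the new value, then
 * compare-and-swap it in, retrying if the word changed underneath us.  The
 * fixup section makes faulting accesses return -EFAULT.
 */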
#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile__( \
"\n1: lduwa [%3] %%asi, %2\n" \
" " insn "\n" \
"2: casa [%3] %%asi, %2, %1\n" \
" cmp %2, %1\n" \
" bne,pn %%icc, 1b\n" \
" mov 0, %0\n" \
"3:\n" \
" .section .fixup,#alloc,#execinstr\n" \
" .align 4\n" \
"4: sethi %%hi(3b), %0\n" \
" jmpl %0 + %%lo(3b), %%g0\n" \
" mov %5, %0\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .word 1b, 4b\n" \
" .word 2b, 4b\n" \
" .previous\n" \
: "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory")
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
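/* encoded_op packs the operation in bits 31:28, the comparison in bits
 * 27:24, and two sign-extended 12-bit immediates: oparg in bits 23:12 and
 * cmparg in bits 11:0.  The shift pairs below extract and sign-extend them.
 */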
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_cas_op("andn\t%2, %4, %1", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_XOR:
__futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
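/* casa compares *uaddr with oldval and stores newval only on a match; in
 * either case the register holding newval is overwritten with the value
 * that was in memory, which is what gets reported back through *uval.
 */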
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
__asm__ __volatile__(
"\n1: casa [%4] %%asi, %3, %1\n"
"2:\n"
" .section .fixup,#alloc,#execinstr\n"
" .align 4\n"
"3: sethi %%hi(2b), %0\n"
" jmpl %0 + %%lo(2b), %%g0\n"
" mov %5, %0\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
" .align 4\n"
" .word 1b, 3b\n"
" .previous\n"
: "+r" (ret), "=r" (newval)
: "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
: "memory");
*uval = newval;
return ret;
}
#endif /* !(_SPARC64_FUTEX_H) */

View file

@@ -0,0 +1,4 @@
#ifndef __LINUX_GPIO_H
#warning Include linux/gpio.h instead of asm/gpio.h
#include <linux/gpio.h>
#endif

View file

@@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_HARDIRQ_H
#define ___ASM_SPARC_HARDIRQ_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/hardirq_64.h>
#else
#include <asm/hardirq_32.h>
#endif
#endif

View file

@@ -0,0 +1,12 @@
/* hardirq.h: 32-bit Sparc hard IRQ support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
*/
#ifndef __SPARC_HARDIRQ_H
#define __SPARC_HARDIRQ_H
#include <asm-generic/hardirq.h>
#endif /* __SPARC_HARDIRQ_H */

View file

@@ -0,0 +1,17 @@
/* hardirq.h: 64-bit Sparc hard IRQ support.
*
* Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net)
*/
#ifndef __SPARC64_HARDIRQ_H
#define __SPARC64_HARDIRQ_H
#include <asm/cpudata.h>
#define __ARCH_IRQ_STAT
#define local_softirq_pending() \
(local_cpu_data().__softirq_pending)
void ack_bad_irq(unsigned int irq);
#endif /* !(__SPARC64_HARDIRQ_H) */

View file

@@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_HEAD_H
#define ___ASM_SPARC_HEAD_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/head_64.h>
#else
#include <asm/head_32.h>
#endif
#endif

View file

@@ -0,0 +1,83 @@
#ifndef __SPARC_HEAD_H
#define __SPARC_HEAD_H
#define KERNBASE 0xf0000000 /* First address the kernel will eventually be */
#define WRITE_PAUSE nop; nop; nop; /* Have to do this after %wim/%psr chg */
/* Here are some trap goodies */
/* Generic trap entry. */
#define TRAP_ENTRY(type, label) \
rd %psr, %l0; b label; rd %wim, %l3; nop;
/* Data/text faults */
#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
/* This is for traps we should NEVER get. */
#define BAD_TRAP(num) \
rd %psr, %l0; mov num, %l7; b bad_trap_handler; rd %wim, %l3;
/* This is for traps where we just want to skip the instruction which caused it */
#define SKIP_TRAP(type, name) \
jmpl %l2, %g0; rett %l2 + 4; nop; nop;
/* Notice that for the system calls we pull a trick. We load up a
* different pointer to the system call vector table in %l7, but call
* the same generic system call low-level entry point. The trap table
* entry sequences are also HyperSparc pipeline friendly ;-)
*/
/* Software trap for Linux system calls. */
#define LINUX_SYSCALL_TRAP \
sethi %hi(sys_call_table), %l7; \
or %l7, %lo(sys_call_table), %l7; \
b linux_sparc_syscall; \
rd %psr, %l0;
#define BREAKPOINT_TRAP \
b breakpoint_trap; \
rd %psr,%l0; \
nop; \
nop;
#ifdef CONFIG_KGDB
#define KGDB_TRAP(num) \
b kgdb_trap_low; \
rd %psr,%l0; \
nop; \
nop;
#else
#define KGDB_TRAP(num) \
BAD_TRAP(num)
#endif
/* The Get Condition Codes software trap for userland. */
#define GETCC_TRAP \
b getcc_trap_handler; rd %psr, %l0; nop; nop;
/* The Set Condition Codes software trap for userland. */
#define SETCC_TRAP \
b setcc_trap_handler; rd %psr, %l0; nop; nop;
/* The Get PSR software trap for userland. */
#define GETPSR_TRAP \
rd %psr, %i0; jmp %l2; rett %l2 + 4; nop;
/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
* gets handled with another macro.
*/
#define TRAP_ENTRY_INTERRUPT(int_level) \
mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
/* Window overflows/underflows are special and we need to try to be as
* efficient as possible here....
*/
#define WINDOW_SPILL \
rd %psr, %l0; rd %wim, %l3; b spill_window_entry; andcc %l0, PSR_PS, %g0;
#define WINDOW_FILL \
rd %psr, %l0; rd %wim, %l3; b fill_window_entry; andcc %l0, PSR_PS, %g0;
#endif /* __SPARC_HEAD_H */

View file

@@ -0,0 +1,76 @@
#ifndef _SPARC64_HEAD_H
#define _SPARC64_HEAD_H
#include <asm/pstate.h>
/* wrpr %g0, val, %gl */
#define SET_GL(val) \
.word 0xa1902000 | val
/* rdpr %gl, %gN */
#define GET_GL_GLOBAL(N) \
.word 0x81540000 | (N << 25)
#define KERNBASE 0x400000
#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
#define __CHEETAH_ID 0x003e0014
#define __JALAPENO_ID 0x003e0016
#define __SERRANO_ID 0x003e0022
#define CHEETAH_MANUF 0x003e
#define CHEETAH_IMPL 0x0014 /* Ultra-III */
#define CHEETAH_PLUS_IMPL 0x0015 /* Ultra-III+ */
#define JALAPENO_IMPL 0x0016 /* Ultra-IIIi */
#define JAGUAR_IMPL 0x0018 /* Ultra-IV */
#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
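/* The BRANCH_IF_* macros below decode the %ver register: the manufacturer
 * field lives in bits 63:48 and the implementation field in bits 47:32,
 * which is what the srlx/sllx shift sequences extract.
 */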
#define BRANCH_IF_SUN4V(tmp1,label) \
sethi %hi(is_sun4v), %tmp1; \
lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \
brnz,pn %tmp1, label; \
nop
#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
rdpr %ver, %tmp1; \
sethi %hi(__CHEETAH_ID), %tmp2; \
srlx %tmp1, 32, %tmp1; \
or %tmp2, %lo(__CHEETAH_ID), %tmp2;\
cmp %tmp1, %tmp2; \
be,pn %icc, label; \
nop;
#define BRANCH_IF_JALAPENO(tmp1,tmp2,label) \
rdpr %ver, %tmp1; \
sethi %hi(__JALAPENO_ID), %tmp2; \
srlx %tmp1, 32, %tmp1; \
or %tmp2, %lo(__JALAPENO_ID), %tmp2;\
cmp %tmp1, %tmp2; \
be,pn %icc, label; \
nop;
#define BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(tmp1,tmp2,label) \
rdpr %ver, %tmp1; \
srlx %tmp1, (32 + 16), %tmp2; \
cmp %tmp2, CHEETAH_MANUF; \
bne,pt %xcc, 99f; \
sllx %tmp1, 16, %tmp1; \
srlx %tmp1, (32 + 16), %tmp2; \
cmp %tmp2, CHEETAH_PLUS_IMPL; \
bgeu,pt %xcc, label; \
99: nop;
#define BRANCH_IF_ANY_CHEETAH(tmp1,tmp2,label) \
rdpr %ver, %tmp1; \
srlx %tmp1, (32 + 16), %tmp2; \
cmp %tmp2, CHEETAH_MANUF; \
bne,pt %xcc, 99f; \
sllx %tmp1, 16, %tmp1; \
srlx %tmp1, (32 + 16), %tmp2; \
cmp %tmp2, CHEETAH_IMPL; \
bgeu,pt %xcc, label; \
99: nop;
#endif /* !(_SPARC64_HEAD_H) */

View file

@@ -0,0 +1,23 @@
/*
 * hibernate.h: Hibernation support specific for sparc64.
*
* Copyright (C) 2013 Kirill V Tkhai (tkhai@yandex.ru)
*/
#ifndef ___SPARC_HIBERNATE_H
#define ___SPARC_HIBERNATE_H
struct saved_context {
unsigned long fp;
unsigned long cwp;
unsigned long wstate;
unsigned long tick;
unsigned long pstate;
unsigned long g4;
unsigned long g5;
unsigned long g6;
};
#endif

View file

@@ -0,0 +1,78 @@
/*
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
#ifdef __KERNEL__
#include <linux/interrupt.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
void kmap_init(void) __init;
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM. Currently the simplest way to do this is to align the
* pkmap region on a pagetable boundary (4MB).
*/
#define LAST_PKMAP 1024
#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
#define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */

View file

@@ -0,0 +1,98 @@
#ifndef _ASM_SPARC64_HUGETLB_H
#define _ASM_SPARC64_HUGETLB_H
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t old_pte = *ptep;
set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
int changed = !pte_same(*ptep, pte);
if (changed) {
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
flush_tlb_page(vma, addr);
}
return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* _ASM_SPARC64_HUGETLB_H */

View file

@@ -0,0 +1,37 @@
#ifndef _SPARC64_HVTRAP_H
#define _SPARC64_HVTRAP_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct hvtramp_mapping {
__u64 vaddr;
__u64 tte;
};
struct hvtramp_descr {
__u32 cpu;
__u32 num_mappings;
__u64 fault_info_va;
__u64 fault_info_pa;
__u64 thread_reg;
struct hvtramp_mapping maps[1];
};
void hv_cpu_startup(unsigned long hvdescr_pa);
#endif
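/* Byte offsets of the struct hvtramp_descr and struct hvtramp_mapping
 * fields above, for use from assembly code.
 */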
#define HVTRAMP_DESCR_CPU 0x00
#define HVTRAMP_DESCR_NUM_MAPPINGS 0x04
#define HVTRAMP_DESCR_FAULT_INFO_VA 0x08
#define HVTRAMP_DESCR_FAULT_INFO_PA 0x10
#define HVTRAMP_DESCR_THREAD_REG 0x18
#define HVTRAMP_DESCR_MAPS 0x20
#define HVTRAMP_MAPPING_VADDR 0x00
#define HVTRAMP_MAPPING_TTE 0x08
#define HVTRAMP_MAPPING_SIZE 0x10
#endif /* _SPARC64_HVTRAP_H */

View file

@@ -0,0 +1,6 @@
#ifndef __ASM_SPARC_HW_IRQ_H
#define __ASM_SPARC_HW_IRQ_H
/* Dummy include. */
#endif

File diff suppressed because it is too large

View file

@@ -0,0 +1,97 @@
/* ide.h: SPARC PCI specific IDE glue.
*
* Copyright (C) 1997 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
* Adaptation from sparc64 version to sparc by Pete Zaitcev.
*/
#ifndef _SPARC_IDE_H
#define _SPARC_IDE_H
#ifdef __KERNEL__
#include <asm/io.h>
#ifdef CONFIG_SPARC64
#include <asm/pgalloc.h>
#include <asm/spitfire.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#else
#include <asm/pgtable.h>
#include <asm/psr.h>
#endif
#define __ide_insl(data_reg, buffer, wcount) \
__ide_insw(data_reg, buffer, (wcount)<<1)
#define __ide_outsl(data_reg, buffer, wcount) \
__ide_outsw(data_reg, buffer, (wcount)<<1)
/* On sparc, I/O ports and MMIO registers are accessed identically. */
#define __ide_mm_insw __ide_insw
#define __ide_mm_insl __ide_insl
#define __ide_mm_outsw __ide_outsw
#define __ide_mm_outsl __ide_outsl
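/* Copy "count" 16-bit words from the port into dst, coalescing aligned
 * pairs into 32-bit stores; on sparc64 the destination range is flushed
 * afterwards when D-cache aliasing is possible.
 */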
static inline void __ide_insw(void __iomem *port, void *dst, u32 count)
{
#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
unsigned long end = (unsigned long)dst + (count << 1);
#endif
u16 *ps = dst;
u32 *pi;
if(((unsigned long)ps) & 0x2) {
*ps++ = __raw_readw(port);
count--;
}
pi = (u32 *)ps;
while(count >= 2) {
u32 w;
w = __raw_readw(port) << 16;
w |= __raw_readw(port);
*pi++ = w;
count -= 2;
}
ps = (u16 *)pi;
if(count)
*ps++ = __raw_readw(port);
#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
__flush_dcache_range((unsigned long)dst, end);
#endif
}
static inline void __ide_outsw(void __iomem *port, const void *src, u32 count)
{
#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
unsigned long end = (unsigned long)src + (count << 1);
#endif
const u16 *ps = src;
const u32 *pi;
if(((unsigned long)src) & 0x2) {
__raw_writew(*ps++, port);
count--;
}
pi = (const u32 *)ps;
while(count >= 2) {
u32 w;
w = *pi++;
__raw_writew((w >> 16), port);
__raw_writew(w, port);
count -= 2;
}
ps = (const u16 *)pi;
if(count)
__raw_writew(*ps, port);
#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
__flush_dcache_range((unsigned long)src, end);
#endif
}
#endif /* __KERNEL__ */
#endif /* _SPARC_IDE_H */

View file

@@ -0,0 +1,25 @@
/*
* idprom.h: Macros and defines for idprom routines
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_IDPROM_H
#define _SPARC_IDPROM_H
#include <linux/types.h>
struct idprom {
u8 id_format; /* Format identifier (always 0x01) */
u8 id_machtype; /* Machine type */
u8 id_ethaddr[6]; /* Hardware ethernet address */
s32 id_date; /* Date of manufacture */
u32 id_sernum:24; /* Unique serial number */
u8 id_cksum; /* Checksum - xor of the data bytes */
u8 reserved[16];
};
extern struct idprom *idprom;
void idprom_init(void);
#endif /* !(_SPARC_IDPROM_H) */

View file

@@ -0,0 +1,15 @@
#ifndef _SPARC64_INTR_QUEUE_H
#define _SPARC64_INTR_QUEUE_H
/* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */
#define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */
#define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */
#define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */
#define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */
#define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */
#define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */
#define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */
#define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
#endif /* !(_SPARC64_INTR_QUEUE_H) */

View file

@ -0,0 +1,58 @@
/* io-unit.h: Definitions for the sun4d IO-UNIT.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#ifndef _SPARC_IO_UNIT_H
#define _SPARC_IO_UNIT_H
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
/* The io-unit handles all virtual to physical address translations
 * that occur between the SBUS and physical memory. Accesses by
 * the cpu to IO registers and the like go over the xdbus and so are
 * translated by the on-chip SRMMU. The io-unit and the srmmu do
 * not need to have the same translations at all; in fact, most
 * of the time the translations they handle are disjoint sets.
 * Basically, the io-unit handles all dvma sbus activity.
*/
/* AIEEE, unlike the nice sun4m, these monsters have
fixed DMA range 64M */
#define IOUNIT_DMA_BASE 0xfc000000 /* TOP - 64M */
#define IOUNIT_DMA_SIZE 0x04000000 /* 64M */
/* We use last 1M for sparc_dvma_malloc */
#define IOUNIT_DVMA_SIZE 0x00100000 /* 1M */
/* The format of an iopte in the external page tables */
#define IOUPTE_PAGE 0xffffff00 /* Physical page number (PA[35:12]) */
#define IOUPTE_CACHE 0x00000080 /* Cached (in Viking/MXCC) */
/* XXX Jakub, find out how to program SBUS streaming cache on XDBUS/sun4d.
* XXX Actually, all you should need to do is find out where the registers
* XXX are and copy over the sparc64 implementation I wrote. There may be
* XXX some horrible hwbugs though, so be careful. -DaveM
*/
#define IOUPTE_STREAM 0x00000040 /* Translation can use streaming cache */
#define IOUPTE_INTRA 0x00000008 /* SBUS direct slot->slot transfer */
#define IOUPTE_WRITE 0x00000004 /* Writeable */
#define IOUPTE_VALID 0x00000002 /* IOPTE is valid */
#define IOUPTE_PARITY 0x00000001 /* Parity is checked during DVMA */
struct iounit_struct {
unsigned long bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
spinlock_t lock;
iopte_t __iomem *page_table;
unsigned long rotor[3];
unsigned long limit[4];
};
#define IOUNIT_BMAP1_START 0x00000000
#define IOUNIT_BMAP1_END (IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 1))
#define IOUNIT_BMAP2_START IOUNIT_BMAP1_END
#define IOUNIT_BMAP2_END (IOUNIT_BMAP2_START + (IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 2)))
#define IOUNIT_BMAPM_START IOUNIT_BMAP2_END
#define IOUNIT_BMAPM_END ((IOUNIT_DMA_SIZE - IOUNIT_DVMA_SIZE) >> PAGE_SHIFT)
#endif /* !(_SPARC_IO_UNIT_H) */
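
For illustration, the sketch below assembles an IO-UNIT iopte word from a physical address and flag bits using the field layout given above (IOUPTE_PAGE carries PA[35:12] in bits 31:8). The sample address and flag choice are invented; how the kernel actually fills the external page table is not shown here.

#include <stdint.h>
#include <stdio.h>

#define IOUPTE_PAGE  0xffffff00u	/* Physical page number (PA[35:12]) */
#define IOUPTE_CACHE 0x00000080u
#define IOUPTE_WRITE 0x00000004u
#define IOUPTE_VALID 0x00000002u

static uint32_t make_ioupte(uint64_t paddr, uint32_t flags)
{
	/* PA[35:12] lands in bits 31:8 of the iopte. */
	uint32_t page = (uint32_t)((paddr >> 12) << 8) & IOUPTE_PAGE;

	return page | flags;
}

int main(void)
{
	uint32_t pte = make_ioupte(0x123456000ULL,
				   IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID);

	printf("iopte = 0x%08x\n", pte);	/* expect 0x12345686 */
	return 0;
}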

View file

@ -0,0 +1,21 @@
#ifndef ___ASM_SPARC_IO_H
#define ___ASM_SPARC_IO_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/io_64.h>
#else
#include <asm/io_32.h>
#endif
/*
* Defines used for both SPARC32 and SPARC64
*/
/* Big endian versions of memory read/write routines */
#define readb_be(__addr) __raw_readb(__addr)
#define readw_be(__addr) __raw_readw(__addr)
#define readl_be(__addr) __raw_readl(__addr)
#define writeb_be(__b, __addr) __raw_writeb(__b, __addr)
#define writew_be(__w, __addr) __raw_writew(__w, __addr)
#define writel_be(__l, __addr) __raw_writel(__l, __addr)
#endif

View file

@ -0,0 +1,172 @@
#ifndef __SPARC_IO_H
#define __SPARC_IO_H
#include <linux/kernel.h>
#include <linux/ioport.h> /* struct resource */
#define readb_relaxed(__addr) readb(__addr)
#define readw_relaxed(__addr) readw(__addr)
#define readl_relaxed(__addr) readl(__addr)
#define IO_SPACE_LIMIT 0xffffffff
#define memset_io(d,c,sz) _memset_io(d,c,sz)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
#include <asm-generic/io.h>
static inline void _memset_io(volatile void __iomem *dst,
int c, __kernel_size_t n)
{
volatile void __iomem *d = dst;
while (n--) {
writeb(c, d);
d++;
}
}
static inline void _memcpy_fromio(void *dst, const volatile void __iomem *src,
__kernel_size_t n)
{
char *d = dst;
while (n--) {
char tmp = readb(src);
*d++ = tmp;
src++;
}
}
static inline void _memcpy_toio(volatile void __iomem *dst, const void *src,
__kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
while (n--) {
char tmp = *s++;
writeb(tmp, d);
d++;
}
}
/*
* SBus accessors.
*
* SBus has only one, memory mapped, I/O space.
* We do not need to flip bytes for SBus of course.
*/
static inline u8 sbus_readb(const volatile void __iomem *addr)
{
return *(__force volatile u8 *)addr;
}
static inline u16 sbus_readw(const volatile void __iomem *addr)
{
return *(__force volatile u16 *)addr;
}
static inline u32 sbus_readl(const volatile void __iomem *addr)
{
return *(__force volatile u32 *)addr;
}
static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
*(__force volatile u8 *)addr = b;
}
static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
*(__force volatile u16 *)addr = w;
}
static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
*(__force volatile u32 *)addr = l;
}
static inline void sbus_memset_io(volatile void __iomem *__dst, int c,
__kernel_size_t n)
{
while(n--) {
sbus_writeb(c, __dst);
__dst++;
}
}
static inline void sbus_memcpy_fromio(void *dst,
const volatile void __iomem *src,
__kernel_size_t n)
{
char *d = dst;
while (n--) {
char tmp = sbus_readb(src);
*d++ = tmp;
src++;
}
}
static inline void sbus_memcpy_toio(volatile void __iomem *dst,
const void *src,
__kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
while (n--) {
char tmp = *s++;
sbus_writeb(tmp, d);
d++;
}
}
#ifdef __KERNEL__
/*
* Bus number may be embedded in the higher bits of the physical address.
* This is why we have no bus number argument to ioremap().
*/
void __iomem *ioremap(unsigned long offset, unsigned long size);
#define ioremap_nocache(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y))
void iounmap(volatile void __iomem *addr);
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
void ioport_unmap(void __iomem *);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
void pci_iounmap(struct pci_dev *dev, void __iomem *);
/*
* At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
* so rtc_port is static in it. This should not change unless a new
* hardware pops up.
*/
#define RTC_PORT(x) (rtc_port + (x))
#define RTC_ALWAYS_BCD 0
static inline int sbus_can_dma_64bit(void)
{
return 0; /* actually, sparc_cpu_model==sun4d */
}
static inline int sbus_can_burst64(void)
{
return 0; /* actually, sparc_cpu_model==sun4d */
}
struct device;
void sbus_set_sbus64(struct device *, int);
#endif
#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
#endif /* !(__SPARC_IO_H) */
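
The 32-bit _memset_io()/_memcpy_toio() helpers above simply step through the target region one byte at a time via writeb(), since SBus I/O is plain memory-mapped space. A minimal user-space sketch of that loop, with a mock writeb() storing into an ordinary array in place of a device window:

#include <stddef.h>
#include <stdio.h>

static unsigned char fake_mmio[16];	/* stands in for an ioremap()ed window */

static void mock_writeb(unsigned char b, volatile void *addr)
{
	*(volatile unsigned char *)addr = b;	/* a real writeb() targets device space */
}

static void sketch_memset_io(volatile void *dst, int c, size_t n)
{
	volatile unsigned char *d = dst;

	while (n--)
		mock_writeb(c, d++);
}

int main(void)
{
	sketch_memset_io(fake_mmio, 0xaa, sizeof(fake_mmio));
	printf("fake_mmio[0] = 0x%02x\n", fake_mmio[0]);
	return 0;
}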

View file

@ -0,0 +1,451 @@
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/page.h> /* IO address mapping routines need this */
#include <asm/asi.h>
#include <asm-generic/pci_iomap.h>
/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
/* __raw_{read,write}{b,w,l,q} uses direct access.
* Access the memory as big endian bypassing the cache
* by using ASI_PHYS_BYPASS_EC_E
*/
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 ret;
__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 ret;
__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 ret;
__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
u64 ret;
__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
{
__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
: /* no outputs */
: "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 w, const volatile void __iomem *addr)
{
__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
: /* no outputs */
: "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 l, const volatile void __iomem *addr)
{
__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
: /* no outputs */
: "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
{
__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
: /* no outputs */
: "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
/* Memory functions, same as I/O accesses on Ultra.
* Access memory as little endian bypassing
* the cache by using ASI_PHYS_BYPASS_EC_E_L
*/
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{ u8 ret;
__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
return ret;
}
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{ u16 ret;
__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
return ret;
}
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{ u32 ret;
__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
return ret;
}
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{ u64 ret;
__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
: "=r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
return ret;
}
#define writeb writeb
static inline void writeb(u8 b, volatile void __iomem *addr)
{
__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
: /* no outputs */
: "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
}
#define writew writew
static inline void writew(u16 w, volatile void __iomem *addr)
{
__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
: /* no outputs */
: "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
}
#define writel writel
static inline void writel(u32 l, volatile void __iomem *addr)
{
__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
: /* no outputs */
: "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
}
#define writeq writeq
static inline void writeq(u64 q, volatile void __iomem *addr)
{
__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
: /* no outputs */
: "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
}
#define inb inb
static inline u8 inb(unsigned long addr)
{
return readb((volatile void __iomem *)addr);
}
#define inw inw
static inline u16 inw(unsigned long addr)
{
return readw((volatile void __iomem *)addr);
}
#define inl inl
static inline u32 inl(unsigned long addr)
{
return readl((volatile void __iomem *)addr);
}
#define outb outb
static inline void outb(u8 b, unsigned long addr)
{
writeb(b, (volatile void __iomem *)addr);
}
#define outw outw
static inline void outw(u16 w, unsigned long addr)
{
writew(w, (volatile void __iomem *)addr);
}
#define outl outl
static inline void outl(u32 l, unsigned long addr)
{
writel(l, (volatile void __iomem *)addr);
}
#define inb_p(__addr) inb(__addr)
#define outb_p(__b, __addr) outb(__b, __addr)
#define inw_p(__addr) inw(__addr)
#define outw_p(__w, __addr) outw(__w, __addr)
#define inl_p(__addr) inl(__addr)
#define outl_p(__l, __addr) outl(__l, __addr)
void outsb(unsigned long, const void *, unsigned long);
void outsw(unsigned long, const void *, unsigned long);
void outsl(unsigned long, const void *, unsigned long);
void insb(unsigned long, void *, unsigned long);
void insw(unsigned long, void *, unsigned long);
void insl(unsigned long, void *, unsigned long);
static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
{
insb((unsigned long __force)port, buf, count);
}
static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
{
insw((unsigned long __force)port, buf, count);
}
static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
{
insl((unsigned long __force)port, buf, count);
}
static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
{
outsb((unsigned long __force)port, buf, count);
}
static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
{
outsw((unsigned long __force)port, buf, count);
}
static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
{
outsl((unsigned long __force)port, buf, count);
}
#define readb_relaxed(__addr) readb(__addr)
#define readw_relaxed(__addr) readw(__addr)
#define readl_relaxed(__addr) readl(__addr)
#define readq_relaxed(__addr) readq(__addr)
/* Valid I/O Space regions are anywhere, because each PCI bus supported
* can live in an arbitrary area of the physical address range.
*/
#define IO_SPACE_LIMIT 0xffffffffffffffffUL
/* Now, SBUS variants, only difference from PCI is that we do
* not use little-endian ASIs.
*/
static inline u8 sbus_readb(const volatile void __iomem *addr)
{
return __raw_readb(addr);
}
static inline u16 sbus_readw(const volatile void __iomem *addr)
{
return __raw_readw(addr);
}
static inline u32 sbus_readl(const volatile void __iomem *addr)
{
return __raw_readl(addr);
}
static inline u64 sbus_readq(const volatile void __iomem *addr)
{
return __raw_readq(addr);
}
static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
__raw_writeb(b, addr);
}
static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
__raw_writew(w, addr);
}
static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
__raw_writel(l, addr);
}
static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
{
__raw_writeq(q, addr);
}
static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
while(n--) {
sbus_writeb(c, dst);
dst++;
}
}
static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
volatile void __iomem *d = dst;
while (n--) {
writeb(c, d);
d++;
}
}
static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
__kernel_size_t n)
{
char *d = dst;
while (n--) {
char tmp = sbus_readb(src);
*d++ = tmp;
src++;
}
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
__kernel_size_t n)
{
char *d = dst;
while (n--) {
char tmp = readb(src);
*d++ = tmp;
src++;
}
}
static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
__kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
while (n--) {
char tmp = *s++;
sbus_writeb(tmp, d);
d++;
}
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
__kernel_size_t n)
{
const char *s = src;
volatile void __iomem *d = dst;
while (n--) {
char tmp = *s++;
writeb(tmp, d);
d++;
}
}
#define mmiowb()
#ifdef __KERNEL__
/* On sparc64 we have the whole physical IO address space accessible
* using physically addressed loads and stores, so this does nothing.
*/
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *)offset;
}
#define ioremap_nocache(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y))
static inline void iounmap(volatile void __iomem *addr)
{
}
#define ioread8(X) readb(X)
#define ioread16(X) readw(X)
#define ioread16be(X) __raw_readw(X)
#define ioread32(X) readl(X)
#define ioread32be(X) __raw_readl(X)
#define iowrite8(val,X) writeb(val,X)
#define iowrite16(val,X) writew(val,X)
#define iowrite16be(val,X) __raw_writew(val,X)
#define iowrite32(val,X) writel(val,X)
#define iowrite32be(val,X) __raw_writel(val,X)
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
void ioport_unmap(void __iomem *);
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
void pci_iounmap(struct pci_dev *dev, void __iomem *);
static inline int sbus_can_dma_64bit(void)
{
return 1;
}
static inline int sbus_can_burst64(void)
{
return 1;
}
struct device;
void sbus_set_sbus64(struct device *, int);
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif
#endif /* !(__SPARC64_IO_H) */
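
The accessors above come in two flavors: readl()/writel() use the little-endian bypass ASI, while __raw_readl() (and the ioread32be()/iowrite32be() aliases) use the straight big-endian one. The mock below is only a model of that distinction: it treats a 32-bit register as four bytes in bus (big-endian) order and shows that the two flavors differ by a byte swap; the helper names are invented.

#include <stdint.h>
#include <stdio.h>

static const uint8_t reg_bytes[4] = { 0x12, 0x34, 0x56, 0x78 };	/* bus order */

static uint32_t mock_raw_readl(const uint8_t *p)	/* big-endian view (ioread32be) */
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint32_t mock_readl(const uint8_t *p)		/* little-endian view (ioread32) */
{
	return ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[1] << 8)  |  (uint32_t)p[0];
}

int main(void)
{
	printf("big-endian read:    0x%08x\n", mock_raw_readl(reg_bytes));
	printf("little-endian read: 0x%08x\n", mock_readl(reg_bytes));
	return 0;
}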

View file

@ -0,0 +1,13 @@
#ifndef _ASM_SPARC_IOCTLS_H
#define _ASM_SPARC_IOCTLS_H
#include <uapi/asm/ioctls.h>
#define TIOCGETC __TIOCGETC
#define TIOCGETP __TIOCGETP
#define TIOCGLTC __TIOCGLTC
#define TIOCSLTC __TIOCSLTC
#define TIOCSETP __TIOCSETP
#define TIOCSETN __TIOCSETN
#define TIOCSETC __TIOCSETC
#endif /* !(_ASM_SPARC_IOCTLS_H) */

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_IOMMU_H
#define ___ASM_SPARC_IOMMU_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/iommu_64.h>
#else
#include <asm/iommu_32.h>
#endif
#endif

View file

@ -0,0 +1,121 @@
/* iommu.h: Definitions for the sun4m IOMMU.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_IOMMU_H
#define _SPARC_IOMMU_H
#include <asm/page.h>
#include <asm/bitext.h>
/* The iommu handles all virtual to physical address translations
 * that occur between the SBUS and physical memory. Accesses by
 * the cpu to IO registers and the like go over the mbus and so are
 * translated by the on-chip SRMMU. The iommu and the srmmu do
 * not need to have the same translations at all; in fact, most
 * of the time the translations they handle are disjoint sets.
 * Basically, the iommu handles all dvma sbus activity.
*/
/* The IOMMU registers occupy three pages in IO space. */
struct iommu_regs {
/* First page */
volatile unsigned long control; /* IOMMU control */
volatile unsigned long base; /* Physical base of iopte page table */
volatile unsigned long _unused1[3];
volatile unsigned long tlbflush; /* write only */
volatile unsigned long pageflush; /* write only */
volatile unsigned long _unused2[1017];
/* Second page */
volatile unsigned long afsr; /* Async-fault status register */
volatile unsigned long afar; /* Async-fault physical address */
volatile unsigned long _unused3[2];
volatile unsigned long sbuscfg0; /* SBUS configuration registers, per-slot */
volatile unsigned long sbuscfg1;
volatile unsigned long sbuscfg2;
volatile unsigned long sbuscfg3;
volatile unsigned long mfsr; /* Memory-fault status register */
volatile unsigned long mfar; /* Memory-fault physical address */
volatile unsigned long _unused4[1014];
/* Third page */
volatile unsigned long mid; /* IOMMU module-id */
};
#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */
#define IOMMU_CTRL_VERS 0x0f000000 /* Version */
#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */
#define IOMMU_RNGE_16MB 0x00000000 /* 0xff000000 -> 0xffffffff */
#define IOMMU_RNGE_32MB 0x00000004 /* 0xfe000000 -> 0xffffffff */
#define IOMMU_RNGE_64MB 0x00000008 /* 0xfc000000 -> 0xffffffff */
#define IOMMU_RNGE_128MB 0x0000000c /* 0xf8000000 -> 0xffffffff */
#define IOMMU_RNGE_256MB 0x00000010 /* 0xf0000000 -> 0xffffffff */
#define IOMMU_RNGE_512MB 0x00000014 /* 0xe0000000 -> 0xffffffff */
#define IOMMU_RNGE_1GB 0x00000018 /* 0xc0000000 -> 0xffffffff */
#define IOMMU_RNGE_2GB 0x0000001c /* 0x80000000 -> 0xffffffff */
#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */
#define IOMMU_AFSR_ERR 0x80000000 /* LE, TO, or BE asserted */
#define IOMMU_AFSR_LE 0x40000000 /* SBUS reports error after transaction */
#define IOMMU_AFSR_TO 0x20000000 /* Write access took more than 12.8 us. */
#define IOMMU_AFSR_BE 0x10000000 /* Write access received error acknowledge */
#define IOMMU_AFSR_SIZE 0x0e000000 /* Size of transaction causing error */
#define IOMMU_AFSR_S 0x01000000 /* Sparc was in supervisor mode */
#define IOMMU_AFSR_RESV 0x00f00000 /* Reserved, forced to 0x8 by hardware */
#define IOMMU_AFSR_ME 0x00080000 /* Multiple errors occurred */
#define IOMMU_AFSR_RD 0x00040000 /* A read operation was in progress */
#define IOMMU_AFSR_FAV 0x00020000 /* IOMMU afar has valid contents */
#define IOMMU_SBCFG_SAB30 0x00010000 /* Phys-address bit 30 when bypass enabled */
#define IOMMU_SBCFG_BA16 0x00000004 /* Slave supports 16 byte bursts */
#define IOMMU_SBCFG_BA8 0x00000002 /* Slave supports 8 byte bursts */
#define IOMMU_SBCFG_BYPASS 0x00000001 /* Bypass IOMMU, treat all addresses
produced by this device as pure
physical. */
#define IOMMU_MFSR_ERR 0x80000000 /* One or more of PERR1 or PERR0 */
#define IOMMU_MFSR_S 0x01000000 /* Sparc was in supervisor mode */
#define IOMMU_MFSR_CPU 0x00800000 /* CPU transaction caused parity error */
#define IOMMU_MFSR_ME 0x00080000 /* Multiple parity errors occurred */
#define IOMMU_MFSR_PERR 0x00006000 /* high bit indicates parity error occurred
on the even word of the access, low bit
indicates the odd word caused the parity error */
#define IOMMU_MFSR_BM 0x00001000 /* Error occurred while in boot mode */
#define IOMMU_MFSR_C 0x00000800 /* Address causing error was marked cacheable */
#define IOMMU_MFSR_RTYP 0x000000f0 /* Memory request transaction type */
#define IOMMU_MID_SBAE 0x001f0000 /* SBus arbitration enable */
#define IOMMU_MID_SE 0x00100000 /* Enables SCSI/ETHERNET arbitration */
#define IOMMU_MID_SB3 0x00080000 /* Enable SBUS device 3 arbitration */
#define IOMMU_MID_SB2 0x00040000 /* Enable SBUS device 2 arbitration */
#define IOMMU_MID_SB1 0x00020000 /* Enable SBUS device 1 arbitration */
#define IOMMU_MID_SB0 0x00010000 /* Enable SBUS device 0 arbitration */
#define IOMMU_MID_MID 0x0000000f /* Module-id, hardcoded to 0x8 */
/* The format of an iopte in the page tables */
#define IOPTE_PAGE 0x07ffff00 /* Physical page number (PA[30:12]) */
#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */
#define IOPTE_WRITE 0x00000004 /* Writeable */
#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
#define IOPTE_WAZ 0x00000001 /* Write as zeros */
struct iommu_struct {
struct iommu_regs __iomem *regs;
iopte_t *page_table;
/* For convenience */
unsigned long start; /* First managed virtual address */
unsigned long end; /* Last managed virtual address */
struct bit_map usemap;
};
static inline void iommu_invalidate(struct iommu_regs __iomem *regs)
{
sbus_writel(0, &regs->tlbflush);
}
static inline void iommu_invalidate_page(struct iommu_regs __iomem *regs, unsigned long ba)
{
sbus_writel(ba & PAGE_MASK, &regs->pageflush);
}
#endif /* !(_SPARC_IOMMU_H) */
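
To illustrate the control-register layout above, the sketch below decodes the RNGE field into the DVMA window it selects, following the table in the comments (16MB at 0xff000000 up through 2GB at 0x80000000). The register value is a made-up example; reading the real register requires the mapped iommu_regs.

#include <stdio.h>

#define IOMMU_CTRL_RNGE 0x0000001c
#define IOMMU_CTRL_ENAB 0x00000001

int main(void)
{
	unsigned long control = 0x04000009;	/* hypothetical: RNGE=64MB, enabled */
	unsigned int rnge = (control & IOMMU_CTRL_RNGE) >> 2;	/* 0..7 */
	unsigned long long size = 0x01000000ULL << rnge;	/* 16MB << rnge */
	unsigned long long base = 0x100000000ULL - size;	/* window ends at 4GB */

	printf("DVMA window: %lluMB at 0x%08llx (%s)\n",
	       size >> 20, base,
	       (control & IOMMU_CTRL_ENAB) ? "enabled" : "disabled");
	return 0;
}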

View file

@ -0,0 +1,65 @@
/* iommu.h: Definitions for the sun5 IOMMU.
*
* Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC64_IOMMU_H
#define _SPARC64_IOMMU_H
/* The format of an iopte in the page tables. */
#define IOPTE_VALID 0x8000000000000000UL
#define IOPTE_64K 0x2000000000000000UL
#define IOPTE_STBUF 0x1000000000000000UL
#define IOPTE_INTRA 0x0800000000000000UL
#define IOPTE_CONTEXT 0x07ff800000000000UL
#define IOPTE_PAGE 0x00007fffffffe000UL
#define IOPTE_CACHE 0x0000000000000010UL
#define IOPTE_WRITE 0x0000000000000002UL
#define IOMMU_NUM_CTXS 4096
struct iommu_arena {
unsigned long *map;
unsigned int hint;
unsigned int limit;
};
struct iommu {
spinlock_t lock;
struct iommu_arena arena;
void (*flush_all)(struct iommu *);
iopte_t *page_table;
u32 page_table_map_base;
unsigned long iommu_control;
unsigned long iommu_tsbbase;
unsigned long iommu_flush;
unsigned long iommu_flushinv;
unsigned long iommu_tags;
unsigned long iommu_ctxflush;
unsigned long write_complete_reg;
unsigned long dummy_page;
unsigned long dummy_page_pa;
unsigned long ctx_lowest_free;
DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
u32 dma_addr_mask;
};
struct strbuf {
int strbuf_enabled;
unsigned long strbuf_control;
unsigned long strbuf_pflush;
unsigned long strbuf_fsync;
unsigned long strbuf_err_stat;
unsigned long strbuf_tag_diag;
unsigned long strbuf_line_diag;
unsigned long strbuf_ctxflush;
unsigned long strbuf_ctxmatch_base;
unsigned long strbuf_flushflag_pa;
volatile unsigned long *strbuf_flushflag;
volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
};
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node);
#endif /* !(_SPARC64_IOMMU_H) */
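
A small sketch of how the iopte bits above combine: IOPTE_PAGE masks the page-aligned physical address directly, and the flag bits are ORed in. The sample address and flag selection are invented for demonstration.

#include <stdint.h>
#include <stdio.h>

#define IOPTE_VALID 0x8000000000000000ULL
#define IOPTE_PAGE  0x00007fffffffe000ULL
#define IOPTE_CACHE 0x0000000000000010ULL
#define IOPTE_WRITE 0x0000000000000002ULL

static uint64_t make_iopte(uint64_t paddr, uint64_t flags)
{
	return IOPTE_VALID | (paddr & IOPTE_PAGE) | flags;
}

int main(void)
{
	uint64_t pte = make_iopte(0x123456000ULL, IOPTE_CACHE | IOPTE_WRITE);

	printf("iopte = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}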

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_IRQ_H
#define ___ASM_SPARC_IRQ_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/irq_64.h>
#else
#include <asm/irq_32.h>
#endif
#endif

View file

@ -0,0 +1,24 @@
/* irq.h: IRQ registers on the Sparc.
*
* Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC_IRQ_H
#define _SPARC_IRQ_H
/* Allocated number of logical irq numbers.
* sun4d boxes (ss2000e) should be OK with ~32.
* Be on the safe side and make room for 64
*/
#define NR_IRQS 64
#include <linux/interrupt.h>
#define irq_canonicalize(irq) (irq)
void __init init_IRQ(void);
void __init sun4d_init_sbi_irq(void);
#define NO_IRQ 0xffffffff
#endif

View file

@ -0,0 +1,98 @@
/* irq.h: IRQ registers on the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#ifndef _SPARC64_IRQ_H
#define _SPARC64_IRQ_H
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/pil.h>
#include <asm/ptrace.h>
/* IMAP/ICLR register defines */
#define IMAP_VALID 0x80000000UL /* IRQ Enabled */
#define IMAP_TID_UPA 0x7c000000UL /* UPA TargetID */
#define IMAP_TID_JBUS 0x7c000000UL /* JBUS TargetID */
#define IMAP_TID_SHIFT 26
#define IMAP_AID_SAFARI 0x7c000000UL /* Safari AgentID */
#define IMAP_AID_SHIFT 26
#define IMAP_NID_SAFARI 0x03e00000UL /* Safari NodeID */
#define IMAP_NID_SHIFT 21
#define IMAP_IGN 0x000007c0UL /* IRQ Group Number */
#define IMAP_INO 0x0000003fUL /* IRQ Number */
#define IMAP_INR 0x000007ffUL /* Full interrupt number*/
#define ICLR_IDLE 0x00000000UL /* Idle state */
#define ICLR_TRANSMIT 0x00000001UL /* Transmit state */
#define ICLR_PENDING 0x00000003UL /* Pending state */
/* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
* the type of ino_bucket->irq as appropriate.
*
* ino_bucket->irq allocation is made during {sun4v_,}build_irq().
*/
#define NR_IRQS (2048)
void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq)
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end);
void sun4v_destroy_msi(unsigned int irq);
unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end,
unsigned long imap_base,
unsigned long iclr_base);
void sun4u_destroy_msi(unsigned int irq);
unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
void irq_free(unsigned int irq);
void __init init_IRQ(void);
void fixup_irqs(void);
static inline void set_softint(unsigned long bits)
{
__asm__ __volatile__("wr %0, 0x0, %%set_softint"
: /* No outputs */
: "r" (bits));
}
static inline void clear_softint(unsigned long bits)
{
__asm__ __volatile__("wr %0, 0x0, %%clear_softint"
: /* No outputs */
: "r" (bits));
}
static inline unsigned long get_softint(void)
{
unsigned long retval;
__asm__ __volatile__("rd %%softint, %0"
: "=r" (retval));
return retval;
}
void arch_trigger_all_cpu_backtrace(bool);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];
#define __ARCH_HAS_DO_SOFTIRQ
#define NO_IRQ 0xffffffff
#endif
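
As a quick illustration of the IMAP layout above, this sketch splits a register value into its IGN and INO fields and recombines them into the full interrupt number (INR). The sample value is invented; a real IMAP read would go through the physical I/O accessors.

#include <stdio.h>

#define IMAP_VALID 0x80000000UL
#define IMAP_IGN   0x000007c0UL
#define IMAP_INO   0x0000003fUL
#define IMAP_INR   0x000007ffUL

int main(void)
{
	unsigned long imap = 0x800002e5UL;	/* hypothetical: valid, IGN=0xb, INO=0x25 */
	unsigned long ign = (imap & IMAP_IGN) >> 6;
	unsigned long ino = imap & IMAP_INO;
	unsigned long inr = imap & IMAP_INR;

	printf("valid=%s ign=0x%lx ino=0x%lx inr=0x%lx\n",
	       (imap & IMAP_VALID) ? "yes" : "no", ign, ino, inr);
	return 0;
}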

View file

@ -0,0 +1,8 @@
#ifndef ___ASM_SPARC_IRQFLAGS_H
#define ___ASM_SPARC_IRQFLAGS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm/irqflags_64.h>
#else
#include <asm/irqflags_32.h>
#endif
#endif

Some files were not shown because too many files have changed in this diff.