Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,88 @@
#
# Sun network device configuration
#

config NET_VENDOR_SUN
	bool "Sun devices"
	default y
	depends on SUN3 || SBUS || PCI || SUN_LDOMS
	---help---
	  If you have a network (Ethernet) card belonging to this class, say
	  Y and read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

	  Note that the answer to this question doesn't directly affect the
	  kernel: saying N will just cause the configurator to skip all
	  the questions about Sun network interfaces. If you say Y, you will be
	  asked for your specific card in the following questions.

if NET_VENDOR_SUN

config HAPPYMEAL
	tristate "Sun Happy Meal 10/100baseT support"
	depends on (SBUS || PCI)
	select CRC32
	---help---
	  This driver supports the "hme" interface present on most Ultra
	  systems and as an option on older Sbus systems. This driver supports
	  both PCI and Sbus devices. This driver also supports the "qfe" quad
	  100baseT device available in both PCI and Sbus configurations.

	  To compile this driver as a module, choose M here: the module
	  will be called sunhme.

config SUNBMAC
	tristate "Sun BigMAC 10/100baseT support"
	depends on SBUS
	select CRC32
	---help---
	  This driver supports the "be" interface available as an Sbus option.
	  This is Sun's older 100baseT Ethernet device.

	  To compile this driver as a module, choose M here: the module
	  will be called sunbmac.

config SUNQE
	tristate "Sun QuadEthernet support"
	depends on SBUS
	select CRC32
	---help---
	  This driver supports the "qe" 10baseT Ethernet device, available as
	  an Sbus option. Note that this is not the same as Quad FastEthernet
	  "qfe" which is supported by the Happy Meal driver instead.

	  To compile this driver as a module, choose M here: the module
	  will be called sunqe.

config SUNGEM
	tristate "Sun GEM support"
	depends on PCI
	select CRC32
	select SUNGEM_PHY
	---help---
	  Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
	  <http://docs.oracle.com/cd/E19455-01/806-3985-10/806-3985-10.pdf>.

config CASSINI
	tristate "Sun Cassini support"
	depends on PCI
	select CRC32
	---help---
	  Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
	  <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.

config SUNVNET
	tristate "Sun Virtual Network support"
	depends on SUN_LDOMS
	---help---
	  Support for virtual network devices under Sun Logical Domains.

config NIU
	tristate "Sun Neptune 10Gbit Ethernet support"
	depends on PCI
	select CRC32
	---help---
	  This enables support for cards based upon Sun's
	  Neptune chipset.

endif # NET_VENDOR_SUN

View file

@ -0,0 +1,11 @@
#
# Makefile for the Sun network device drivers.
#
# Each CONFIG_* symbol below is declared in the Kconfig file in this
# directory; drivers build as built-in or module accordingly.

obj-$(CONFIG_HAPPYMEAL) += sunhme.o
obj-$(CONFIG_SUNQE) += sunqe.o
obj-$(CONFIG_SUNBMAC) += sunbmac.o
obj-$(CONFIG_SUNGEM) += sungem.o
obj-$(CONFIG_CASSINI) += cassini.o
obj-$(CONFIG_SUNVNET) += sunvnet.o
obj-$(CONFIG_NIU) += niu.o

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

10229
drivers/net/ethernet/sun/niu.c Normal file

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,338 @@
/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $
* sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SUNBMAC_H
#define _SUNBMAC_H
/* QEC global registers. */
#define GLOB_CTRL 0x00UL /* Control */
#define GLOB_STAT 0x04UL /* Status */
#define GLOB_PSIZE 0x08UL /* Packet Size */
#define GLOB_MSIZE 0x0cUL /* Local-mem size (64K) */
#define GLOB_RSIZE 0x10UL /* Receive partition size */
#define GLOB_TSIZE 0x14UL /* Transmit partition size */
#define GLOB_REG_SIZE 0x18UL
#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */
#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */
#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */
#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */
#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */
#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */
#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */
#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */
#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */
#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */
#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */
#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */
#define GLOB_PSIZE_2048 0x00 /* 2k packet size */
#define GLOB_PSIZE_4096 0x01 /* 4k packet size */
#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
/* QEC BigMAC channel registers. */
#define CREG_CTRL 0x00UL /* Control */
#define CREG_STAT 0x04UL /* Status */
#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */
#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */
#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */
#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */
#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */
#define CREG_BMASK 0x1cUL /* BigMAC Error Interrupt Mask*/
#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */
#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */
#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */
#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */
#define CREG_CCNT 0x30UL /* Collision Counter */
#define CREG_REG_SIZE 0x34UL
#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */
#define CREG_STAT_BERROR 0x80000000 /* BigMAC error */
#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */
#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */
#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */
#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */
#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */
#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */
#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */
#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */
#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */
#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */
#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */
#define CREG_STAT_ERRORS (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR| \
CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP| \
CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR| \
CREG_STAT_RXSERR)
#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */
#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */
#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */
#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */
#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */
#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */
#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */
#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */
#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */
/* BIGMAC core registers */
#define BMAC_XIFCFG 0x000UL /* XIF config register */
/* 0x004-->0x0fc, reserved */
#define BMAC_STATUS 0x100UL /* Status register, clear on read */
#define BMAC_IMASK 0x104UL /* Interrupt mask register */
/* 0x108-->0x204, reserved */
#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */
#define BMAC_TXCFG 0x20cUL /* Transmitter config register */
#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */
#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */
#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */
#define BMAC_STIME 0x21cUL /* Transmit slot time */
#define BMAC_PLEN 0x220UL /* Size of transmit preamble */
#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */
#define BMAC_TXDELIM 0x228UL /* Transmit delimiter */
#define BMAC_JSIZE 0x22cUL /* Toe jam... */
#define BMAC_TXPMAX 0x230UL /* Transmit max pkt size */
#define BMAC_TXPMIN 0x234UL /* Transmit min pkt size */
#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */
#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */
#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */
#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */
#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */
#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */
#define BMAC_RSEED 0x250UL /* Transmit random number seed */
#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */
/* 0x258-->0x304, reserved */
#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */
#define BMAC_RXCFG 0x30cUL /* Receiver config register */
#define BMAC_RXPMAX 0x310UL /* Receive max pkt size */
#define BMAC_RXPMIN 0x314UL /* Receive min pkt size */
#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */
#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */
#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */
#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */
#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */
#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */
#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */
#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */
#define BMAC_RXCVALID 0x338UL /* Receiver code violation */
/* 0x33c, reserved */
#define BMAC_HTABLE3 0x340UL /* Hash table 3 */
#define BMAC_HTABLE2 0x344UL /* Hash table 2 */
#define BMAC_HTABLE1 0x348UL /* Hash table 1 */
#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */
#define BMAC_AFILTER2 0x350UL /* Address filter 2 */
#define BMAC_AFILTER1 0x354UL /* Address filter 1 */
#define BMAC_AFILTER0 0x358UL /* Address filter 0 */
#define BMAC_AFMASK 0x35cUL /* Address filter mask */
#define BMAC_REG_SIZE 0x360UL
/* BigMac XIF config register. */
#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */
#define BIGMAC_XCFG_RESV 0x00000002 /* Reserved, write always as 1 */
#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */
#define BIGMAC_XCFG_SMODE 0x00000008 /* Enable serial mode */
/* BigMAC status register. */
#define BIGMAC_STAT_GOTFRAME 0x00000001 /* Received a frame */
#define BIGMAC_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */
#define BIGMAC_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */
#define BIGMAC_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */
#define BIGMAC_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */
#define BIGMAC_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */
#define BIGMAC_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */
#define BIGMAC_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */
#define BIGMAC_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
#define BIGMAC_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */
#define BIGMAC_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */
#define BIGMAC_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */
#define BIGMAC_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */
#define BIGMAC_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */
#define BIGMAC_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */
/* BigMAC interrupt mask register. */
#define BIGMAC_IMASK_GOTFRAME 0x00000001 /* Received a frame */
#define BIGMAC_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */
#define BIGMAC_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */
#define BIGMAC_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */
#define BIGMAC_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */
#define BIGMAC_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */
#define BIGMAC_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */
#define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */
#define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
#define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */
#define BIGMAC_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */
#define BIGMAC_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */
#define BIGMAC_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */
#define BIGMAC_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */
#define BIGMAC_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */
/* BigMac transmit config register. */
#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
#define BIGMAC_TXCFG_FIFO 0x00000010 /* Default tx fthresh... */
#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
/* BigMac receive config register. */
#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
#define BIGMAC_RXCFG_FIFO 0x0000000e /* Default rx fthresh... */
#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
#define BIGMAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */
#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
/* The BigMAC PHY transceiver. Not nearly as sophisticated as the happy meal
* one. But it does have the "bit banger", oh baby.
*/
#define TCVR_TPAL 0x00UL
#define TCVR_MPAL 0x04UL
#define TCVR_REG_SIZE 0x08UL
/* Frame commands. */
#define FRAME_WRITE 0x50020000
#define FRAME_READ 0x60020000
/* Transceiver registers. */
#define TCVR_PAL_SERIAL 0x00000001 /* Enable serial mode */
#define TCVR_PAL_EXTLBACK 0x00000002 /* Enable external loopback */
#define TCVR_PAL_MSENSE 0x00000004 /* Media sense */
#define TCVR_PAL_LTENABLE 0x00000008 /* Link test enable */
#define TCVR_PAL_LTSTATUS 0x00000010 /* Link test status (P1 only) */
/* Management PAL. */
#define MGMT_PAL_DCLOCK 0x00000001 /* Data clock */
#define MGMT_PAL_OENAB 0x00000002 /* Output enabler */
#define MGMT_PAL_MDIO 0x00000004 /* MDIO Data/attached */
#define MGMT_PAL_TIMEO 0x00000008 /* Transmit enable timeout error */
#define MGMT_PAL_EXT_MDIO MGMT_PAL_MDIO
#define MGMT_PAL_INT_MDIO MGMT_PAL_TIMEO
/* Here are some PHY addresses. */
#define BIGMAC_PHY_EXTERNAL 0 /* External transceiver */
#define BIGMAC_PHY_INTERNAL 1 /* Internal transceiver */
/* Ring descriptors and such, same as Quad Ethernet. */
/* Receive descriptor as laid out in the init block; rx_flags carries the
 * RXD_* status/control bits, rx_addr the DMA address of the buffer. */
struct be_rxd {
	u32 rx_flags;	/* RXD_* bits below */
	u32 rx_addr;	/* DMA address of receive buffer */
};

#define RXD_OWN 0x80000000 /* Ownership. */
#define RXD_UPDATE 0x10000000 /* Being Updated? */
#define RXD_LENGTH 0x000007ff /* Packet Length. */
/* Transmit descriptor; tx_flags carries the TXD_* bits (ownership,
 * start/end-of-packet markers, length), tx_addr the buffer DMA address. */
struct be_txd {
	u32 tx_flags;	/* TXD_* bits below */
	u32 tx_addr;	/* DMA address of transmit buffer */
};

#define TXD_OWN 0x80000000 /* Ownership. */
#define TXD_SOP 0x40000000 /* Start Of Packet */
#define TXD_EOP 0x20000000 /* End Of Packet */
#define TXD_UPDATE 0x10000000 /* Being Updated? */
#define TXD_LENGTH 0x000007ff /* Packet Length. */
#define TX_RING_MAXSIZE 256
#define RX_RING_MAXSIZE 256
#define TX_RING_SIZE 256
#define RX_RING_SIZE 256
#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(bp) \
(((bp)->tx_old <= (bp)->tx_new) ? \
(bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
(bp)->tx_old - (bp)->tx_new - 1)
#define RX_COPY_THRESHOLD 256
#define RX_BUF_ALLOC_SIZE (ETH_FRAME_LEN + (64 * 3))
/* The init block: the RX descriptor ring followed by the TX descriptor
 * ring, kept together so they can be DMA-mapped as a single chunk. */
struct bmac_init_block {
	struct be_rxd be_rxd[RX_RING_MAXSIZE];
	struct be_txd be_txd[TX_RING_MAXSIZE];
};

/* Byte offset of ring element 'elem' of member 'mem' within the init
 * block (open-coded offsetof() on a NULL base pointer). */
#define bib_offset(mem, elem) \
	((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem]))))
/* Now software state stuff. */
/* Which transceiver the driver has selected (cf. BIGMAC_PHY_EXTERNAL /
 * BIGMAC_PHY_INTERNAL above); 'none' means no transceiver detected. */
enum bigmac_transceiver {
	external = 0,
	internal = 1,
	none = 2,
};
/* Timer state engine (drives the bigmac_timer in struct bigmac). */
enum bigmac_timer_state {
	ltrywait = 1,	/* Forcing try of all modes, from fastest to slowest. */
	asleep = 2,	/* Timer inactive. */
};
/* Per-interface software state for one BigMAC device. */
struct bigmac {
	void __iomem *gregs;	/* QEC Global Registers */
	void __iomem *creg;	/* QEC BigMAC Channel Registers */
	void __iomem *bregs;	/* BigMAC Registers */
	void __iomem *tregs;	/* BigMAC Transceiver */
	struct bmac_init_block *bmac_block;	/* RX and TX descriptors */
	__u32 bblock_dvma;	/* RX and TX descriptors */

	spinlock_t lock;	/* Protects this structure's driver state */

	struct sk_buff *rx_skbs[RX_RING_SIZE];	/* skbs backing RX ring slots */
	struct sk_buff *tx_skbs[TX_RING_SIZE];	/* skbs queued in TX ring slots */

	/* Ring cursors; advanced with NEXT_RX/NEXT_TX and used by
	 * TX_BUFFS_AVAIL above. */
	int rx_new, tx_new, rx_old, tx_old;

	int board_rev;	/* BigMAC board revision. */

	enum bigmac_transceiver tcvr_type;	/* Selected transceiver */
	unsigned int bigmac_bursts;	/* DVMA burst setting (cf. GLOB_CTRL_B*) */
	unsigned int paddr;	/* PHY address (cf. BIGMAC_PHY_*) */
	unsigned short sw_bmsr;	/* SW copy of PHY BMSR */
	unsigned short sw_bmcr;	/* SW copy of PHY BMCR */

	struct timer_list bigmac_timer;	/* Timer driven by timer_state engine */
	enum bigmac_timer_state timer_state;
	unsigned int timer_ticks;	/* Ticks elapsed in current timer state */

	struct net_device_stats enet_stats;	/* Interface statistics */
	struct platform_device *qec_op;	/* Parent QEC platform device */
	struct platform_device *bigmac_op;	/* BigMAC platform device */
	struct net_device *dev;	/* Back-pointer to the net device */
};
/* We use this to acquire receive skb's that we can DMA directly into. */

/* Number of bytes to skip so that 'addr' lands on the next 64-byte
 * boundary; evaluates to 0 when already aligned. */
#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
/* Allocate an skb whose data pointer is aligned to a 64-byte boundary
 * (required for receive buffers, per the comment above).  Over-allocates
 * by 64 bytes and reserves the slack needed to reach alignment.
 * Returns NULL if the allocation fails.
 */
static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(length + 64, gfp_flags);

	if (skb) {
		int align = ALIGNED_RX_SKB_ADDR(skb->data);

		if (align)
			skb_reserve(skb, align);
	}

	return skb;
}
#endif /* !(_SUNBMAC_H) */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,513 @@
/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $
* sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver.
* Also known as the "Happy Meal".
*
* Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
*/
#ifndef _SUNHME_H
#define _SUNHME_H
#include <linux/pci.h>
/* Happy Meal global registers. */
#define GREG_SWRESET 0x000UL /* Software Reset */
#define GREG_CFG 0x004UL /* Config Register */
#define GREG_STAT 0x108UL /* Status */
#define GREG_IMASK 0x10cUL /* Interrupt Mask */
#define GREG_REG_SIZE 0x110UL
/* Global reset register. */
#define GREG_RESET_ETX 0x01
#define GREG_RESET_ERX 0x02
#define GREG_RESET_ALL 0x03
/* Global config register. */
#define GREG_CFG_BURSTMSK 0x03
#define GREG_CFG_BURST16 0x00
#define GREG_CFG_BURST32 0x01
#define GREG_CFG_BURST64 0x02
#define GREG_CFG_64BIT 0x04
#define GREG_CFG_PARITY 0x08
#define GREG_CFG_RESV 0x10
/* Global status register. */
#define GREG_STAT_GOTFRAME 0x00000001 /* Received a frame */
#define GREG_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */
#define GREG_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */
#define GREG_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */
#define GREG_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */
#define GREG_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */
#define GREG_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */
#define GREG_STAT_STSTERR 0x00000080 /* Test error in XIF for SQE */
#define GREG_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */
#define GREG_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
#define GREG_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */
#define GREG_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */
#define GREG_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */
#define GREG_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */
#define GREG_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */
#define GREG_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */
#define GREG_STAT_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */
#define GREG_STAT_NORXD 0x00020000 /* No more receive descriptors */
#define GREG_STAT_RXERR 0x00040000 /* Error during receive dma */
#define GREG_STAT_RXLATERR 0x00080000 /* Late error during receive dma */
#define GREG_STAT_RXPERR 0x00100000 /* Parity error during receive dma */
#define GREG_STAT_RXTERR 0x00200000 /* Tag error during receive dma */
#define GREG_STAT_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */
#define GREG_STAT_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */
#define GREG_STAT_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */
#define GREG_STAT_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */
#define GREG_STAT_TXEACK 0x04000000 /* Error during transmit dma */
#define GREG_STAT_TXLERR 0x08000000 /* Late error during transmit dma */
#define GREG_STAT_TXPERR 0x10000000 /* Parity error during transmit dma */
#define GREG_STAT_TXTERR 0x20000000 /* Tag error during transmit dma */
#define GREG_STAT_SLVERR 0x40000000 /* PIO access got an error */
#define GREG_STAT_SLVPERR 0x80000000 /* PIO access got a parity error */
/* All interesting error conditions. */
#define GREG_STAT_ERRORS 0xfc7efefc
/* Global interrupt mask register. */
#define GREG_IMASK_GOTFRAME 0x00000001 /* Received a frame */
#define GREG_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */
#define GREG_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */
#define GREG_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */
#define GREG_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */
#define GREG_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */
#define GREG_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */
#define GREG_IMASK_STSTERR 0x00000080 /* Test error in XIF for SQE */
#define GREG_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */
#define GREG_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
#define GREG_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */
#define GREG_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */
#define GREG_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */
#define GREG_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */
#define GREG_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */
#define GREG_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */
#define GREG_IMASK_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */
#define GREG_IMASK_NORXD 0x00020000 /* No more receive descriptors */
#define GREG_IMASK_RXERR 0x00040000 /* Error during receive dma */
#define GREG_IMASK_RXLATERR 0x00080000 /* Late error during receive dma */
#define GREG_IMASK_RXPERR 0x00100000 /* Parity error during receive dma */
#define GREG_IMASK_RXTERR 0x00200000 /* Tag error during receive dma */
#define GREG_IMASK_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */
#define GREG_IMASK_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */
#define GREG_IMASK_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */
#define GREG_IMASK_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */
#define GREG_IMASK_TXEACK 0x04000000 /* Error during transmit dma */
#define GREG_IMASK_TXLERR 0x08000000 /* Late error during transmit dma */
#define GREG_IMASK_TXPERR 0x10000000 /* Parity error during transmit dma */
#define GREG_IMASK_TXTERR 0x20000000 /* Tag error during transmit dma */
#define GREG_IMASK_SLVERR 0x40000000 /* PIO access got an error */
#define GREG_IMASK_SLVPERR 0x80000000 /* PIO access got a parity error */
/* Happy Meal external transmitter registers. */
#define ETX_PENDING 0x00UL /* Transmit pending/wakeup register */
#define ETX_CFG 0x04UL /* Transmit config register */
#define ETX_RING 0x08UL /* Transmit ring pointer */
#define ETX_BBASE 0x0cUL /* Transmit buffer base */
#define ETX_BDISP 0x10UL /* Transmit buffer displacement */
#define ETX_FIFOWPTR 0x14UL /* FIFO write ptr */
#define ETX_FIFOSWPTR 0x18UL /* FIFO write ptr (shadow register) */
#define ETX_FIFORPTR 0x1cUL /* FIFO read ptr */
#define ETX_FIFOSRPTR 0x20UL /* FIFO read ptr (shadow register) */
#define ETX_FIFOPCNT 0x24UL /* FIFO packet counter */
#define ETX_SMACHINE 0x28UL /* Transmitter state machine */
#define ETX_RSIZE 0x2cUL /* Ring descriptor size */
#define ETX_BPTR 0x30UL /* Transmit data buffer ptr */
#define ETX_REG_SIZE 0x34UL
/* ETX transmit pending register. */
#define ETX_TP_DMAWAKEUP 0x00000001 /* Restart transmit dma */
/* ETX config register. */
#define ETX_CFG_DMAENABLE 0x00000001 /* Enable transmit dma */
#define ETX_CFG_FIFOTHRESH 0x000003fe /* Transmit FIFO threshold */
#define ETX_CFG_IRQDAFTER 0x00000400 /* Interrupt after TX-FIFO drained */
#define ETX_CFG_IRQDBEFORE 0x00000000 /* Interrupt before TX-FIFO drained */
#define ETX_RSIZE_SHIFT 4
/* Happy Meal external receiver registers. */
#define ERX_CFG 0x00UL /* Receiver config register */
#define ERX_RING 0x04UL /* Receiver ring ptr */
#define ERX_BPTR 0x08UL /* Receiver buffer ptr */
#define ERX_FIFOWPTR 0x0cUL /* FIFO write ptr */
#define ERX_FIFOSWPTR 0x10UL /* FIFO write ptr (shadow register) */
#define ERX_FIFORPTR 0x14UL /* FIFO read ptr */
#define ERX_FIFOSRPTR 0x18UL /* FIFO read ptr (shadow register) */
#define ERX_SMACHINE 0x1cUL /* Receiver state machine */
#define ERX_REG_SIZE 0x20UL
/* ERX config register. */
#define ERX_CFG_DMAENABLE 0x00000001 /* Enable receive DMA */
#define ERX_CFG_RESV1 0x00000006 /* Unused... */
#define ERX_CFG_BYTEOFFSET 0x00000038 /* Receive first byte offset */
#define ERX_CFG_RESV2 0x000001c0 /* Unused... */
#define ERX_CFG_SIZE32 0x00000000 /* Receive ring size == 32 */
#define ERX_CFG_SIZE64 0x00000200 /* Receive ring size == 64 */
#define ERX_CFG_SIZE128 0x00000400 /* Receive ring size == 128 */
#define ERX_CFG_SIZE256 0x00000600 /* Receive ring size == 256 */
#define ERX_CFG_RESV3 0x0000f800 /* Unused... */
#define ERX_CFG_CSUMSTART 0x007f0000 /* Offset of checksum start,
* in halfwords. */
/* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */
#define BMAC_XIFCFG 0x0000UL /* XIF config register */
/* 0x4-->0x204, reserved */
#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */
#define BMAC_TXCFG 0x20cUL /* Transmitter config register */
#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */
#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */
#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */
#define BMAC_STIME 0x21cUL /* Transmit slot time */
#define BMAC_PLEN 0x220UL /* Size of transmit preamble */
#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */
#define BMAC_TXSDELIM 0x228UL /* Transmit delimiter */
#define BMAC_JSIZE 0x22cUL /* Jam size */
#define BMAC_TXMAX 0x230UL /* Transmit max pkt size */
#define BMAC_TXMIN 0x234UL /* Transmit min pkt size */
#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */
#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */
#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */
#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */
#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */
#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */
#define BMAC_RSEED 0x250UL /* Transmit random number seed */
#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */
/* 0x258-->0x304, reserved */
#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */
#define BMAC_RXCFG 0x30cUL /* Receiver config register */
#define BMAC_RXMAX 0x310UL /* Receive max pkt size */
#define BMAC_RXMIN 0x314UL /* Receive min pkt size */
#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */
#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */
#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */
#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */
#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */
#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */
#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */
#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */
#define BMAC_RXCVALID 0x338UL /* Receiver code violation */
/* 0x33c, reserved */
#define BMAC_HTABLE3 0x340UL /* Hash table 3 */
#define BMAC_HTABLE2 0x344UL /* Hash table 2 */
#define BMAC_HTABLE1 0x348UL /* Hash table 1 */
#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */
#define BMAC_AFILTER2 0x350UL /* Address filter 2 */
#define BMAC_AFILTER1 0x354UL /* Address filter 1 */
#define BMAC_AFILTER0 0x358UL /* Address filter 0 */
#define BMAC_AFMASK 0x35cUL /* Address filter mask */
#define BMAC_REG_SIZE 0x360UL
/* BigMac XIF config register. */
#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */
#define BIGMAC_XCFG_XLBACK 0x00000002 /* Loopback-mode XIF enable */
#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */
#define BIGMAC_XCFG_MIIDISAB 0x00000008 /* MII receive buffer disable */
#define BIGMAC_XCFG_SQENABLE 0x00000010 /* SQE test enable */
#define BIGMAC_XCFG_SQETWIN 0x000003e0 /* SQE time window */
#define BIGMAC_XCFG_LANCE 0x00000010 /* Lance mode enable */
#define BIGMAC_XCFG_LIPG0 0x000003e0 /* Lance mode IPG0 */
/* BigMac transmit config register. */
#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
#define BIGMAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */
/* BigMac receive config register. */
#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
#define BIGMAC_RXCFG_REJME 0x00000200 /* Reject packets addressed to me */
#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
/* These are the "Management Interface" (ie. MIF) registers of the transceiver. */
#define TCVR_BBCLOCK 0x00UL /* Bit bang clock register */
#define TCVR_BBDATA 0x04UL /* Bit bang data register */
#define TCVR_BBOENAB 0x08UL /* Bit bang output enable */
#define TCVR_FRAME 0x0cUL /* Frame control/data register */
#define TCVR_CFG 0x10UL /* MIF config register */
#define TCVR_IMASK 0x14UL /* MIF interrupt mask */
#define TCVR_STATUS 0x18UL /* MIF status */
#define TCVR_SMACHINE 0x1cUL /* MIF state machine */
#define TCVR_REG_SIZE 0x20UL
/* Frame commands. */
#define FRAME_WRITE 0x50020000
#define FRAME_READ 0x60020000
/* Transceiver config register */
#define TCV_CFG_PSELECT 0x00000001 /* Select PHY */
#define TCV_CFG_PENABLE 0x00000002 /* Enable MIF polling */
#define TCV_CFG_BENABLE 0x00000004 /* Enable the "bit banger" oh baby */
#define TCV_CFG_PREGADDR 0x000000f8 /* Address of poll register */
#define TCV_CFG_MDIO0 0x00000100 /* MDIO zero, data/attached */
#define TCV_CFG_MDIO1 0x00000200 /* MDIO one, data/attached */
#define TCV_CFG_PDADDR 0x00007c00 /* Device PHY address polling */
/* Here are some PHY addresses. */
#define TCV_PADDR_ETX 0 /* Internal transceiver */
#define TCV_PADDR_ITX 1 /* External transceiver */
/* Transceiver status register */
#define TCV_STAT_BASIC 0xffff0000 /* The "basic" part */
#define TCV_STAT_NORMAL 0x0000ffff /* The "non-basic" part */
/* Inside the Happy Meal transceiver is the physical layer, they use an
* implementations for National Semiconductor, part number DP83840VCE.
* You can retrieve the data sheets and programming docs for this beast
* from http://www.national.com/
*
* The DP83840 is capable of both 10 and 100Mbps ethernet, in both
* half and full duplex mode. It also supports auto negotiation.
*
* But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM!
* Debugging eeprom burnt code is more fun than programming this chip!
*/
/* Generic MII registers defined in linux/mii.h, these below
* are DP83840 specific.
*/
#define DP83840_CSCONFIG 0x17 /* CS configuration */
/* The Carrier Sense config register. */
#define CSCONFIG_RESV1 0x0001 /* Unused... */
#define CSCONFIG_LED4 0x0002 /* Pin for full-dplx LED4 */
#define CSCONFIG_LED1 0x0004 /* Pin for conn-status LED1 */
#define CSCONFIG_RESV2 0x0008 /* Unused... */
#define CSCONFIG_TCVDISAB 0x0010 /* Turns off the transceiver */
#define CSCONFIG_DFBYPASS 0x0020 /* Bypass disconnect function */
#define CSCONFIG_GLFORCE 0x0040 /* Good link force for 100mbps */
#define CSCONFIG_CLKTRISTATE 0x0080 /* Tristate 25m clock */
#define CSCONFIG_RESV3 0x0700 /* Unused... */
#define CSCONFIG_ENCODE 0x0800 /* 1=MLT-3, 0=binary */
#define CSCONFIG_RENABLE 0x1000 /* Repeater mode enable */
#define CSCONFIG_TCDISABLE 0x2000 /* Disable timeout counter */
#define CSCONFIG_RESV4 0x4000 /* Unused... */
#define CSCONFIG_NDISABLE 0x8000 /* Disable NRZI */
/* Happy Meal descriptor rings and such.
* All descriptor rings must be aligned on a 2K boundary.
* All receive buffers must be 64 byte aligned.
* Always write the address first before setting the ownership
* bits to avoid races with the hardware scanning the ring.
*/
/* Descriptor word type; __bitwise__ lets sparse catch raw (un-swapped)
 * accesses.  Descriptor words are normally touched via the read_desc32 /
 * write_txd / write_rxd helpers in struct happy_meal.
 */
typedef u32 __bitwise__ hme32;

/* One receive descriptor: flags/length word plus DMA buffer address. */
struct happy_meal_rxd {
	hme32 rx_flags;
	hme32 rx_addr;
};

#define RXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */
#define RXFLAG_OVERFLOW 0x40000000 /* 1 = buffer overflow */
#define RXFLAG_SIZE 0x3fff0000 /* Size of the buffer */
#define RXFLAG_CSUM 0x0000ffff /* HW computed checksum */

/* One transmit descriptor: flags/length word plus DMA buffer address. */
struct happy_meal_txd {
	hme32 tx_flags;
	hme32 tx_addr;
};

#define TXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */
#define TXFLAG_SOP 0x40000000 /* 1 = start of packet */
#define TXFLAG_EOP 0x20000000 /* 1 = end of packet */
#define TXFLAG_CSENABLE 0x10000000 /* 1 = enable hw-checksums */
#define TXFLAG_CSLOCATION 0x0ff00000 /* Where to stick the csum */
#define TXFLAG_CSBUFBEGIN 0x000fc000 /* Where to begin checksum */
#define TXFLAG_SIZE 0x00003fff /* Size of the packet */
/* Happy Meal ring geometry.
 *
 * TX_RING_SIZE must be a multiple of 16 in the range [16, 256]; the
 * #error below enforces that at compile time.  (The old comment said
 * "<255", which contradicted both the guard and TX_RING_MAXSIZE.)
 * RX_RING_SIZE must be one of 32/64/128/256 — the only sizes the
 * ERX_CFG_SIZE* hardware encodings support.
 */
#define TX_RING_SIZE 32 /* Must be a multiple of 16, in [16, 256] */
#define RX_RING_SIZE 32 /* see ERX_CFG_SIZE* for possible values */

#if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
#error TX_RING_SIZE holds illegal value
#endif

#define TX_RING_MAXSIZE 256
#define RX_RING_MAXSIZE 256

/* We use a 14 byte offset for checksum computation. */
#if (RX_RING_SIZE == 32)
#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16))
#elif (RX_RING_SIZE == 64)
#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16))
#elif (RX_RING_SIZE == 128)
#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16))
#elif (RX_RING_SIZE == 256)
#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16))
#else
#error RX_RING_SIZE holds illegal value
#endif

/* Ring indices wrap with a power-of-two mask. */
#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))

/* Number of free TX slots; one slot is always kept empty so that a
 * full ring (tx_old == tx_new) is distinguishable from an empty one.
 */
#define TX_BUFFS_AVAIL(hp) \
	(((hp)->tx_old <= (hp)->tx_new) ? \
	  (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
	  (hp)->tx_old - (hp)->tx_new - 1)

#define RX_OFFSET 2
#define RX_BUF_ALLOC_SIZE (1546 + RX_OFFSET + 64)
#define RX_COPY_THRESHOLD 256
/* All descriptors for one interface, allocated as a single DMA block. */
struct hmeal_init_block {
	struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE];
	struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE];
};

/* Byte offset of descriptor `elem` of ring `mem` inside the init block
 * (classic null-pointer offsetof idiom).
 */
#define hblock_offset(mem, elem) \
	((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem]))))

/* Now software state stuff. */
enum happy_transceiver {
	external = 0,
	internal = 1,
	none = 2,
};

/* Timer state engine. */
enum happy_timer_state {
	arbwait = 0, /* Waiting for auto negotiation to complete. */
	lupwait = 1, /* Auto-neg complete, awaiting link-up status. */
	ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */
	asleep = 3, /* Time inactive. */
};
struct quattro;

/* Happy happy, joy joy!  Per-interface driver state. */
struct happy_meal {
	void __iomem *gregs; /* Happy meal global registers */
	struct hmeal_init_block *happy_block; /* RX and TX descriptors (CPU addr) */

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Indirect descriptor accessors so one driver binary can serve
	 * both SBUS and PCI cards (which differ in byte order handling).
	 */
	u32 (*read_desc32)(hme32 *);
	void (*write_txd)(struct happy_meal_txd *, u32, u32);
	void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
#endif

	/* This is either an platform_device or a pci_dev. */
	void *happy_dev;
	struct device *dma_dev;

	spinlock_t happy_lock;

	struct sk_buff *rx_skbs[RX_RING_SIZE];
	struct sk_buff *tx_skbs[TX_RING_SIZE];
	int rx_new, tx_new, rx_old, tx_old;

	struct net_device_stats net_stats; /* Statistical counters */

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Bus-specific register accessors, same dual-bus reason as above. */
	u32 (*read32)(void __iomem *);
	void (*write32)(void __iomem *, u32);
#endif

	void __iomem *etxregs; /* External transmitter regs */
	void __iomem *erxregs; /* External receiver regs */
	void __iomem *bigmacregs; /* BIGMAC core regs */
	void __iomem *tcvregs; /* MIF transceiver regs */

	dma_addr_t hblock_dvma; /* DVMA visible address happy block */
	unsigned int happy_flags; /* Driver state flags */
	int irq;
	enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
	unsigned int happy_bursts; /* Get your mind out of the gutter */
	unsigned int paddr; /* PHY address for transceiver */
	unsigned short hm_revision; /* Happy meal revision */
	unsigned short sw_bmcr; /* SW copy of BMCR */
	unsigned short sw_bmsr; /* SW copy of BMSR */
	unsigned short sw_physid1; /* SW copy of PHYSID1 */
	unsigned short sw_physid2; /* SW copy of PHYSID2 */
	unsigned short sw_advertise; /* SW copy of ADVERTISE */
	unsigned short sw_lpa; /* SW copy of LPA */
	unsigned short sw_expansion; /* SW copy of EXPANSION */
	unsigned short sw_csconfig; /* SW copy of CSCONFIG */
	unsigned int auto_speed; /* Auto-nego link speed */
	unsigned int forced_speed; /* Force mode link speed */
	unsigned int poll_data; /* MIF poll data */
	unsigned int poll_flag; /* MIF poll flag */
	unsigned int linkcheck; /* Have we checked the link yet? */
	unsigned int lnkup; /* Is the link up as far as we know? */
	unsigned int lnkdown; /* Trying to force the link down? */
	unsigned int lnkcnt; /* Counter for link-up attempts. */
	struct timer_list happy_timer; /* To watch the link when coming up. */
	enum happy_timer_state timer_state; /* State of the auto-neg timer. */
	unsigned int timer_ticks; /* Number of clicks at each state. */

	struct net_device *dev; /* Backpointer */
	struct quattro *qfe_parent; /* For Quattro cards */
	int qfe_ent; /* Which instance on quattro */
};

/* Here are the happy flags (bits of happy_flags above). */
#define HFLAG_POLL 0x00000001 /* We are doing MIF polling */
#define HFLAG_FENABLE 0x00000002 /* The MII frame is enabled */
#define HFLAG_LANCE 0x00000004 /* We are using lance-mode */
#define HFLAG_RXENABLE 0x00000008 /* Receiver is enabled */
#define HFLAG_AUTO 0x00000010 /* Using auto-negotiation, 0 = force */
#define HFLAG_FULL 0x00000020 /* Full duplex enable */
#define HFLAG_MACFULL 0x00000040 /* Using full duplex in the MAC */
#define HFLAG_POLLENABLE 0x00000080 /* Actually try MIF polling */
#define HFLAG_RXCV 0x00000100 /* XXX RXCV ENABLE */
#define HFLAG_INIT 0x00000200 /* Init called at least once */
#define HFLAG_LINKUP 0x00000400 /* 1 = Link is up */
#define HFLAG_PCI 0x00000800 /* PCI based Happy Meal */
#define HFLAG_QUATTRO 0x00001000 /* On QFE/Quattro card */

#define HFLAG_20_21 (HFLAG_POLLENABLE | HFLAG_FENABLE)
#define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV)

/* Support for QFE/Quattro cards.  Shared state for the four ports. */
struct quattro {
	struct net_device *happy_meals[4];

	/* This is either a sbus_dev or a pci_dev. */
	void *quattro_dev;

	struct quattro *next;

	/* PROM ranges, if any. */
#ifdef CONFIG_SBUS
	struct linux_prom_ranges ranges[8];
#endif
	int nranges;
};
/* We use this to acquire receive skb's that we can DMA directly into.
 * ALIGNED_RX_SKB_ADDR yields how many bytes to skip so that `addr`
 * becomes 64-byte aligned (the HW requires 64-byte aligned RX buffers,
 * see the ring comment above).
 */
#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))

/* Allocate an skb with 64 bytes of slack and reserve up to the next
 * 64-byte boundary so that skb->data is suitably aligned for DMA.
 */
#define happy_meal_alloc_skb(__length, __gfp_flags) \
({	struct sk_buff *__skb; \
	__skb = alloc_skb((__length) + 64, (__gfp_flags)); \
	if(__skb) { \
		int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
		if(__offset) \
			skb_reserve(__skb, __offset); \
	} \
	__skb; \
})

#endif /* !(_SUNHME_H) */

View file

@ -0,0 +1,996 @@
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
* Once again I am out to prove that every ethernet
* controller out there can be most efficiently programmed
* if you make it look like a LANCE.
*
* Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include "sunqe.h"
#define DRV_NAME "sunqe"
#define DRV_VERSION "4.1"
#define DRV_RELDATE "August 27, 2008"
#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"

/* Banner printed once at first probe (see qec_ether_init). */
static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

/* Head of the singly-linked list of probed QEC controllers (next_module). */
static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);
#define QEC_RESET_TRIES 200

/* Kick a global QEC reset and poll until the RESET bit self-clears.
 * Returns 0 on success, -1 if the controller never comes back.
 */
static inline int qec_global_reset(void __iomem *gregs)
{
	int attempt;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	for (attempt = QEC_RESET_TRIES - 1; attempt > 0; attempt--) {
		if (!(sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET))
			return 0;
		udelay(20);
	}

	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}
#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES 200

/* Quiesce one QE channel: reset the AMD MACE first, then the QEC
 * channel logic.  Each reset bit is polled until the chip clears it.
 * Returns 0 on success, -1 if either reset never completes.
 */
static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}
/* Reset ring indices and rebuild the RX descriptor ring: each RX
 * descriptor gets its DMA buffer address and is handed to the HW
 * (RXD_OWN).  TX descriptors are just zeroed; they are filled in
 * per-packet by qe_start_xmit.
 */
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}
/* Full (re)initialization of one QE channel: stop the hardware,
 * program descriptor ring bases, IRQ masks, FIFO partitioning and the
 * AMD MACE (station address, filters, PHY config), then re-arm the
 * rings and re-enable RX/TX via qe_set_multicast.
 * Returns 0 on success, -EAGAIN if the channel would not reset.
 * NOTE(review): `from_irq` is accepted but never read here — presumably
 * kept for interface symmetry with callers; confirm before removing.
 */
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, inter frame space nor throttle seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	/* The MACE auto-increments internally; write all six MAC bytes
	 * to the same register.
	 */
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();

			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 *
 * Decode every error bit in qe_status into netdev statistics, and
 * flag the conditions that are known to wedge the MACE.  If any such
 * condition was seen the channel is re-initialized via qe_init().
 * Returns non-zero when the reset workaround was performed.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	/* The *COFLOW bits signal an 8-bit HW counter overflowed, hence
	 * the += 256 bumps below.
	 */
	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 *
 * Walks the RX ring from rx_new until a descriptor still owned by the
 * hardware is found.  Each completed frame is copied into a freshly
 * allocated skb and passed to the stack; the descriptor is then
 * re-armed (address first, then OWN — see the ring comment in the
 * header) and handed back to the hardware.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				/* Reserve 2 bytes so the IP header is
				 * word-aligned after the 14-byte ether header.
				 */
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}
static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 *
 * GLOB_STAT packs 4 status bits per channel; the loop consumes one
 * nibble per iteration (qec_status >>= 4).
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				/* Channel may have been reset; skip the
				 * normal RX/TX servicing for it.
				 */
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}
/* net_device open hook: record the MACE config we want (TX, RX and
 * multicast-accept enabled) and bring the channel up.
 */
static int qe_open(struct net_device *dev)
{
	struct sunqe *qe = netdev_priv(dev);

	qe->mconfig = MREGS_MCONFIG_TXENAB |
		      MREGS_MCONFIG_RXENAB |
		      MREGS_MCONFIG_MBAENAB;

	return qe_init(qe, 0);
}
/* net_device stop hook: quiesce the channel hardware. */
static int qe_close(struct net_device *dev)
{
	struct sunqe *qe = netdev_priv(dev);

	qe_stop(qe);

	return 0;
}
/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 *
 * Advance tx_old past every descriptor the hardware has released
 * (TXD_OWN clear), stopping at the first one it still owns.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *ring = qep->qe_block->qe_txd;
	int idx;

	for (idx = qep->tx_old; idx != qep->tx_new; idx = NEXT_TX(idx)) {
		if (ring[idx].tx_flags & TXD_OWN)
			break;
	}
	qep->tx_old = idx;
}
/* net_device tx_timeout hook.  First try a plain reclaim; if that
 * frees TX slots the stall was transient.  Only if the ring is still
 * full do we reset the channel.
 */
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (! tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}
/* Get a packet queued to go onto the wire.
 *
 * The packet is copied into a fixed per-slot DMA bounce buffer, so the
 * skb can be freed immediately after queueing.  If the ring fills up,
 * stop the queue and unmask the TX interrupt so qec_interrupt can wake
 * us once slots free up.
 */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race...  mark the slot in-update before touching it,
	 * and only set TXD_OWN after the address is in place.
	 */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* net_device set_rx_mode hook.  Programs the MACE logical address
 * filter: all-ones for allmulti / large lists, the PROMISC config bit
 * for promiscuous mode, or a 64-bit CRC hash filter otherwise.  Also
 * (re)writes MREGS_MCONFIG, which re-enables the receiver — qe_init
 * relies on this as the final step of channel bring-up.
 */
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}

		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}
/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
const struct linux_prom_registers *regs;
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
strlcpy(info->driver, "sunqe", sizeof(info->driver));
strlcpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
regs->which_io);
}
/* ethtool get_link hook: sample the MACE PHY config register and
 * report the link-status bit.  Non-zero means link is up.
 */
static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	u8 phy_status;

	spin_lock_irq(&qep->lock);
	phy_status = sbus_readb(qep->mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phy_status & MREGS_PHYCONFIG_LSTAT;
}
/* Minimal ethtool surface: driver info and link state only. */
static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};
/* This is only called once at boot time for each card probed.
 * Programs the QEC's SBUS burst size, packet size, and splits the
 * on-board local memory between the 4 channels' RX and TX FIFOs.
 */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}
/* Find and set the burst sizes for the QEC, since it does the actual
 * dma for all 4 channels.  The node's "burst-sizes" property is
 * intersected with its parent's; if the result is unusable we fall
 * back to everything up to 32-byte bursts.
 */
static u8 qec_get_burst(struct device_node *dp)
{
	u8 node_bursts, parent_bursts;

	node_bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	node_bursts &= 0xff;

	parent_bursts = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
	if (parent_bursts != 0xff)
		node_bursts &= parent_bursts;

	if (node_bursts == 0xff ||
	    !(node_bursts & DMA_BURST16) ||
	    !(node_bursts & DMA_BURST32))
		node_bursts = DMA_BURST32 - 1;

	return node_bursts;
}
/* Look up (or lazily create) the shared sunqec state for the parent
 * QEC controller of `child`.  On first call for a controller this maps
 * its global registers, verifies MACE mode, resets and initializes it,
 * registers the shared IRQ handler and links it into root_qec_dev.
 * Returns NULL on any failure.
 */
static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	/* Only reached with qecp != NULL; unmap if we got that far. */
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}
/* net_device callbacks; MTU/MAC handling uses the generic ethernet helpers. */
static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/* Probe one QE channel: allocate the netdev, attach it to its parent
 * QEC (creating the parent state on first use), map channel and MACE
 * registers, allocate the DMA descriptor block and bounce buffers,
 * and register the network device.  Undoes everything on failure.
 */
static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	/* All four channels share the machine's IDPROM ethernet address. */
	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	/* qe fields start zeroed (alloc_etherdev), so these checks are safe
	 * whichever step failed.
	 */
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}
/* platform_driver probe hook; all the work lives in qec_ether_init. */
static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}
/* platform_driver remove hook: unregister the netdev and release the
 * per-channel mappings and DMA memory.  The shared parent QEC state
 * is torn down later, in qec_exit.
 */
static int qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	return 0;
}
/* Match any SBUS/OF node named "qe"; each such node is one QE channel
 * hanging off the shared QEC controller.
 */
static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);
/* Platform driver glue: binds the probe/remove callbacks above to the
 * "qec" driver name and the OF match table.
 */
static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.owner = THIS_MODULE,
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= qec_sbus_remove,
};
/* Module init: register the platform driver.  Per-channel setup
 * happens later in qec_sbus_probe() for each matching device node.
 */
static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}
/* Module exit: unregister the driver, then release every shared QEC
 * parent controller still chained on the global list.  Per-channel
 * resources were already freed by qec_sbus_remove(); only the
 * QEC-level IRQ, the global register mapping and the sunqec struct
 * itself remain to be torn down here.
 */
static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev != NULL) {
		struct sunqec *qecp = root_qec_dev;
		struct platform_device *op = qecp->op;

		/* Unlink before freeing so the list head stays valid. */
		root_qec_dev = qecp->next_module;

		free_irq(op->archdata.irqs[0], (void *) qecp);
		of_iounmap(&op->resource[0], qecp->gregs,
			   GLOB_REG_SIZE);
		kfree(qecp);
	}
}
/* Standard module entry/exit hooks. */
module_init(qec_init);
module_exit(qec_exit);

View file

@ -0,0 +1,350 @@
/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
* sunqe.h: Definitions for the Sun QuadEthernet driver.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SUNQE_H
#define _SUNQE_H
/* QEC global registers. */
#define GLOB_CTRL 0x00UL /* Control */
#define GLOB_STAT 0x04UL /* Status */
#define GLOB_PSIZE 0x08UL /* Packet Size */
#define GLOB_MSIZE 0x0cUL /* Local-memory Size */
#define GLOB_RSIZE 0x10UL /* Receive partition size */
#define GLOB_TSIZE 0x14UL /* Transmit partition size */
#define GLOB_REG_SIZE 0x18UL
#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */
#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */
#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */
#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */
#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */
#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */
#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */
#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */
#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */
#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */
#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */
#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */
#define GLOB_PSIZE_2048 0x00 /* 2k packet size */
#define GLOB_PSIZE_4096 0x01 /* 4k packet size */
#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
/* In MACE mode, there are four qe channels.  Each channel has its own
 * status bits in the QEC status register.  This macro picks out the
 * ones you want.
 */
#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf)
/* The following registers are for per-qe channel information/status. */
#define CREG_CTRL 0x00UL /* Control */
#define CREG_STAT 0x04UL /* Status */
#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */
#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */
#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */
#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */
#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */
#define CREG_MMASK 0x1cUL /* MACE Error Interrupt Mask */
#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */
#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */
#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */
#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */
#define CREG_CCNT 0x30UL /* Collision Counter */
#define CREG_PIPG 0x34UL /* Inter-Frame Gap */
#define CREG_REG_SIZE 0x38UL
#define CREG_CTRL_RXOFF 0x00000004 /* Disable this qe's receiver*/
#define CREG_CTRL_RESET 0x00000002 /* Reset this qe channel */
#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */
#define CREG_STAT_EDEFER 0x10000000 /* Excessive Defers */
#define CREG_STAT_CLOSS 0x08000000 /* Carrier Loss */
#define CREG_STAT_ERETRIES 0x04000000 /* More than 16 retries */
#define CREG_STAT_LCOLL 0x02000000 /* Late TX Collision */
#define CREG_STAT_FUFLOW 0x01000000 /* FIFO Underflow */
#define CREG_STAT_JERROR 0x00800000 /* Jabber Error */
#define CREG_STAT_BERROR 0x00400000 /* Babble Error */
#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */
#define CREG_STAT_CCOFLOW 0x00100000 /* TX Coll-counter Overflow */
#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */
#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */
#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */
#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */
#define CREG_STAT_RCCOFLOW 0x00001000 /* RX Coll-counter Overflow */
#define CREG_STAT_RUOFLOW 0x00000800 /* Runt Counter Overflow */
#define CREG_STAT_MCOFLOW 0x00000400 /* Missed Counter Overflow */
#define CREG_STAT_RXFOFLOW 0x00000200 /* RX FIFO Overflow */
#define CREG_STAT_RLCOLL 0x00000100 /* RX Late Collision */
#define CREG_STAT_FCOFLOW 0x00000080 /* Frame Counter Overflow */
#define CREG_STAT_CECOFLOW 0x00000040 /* CRC Error-counter Overflow*/
#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */
#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */
#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */
#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */
#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */
#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */
#define CREG_STAT_ERRORS (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES| \
CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR| \
CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR| \
CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR| \
CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \
CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW| \
CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL| \
CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR)
#define CREG_QMASK_COFLOW 0x00100000 /* CollCntr overflow */
#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */
#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */
#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */
#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */
#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */
#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */
#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */
#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */
#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */
#define CREG_MMASK_EDEFER 0x10000000 /* Excess defer */
#define CREG_MMASK_CLOSS 0x08000000 /* Carrier loss */
#define CREG_MMASK_ERETRY 0x04000000 /* Excess retry */
#define CREG_MMASK_LCOLL 0x02000000 /* Late collision error */
#define CREG_MMASK_UFLOW 0x01000000 /* Underflow */
#define CREG_MMASK_JABBER 0x00800000 /* Jabber error */
#define CREG_MMASK_BABBLE 0x00400000 /* Babble error */
#define CREG_MMASK_OFLOW 0x00000800 /* Overflow */
#define CREG_MMASK_RXCOLL 0x00000400 /* RX Coll-Cntr overflow */
#define CREG_MMASK_RPKT 0x00000200 /* Runt pkt overflow */
#define CREG_MMASK_MPKT 0x00000100 /* Missed pkt overflow */
#define CREG_PIPG_TENAB 0x00000020 /* Enable Throttle */
#define CREG_PIPG_MMODE 0x00000010 /* Manual Mode */
#define CREG_PIPG_WMASK 0x0000000f /* SBUS Wait Mask */
/* Per-channel AMD 79C940 MACE registers. */
#define MREGS_RXFIFO 0x00UL /* Receive FIFO */
#define MREGS_TXFIFO 0x01UL /* Transmit FIFO */
#define MREGS_TXFCNTL 0x02UL /* Transmit Frame Control */
#define MREGS_TXFSTAT 0x03UL /* Transmit Frame Status */
#define MREGS_TXRCNT 0x04UL /* Transmit Retry Count */
#define MREGS_RXFCNTL 0x05UL /* Receive Frame Control */
#define MREGS_RXFSTAT 0x06UL /* Receive Frame Status */
#define MREGS_FFCNT 0x07UL /* FIFO Frame Count */
#define MREGS_IREG 0x08UL /* Interrupt Register */
#define MREGS_IMASK 0x09UL /* Interrupt Mask */
#define MREGS_POLL 0x0aUL /* POLL Register */
#define MREGS_BCONFIG 0x0bUL /* BIU Config */
#define MREGS_FCONFIG 0x0cUL /* FIFO Config */
#define MREGS_MCONFIG 0x0dUL /* MAC Config */
#define MREGS_PLSCONFIG 0x0eUL /* PLS Config */
#define MREGS_PHYCONFIG 0x0fUL /* PHY Config */
#define MREGS_CHIPID1 0x10UL /* Chip-ID, low bits */
#define MREGS_CHIPID2 0x11UL /* Chip-ID, high bits */
#define MREGS_IACONFIG 0x12UL /* Internal Address Config */
/* 0x13UL, reserved */
#define MREGS_FILTER 0x14UL /* Logical Address Filter */
#define MREGS_ETHADDR 0x15UL /* Our Ethernet Address */
/* 0x16UL, reserved */
/* 0x17UL, reserved */
#define MREGS_MPCNT 0x18UL /* Missed Packet Count */
/* 0x19UL, reserved */
#define MREGS_RPCNT 0x1aUL /* Runt Packet Count */
#define MREGS_RCCNT 0x1bUL /* RX Collision Count */
/* 0x1cUL, reserved */
#define MREGS_UTEST 0x1dUL /* User Test */
#define MREGS_RTEST1 0x1eUL /* Reserved Test 1 */
#define MREGS_RTEST2 0x1fUL /* Reserved Test 2 */
#define MREGS_REG_SIZE 0x20UL
#define MREGS_TXFCNTL_DRETRY 0x80 /* Retry disable */
#define MREGS_TXFCNTL_DFCS 0x08 /* Disable TX FCS */
#define MREGS_TXFCNTL_AUTOPAD 0x01 /* TX auto pad */
#define MREGS_TXFSTAT_VALID 0x80 /* TX valid */
#define MREGS_TXFSTAT_UNDERFLOW 0x40 /* TX underflow */
#define MREGS_TXFSTAT_LCOLL 0x20 /* TX late collision */
#define MREGS_TXFSTAT_MRETRY 0x10 /* TX > 1 retries */
#define MREGS_TXFSTAT_ORETRY 0x08 /* TX 1 retry */
#define MREGS_TXFSTAT_PDEFER 0x04 /* TX pkt deferred */
#define MREGS_TXFSTAT_CLOSS 0x02 /* TX carrier lost */
#define MREGS_TXFSTAT_RERROR 0x01 /* TX retry error */
#define MREGS_TXRCNT_EDEFER 0x80 /* TX Excess defers */
#define MREGS_TXRCNT_CMASK 0x0f /* TX retry count */
#define MREGS_RXFCNTL_LOWLAT 0x08 /* RX low latency */
#define MREGS_RXFCNTL_AREJECT 0x04 /* RX addr match rej */
#define MREGS_RXFCNTL_AUTOSTRIP 0x01 /* RX auto strip */
#define MREGS_RXFSTAT_OVERFLOW 0x80 /* RX overflow */
#define MREGS_RXFSTAT_LCOLL 0x40 /* RX late collision */
#define MREGS_RXFSTAT_FERROR 0x20 /* RX framing error */
#define MREGS_RXFSTAT_FCSERROR 0x10 /* RX FCS error */
#define MREGS_RXFSTAT_RBCNT 0x0f /* RX msg byte count */
#define MREGS_FFCNT_RX 0xf0 /* RX FIFO frame cnt */
#define MREGS_FFCNT_TX 0x0f /* TX FIFO frame cnt */
#define MREGS_IREG_JABBER 0x80 /* IRQ Jabber error */
#define MREGS_IREG_BABBLE 0x40 /* IRQ Babble error */
#define MREGS_IREG_COLL 0x20 /* IRQ Collision error */
#define MREGS_IREG_RCCO 0x10 /* IRQ Collision cnt overflow */
#define MREGS_IREG_RPKTCO 0x08 /* IRQ Runt packet count overflow */
#define MREGS_IREG_MPKTCO 0x04 /* IRQ missed packet cnt overflow */
#define MREGS_IREG_RXIRQ 0x02 /* IRQ RX'd a packet */
#define MREGS_IREG_TXIRQ 0x01 /* IRQ TX'd a packet */
#define MREGS_IMASK_BABBLE 0x40 /* IMASK Babble errors */
#define MREGS_IMASK_COLL 0x20 /* IMASK Collision errors */
#define MREGS_IMASK_MPKTCO 0x04 /* IMASK Missed pkt cnt overflow */
#define MREGS_IMASK_RXIRQ 0x02 /* IMASK RX interrupts */
#define MREGS_IMASK_TXIRQ 0x01 /* IMASK TX interrupts */
#define MREGS_POLL_TXVALID 0x80 /* TX is valid */
#define MREGS_POLL_TDTR 0x40 /* TX data transfer request */
#define MREGS_POLL_RDTR 0x20 /* RX data transfer request */
#define MREGS_BCONFIG_BSWAP 0x40 /* Byte Swap */
#define MREGS_BCONFIG_4TS 0x00 /* 4byte transmit start point */
#define MREGS_BCONFIG_16TS 0x10 /* 16byte transmit start point */
#define MREGS_BCONFIG_64TS 0x20 /* 64byte transmit start point */
#define MREGS_BCONFIG_112TS 0x30 /* 112byte transmit start point */
#define MREGS_BCONFIG_RESET 0x01 /* SW-Reset the MACE */
#define MREGS_FCONFIG_TXF8 0x00 /* TX fifo 8 write cycles */
#define MREGS_FCONFIG_TXF32 0x80 /* TX fifo 32 write cycles */
#define MREGS_FCONFIG_TXF16 0x40 /* TX fifo 16 write cycles */
#define MREGS_FCONFIG_RXF64 0x20 /* RX fifo 64 write cycles */
#define MREGS_FCONFIG_RXF32 0x10 /* RX fifo 32 write cycles */
#define MREGS_FCONFIG_RXF16 0x00 /* RX fifo 16 write cycles */
#define MREGS_FCONFIG_TFWU 0x08 /* TX fifo watermark update */
#define MREGS_FCONFIG_RFWU 0x04 /* RX fifo watermark update */
#define MREGS_FCONFIG_TBENAB 0x02 /* TX burst enable */
#define MREGS_FCONFIG_RBENAB 0x01 /* RX burst enable */
#define MREGS_MCONFIG_PROMISC 0x80 /* Promiscuous mode enable */
#define MREGS_MCONFIG_TPDDISAB 0x40 /* TX 2part deferral enable */
#define MREGS_MCONFIG_MBAENAB 0x20 /* Modified backoff enable */
#define MREGS_MCONFIG_RPADISAB 0x08 /* RX physical addr disable */
#define MREGS_MCONFIG_RBDISAB 0x04 /* RX broadcast disable */
#define MREGS_MCONFIG_TXENAB 0x02 /* Enable transmitter */
#define MREGS_MCONFIG_RXENAB 0x01 /* Enable receiver */
#define MREGS_PLSCONFIG_TXMS 0x08 /* TX mode select */
#define MREGS_PLSCONFIG_GPSI 0x06 /* Use GPSI connector */
#define MREGS_PLSCONFIG_DAI 0x04 /* Use DAI connector */
#define MREGS_PLSCONFIG_TP 0x02 /* Use TwistedPair connector */
#define MREGS_PLSCONFIG_AUI 0x00 /* Use AUI connector */
#define MREGS_PLSCONFIG_IOENAB 0x01 /* PLS I/O enable */
#define MREGS_PHYCONFIG_LSTAT 0x80 /* Link status */
#define MREGS_PHYCONFIG_LTESTDIS 0x40 /* Disable link test logic */
#define MREGS_PHYCONFIG_RXPOLARITY 0x20 /* RX polarity */
#define MREGS_PHYCONFIG_APCDISAB 0x10 /* AutoPolarityCorrect disab */
#define MREGS_PHYCONFIG_LTENAB 0x08 /* Select low threshold */
#define MREGS_PHYCONFIG_AUTO 0x04 /* Connector port auto-sel */
#define MREGS_PHYCONFIG_RWU 0x02 /* Remote WakeUp */
#define MREGS_PHYCONFIG_AW 0x01 /* Auto Wakeup */
#define MREGS_IACONFIG_ACHNGE 0x80 /* Do address change */
#define MREGS_IACONFIG_PARESET 0x04 /* Physical address reset */
#define MREGS_IACONFIG_LARESET 0x02 /* Logical address reset */
#define MREGS_UTEST_RTRENAB 0x80 /* Enable resv test register */
#define MREGS_UTEST_RTRDISAB 0x40 /* Disab resv test register */
#define MREGS_UTEST_RPACCEPT 0x20 /* Accept runt packets */
#define MREGS_UTEST_FCOLL 0x10 /* Force collision status */
#define MREGS_UTEST_FCSENAB 0x08 /* Enable FCS on RX */
#define MREGS_UTEST_INTLOOPM 0x06 /* Intern lpback w/MENDEC */
#define MREGS_UTEST_INTLOOP 0x04 /* Intern lpback */
#define MREGS_UTEST_EXTLOOP 0x02 /* Extern lpback */
#define MREGS_UTEST_NOLOOP 0x00 /* No loopback */
/* One receive descriptor: a flags/status word plus the DVMA address
 * of the receive buffer it points at.
 */
struct qe_rxd {
	u32 rx_flags;
	u32 rx_addr;
};

#define RXD_OWN      0x80000000 /* Ownership.      */
#define RXD_UPDATE   0x10000000 /* Being Updated?  */
#define RXD_LENGTH   0x000007ff /* Packet Length.  */
/* One transmit descriptor: a flags/length word plus the DVMA address
 * of the transmit buffer.
 */
struct qe_txd {
	u32 tx_flags;
	u32 tx_addr;
};

#define TXD_OWN      0x80000000 /* Ownership.      */
#define TXD_SOP      0x40000000 /* Start Of Packet */
#define TXD_EOP      0x20000000 /* End Of Packet   */
#define TXD_UPDATE   0x10000000 /* Being Updated?  */
#define TXD_LENGTH   0x000007ff /* Packet Length.  */
/* The descriptor rings are declared at MAXSIZE entries while the
 * driver works with the smaller SIZE counts; the advance/retreat
 * macros below mask with (MAXSIZE - 1), which is why these must
 * remain powers of two.
 */
#define TX_RING_MAXSIZE   256
#define RX_RING_MAXSIZE   256

#define TX_RING_SIZE      16
#define RX_RING_SIZE      16

#define NEXT_RX(num)       (((num) + 1) & (RX_RING_MAXSIZE - 1))
#define NEXT_TX(num)       (((num) + 1) & (TX_RING_MAXSIZE - 1))

#define PREV_RX(num)       (((num) - 1) & (RX_RING_MAXSIZE - 1))
#define PREV_TX(num)       (((num) - 1) & (TX_RING_MAXSIZE - 1))

/* Free TX slots, always keeping one entry unused so that a completely
 * full ring can be distinguished from an empty one.
 */
#define TX_BUFFS_AVAIL(qp)                                    \
	(((qp)->tx_old <= (qp)->tx_new) ?                     \
	  (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new :  \
	  (qp)->tx_old - (qp)->tx_new - 1)
/* All RX and TX descriptors for one channel, kept together in a
 * single DMA-coherent block (qec_ether_init allocates PAGE_SIZE
 * bytes for it).
 */
struct qe_init_block {
	struct qe_rxd qe_rxd[RX_RING_MAXSIZE];
	struct qe_txd qe_txd[TX_RING_MAXSIZE];
};

/* Byte offset of a descriptor inside struct qe_init_block, computed
 * via the classic null-pointer cast (pre-offsetof idiom).
 */
#define qib_offset(mem, elem) \
((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem]))))
struct sunqe;

/* One QEC parent controller; up to four QE MACE channels hang off it.
 * Instances are chained on the driver-global root_qec_dev list (see
 * qec_exit for the teardown walk).
 */
struct sunqec {
	void __iomem		*gregs;		/* QEC Global Registers        */
	struct sunqe		*qes[4];	/* Each child MACE             */
	unsigned int		qec_bursts;	/* Support burst sizes         */
	struct platform_device	*op;		/* QEC's OF device             */
	struct sunqec		*next_module;	/* List of all QECs in system  */
};
#define PKT_BUF_SZ	1664
#define RXD_PKT_SZ	1664

/* Statically laid-out packet buffers for one channel, allocated as a
 * single DMA-coherent region in qec_ether_init().
 */
struct sunqe_buffers {
	u8	tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
	u8	__pad[2];
	u8	rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
};

/* Byte offset of a packet buffer inside struct sunqe_buffers, using
 * the same null-pointer offset idiom as qib_offset().
 */
#define qebuf_offset(mem, elem) \
((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
/* Per-channel driver state; lives in the netdev private area
 * (netdev_priv), see qec_ether_init().
 */
struct sunqe {
	void __iomem		*qcregs;	/* QEC per-channel Registers   */
	void __iomem		*mregs;		/* Per-channel MACE Registers  */
	struct qe_init_block	*qe_block;	/* RX and TX descriptors       */
	__u32			qblock_dvma;	/* RX and TX descriptors       */
	spinlock_t		lock;		/* Protects txfull state       */
	int			rx_new, rx_old;	/* RX ring extents             */
	int			tx_new, tx_old;	/* TX ring extents             */
	struct sunqe_buffers	*buffers;	/* CPU visible address.        */
	__u32			buffers_dvma;	/* DVMA visible address.       */
	struct sunqec		*parent;	/* Enclosing QEC controller    */
	u8			mconfig;	/* Base MACE mconfig value     */
	struct platform_device	*op;		/* QE's OF device struct       */
	struct net_device	*dev;		/* QE's netdevice struct       */
	int			channel;	/* Who am I?                   */
};
#endif /* !(_SUNQE_H) */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,103 @@
#ifndef _SUNVNET_H
#define _SUNVNET_H
#include <linux/interrupt.h>
#define DESC_NCOOKIES(entry_size) \
((entry_size) - sizeof(struct vio_net_desc))
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
*/
#define VNET_TX_TIMEOUT (5 * HZ)
/* length of time (or less) we expect pending descriptors to be marked
* as VIO_DESC_DONE and skbs ready to be freed
*/
#define VNET_CLEAN_TIMEOUT ((HZ/100)+1)
#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN)
#define VNET_TX_RING_SIZE 512
#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
/* VNET packets are sent in buffers with the first 6 bytes skipped
* so that after the ethernet header the IPv4/IPv6 headers are aligned
* properly.
*/
#define VNET_PACKET_SKIP 6
#define VNET_MAXCOOKIES (VNET_MAXPACKET/PAGE_SIZE + 1)
/* Book-keeping for one in-flight TX buffer: the skb being sent plus
 * the LDC cookies describing its mapping.
 */
struct vnet_tx_entry {
	struct sk_buff		*skb;
	unsigned int		ncookies;
	struct ldc_trans_cookie	cookies[VNET_MAXCOOKIES];
};
struct vnet;
/* One virtual-IO peer port attached to a vnet device.  A port lives
 * both on its parent's port_list and in the MAC-keyed port_hash.
 */
struct vnet_port {
	struct vio_driver_state	vio;

	struct hlist_node	hash;		/* Entry in vnet->port_hash     */
	u8			raddr[ETH_ALEN];	/* Remote MAC address   */
	u8			switch_port;	/* NOTE(review): presumably flags the switch port - confirm */
	u8			__pad;

	struct vnet		*vp;		/* Parent vnet device           */

	struct vnet_tx_entry	tx_bufs[VNET_TX_RING_SIZE];

	struct list_head	list;		/* Entry in vnet->port_list     */

	u32			stop_rx_idx;
	bool			stop_rx;
	bool			start_cons;

	struct timer_list	clean_timer;	/* TX-ring cleanup timer (VNET_CLEAN_TIMEOUT) - TODO confirm */

	u64			rmtu;		/* Remote MTU - presumably negotiated via VIO; verify */
};
/* Recover the enclosing vnet_port from its embedded vio state. */
static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vnet_port, vio);
}
#define VNET_PORT_HASH_SIZE 16
#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1)
/* Hash a MAC address into port_hash: XOR the two low-order octets of
 * the address and mask down to the table size.
 */
static inline unsigned int vnet_hashfn(u8 *mac)
{
	return (mac[4] ^ mac[5]) & VNET_PORT_HASH_MASK;
}
/* One entry in a vnet's singly-linked multicast address list. */
struct vnet_mcast_entry {
	u8			addr[ETH_ALEN];
	u8			sent;	/* NOTE(review): presumably "pushed to peer" flag - confirm */
	u8			hit;	/* NOTE(review): presumably "still requested" flag - confirm */
	struct vnet_mcast_entry	*next;
};
/* One vnet network device: the netdev itself plus all attached peer
 * ports and the pending multicast list.
 */
struct vnet {
	/* Protects port_list and port_hash.  */
	spinlock_t		lock;

	struct net_device	*dev;

	u32			msg_enable;	/* netif message-level bitmask - TODO confirm */

	struct list_head	port_list;	/* All ports of this device     */
	struct hlist_head	port_hash[VNET_PORT_HASH_SIZE];	/* Ports keyed by vnet_hashfn() */

	struct vnet_mcast_entry	*mcast_list;	/* Multicast address list       */

	struct list_head	list;		/* NOTE(review): presumably entry in a driver-global vnet list - verify */
	u64			local_mac;

	struct tasklet_struct	vnet_tx_wakeup;	/* Deferred TX wakeup work - TODO confirm */
};
#endif /* _SUNVNET_H */