Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,165 @@
#
# Broadcom device configuration
#
config NET_VENDOR_BROADCOM
bool "Broadcom devices"
default y
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
SIBYTE_SB1xxx_SOC
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
Note that the answer to this question does not directly affect
the kernel: saying N will just cause the configurator to skip all
the questions regarding Broadcom chipsets. If you say Y, you will be asked
for your specific chipset/driver in the following questions.
if NET_VENDOR_BROADCOM
config B44
tristate "Broadcom 440x/47xx ethernet support"
depends on SSB_POSSIBLE && HAS_DMA
select SSB
select MII
select PHYLIB
---help---
If you have a network (Ethernet) controller of this type, say Y
or M and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
To compile this driver as a module, choose M here. The module
will be called b44.
# Auto-select SSB PCI-HOST support, if possible
config B44_PCI_AUTOSELECT
bool
depends on B44 && SSB_PCIHOST_POSSIBLE
select SSB_PCIHOST
default y
# Auto-select SSB PCICORE driver, if possible
config B44_PCICORE_AUTOSELECT
bool
depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
select SSB_DRIVER_PCICORE
default y
config B44_PCI
bool
depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
default y
config BCM63XX_ENET
tristate "Broadcom 63xx internal mac support"
depends on BCM63XX
select MII
select PHYLIB
help
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
config BCMGENET
tristate "Broadcom GENET internal MAC support"
depends on OF
select MII
select PHYLIB
select FIXED_PHY if BCMGENET=y
select BCM7XXX_PHY
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
config BNX2
tristate "QLogic NetXtremeII support"
depends on PCI
select CRC32
select FW_LOADER
---help---
This driver supports QLogic NetXtremeII gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bnx2. This is recommended.
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
select BNX2
select UIO
---help---
This driver supports offload features of QLogic NetXtremeII
gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called cnic. This is recommended.
config SB1250_MAC
tristate "SB1250 Gigabit Ethernet support"
depends on SIBYTE_SB1xxx_SOC
select PHYLIB
---help---
This driver supports Gigabit Ethernet interfaces based on the
Broadcom SiByte family of System-On-a-Chip parts. They include
the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
and BCM1480 chips.
To compile this driver as a module, choose M here: the module
will be called sb1250-mac.
config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
select HWMON
select PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
select PTP_1588_CLOCK
select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
select MDIO
---help---
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
config BNX2X_SRIOV
bool "Broadcom 578xx and 57712 SR-IOV support"
depends on BNX2X && PCI_IOV
default y
---help---
This configuration parameter enables Single Root Input Output
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
config BGMAC
tristate "BCMA bus GBit core support"
depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
select PHYLIB
---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
They can be found on BCM47xx SoCs and provide gigabit ethernet.
In case of using this driver on BCM4706 it's also requires to enable
BCMA_DRIVER_GMAC_CMN to make it work.
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on OF
select MII
select PHYLIB
select FIXED_PHY if SYSTEMPORT=y
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal
Ethernet switch.
endif # NET_VENDOR_BROADCOM

View file

@ -0,0 +1,14 @@
#
# Makefile for the Broadcom network device drivers.
#
# Entries ending in '/' descend into a subdirectory that has its own
# Makefile (genet/, bnx2x/); all other entries build a single object.
# Note the object name does not always match the Kconfig symbol
# (TIGON3 -> tg3.o, SYSTEMPORT -> bcmsysport.o, SB1250_MAC -> sb1250-mac.o).
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,409 @@
#ifndef _B44_H
#define _B44_H
/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
#define B44_DEVCTRL 0x0000UL /* Device Control */
#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */
#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */
#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */
#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */
#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */
#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */
#define DEVCTRL_PADDR_SHIFT 18
#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */
#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */
#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */
#define WKUP_LEN_D0 0x00000080
#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */
#define WKUP_LEN_P1_SHIFT 8
#define WKUP_LEN_D1 0x00008000
#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */
#define WKUP_LEN_P2_SHIFT 16
#define WKUP_LEN_D2 0x00000000
#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */
#define WKUP_LEN_P3_SHIFT 24
#define WKUP_LEN_D3 0x80000000
#define WKUP_LEN_DISABLE 0x80808080
#define WKUP_LEN_ENABLE_TWO 0x80800000
#define WKUP_LEN_ENABLE_THREE 0x80000000
#define B44_ISTAT 0x0020UL /* Interrupt Status */
#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */
#define ISTAT_PME 0x00000040 /* Power Management Event */
#define ISTAT_TO 0x00000080 /* General Purpose Timeout */
#define ISTAT_DSCE 0x00000400 /* Descriptor Error */
#define ISTAT_DATAE 0x00000800 /* Data Error */
#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */
#define ISTAT_RDU 0x00002000 /* Receive Descr. Underflow */
#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */
#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */
#define ISTAT_RX 0x00010000 /* RX Interrupt */
#define ISTAT_TX 0x01000000 /* TX Interrupt */
#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */
#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */
#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */
#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
#define B44_IMASK 0x0024UL /* Interrupt Mask */
#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
#define B44_GPTIMER 0x0028UL /* General Purpose Timer */
#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */
#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */
#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */
#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */
#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */
#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */
#define B44_MAC_CTRL 0x00A8UL /* MAC Control */
#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */
#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */
#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */
#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */
#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5
#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */
#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */
#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */
#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */
#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */
#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */
#define RCV_LAZY_FC_SHIFT 24
#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */
#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */
#define DMATX_CTRL_SUSPEND 0x00000002 /* Suspend Request */
#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */
#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */
#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */
#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */
#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */
#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */
#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */
#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */
#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */
#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */
#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */
#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */
#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */
#define DMATX_STAT_ENONE 0x00000000 /* Error None */
#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */
#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */
#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */
#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */
#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */
#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */
#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */
#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */
#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */
#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */
#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */
#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */
#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */
#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */
#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */
#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */
#define DMARX_STAT_ENONE 0x00000000 /* Error None */
#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */
#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */
#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */
#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */
#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */
#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */
#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */
#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */
#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */
#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */
#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */
#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */
#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */
#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */
#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */
#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */
#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */
#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */
#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */
#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */
#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */
#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */
#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */
#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */
#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */
#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */
#define MDIO_DATA_TA_SHIFT 16
#define MDIO_TA_VALID 2
#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */
#define MDIO_DATA_RA_SHIFT 18
#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */
#define MDIO_DATA_PMD_SHIFT 23
#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */
#define MDIO_DATA_OP_SHIFT 28
#define MDIO_OP_WRITE 1
#define MDIO_OP_READ 2
#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */
#define MDIO_DATA_SB_SHIFT 30
#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */
#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */
#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */
#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */
#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */
#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */
#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */
#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */
#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */
#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */
#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */
#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */
#define CAM_CTRL_READ 0x00000004 /* Read */
#define CAM_CTRL_WRITE 0x00000008 /* Write */
#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */
#define CAM_CTRL_INDEX_SHIFT 16
#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */
#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */
#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */
#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */
#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */
#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */
#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */
#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */
#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */
#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */
#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */
#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */
#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */
#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */
#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */
#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */
#define B44_TX_O 0x0508UL /* MIB TX Octets */
#define B44_TX_P 0x050CUL /* MIB TX Packets */
#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */
#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */
#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */
#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */
#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */
#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */
#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */
#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */
#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */
#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */
#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */
#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */
#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */
#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */
#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */
#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */
#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */
#define B44_TX_DEFERED 0x0554UL /* MIB TX Defered Packets */
#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */
#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */
#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */
#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */
#define B44_RX_O 0x0588UL /* MIB RX Octets */
#define B44_RX_P 0x058CUL /* MIB RX Packets */
#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */
#define B44_RX_MCAST 0x0594UL /* MIB RX Multicast Packets */
#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */
#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */
#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */
#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */
#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */
#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */
#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */
#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */
#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */
#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */
#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */
#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */
#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */
#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */
#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */
#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
/* 4400 PHY registers */
#define B44_MII_AUXCTRL 24 /* Auxiliary Control */
#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */
#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */
#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */
#define B44_MII_ALEDCTRL 26 /* Activity LED */
#define MII_ALEDCTRL_ALLMSK 0x7fff
#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */
#define MII_TLEDCTRL_ENABLE 0x0040
/* One hardware DMA descriptor, shared layout for the RX and TX rings.
 * Both words are little-endian as consumed by the B44 DMA engine.
 */
struct dma_desc {
	__le32 ctrl;	/* DESC_CTRL_* flags plus 13-bit buffer length */
	__le32 addr;	/* bus address of the data buffer */
};
/* There are only 12 bits in the DMA engine for descriptor offsetting
* so the table must be aligned on a boundary of this.
*/
#define DMA_TABLE_BYTES 4096
#define DESC_CTRL_LEN 0x00001fff
#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */
#define DESC_CTRL_EOT 0x10000000 /* End of Table */
#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */
#define DESC_CTRL_EOF 0x40000000 /* End of Frame */
#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */
#define RX_COPY_THRESHOLD 256
/* Status header the hardware places in front of each received frame. */
struct rx_header {
	__le16 len;	/* received frame length */
	__le16 flags;	/* RX_FLAG_* status bits */
	__le16 pad[12];	/* pads the header to RX_HEADER_LEN (28) bytes */
};
#define RX_HEADER_LEN 28
#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */
#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */
#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */
#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */
#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */
#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */
#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */
#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */
#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */
#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
/* Per-descriptor bookkeeping kept by the driver alongside each ring slot. */
struct ring_info {
	struct sk_buff *skb;	/* skb attached to this descriptor */
	dma_addr_t mapping;	/* DMA mapping of the skb's data */
};
#define B44_MCAST_TABLE_SIZE 32
#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
_B44(tx_good_octets) \
_B44(tx_good_pkts) \
_B44(tx_octets) \
_B44(tx_pkts) \
_B44(tx_broadcast_pkts) \
_B44(tx_multicast_pkts) \
_B44(tx_len_64) \
_B44(tx_len_65_to_127) \
_B44(tx_len_128_to_255) \
_B44(tx_len_256_to_511) \
_B44(tx_len_512_to_1023) \
_B44(tx_len_1024_to_max) \
_B44(tx_jabber_pkts) \
_B44(tx_oversize_pkts) \
_B44(tx_fragment_pkts) \
_B44(tx_underruns) \
_B44(tx_total_cols) \
_B44(tx_single_cols) \
_B44(tx_multiple_cols) \
_B44(tx_excessive_cols) \
_B44(tx_late_cols) \
_B44(tx_defered) \
_B44(tx_carrier_lost) \
_B44(tx_pause_pkts) \
_B44(rx_good_octets) \
_B44(rx_good_pkts) \
_B44(rx_octets) \
_B44(rx_pkts) \
_B44(rx_broadcast_pkts) \
_B44(rx_multicast_pkts) \
_B44(rx_len_64) \
_B44(rx_len_65_to_127) \
_B44(rx_len_128_to_255) \
_B44(rx_len_256_to_511) \
_B44(rx_len_512_to_1023) \
_B44(rx_len_1024_to_max) \
_B44(rx_jabber_pkts) \
_B44(rx_oversize_pkts) \
_B44(rx_fragment_pkts) \
_B44(rx_missed_pkts) \
_B44(rx_crc_align_errs) \
_B44(rx_undersize) \
_B44(rx_crc_errs) \
_B44(rx_align_errs) \
_B44(rx_symbol_errs) \
_B44(rx_pause_pkts) \
_B44(rx_nonpause_pkts)
/* SW copy of device statistics, kept up to date by periodic timer
* which probes HW values. Check b44_stats_update if you mess with
* the layout
*/
struct b44_hw_stats {
	/* X-macro trick: B44_STAT_REG_DECLARE expands to one "u64 <name>;"
	 * field per MIB counter listed above, keeping this struct and the
	 * counter list in a single place.
	 */
#define _B44(x) u64 x;
B44_STAT_REG_DECLARE
#undef _B44
	struct u64_stats_sync syncp;	/* consistency for 64-bit counters */
};
#define B44_BOARDFLAG_ROBO 0x0010 /* Board has robo switch */
#define B44_BOARDFLAG_ADM 0x0080 /* Board has ADMtek switch */
struct ssb_device;
/* Per-NIC driver state for one B44 MAC. */
struct b44 {
	spinlock_t lock;		/* protects the device state below */
	u32 imask, istat;		/* cached B44_IMASK / B44_ISTAT values */

	struct dma_desc *rx_ring, *tx_ring;	/* CPU views of the DMA rings */

	u32 tx_prod, tx_cons;		/* TX ring producer/consumer indices */
	u32 rx_prod, rx_cons;		/* RX ring producer/consumer indices */

	struct ring_info *rx_buffers;	/* per-slot skb/mapping, RX ring */
	struct ring_info *tx_buffers;	/* per-slot skb/mapping, TX ring */

	struct napi_struct napi;

	u32 dma_offset;			/* core-specific DMA address offset */
	u32 flags;			/* B44_FLAG_* bits below */
#define B44_FLAG_B0_ANDLATER 0x00000001
#define B44_FLAG_BUGGY_TXPTR 0x00000002
#define B44_FLAG_REORDER_BUG 0x00000004
#define B44_FLAG_PAUSE_AUTO 0x00008000
#define B44_FLAG_FULL_DUPLEX 0x00010000
#define B44_FLAG_100_BASE_T 0x00020000
#define B44_FLAG_TX_PAUSE 0x00040000
#define B44_FLAG_RX_PAUSE 0x00080000
#define B44_FLAG_FORCE_LINK 0x00100000
#define B44_FLAG_ADV_10HALF 0x01000000
#define B44_FLAG_ADV_10FULL 0x02000000
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
#define B44_FLAG_EXTERNAL_PHY 0x10000000
#define B44_FLAG_RX_RING_HACK 0x20000000
#define B44_FLAG_TX_RING_HACK 0x40000000
#define B44_FLAG_WOL_ENABLE 0x80000000

	u32 msg_enable;			/* netif message level bitmask */
	struct timer_list timer;	/* periodic timer (see b44_hw_stats) */

	struct b44_hw_stats hw_stats;	/* SW copy of the HW MIB counters */

	struct ssb_device *sdev;	/* underlying SSB bus device */
	struct net_device *dev;		/* owning net_device */

	dma_addr_t rx_ring_dma, tx_ring_dma;	/* bus addresses of the rings */

	u32 rx_pending;			/* configured RX ring depth */
	u32 tx_pending;			/* configured TX ring depth */
	u8 phy_addr;			/* MII address of the attached PHY */
	u8 force_copybreak;		/* nonzero: always copy small RX frames */
	struct phy_device *phydev;	/* phylib PHY (external PHY case) */
	struct mii_bus *mii_bus;
	int old_link;			/* last observed link state */
	struct mii_if_info mii_if;
};
#endif /* _B44_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,360 @@
#ifndef BCM63XX_ENET_H_
#define BCM63XX_ENET_H_
#include <linux/types.h>
#include <linux/mii.h>
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_io.h>
#include <bcm63xx_iudma.h>
/* default number of descriptor */
#define BCMENET_DEF_RX_DESC 64
#define BCMENET_DEF_TX_DESC 32
/* maximum burst len for dma (4 bytes unit) */
#define BCMENET_DMA_MAXBURST 16
#define BCMENETSW_DMA_MAXBURST 8
/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value
* must be low enough so that a DMA transfer of above burst length can
* not overflow the fifo */
#define BCMENET_TX_FIFO_TRESH 32
/*
* hardware maximum rx/tx packet size including FCS, max mtu is
* actually 2047, but if we set max rx size register to 2047 we won't
* get overflow information if packet size is 2048 or above
*/
#define BCMENET_MAX_MTU 2046
/*
* MIB Counters register definitions
*/
#define ETH_MIB_TX_GD_OCTETS 0
#define ETH_MIB_TX_GD_PKTS 1
#define ETH_MIB_TX_ALL_OCTETS 2
#define ETH_MIB_TX_ALL_PKTS 3
#define ETH_MIB_TX_BRDCAST 4
#define ETH_MIB_TX_MULT 5
#define ETH_MIB_TX_64 6
#define ETH_MIB_TX_65_127 7
#define ETH_MIB_TX_128_255 8
#define ETH_MIB_TX_256_511 9
#define ETH_MIB_TX_512_1023 10
#define ETH_MIB_TX_1024_MAX 11
#define ETH_MIB_TX_JAB 12
#define ETH_MIB_TX_OVR 13
#define ETH_MIB_TX_FRAG 14
#define ETH_MIB_TX_UNDERRUN 15
#define ETH_MIB_TX_COL 16
#define ETH_MIB_TX_1_COL 17
#define ETH_MIB_TX_M_COL 18
#define ETH_MIB_TX_EX_COL 19
#define ETH_MIB_TX_LATE 20
#define ETH_MIB_TX_DEF 21
#define ETH_MIB_TX_CRS 22
#define ETH_MIB_TX_PAUSE 23
#define ETH_MIB_RX_GD_OCTETS 32
#define ETH_MIB_RX_GD_PKTS 33
#define ETH_MIB_RX_ALL_OCTETS 34
#define ETH_MIB_RX_ALL_PKTS 35
#define ETH_MIB_RX_BRDCAST 36
#define ETH_MIB_RX_MULT 37
#define ETH_MIB_RX_64 38
#define ETH_MIB_RX_65_127 39
#define ETH_MIB_RX_128_255 40
#define ETH_MIB_RX_256_511 41
#define ETH_MIB_RX_512_1023 42
#define ETH_MIB_RX_1024_MAX 43
#define ETH_MIB_RX_JAB 44
#define ETH_MIB_RX_OVR 45
#define ETH_MIB_RX_FRAG 46
#define ETH_MIB_RX_DROP 47
#define ETH_MIB_RX_CRC_ALIGN 48
#define ETH_MIB_RX_UND 49
#define ETH_MIB_RX_CRC 50
#define ETH_MIB_RX_ALIGN 51
#define ETH_MIB_RX_SYM 52
#define ETH_MIB_RX_PAUSE 53
#define ETH_MIB_RX_CNTRL 54
/*
* SW MIB Counters register definitions
*/
#define ETHSW_MIB_TX_ALL_OCT 0
#define ETHSW_MIB_TX_DROP_PKTS 2
#define ETHSW_MIB_TX_QOS_PKTS 3
#define ETHSW_MIB_TX_BRDCAST 4
#define ETHSW_MIB_TX_MULT 5
#define ETHSW_MIB_TX_UNI 6
#define ETHSW_MIB_TX_COL 7
#define ETHSW_MIB_TX_1_COL 8
#define ETHSW_MIB_TX_M_COL 9
#define ETHSW_MIB_TX_DEF 10
#define ETHSW_MIB_TX_LATE 11
#define ETHSW_MIB_TX_EX_COL 12
#define ETHSW_MIB_TX_PAUSE 14
#define ETHSW_MIB_TX_QOS_OCT 15
#define ETHSW_MIB_RX_ALL_OCT 17
#define ETHSW_MIB_RX_UND 19
#define ETHSW_MIB_RX_PAUSE 20
#define ETHSW_MIB_RX_64 21
#define ETHSW_MIB_RX_65_127 22
#define ETHSW_MIB_RX_128_255 23
#define ETHSW_MIB_RX_256_511 24
#define ETHSW_MIB_RX_512_1023 25
#define ETHSW_MIB_RX_1024_1522 26
#define ETHSW_MIB_RX_OVR 27
#define ETHSW_MIB_RX_JAB 28
#define ETHSW_MIB_RX_ALIGN 29
#define ETHSW_MIB_RX_CRC 30
#define ETHSW_MIB_RX_GD_OCT 31
#define ETHSW_MIB_RX_DROP 33
#define ETHSW_MIB_RX_UNI 34
#define ETHSW_MIB_RX_MULT 35
#define ETHSW_MIB_RX_BRDCAST 36
#define ETHSW_MIB_RX_SA_CHANGE 37
#define ETHSW_MIB_RX_FRAG 38
#define ETHSW_MIB_RX_OVR_DISC 39
#define ETHSW_MIB_RX_SYM 40
#define ETHSW_MIB_RX_QOS_PKTS 41
#define ETHSW_MIB_RX_QOS_OCT 42
#define ETHSW_MIB_RX_1523_2047 44
#define ETHSW_MIB_RX_2048_4095 45
#define ETHSW_MIB_RX_4096_8191 46
#define ETHSW_MIB_RX_8192_9728 47
struct bcm_enet_mib_counters {
u64 tx_gd_octets;
u32 tx_gd_pkts;
u32 tx_all_octets;
u32 tx_all_pkts;
u32 tx_unicast;
u32 tx_brdcast;
u32 tx_mult;
u32 tx_64;
u32 tx_65_127;
u32 tx_128_255;
u32 tx_256_511;
u32 tx_512_1023;
u32 tx_1024_max;
u32 tx_1523_2047;
u32 tx_2048_4095;
u32 tx_4096_8191;
u32 tx_8192_9728;
u32 tx_jab;
u32 tx_drop;
u32 tx_ovr;
u32 tx_frag;
u32 tx_underrun;
u32 tx_col;
u32 tx_1_col;
u32 tx_m_col;
u32 tx_ex_col;
u32 tx_late;
u32 tx_def;
u32 tx_crs;
u32 tx_pause;
u64 rx_gd_octets;
u32 rx_gd_pkts;
u32 rx_all_octets;
u32 rx_all_pkts;
u32 rx_brdcast;
u32 rx_unicast;
u32 rx_mult;
u32 rx_64;
u32 rx_65_127;
u32 rx_128_255;
u32 rx_256_511;
u32 rx_512_1023;
u32 rx_1024_max;
u32 rx_jab;
u32 rx_ovr;
u32 rx_frag;
u32 rx_drop;
u32 rx_crc_align;
u32 rx_und;
u32 rx_crc;
u32 rx_align;
u32 rx_sym;
u32 rx_pause;
u32 rx_cntrl;
};
struct bcm_enet_priv {
/* mac id (from platform device id) */
int mac_id;
/* base remapped address of device */
void __iomem *base;
/* mac irq, rx_dma irq, tx_dma irq */
int irq;
int irq_rx;
int irq_tx;
/* hw view of rx & tx dma ring */
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
/* allocated size (in bytes) for rx & tx dma ring */
unsigned int rx_desc_alloc_size;
unsigned int tx_desc_alloc_size;
struct napi_struct napi;
/* dma channel id for rx */
int rx_chan;
/* number of dma desc in rx ring */
int rx_ring_size;
/* cpu view of rx dma ring */
struct bcm_enet_desc *rx_desc_cpu;
/* current number of armed descriptor given to hardware for rx */
int rx_desc_count;
/* next rx descriptor to fetch from hardware */
int rx_curr_desc;
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
/* size of allocated rx skbs */
unsigned int rx_skb_size;
/* list of skb given to hw for rx */
struct sk_buff **rx_skb;
/* used when rx skb allocation failed, so we defer rx queue
* refill */
struct timer_list rx_timeout;
/* lock rx_timeout against rx normal operation */
spinlock_t rx_lock;
/* dma channel id for tx */
int tx_chan;
/* number of dma desc in tx ring */
int tx_ring_size;
/* maximum dma burst size */
int dma_maxburst;
/* cpu view of rx dma ring */
struct bcm_enet_desc *tx_desc_cpu;
/* number of available descriptor for tx */
int tx_desc_count;
/* next tx descriptor avaiable */
int tx_curr_desc;
/* next dirty tx descriptor to reclaim */
int tx_dirty_desc;
/* list of skb given to hw for tx */
struct sk_buff **tx_skb;
/* lock used by tx reclaim and xmit */
spinlock_t tx_lock;
/* set if internal phy is ignored and external mii interface
* is selected */
int use_external_mii;
/* set if a phy is connected, phy address must be known,
* probing is not possible */
int has_phy;
int phy_id;
/* set if connected phy has an associated irq */
int has_phy_interrupt;
int phy_interrupt;
/* used when a phy is connected (phylib used) */
struct mii_bus *mii_bus;
struct phy_device *phydev;
int old_link;
int old_duplex;
int old_pause;
/* used when no phy is connected */
int force_speed_100;
int force_duplex_full;
/* pause parameters */
int pause_auto;
int pause_rx;
int pause_tx;
/* stats */
struct bcm_enet_mib_counters mib;
/* after mib interrupt, mib registers update is done in this
* work queue */
struct work_struct mib_update_task;
/* lock mib update between userspace request and workqueue */
struct mutex mib_update_lock;
/* mac clock */
struct clk *mac_clk;
/* phy clock if internal phy is used */
struct clk *phy_clk;
/* network device reference */
struct net_device *net_dev;
/* platform device reference */
struct platform_device *pdev;
/* maximum hardware transmit/receive size */
unsigned int hw_mtu;
bool enet_is_sw;
/* port mapping for switch devices */
int num_ports;
struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
int sw_port_link[ENETSW_MAX_PORT];
/* used to poll switch port state */
struct timer_list swphy_poll;
spinlock_t enetsw_mdio_lock;
/* dma channel enable mask */
u32 dma_chan_en_mask;
/* dma channel interrupt mask */
u32 dma_chan_int_mask;
/* DMA engine has internal SRAM */
bool dma_has_sram;
/* dma channel width */
unsigned int dma_chan_width;
/* dma descriptor shift value */
unsigned int dma_desc_shift;
};
#endif /* ! BCM63XX_ENET_H_ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,690 @@
/*
* Broadcom BCM7xxx System Port Ethernet MAC driver
*
* Copyright (C) 2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __BCM_SYSPORT_H
#define __BCM_SYSPORT_H
#include <linux/if_vlan.h>
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
#define DESC_ADDR_HI_SHIFT 0
#define DESC_ADDR_HI_MASK 0xff
#define DESC_STATUS_SHIFT 8
#define DESC_STATUS_MASK 0x3ff
#define DESC_LEN_SHIFT 18
#define DESC_LEN_MASK 0x7fff
#define DESC_ADDR_LO 0x04
/* HW supports 40-bit addressing hence the */
#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
/* Default RX buffer allocation size */
#define RX_BUF_LENGTH 2048
/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
* 1536 is multiple of 256 bytes
*/
#define ENET_BRCM_TAG_LEN 4
#define ENET_PAD 10
#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
/* Transmit status block */
/* Transmit status block: per-packet metadata (VLAN tag fields and L4
 * checksum offload pointers) handed to the hardware with each frame.
 */
struct bcm_tsb {
	u32 pcp_dei_vid;	/* VLAN PCP/DEI bits plus 12-bit VID */
#define PCP_DEI_MASK 0xf
#define VID_SHIFT 4
#define VID_MASK 0xfff
	u32 l4_ptr_dest_map;	/* L4 checksum pointers and destination map */
#define L4_CSUM_PTR_MASK 0x1ff
#define L4_PTR_SHIFT 9
#define L4_PTR_MASK 0x1ff
#define L4_UDP (1 << 18)
#define L4_LENGTH_VALID (1 << 19)
#define DEST_MAP_SHIFT 20
#define DEST_MAP_MASK 0x1ff
};
/* Receive status block uses the same
* definitions as the DMA descriptor
*/
struct bcm_rsb {
u32 rx_status_len;
u32 brcm_egress_tag;
};
/* Common Receive/Transmit status bits */
#define DESC_L4_CSUM (1 << 7)
#define DESC_SOP (1 << 8)
#define DESC_EOP (1 << 9)
/* Receive Status bits */
#define RX_STATUS_UCAST 0
#define RX_STATUS_BCAST 0x04
#define RX_STATUS_MCAST 0x08
#define RX_STATUS_L2_MCAST 0x0c
#define RX_STATUS_ERR (1 << 4)
#define RX_STATUS_OVFLOW (1 << 5)
#define RX_STATUS_PARSE_FAIL (1 << 6)
/* Transmit Status bits */
#define TX_STATUS_VLAN_NO_ACT 0x00
#define TX_STATUS_VLAN_PCP_TSB 0x01
#define TX_STATUS_VLAN_QUEUE 0x02
#define TX_STATUS_VLAN_VID_TSB 0x03
#define TX_STATUS_OWR_CRC (1 << 2)
#define TX_STATUS_APP_CRC (1 << 3)
#define TX_STATUS_BRCM_TAG_NO_ACT 0
#define TX_STATUS_BRCM_TAG_ZERO 0x10
#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
#define TX_STATUS_SKIP_BYTES (1 << 6)
/* Specific register definitions */
#define SYS_PORT_TOPCTRL_OFFSET 0
#define REV_CNTL 0x00
#define REV_MASK 0xffff
#define RX_FLUSH_CNTL 0x04
#define RX_FLUSH (1 << 0)
#define TX_FLUSH_CNTL 0x08
#define TX_FLUSH (1 << 0)
#define MISC_CNTL 0x0c
#define SYS_CLK_SEL (1 << 0)
#define TDMA_EOP_SEL (1 << 1)
/* Level-2 Interrupt controller offsets and defines */
#define SYS_PORT_INTRL2_0_OFFSET 0x200
#define SYS_PORT_INTRL2_1_OFFSET 0x240
#define INTRL2_CPU_STATUS 0x00
#define INTRL2_CPU_SET 0x04
#define INTRL2_CPU_CLEAR 0x08
#define INTRL2_CPU_MASK_STATUS 0x0c
#define INTRL2_CPU_MASK_SET 0x10
#define INTRL2_CPU_MASK_CLEAR 0x14
/* Level-2 instance 0 interrupt bits */
#define INTRL2_0_GISB_ERR (1 << 0)
#define INTRL2_0_RBUF_OVFLOW (1 << 1)
#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
#define INTRL2_0_MPD (1 << 3)
#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
#define INTRL2_0_RDMA_MBDONE (1 << 5)
#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
#define INTRL2_0_TX_RING_FULL (1 << 9)
#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
/* RXCHK offset and defines */
#define SYS_PORT_RXCHK_OFFSET 0x300
#define RXCHK_CONTROL 0x00
#define RXCHK_EN (1 << 0)
#define RXCHK_SKIP_FCS (1 << 1)
#define RXCHK_BAD_CSUM_DIS (1 << 2)
#define RXCHK_BRCM_TAG_EN (1 << 3)
#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
#define RXCHK_PARSE_TNL (1 << 12)
#define RXCHK_VIOL_EN (1 << 13)
#define RXCHK_VIOL_DIS (1 << 14)
#define RXCHK_INCOM_PKT (1 << 15)
#define RXCHK_V6_DUPEXT_EN (1 << 16)
#define RXCHK_V6_DUPEXT_DIS (1 << 17)
#define RXCHK_ETHERTYPE_DIS (1 << 18)
#define RXCHK_L2_HDR_DIS (1 << 19)
#define RXCHK_L3_HDR_DIS (1 << 20)
#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
#define RXCHK_PARSE_AUTH (1 << 22)
#define RXCHK_BRCM_TAG0 0x04
#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
#define RXCHK_BRCM_TAG0_MASK 0x24
#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
#define RXCHK_ETHERTYPE 0x48
#define RXCHK_BAD_CSUM_CNTR 0x4C
#define RXCHK_OTHER_DISC_CNTR 0x50
/* TXCHCK offsets and defines */
#define SYS_PORT_TXCHK_OFFSET 0x380
#define TXCHK_PKT_RDY_THRESH 0x00
/* Receive buffer offset and defines */
#define SYS_PORT_RBUF_OFFSET 0x400
#define RBUF_CONTROL 0x00
#define RBUF_RSB_EN (1 << 0)
#define RBUF_4B_ALGN (1 << 1)
#define RBUF_BRCM_TAG_STRIP (1 << 2)
#define RBUF_BAD_PKT_DISC (1 << 3)
#define RBUF_RESUME_THRESH_SHIFT 4
#define RBUF_RESUME_THRESH_MASK 0xff
#define RBUF_OK_TO_SEND_SHIFT 12
#define RBUF_OK_TO_SEND_MASK 0xff
#define RBUF_CRC_REPLACE (1 << 20)
#define RBUF_OK_TO_SEND_MODE (1 << 21)
#define RBUF_RSB_SWAP (1 << 22)
#define RBUF_ACPI_EN (1 << 23)
#define RBUF_PKT_RDY_THRESH 0x04
#define RBUF_STATUS 0x08
#define RBUF_WOL_MODE (1 << 0)
#define RBUF_MPD (1 << 1)
#define RBUF_ACPI (1 << 2)
#define RBUF_OVFL_DISC_CNTR 0x0c
#define RBUF_ERR_PKT_CNTR 0x10
/* Transmit buffer offset and defines */
#define SYS_PORT_TBUF_OFFSET 0x600
#define TBUF_CONTROL 0x00
#define TBUF_BP_EN (1 << 0)
#define TBUF_MAX_PKT_THRESH_SHIFT 1
#define TBUF_MAX_PKT_THRESH_MASK 0x1f
#define TBUF_FULL_THRESH_SHIFT 8
#define TBUF_FULL_THRESH_MASK 0x1f
/* UniMAC offset and defines */
#define SYS_PORT_UMAC_OFFSET 0x800
#define UMAC_CMD 0x008
#define CMD_TX_EN (1 << 0)
#define CMD_RX_EN (1 << 1)
#define CMD_SPEED_SHIFT 2
#define CMD_SPEED_10 0
#define CMD_SPEED_100 1
#define CMD_SPEED_1000 2
#define CMD_SPEED_2500 3
#define CMD_SPEED_MASK 3
#define CMD_PROMISC (1 << 4)
#define CMD_PAD_EN (1 << 5)
#define CMD_CRC_FWD (1 << 6)
#define CMD_PAUSE_FWD (1 << 7)
#define CMD_RX_PAUSE_IGNORE (1 << 8)
#define CMD_TX_ADDR_INS (1 << 9)
#define CMD_HD_EN (1 << 10)
#define CMD_SW_RESET (1 << 13)
#define CMD_LCL_LOOP_EN (1 << 15)
#define CMD_AUTO_CONFIG (1 << 22)
#define CMD_CNTL_FRM_EN (1 << 23)
#define CMD_NO_LEN_CHK (1 << 24)
#define CMD_RMT_LOOP_EN (1 << 25)
#define CMD_PRBL_EN (1 << 27)
#define CMD_TX_PAUSE_IGNORE (1 << 28)
#define CMD_TX_RX_EN (1 << 29)
#define CMD_RUNT_FILTER_DIS (1 << 30)
#define UMAC_MAC0 0x00c
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
#define UMAC_TX_FLUSH 0x334
#define UMAC_MIB_START 0x400
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
*/
#define UMAC_MIB_STAT_OFFSET 0xc
#define UMAC_MIB_CTRL 0x580
#define MIB_RX_CNT_RST (1 << 0)
#define MIB_RUNT_CNT_RST (1 << 1)
#define MIB_TX_CNT_RST (1 << 2)
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
#define MSEQ_LEN_SHIFT 16
#define MSEQ_LEN_MASK 0xff
#define PSW_EN (1 << 27)
#define UMAC_PSW_MS 0x624
#define UMAC_PSW_LS 0x628
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
/* Receive DMA offset and defines */
#define SYS_PORT_RDMA_OFFSET 0x2000
#define RDMA_CONTROL 0x1000
#define RDMA_EN (1 << 0)
#define RDMA_RING_CFG (1 << 1)
#define RDMA_DISC_EN (1 << 2)
#define RDMA_BUF_DATA_OFFSET_SHIFT 4
#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
#define RDMA_STATUS 0x1004
#define RDMA_DISABLED (1 << 0)
#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
#define RDMA_BP_STATUS (1 << 2)
#define RDMA_SCB_BURST_SIZE 0x1008
#define RDMA_RING_BUF_SIZE 0x100c
#define RDMA_RING_SIZE_SHIFT 16
#define RDMA_WRITE_PTR_HI 0x1010
#define RDMA_WRITE_PTR_LO 0x1014
#define RDMA_PROD_INDEX 0x1018
#define RDMA_PROD_INDEX_MASK 0xffff
#define RDMA_CONS_INDEX 0x101c
#define RDMA_CONS_INDEX_MASK 0xffff
#define RDMA_START_ADDR_HI 0x1020
#define RDMA_START_ADDR_LO 0x1024
#define RDMA_END_ADDR_HI 0x1028
#define RDMA_END_ADDR_LO 0x102c
#define RDMA_MBDONE_INTR 0x1030
#define RDMA_INTR_THRESH_MASK 0xff
#define RDMA_TIMEOUT_SHIFT 16
#define RDMA_TIMEOUT_MASK 0xffff
#define RDMA_XON_XOFF_THRESH 0x1034
#define RDMA_XON_XOFF_THRESH_MASK 0xffff
#define RDMA_XOFF_THRESH_SHIFT 16
#define RDMA_READ_PTR_HI 0x1038
#define RDMA_READ_PTR_LO 0x103c
#define RDMA_OVERRIDE 0x1040
#define RDMA_LE_MODE (1 << 0)
#define RDMA_REG_MODE (1 << 1)
#define RDMA_TEST 0x1044
#define RDMA_TP_OUT_SEL (1 << 0)
#define RDMA_MEM_SEL (1 << 1)
#define RDMA_DEBUG 0x1048
/* Transmit DMA offset and defines */
#define TDMA_NUM_RINGS 32 /* rings = queues */
#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bits words */
#define SYS_PORT_TDMA_OFFSET 0x4000
#define TDMA_WRITE_PORT_OFFSET 0x0000
#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
(i) * TDMA_PORT_SIZE)
#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
sizeof(u32) + (i) * TDMA_PORT_SIZE)
#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
(TDMA_NUM_RINGS * TDMA_PORT_SIZE))
#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
(i) * TDMA_PORT_SIZE)
#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
sizeof(u32) + (i) * TDMA_PORT_SIZE)
#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
(TDMA_NUM_RINGS * TDMA_PORT_SIZE))
#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
(i) * sizeof(u32))
#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
(TDMA_NUM_RINGS * sizeof(u32)))
/* Register offsets and defines relatives to a specific ring number */
#define RING_HEAD_TAIL_PTR 0x00
#define RING_HEAD_MASK 0x7ff
#define RING_TAIL_SHIFT 11
#define RING_TAIL_MASK 0x7ff
#define RING_FLUSH (1 << 24)
#define RING_EN (1 << 25)
#define RING_COUNT 0x04
#define RING_COUNT_MASK 0x7ff
#define RING_BUFF_DONE_SHIFT 11
#define RING_BUFF_DONE_MASK 0x7ff
#define RING_MAX_HYST 0x08
#define RING_MAX_THRESH_MASK 0x7ff
#define RING_HYST_THRESH_SHIFT 11
#define RING_HYST_THRESH_MASK 0x7ff
#define RING_INTR_CONTROL 0x0c
#define RING_INTR_THRESH_MASK 0x7ff
#define RING_EMPTY_INTR_EN (1 << 15)
#define RING_TIMEOUT_SHIFT 16
#define RING_TIMEOUT_MASK 0xffff
#define RING_PROD_CONS_INDEX 0x10
#define RING_PROD_INDEX_MASK 0xffff
#define RING_CONS_INDEX_SHIFT 16
#define RING_CONS_INDEX_MASK 0xffff
#define RING_MAPPING 0x14
#define RING_QID_MASK 0x3
#define RING_PORT_ID_SHIFT 3
#define RING_PORT_ID_MASK 0x7
#define RING_IGNORE_STATUS (1 << 6)
#define RING_FAILOVER_EN (1 << 7)
#define RING_CREDIT_SHIFT 8
#define RING_CREDIT_MASK 0xffff
#define RING_PCP_DEI_VID 0x18
#define RING_VID_MASK 0x7ff
#define RING_DEI (1 << 12)
#define RING_PCP_SHIFT 13
#define RING_PCP_MASK 0x7
#define RING_PKT_SIZE_ADJ_SHIFT 16
#define RING_PKT_SIZE_ADJ_MASK 0xf
#define TDMA_DESC_RING_SIZE 28
/* Definition for a given TX ring base address */
#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
((i) * TDMA_DESC_RING_SIZE))
/* Ring indexed register addresses */
#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
RING_HEAD_TAIL_PTR)
#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
RING_COUNT)
#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
RING_MAX_HYST)
#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
RING_INTR_CONTROL)
#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
(TDMA_DESC_RING_BASE(i) + \
RING_PROD_CONS_INDEX)
#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
RING_MAPPING)
#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
RING_PCP_DEI_VID)
#define TDMA_CONTROL 0x600
#define TDMA_EN (1 << 0)
#define TSB_EN (1 << 1)
#define TSB_SWAP (1 << 2)
#define ACB_ALGO (1 << 3)
#define BUF_DATA_OFFSET_SHIFT 4
#define BUF_DATA_OFFSET_MASK 0x3ff
#define VLAN_EN (1 << 14)
#define SW_BRCM_TAG (1 << 15)
#define WNC_KPT_SIZE_UPDATE (1 << 16)
#define SYNC_PKT_SIZE (1 << 17)
#define ACH_TXDONE_DELAY_SHIFT 18
#define ACH_TXDONE_DELAY_MASK 0xff
#define TDMA_STATUS 0x604
#define TDMA_DISABLED (1 << 0)
#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
#define TDMA_SCB_BURST_SIZE 0x608
#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
#define TDMA_OVER_HYST_THRESH_STATUS 0x610
#define TDMA_TPID 0x614
#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
#define TDMA_FREE_HEAD_MASK 0x7ff
#define TDMA_FREE_TAIL_SHIFT 11
#define TDMA_FREE_TAIL_MASK 0x7ff
#define TDMA_FREE_LIST_COUNT 0x61c
#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
#define TDMA_TIER2_ARB_CTRL 0x620
#define TDMA_ARB_MODE_RR 0
#define TDMA_ARB_MODE_WEIGHT_RR 0x1
#define TDMA_ARB_MODE_STRICT 0x2
#define TDMA_ARB_MODE_DEFICIT_RR 0x3
#define TDMA_CREDIT_SHIFT 4
#define TDMA_CREDIT_MASK 0xffff
#define TDMA_TIER1_ARB_0_CTRL 0x624
#define TDMA_ARB_EN (1 << 0)
#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
#define TDMA_TIER1_ARB_1_CTRL 0x62c
#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
#define TDMA_TIER1_ARB_2_CTRL 0x634
#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
#define TDMA_TIER1_ARB_3_CTRL 0x63c
#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
#define TDMA_LE_MODE (1 << 0)
#define TDMA_REG_MODE (1 << 1)
#define TDMA_TEST 0x648
#define TDMA_TP_OUT_SEL (1 << 0)
#define TDMA_MEM_TM (1 << 1)
#define TDMA_DEBUG 0x64c
/* Transmit/Receive descriptor: two 32-bit words (see DESC_* field
 * layout near the top of this header).
 */
struct dma_desc {
	u32 addr_status_len;	/* addr hi (7:0) + status + length */
	u32 addr_lo;		/* lower 32 bits of the buffer address */
};
/* Number of Receive hardware descriptor words */
#define NUM_HW_RX_DESC_WORDS 1024
/* Real number of usable descriptors */
#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
/* Internal linked-list RAM has up to 1536 entries */
#define NUM_TX_DESC 1536
/* 32-bit words per descriptor (2 for struct dma_desc) */
#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
/* Rx/Tx common counter group: packet counts bucketed by frame size.
 * All fields are read-only hardware counters.
 */
struct bcm_sysport_pkt_counters {
	u32 cnt_64;	/* RO Received/Transmitted 64 bytes packet */
	u32 cnt_127;	/* RO Rx/Tx 65-127 bytes packet */
	u32 cnt_255;	/* RO Rx/Tx 128-255 bytes packet */
	u32 cnt_511;	/* RO Rx/Tx 256-511 bytes packet */
	u32 cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */
	u32 cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */
	u32 cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */
	u32 cnt_2047;	/* RO Rx/Tx 1523-2047 bytes packet */
	u32 cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet */
	u32 cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet */
};
/* RSV, Receive Status Vector: UniMAC MIB receive counters.
 * Register offsets in parentheses are relative to the UMAC block.
 */
struct bcm_sysport_rx_counters {
	struct bcm_sysport_pkt_counters pkt_cnt;
	u32 pkt;	/* RO (0x428) Received pkt count */
	u32 bytes;	/* RO Received byte count */
	u32 mca;	/* RO # of Received multicast pkt */
	u32 bca;	/* RO # of Received broadcast pkt */
	u32 fcs;	/* RO # of Received FCS error */
	u32 cf;		/* RO # of Received control frame pkt */
	u32 pf;		/* RO # of Received pause frame pkt */
	u32 uo;		/* RO # of unknown op code pkt */
	u32 aln;	/* RO # of alignment error count */
	u32 flr;	/* RO # of frame length out of range count */
	u32 cde;	/* RO # of code error pkt */
	u32 fcr;	/* RO # of carrier sense error pkt */
	u32 ovr;	/* RO # of oversize pkt */
	u32 jbr;	/* RO # of jabber count */
	u32 mtue;	/* RO # of MTU error pkt */
	u32 pok;	/* RO # of Received good pkt */
	u32 uc;		/* RO # of unicast pkt */
	u32 ppp;	/* RO # of PPP pkt */
	u32 rcrc;	/* RO (0x470) # of CRC match pkt */
};
/* TSV, Transmit Status Vector: UniMAC MIB transmit counters.
 * Register offsets in parentheses are relative to the UMAC block.
 */
struct bcm_sysport_tx_counters {
	struct bcm_sysport_pkt_counters pkt_cnt;
	u32 pkts;	/* RO (0x4a8) Transmitted pkt */
	u32 mca;	/* RO # of xmited multicast pkt */
	u32 bca;	/* RO # of xmited broadcast pkt */
	u32 pf;		/* RO # of xmited pause frame count */
	u32 cf;		/* RO # of xmited control frame count */
	u32 fcs;	/* RO # of xmited FCS error count */
	u32 ovr;	/* RO # of xmited oversize pkt */
	u32 drf;	/* RO # of xmited deferral pkt */
	u32 edf;	/* RO # of xmited Excessive deferral pkt */
	u32 scl;	/* RO # of xmited single collision pkt */
	u32 mcl;	/* RO # of xmited multiple collision pkt */
	u32 lcl;	/* RO # of xmited late collision pkt */
	u32 ecl;	/* RO # of xmited excessive collision pkt */
	u32 frg;	/* RO # of xmited fragments pkt */
	u32 ncl;	/* RO # of xmited total collision count */
	u32 jbr;	/* RO # of xmited jabber count */
	u32 bytes;	/* RO # of xmited byte count */
	u32 pok;	/* RO # of xmited good pkt */
	u32 uc;		/* RO (0x4f0) # of xmited unicast pkt */
};
/* Aggregated software copy of all hardware MIB counters: UniMAC RX/TX
 * vectors plus the runt, RXCHK and RBUF block counters.
 */
struct bcm_sysport_mib {
	struct bcm_sysport_rx_counters rx;	/* UniMAC RX counters */
	struct bcm_sysport_tx_counters tx;	/* UniMAC TX counters */
	u32 rx_runt_cnt;		/* runt frame count */
	u32 rx_runt_fcs;		/* runt frames with FCS error */
	u32 rx_runt_fcs_align;		/* runt frames with FCS/alignment error */
	u32 rx_runt_bytes;		/* bytes in runt frames */
	u32 rxchk_bad_csum;		/* RXCHK bad checksum counter */
	u32 rxchk_other_pkt_disc;	/* RXCHK other discarded packets */
	u32 rbuf_ovflow_cnt;		/* RBUF overflow discard counter */
	u32 rbuf_err_cnt;		/* RBUF error packet counter */
};
/* HW maintains a large list of counters; this enum identifies which
 * source a given ethtool statistic is read from.
 */
enum bcm_sysport_stat_type {
	BCM_SYSPORT_STAT_NETDEV = -1,	/* from struct net_device_stats */
	BCM_SYSPORT_STAT_MIB_RX,	/* UniMAC MIB RX counter */
	BCM_SYSPORT_STAT_MIB_TX,	/* UniMAC MIB TX counter */
	BCM_SYSPORT_STAT_RUNT,		/* runt counter group */
	BCM_SYSPORT_STAT_RXCHK,		/* RXCHK counter (uses reg_offset) */
	BCM_SYSPORT_STAT_RBUF,		/* RBUF counter (uses reg_offset) */
};
/* Macros to help define ethtool statistics: each expands to an
 * initializer for struct bcm_sysport_stats. The ((type *)0)->m casts
 * are the usual compile-time sizeof/offsetof idiom and never
 * dereference the null pointer at runtime.
 */
/* Statistic backed by a struct net_device_stats member */
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCM_SYSPORT_STAT_NETDEV, \
}
/* Statistic backed by a struct bcm_sysport_priv member, with explicit type */
#define STAT_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
	.stat_offset = offsetof(struct bcm_sysport_priv, m), \
	.type = _type, \
}
#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
/* RXCHK statistic: also records the hardware register offset */
#define STAT_RXCHK(str, m, ofs) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
	.stat_offset = offsetof(struct bcm_sysport_priv, m), \
	.type = BCM_SYSPORT_STAT_RXCHK, \
	.reg_offset = ofs, \
}
/* RBUF statistic: also records the hardware register offset */
#define STAT_RBUF(str, m, ofs) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
	.stat_offset = offsetof(struct bcm_sysport_priv, m), \
	.type = BCM_SYSPORT_STAT_RBUF, \
	.reg_offset = ofs, \
}
/* Descriptor of one ethtool statistic (see the STAT_* macros above) */
struct bcm_sysport_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name exposed via ethtool */
	int stat_sizeof;	/* size of the backing struct member */
	int stat_offset;	/* offset of the member in its source struct */
	enum bcm_sysport_stat_type type;	/* which counter source */
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
/* Software house keeping helper structure: per-descriptor control block
 * tracking the skb and the DMA unmap information for that slot.
 */
struct bcm_sysport_cb {
	struct sk_buff *skb;		/* SKB for RX packets */
	void __iomem *bd_addr;		/* Buffer descriptor PHYS addr */
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* DMA address, for unmapping */
	DEFINE_DMA_UNMAP_LEN(dma_len);		/* DMA length, for unmapping */
};
/* Software view of the TX ring: one instance per hardware TX queue
 * (TDMA_NUM_RINGS of these live in struct bcm_sysport_priv).
 */
struct bcm_sysport_tx_ring {
	spinlock_t lock;		/* Ring lock for tx reclaim/xmit */
	struct napi_struct napi;	/* NAPI per tx queue */
	dma_addr_t desc_dma;		/* DMA cookie */
	unsigned int index;		/* Ring index */
	unsigned int size;		/* Ring current size */
	unsigned int alloc_size;	/* Ring one-time allocated size */
	unsigned int desc_count;	/* Number of descriptors */
	unsigned int curr_desc;		/* Current descriptor */
	unsigned int c_index;		/* Last consumer index */
	unsigned int p_index;		/* Current producer index */
	struct bcm_sysport_cb *cbs;	/* Transmit control blocks */
	struct dma_desc *desc_cpu;	/* CPU view of the descriptor */
	struct bcm_sysport_priv *priv;	/* private context backpointer */
};
/* Driver private structure: one instance per SYSTEMPORT device */
struct bcm_sysport_priv {
	void __iomem *base;	/* mapped register base */
	u32 irq0_stat;		/* level-2 interrupt 0 status */
	u32 irq0_mask;		/* level-2 interrupt 0 mask */
	u32 irq1_stat;		/* level-2 interrupt 1 status */
	u32 irq1_mask;		/* level-2 interrupt 1 mask */
	struct napi_struct napi ____cacheline_aligned;	/* RX NAPI context */
	struct net_device *netdev;
	struct platform_device *pdev;
	int irq0;		/* level-2 instance 0 interrupt line */
	int irq1;		/* level-2 instance 1 interrupt line */
	int wol_irq;		/* Wake-on-LAN interrupt line */
	/* Transmit rings */
	struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
	/* Receive queue */
	void __iomem *rx_bds;			/* RX buffer descriptor area */
	void __iomem *rx_bd_assign_ptr;		/* next descriptor to assign */
	unsigned int rx_bd_assign_index;	/* index of that descriptor */
	struct bcm_sysport_cb *rx_cbs;		/* RX control blocks */
	unsigned int num_rx_bds;		/* number of RX descriptors */
	unsigned int rx_read_ptr;		/* RX read pointer */
	unsigned int rx_c_index;		/* RX consumer index */
	/* PHY device */
	struct device_node *phy_dn;
	struct phy_device *phydev;
	phy_interface_t phy_interface;
	int old_pause;		/* last pause state seen by link adjust */
	int old_link;		/* last link state */
	int old_duplex;		/* last duplex state */
	/* Misc fields */
	unsigned int rx_chk_en:1;	/* RXCHK block enabled */
	unsigned int tsb_en:1;		/* transmit status block enabled */
	unsigned int crc_fwd:1;		/* CRC forwarding enabled */
	u16 rev;			/* hardware revision */
	u32 wolopts;			/* Wake-on-LAN options */
	unsigned int wol_irq_disabled:1;
	/* MIB related fields */
	struct bcm_sysport_mib mib;
	/* Ethtool */
	u32 msg_enable;
};
#endif /* __BCM_SYSPORT_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,488 @@
#ifndef _BGMAC_H
#define _BGMAC_H

/* Prefix kernel log messages with the module name; pr_fmt must be
 * defined before including headers that make use of it.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Logging wrappers bound to the bgmac's underlying bcma core device */
#define bgmac_err(bgmac, fmt, ...) \
	dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
#define bgmac_warn(bgmac, fmt, ...) \
	dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
#define bgmac_info(bgmac, fmt, ...) \
	dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
#define bgmac_dbg(bgmac, fmt, ...) \
	dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)

#include <linux/bcma/bcma.h>
#include <linux/netdevice.h>
#define BGMAC_DEV_CTL 0x000
#define BGMAC_DC_TSM 0x00000002
#define BGMAC_DC_CFCO 0x00000004
#define BGMAC_DC_RLSS 0x00000008
#define BGMAC_DC_MROR 0x00000010
#define BGMAC_DC_FCM_MASK 0x00000060
#define BGMAC_DC_FCM_SHIFT 5
#define BGMAC_DC_NAE 0x00000080
#define BGMAC_DC_TF 0x00000100
#define BGMAC_DC_RDS_MASK 0x00030000
#define BGMAC_DC_RDS_SHIFT 16
#define BGMAC_DC_TDS_MASK 0x000c0000
#define BGMAC_DC_TDS_SHIFT 18
#define BGMAC_DEV_STATUS 0x004 /* Configuration of the interface */
#define BGMAC_DS_RBF 0x00000001
#define BGMAC_DS_RDF 0x00000002
#define BGMAC_DS_RIF 0x00000004
#define BGMAC_DS_TBF 0x00000008
#define BGMAC_DS_TDF 0x00000010
#define BGMAC_DS_TIF 0x00000020
#define BGMAC_DS_PO 0x00000040
#define BGMAC_DS_MM_MASK 0x00000300 /* Mode of the interface */
#define BGMAC_DS_MM_SHIFT 8
#define BGMAC_BIST_STATUS 0x00c
#define BGMAC_INT_STATUS 0x020 /* Interrupt status */
#define BGMAC_IS_MRO 0x00000001
#define BGMAC_IS_MTO 0x00000002
#define BGMAC_IS_TFD 0x00000004
#define BGMAC_IS_LS 0x00000008
#define BGMAC_IS_MDIO 0x00000010
#define BGMAC_IS_MR 0x00000020
#define BGMAC_IS_MT 0x00000040
#define BGMAC_IS_TO 0x00000080
#define BGMAC_IS_DESC_ERR 0x00000400 /* Descriptor error */
#define BGMAC_IS_DATA_ERR 0x00000800 /* Data error */
#define BGMAC_IS_DESC_PROT_ERR 0x00001000 /* Descriptor protocol error */
#define BGMAC_IS_RX_DESC_UNDERF 0x00002000 /* Receive descriptor underflow */
#define BGMAC_IS_RX_F_OVERF 0x00004000 /* Receive FIFO overflow */
#define BGMAC_IS_TX_F_UNDERF 0x00008000 /* Transmit FIFO underflow */
#define BGMAC_IS_RX 0x00010000 /* Interrupt for RX queue 0 */
#define BGMAC_IS_TX0 0x01000000 /* Interrupt for TX queue 0 */
#define BGMAC_IS_TX1 0x02000000 /* Interrupt for TX queue 1 */
#define BGMAC_IS_TX2 0x04000000 /* Interrupt for TX queue 2 */
#define BGMAC_IS_TX3 0x08000000 /* Interrupt for TX queue 3 */
#define BGMAC_IS_TX_MASK 0x0f000000
#define BGMAC_IS_INTMASK 0x0f01fcff
#define BGMAC_IS_ERRMASK 0x0000fc00
#define BGMAC_INT_MASK 0x024 /* Interrupt mask */
#define BGMAC_GP_TIMER 0x028
#define BGMAC_INT_RECV_LAZY 0x100
#define BGMAC_IRL_TO_MASK 0x00ffffff
#define BGMAC_IRL_FC_MASK 0xff000000
#define BGMAC_IRL_FC_SHIFT 24 /* Shift the number of interrupts triggered per received frame */
#define BGMAC_FLOW_CTL_THRESH 0x104 /* Flow control thresholds */
#define BGMAC_WRRTHRESH 0x108
#define BGMAC_GMAC_IDLE_CNT_THRESH 0x10c
#define BGMAC_PHY_ACCESS 0x180 /* PHY access address */
#define BGMAC_PA_DATA_MASK 0x0000ffff
#define BGMAC_PA_ADDR_MASK 0x001f0000
#define BGMAC_PA_ADDR_SHIFT 16
#define BGMAC_PA_REG_MASK 0x1f000000
#define BGMAC_PA_REG_SHIFT 24
#define BGMAC_PA_WRITE 0x20000000
#define BGMAC_PA_START 0x40000000
#define BGMAC_PHY_CNTL 0x188 /* PHY control address */
#define BGMAC_PC_EPA_MASK 0x0000001f
#define BGMAC_PC_MCT_MASK 0x007f0000
#define BGMAC_PC_MCT_SHIFT 16
#define BGMAC_PC_MTE 0x00800000
#define BGMAC_TXQ_CTL 0x18c
#define BGMAC_TXQ_CTL_DBT_MASK 0x00000fff
#define BGMAC_TXQ_CTL_DBT_SHIFT 0
#define BGMAC_RXQ_CTL 0x190
#define BGMAC_RXQ_CTL_DBT_MASK 0x00000fff
#define BGMAC_RXQ_CTL_DBT_SHIFT 0
#define BGMAC_RXQ_CTL_PTE 0x00001000
#define BGMAC_RXQ_CTL_MDP_MASK 0x3f000000
#define BGMAC_RXQ_CTL_MDP_SHIFT 24
#define BGMAC_GPIO_SELECT 0x194
#define BGMAC_GPIO_OUTPUT_EN 0x198
/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */
#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100
#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000
#define BGMAC_HW_WAR 0x1e4
#define BGMAC_PWR_CTL 0x1e8
#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
#define BGMAC_DMA_BASE1 0x240 /* Tx controller only */
#define BGMAC_DMA_BASE2 0x280 /* Tx controller only */
#define BGMAC_DMA_BASE3 0x2C0 /* Tx controller only */
#define BGMAC_TX_GOOD_OCTETS 0x300
#define BGMAC_TX_GOOD_OCTETS_HIGH 0x304
#define BGMAC_TX_GOOD_PKTS 0x308
#define BGMAC_TX_OCTETS 0x30c
#define BGMAC_TX_OCTETS_HIGH 0x310
#define BGMAC_TX_PKTS 0x314
#define BGMAC_TX_BROADCAST_PKTS 0x318
#define BGMAC_TX_MULTICAST_PKTS 0x31c
#define BGMAC_TX_LEN_64 0x320
#define BGMAC_TX_LEN_65_TO_127 0x324
#define BGMAC_TX_LEN_128_TO_255 0x328
#define BGMAC_TX_LEN_256_TO_511 0x32c
#define BGMAC_TX_LEN_512_TO_1023 0x330
#define BGMAC_TX_LEN_1024_TO_1522 0x334
#define BGMAC_TX_LEN_1523_TO_2047 0x338
#define BGMAC_TX_LEN_2048_TO_4095 0x33c
#define BGMAC_TX_LEN_4095_TO_8191 0x340
#define BGMAC_TX_LEN_8192_TO_MAX 0x344
#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
#define BGMAC_TX_FRAGMENT_PKTS 0x350
#define BGMAC_TX_UNDERRUNS 0x354 /* Error */
#define BGMAC_TX_TOTAL_COLS 0x358
#define BGMAC_TX_SINGLE_COLS 0x35c
#define BGMAC_TX_MULTIPLE_COLS 0x360
#define BGMAC_TX_EXCESSIVE_COLS 0x364 /* Error */
#define BGMAC_TX_LATE_COLS 0x368 /* Error */
#define BGMAC_TX_DEFERED 0x36c
#define BGMAC_TX_CARRIER_LOST 0x370
#define BGMAC_TX_PAUSE_PKTS 0x374
#define BGMAC_TX_UNI_PKTS 0x378
#define BGMAC_TX_Q0_PKTS 0x37c
#define BGMAC_TX_Q0_OCTETS 0x380
#define BGMAC_TX_Q0_OCTETS_HIGH 0x384
#define BGMAC_TX_Q1_PKTS 0x388
#define BGMAC_TX_Q1_OCTETS 0x38c
#define BGMAC_TX_Q1_OCTETS_HIGH 0x390
#define BGMAC_TX_Q2_PKTS 0x394
#define BGMAC_TX_Q2_OCTETS 0x398
#define BGMAC_TX_Q2_OCTETS_HIGH 0x39c
#define BGMAC_TX_Q3_PKTS 0x3a0
#define BGMAC_TX_Q3_OCTETS 0x3a4
#define BGMAC_TX_Q3_OCTETS_HIGH 0x3a8
#define BGMAC_RX_GOOD_OCTETS 0x3b0
#define BGMAC_RX_GOOD_OCTETS_HIGH 0x3b4
#define BGMAC_RX_GOOD_PKTS 0x3b8
#define BGMAC_RX_OCTETS 0x3bc
#define BGMAC_RX_OCTETS_HIGH 0x3c0
#define BGMAC_RX_PKTS 0x3c4
#define BGMAC_RX_BROADCAST_PKTS 0x3c8
#define BGMAC_RX_MULTICAST_PKTS 0x3cc
#define BGMAC_RX_LEN_64 0x3d0
#define BGMAC_RX_LEN_65_TO_127 0x3d4
#define BGMAC_RX_LEN_128_TO_255 0x3d8
#define BGMAC_RX_LEN_256_TO_511 0x3dc
#define BGMAC_RX_LEN_512_TO_1023 0x3e0
#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
#define BGMAC_RX_FRAGMENT_PKTS 0x400
#define BGMAC_RX_MISSED_PKTS 0x404 /* Error */
#define BGMAC_RX_CRC_ALIGN_ERRS 0x408 /* Error */
#define BGMAC_RX_UNDERSIZE 0x40c /* Error */
#define BGMAC_RX_CRC_ERRS 0x410 /* Error */
#define BGMAC_RX_ALIGN_ERRS 0x414 /* Error */
#define BGMAC_RX_SYMBOL_ERRS 0x418 /* Error */
#define BGMAC_RX_PAUSE_PKTS 0x41c
#define BGMAC_RX_NONPAUSE_PKTS 0x420
#define BGMAC_RX_SACHANGES 0x424
#define BGMAC_RX_UNI_PKTS 0x428
#define BGMAC_UNIMAC_VERSION 0x800
#define BGMAC_HDBKP_CTL 0x804
/* UniMAC command configuration register and its bit fields */
#define BGMAC_CMDCFG 0x808 /* Configuration */
#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
#define BGMAC_CMDCFG_ES_10 0x00000000
#define BGMAC_CMDCFG_ES_100 0x00000004
#define BGMAC_CMDCFG_ES_1000 0x00000008
#define BGMAC_CMDCFG_ES_2500 0x0000000C
#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
#define BGMAC_CMDCFG_PAD_EN 0x00000020
#define BGMAC_CMDCFG_CF 0x00000040
#define BGMAC_CMDCFG_PF 0x00000080
#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
/* Pick the software-reset bit for a given core revision.
 * The argument is parenthesized so that callers passing expressions with
 * operators of lower precedence than "==" (e.g. "x & 0xf") expand
 * correctly; the previous "(rev == 4)" form silently miscompared.
 */
#define BGMAC_CMDCFG_SR(rev) (((rev) == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
#define BGMAC_CMDCFG_NLC 0x01000000
#define BGMAC_CMDCFG_RL 0x02000000
#define BGMAC_CMDCFG_RED 0x04000000
#define BGMAC_CMDCFG_PE 0x08000000
#define BGMAC_CMDCFG_TPI 0x10000000
#define BGMAC_CMDCFG_AT 0x20000000
#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
#define BGMAC_PAUSEQUANTA 0x818
#define BGMAC_MAC_MODE 0x844
#define BGMAC_OUTERTAG 0x848
#define BGMAC_INNERTAG 0x84c
#define BGMAC_TXIPG 0x85c
#define BGMAC_PAUSE_CTL 0xb30
#define BGMAC_TX_FLUSH 0xb34
#define BGMAC_RX_STATUS 0xb38
#define BGMAC_TX_STATUS 0xb3c
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
#define BGMAC_BCMA_IOST_ATTACHED 0x00000800
#define BGMAC_NUM_MIB_TX_REGS \
(((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1)
#define BGMAC_NUM_MIB_RX_REGS \
(((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1)
#define BGMAC_DMA_TX_CTL 0x00
#define BGMAC_DMA_TX_ENABLE 0x00000001
#define BGMAC_DMA_TX_SUSPEND 0x00000002
#define BGMAC_DMA_TX_LOOPBACK 0x00000004
#define BGMAC_DMA_TX_FLUSH 0x00000010
#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
#define BGMAC_DMA_TX_MR_SHIFT 6
#define BGMAC_DMA_TX_MR_1 0
#define BGMAC_DMA_TX_MR_2 1
#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */
#define BGMAC_DMA_TX_BL_SHIFT 18
#define BGMAC_DMA_TX_BL_16 0
#define BGMAC_DMA_TX_BL_32 1
#define BGMAC_DMA_TX_BL_64 2
#define BGMAC_DMA_TX_BL_128 3
#define BGMAC_DMA_TX_BL_256 4
#define BGMAC_DMA_TX_BL_512 5
#define BGMAC_DMA_TX_BL_1024 6
#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */
#define BGMAC_DMA_TX_PC_SHIFT 21
#define BGMAC_DMA_TX_PC_0 0
#define BGMAC_DMA_TX_PC_4 1
#define BGMAC_DMA_TX_PC_8 2
#define BGMAC_DMA_TX_PC_16 3
#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */
#define BGMAC_DMA_TX_PT_SHIFT 24
#define BGMAC_DMA_TX_PT_1 0
#define BGMAC_DMA_TX_PT_2 1
#define BGMAC_DMA_TX_PT_4 2
#define BGMAC_DMA_TX_PT_8 3
#define BGMAC_DMA_TX_INDEX 0x04
#define BGMAC_DMA_TX_RINGLO 0x08
#define BGMAC_DMA_TX_RINGHI 0x0C
#define BGMAC_DMA_TX_STATUS 0x10
#define BGMAC_DMA_TX_STATDPTR 0x00001FFF
#define BGMAC_DMA_TX_STAT 0xF0000000
#define BGMAC_DMA_TX_STAT_DISABLED 0x00000000
#define BGMAC_DMA_TX_STAT_ACTIVE 0x10000000
#define BGMAC_DMA_TX_STAT_IDLEWAIT 0x20000000
#define BGMAC_DMA_TX_STAT_STOPPED 0x30000000
#define BGMAC_DMA_TX_STAT_SUSP 0x40000000
#define BGMAC_DMA_TX_ERROR 0x14
#define BGMAC_DMA_TX_ERRDPTR 0x0001FFFF
#define BGMAC_DMA_TX_ERR 0xF0000000
#define BGMAC_DMA_TX_ERR_NOERR 0x00000000
#define BGMAC_DMA_TX_ERR_PROT 0x10000000
#define BGMAC_DMA_TX_ERR_UNDERRUN 0x20000000
#define BGMAC_DMA_TX_ERR_TRANSFER 0x30000000
#define BGMAC_DMA_TX_ERR_DESCREAD 0x40000000
#define BGMAC_DMA_TX_ERR_CORE 0x50000000
/* Receive DMA channel registers (offsets within a BGMAC_DMA_BASE* block) */
#define BGMAC_DMA_RX_CTL 0x20
#define BGMAC_DMA_RX_ENABLE 0x00000001
#define BGMAC_DMA_RX_FRAME_OFFSET_MASK 0x000000FE
#define BGMAC_DMA_RX_FRAME_OFFSET_SHIFT 1
#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */
#define BGMAC_DMA_RX_MR_SHIFT 6
/* Renamed from a copy-paste of the TX block: these are the RX
 * "multiple outstanding reads" field values. The BGMAC_DMA_TX_MR_*
 * variants remain defined in the TX section above.
 */
#define BGMAC_DMA_RX_MR_1 0
#define BGMAC_DMA_RX_MR_2 1
#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */
#define BGMAC_DMA_RX_BL_SHIFT 18
#define BGMAC_DMA_RX_BL_16 0
#define BGMAC_DMA_RX_BL_32 1
#define BGMAC_DMA_RX_BL_64 2
#define BGMAC_DMA_RX_BL_128 3
#define BGMAC_DMA_RX_BL_256 4
#define BGMAC_DMA_RX_BL_512 5
#define BGMAC_DMA_RX_BL_1024 6
#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */
#define BGMAC_DMA_RX_PC_SHIFT 21
#define BGMAC_DMA_RX_PC_0 0
#define BGMAC_DMA_RX_PC_4 1
#define BGMAC_DMA_RX_PC_8 2
#define BGMAC_DMA_RX_PC_16 3
#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */
#define BGMAC_DMA_RX_PT_SHIFT 24
#define BGMAC_DMA_RX_PT_1 0
#define BGMAC_DMA_RX_PT_2 1
#define BGMAC_DMA_RX_PT_4 2
#define BGMAC_DMA_RX_PT_8 3
#define BGMAC_DMA_RX_INDEX 0x24
#define BGMAC_DMA_RX_RINGLO 0x28
#define BGMAC_DMA_RX_RINGHI 0x2C
#define BGMAC_DMA_RX_STATUS 0x30
#define BGMAC_DMA_RX_STATDPTR 0x00001FFF
#define BGMAC_DMA_RX_STAT 0xF0000000
#define BGMAC_DMA_RX_STAT_DISABLED 0x00000000
#define BGMAC_DMA_RX_STAT_ACTIVE 0x10000000
#define BGMAC_DMA_RX_STAT_IDLEWAIT 0x20000000
#define BGMAC_DMA_RX_STAT_STOPPED 0x30000000
#define BGMAC_DMA_RX_STAT_SUSP 0x40000000
#define BGMAC_DMA_RX_ERROR 0x34
#define BGMAC_DMA_RX_ERRDPTR 0x0001FFFF
#define BGMAC_DMA_RX_ERR 0xF0000000
#define BGMAC_DMA_RX_ERR_NOERR 0x00000000
#define BGMAC_DMA_RX_ERR_PROT 0x10000000
#define BGMAC_DMA_RX_ERR_UNDERRUN 0x20000000
#define BGMAC_DMA_RX_ERR_TRANSFER 0x30000000
#define BGMAC_DMA_RX_ERR_DESCREAD 0x40000000
#define BGMAC_DMA_RX_ERR_CORE 0x50000000
/* Descriptor control word 0/1 bits */
#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */
#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */
#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */
#define BGMAC_DESC_CTL1_LEN 0x00001FFF
#define BGMAC_PHY_NOREGS 0x1E
#define BGMAC_PHY_MASK 0x1F
#define BGMAC_MAX_TX_RINGS 4
#define BGMAC_MAX_RX_RINGS 1
#define BGMAC_TX_RING_SLOTS 128
/* Parenthesized: the previous unparenthesized "512 - 1" miscomputes in
 * any surrounding expression, e.g. "2 * BGMAC_RX_RING_SLOTS" expanded
 * to "2 * 512 - 1".
 */
#define BGMAC_RX_RING_SLOTS (512 - 1) /* Why -1? Well, Broadcom does that... */
#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... */
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */
#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */
#define BGMAC_BFL_ENETVLAN 0x0100 /* can do vlan */
#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080
#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
#define BGMAC_WEIGHT 64
#define ETHER_MAX_LEN 1518
/* Per-slot book-keeping for a DMA ring: the skb attached to the slot's
 * descriptor and the DMA address its buffer was mapped to.
 */
struct bgmac_slot_info {
 struct sk_buff *skb;
 dma_addr_t dma_addr;
};
/* Hardware DMA descriptor. Fields are little-endian and the struct is
 * __packed because the layout must match the device exactly.
 * ctl0/ctl1 carry the BGMAC_DESC_CTL* flags and length; addr_low/addr_high
 * form the 64-bit buffer address.
 */
struct bgmac_dma_desc {
 __le32 ctl0;
 __le32 ctl1;
 __le32 addr_low;
 __le32 addr_high;
} __packed;
/* Direction of a DMA ring. */
enum bgmac_dma_ring_type {
 BGMAC_DMA_RING_TX,
 BGMAC_DMA_RING_RX,
};
/**
 * bgmac_dma_ring - contains info about DMA ring (either TX or RX one)
 * @start: index of the first slot containing data
 * @end: index of a slot that can *not* be read (yet)
 *
 * Be really aware of the specific @end meaning. It's an index of a slot *after*
 * the one containing data that can be read. If @start equals @end the ring is
 * empty.
 */
struct bgmac_dma_ring {
 u16 num_slots;
 u16 start;
 u16 end;
 u16 mmio_base;
 struct bgmac_dma_desc *cpu_base; /* CPU-visible descriptor array */
 dma_addr_t dma_base; /* device-visible address of cpu_base */
 u32 index_base; /* Used for unaligned rings only, otherwise 0 */
 bool unaligned;
 /* Sized for the largest (RX) ring; TX rings presumably use only the
  * first num_slots entries (BGMAC_TX_RING_SLOTS < BGMAC_RX_RING_SLOTS)
  * — TODO confirm against the ring-setup code.
  */
 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
};
/* Header the hardware prepends to each received frame; total size is
 * BGMAC_RX_HEADER_LEN (28) bytes, of which pad[] is unused.
 */
struct bgmac_rx_header {
 __le16 len;
 __le16 flags;
 __le16 pad[12];
};
/* Main driver state, one per bgmac core. */
struct bgmac {
 struct bcma_device *core;
 struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
 struct net_device *net_dev;
 struct napi_struct napi;
 struct mii_bus *mii_bus;
 struct phy_device *phy_dev;
 /* DMA */
 struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
 struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS];
 /* Stats */
 bool stats_grabbed; /* set once MIB counters have been snapshotted */
 u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS];
 u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
 /* Int */
 u32 int_mask; /* interrupts currently enabled */
 u32 int_status; /* last read interrupt status */
 /* Current MAC state */
 int mac_speed;
 int mac_duplex;
 u8 phyaddr; /* PHY address on the MII bus */
 bool has_robosw; /* board has a roboswitch (see BGMAC_BFL_ENETROBO) */
 bool loopback;
};
/* Read a 32-bit register at @offset in the bgmac core's register space. */
static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
{
 return bcma_read32(bgmac->core, offset);
}
/* Write @value to the 32-bit register at @offset in the bgmac core's
 * register space.
 */
static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
{
 bcma_write32(bgmac->core, offset, value);
}
/* Read-modify-write a register: keep only the bits selected by @mask,
 * then OR in @set.
 */
static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
				 u32 set)
{
	u32 val;

	val = bgmac_read(bgmac, offset);
	val &= mask;
	val |= set;
	bgmac_write(bgmac, offset, val);
}
/* AND the register at @offset with @mask, i.e. clear every bit that is
 * zero in @mask.
 */
static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask)
{
 bgmac_maskset(bgmac, offset, mask, 0);
}
/* OR @set into the register at @offset (mask of ~0 preserves all bits). */
static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
{
 bgmac_maskset(bgmac, offset, ~0, set);
}
#endif /* _BGMAC_H */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,89 @@
/* bnx2_fw.h: QLogic NX2 network driver.
*
* Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
* Copyright (c) 2014 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
/* Register maps for the on-chip processors. Each table lists, for one
 * processor, the register addresses and values used to halt/single-step it
 * and to access its program counter, instruction, breakpoint and scratchpad
 * registers (presumably during firmware load — confirm against the users of
 * struct cpu_reg).
 */
/* Initialized Values for the Completion Processor. */
static const struct cpu_reg cpu_reg_com = {
 .mode = BNX2_COM_CPU_MODE,
 .mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT,
 .mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA,
 .state = BNX2_COM_CPU_STATE,
 .state_value_clear = 0xffffff,
 .gpr0 = BNX2_COM_CPU_REG_FILE,
 .evmask = BNX2_COM_CPU_EVENT_MASK,
 .pc = BNX2_COM_CPU_PROGRAM_COUNTER,
 .inst = BNX2_COM_CPU_INSTRUCTION,
 .bp = BNX2_COM_CPU_HW_BREAKPOINT,
 .spad_base = BNX2_COM_SCRATCH,
 .mips_view_base = 0x8000000,
};
/* Initialized Values for the Command Processor. */
static const struct cpu_reg cpu_reg_cp = {
 .mode = BNX2_CP_CPU_MODE,
 .mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT,
 .mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA,
 .state = BNX2_CP_CPU_STATE,
 .state_value_clear = 0xffffff,
 .gpr0 = BNX2_CP_CPU_REG_FILE,
 .evmask = BNX2_CP_CPU_EVENT_MASK,
 .pc = BNX2_CP_CPU_PROGRAM_COUNTER,
 .inst = BNX2_CP_CPU_INSTRUCTION,
 .bp = BNX2_CP_CPU_HW_BREAKPOINT,
 .spad_base = BNX2_CP_SCRATCH,
 .mips_view_base = 0x8000000,
};
/* Initialized Values for the RX Processor. */
static const struct cpu_reg cpu_reg_rxp = {
 .mode = BNX2_RXP_CPU_MODE,
 .mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT,
 .mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA,
 .state = BNX2_RXP_CPU_STATE,
 .state_value_clear = 0xffffff,
 .gpr0 = BNX2_RXP_CPU_REG_FILE,
 .evmask = BNX2_RXP_CPU_EVENT_MASK,
 .pc = BNX2_RXP_CPU_PROGRAM_COUNTER,
 .inst = BNX2_RXP_CPU_INSTRUCTION,
 .bp = BNX2_RXP_CPU_HW_BREAKPOINT,
 .spad_base = BNX2_RXP_SCRATCH,
 .mips_view_base = 0x8000000,
};
/* Initialized Values for the TX Patch-up Processor. */
static const struct cpu_reg cpu_reg_tpat = {
 .mode = BNX2_TPAT_CPU_MODE,
 .mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT,
 .mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA,
 .state = BNX2_TPAT_CPU_STATE,
 .state_value_clear = 0xffffff,
 .gpr0 = BNX2_TPAT_CPU_REG_FILE,
 .evmask = BNX2_TPAT_CPU_EVENT_MASK,
 .pc = BNX2_TPAT_CPU_PROGRAM_COUNTER,
 .inst = BNX2_TPAT_CPU_INSTRUCTION,
 .bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
 .spad_base = BNX2_TPAT_SCRATCH,
 .mips_view_base = 0x8000000,
};
/* Initialized Values for the TX Processor. */
static const struct cpu_reg cpu_reg_txp = {
 .mode = BNX2_TXP_CPU_MODE,
 .mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT,
 .mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA,
 .state = BNX2_TXP_CPU_STATE,
 .state_value_clear = 0xffffff,
 .gpr0 = BNX2_TXP_CPU_REG_FILE,
 .evmask = BNX2_TXP_CPU_EVENT_MASK,
 .pc = BNX2_TXP_CPU_PROGRAM_COUNTER,
 .inst = BNX2_TXP_CPU_INSTRUCTION,
 .bp = BNX2_TXP_CPU_HW_BREAKPOINT,
 .spad_base = BNX2_TXP_SCRATCH,
 .mips_view_base = 0x8000000,
};

View file

@ -0,0 +1,8 @@
#
# Makefile for Broadcom 10-Gigabit ethernet driver
#
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
# SR-IOV support objects are linked in only when CONFIG_BNX2X_SRIOV is set
bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,205 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
#ifndef BNX2X_DCB_H
#define BNX2X_DCB_H
#include "bnx2x_hsi.h"
#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
/* DCBX application-priority state: whether the feature is enabled and the
 * priority assigned to each of the LLFC traffic types above.
 */
struct bnx2x_dcbx_app_params {
 u32 enabled;
 u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
};
#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS
/* bnx2x currently limits numbers of supported COSes to 3 to be extended to 6 */
#define BNX2X_MAX_COS_SUPPORT 3
#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT
#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT
/* Parameters of a single class of service: its bandwidth-table value, the
 * bitmask of priorities mapped to it, its strict-priority level and whether
 * it is pauseable.
 */
struct bnx2x_dcbx_cos_params {
 u32 bw_tbl;
 u32 pri_bitmask;
 /*
 * strict priority: valid values are 0..5; 0 is highest priority.
 * There can't be two COSes with the same priority.
 */
 u8 strict;
#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM
#define BNX2X_DCBX_STRICT_COS_HIGHEST 0
#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1)
 u8 pauseable;
};
/* ETS (priority-group) configuration: enable flag plus per-COS parameters. */
struct bnx2x_dcbx_pg_params {
 u32 enabled;
 u8 num_of_cos; /* valid COS entries */
 struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM];
};
/* PFC configuration: enable flag and the mask of non-pauseable priorities. */
struct bnx2x_dcbx_pfc_params {
 u32 enabled;
 u32 priority_non_pauseable_mask;
};
/* Aggregate per-port DCBX state: PFC, ETS and application priorities. */
struct bnx2x_dcbx_port_params {
 struct bnx2x_dcbx_pfc_params pfc;
 struct bnx2x_dcbx_pg_params ets;
 struct bnx2x_dcbx_app_params app;
};
#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
/* True when DCB is on and ETS is enabled in the port parameters. */
#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
 (bp)->dcbx_port_params.ets.enabled)
/* Admin-configured LLDP protocol timers/credits. */
struct bnx2x_config_lldp_params {
 u32 overwrite_settings;
 u32 msg_tx_hold;
 u32 msg_fast_tx;
 u32 tx_credit_max;
 u32 msg_tx_interval;
 u32 tx_fast;
};
/* One entry of the admin application-priority table, mapping an application
 * (by traffic type and app id) to a priority.
 */
struct bnx2x_admin_priority_app_table {
 u32 valid;
 u32 priority;
#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
 u32 traffic_type;
#define TRAFFIC_TYPE_ETH 0
#define TRAFFIC_TYPE_PORT 1
 u32 app_id;
};
#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
/* Administratively-configured DCBX settings (willingness flags, ETS
 * bandwidth/PG tables, PFC bitmap and the application-priority table).
 */
struct bnx2x_config_dcbx_params {
 u32 overwrite_settings;
 u32 admin_dcbx_version;
 u32 admin_ets_enable;
 u32 admin_pfc_enable;
 u32 admin_tc_supported_tx_enable;
 u32 admin_ets_configuration_tx_enable;
 u32 admin_ets_recommendation_tx_enable;
 u32 admin_pfc_tx_enable;
 u32 admin_application_priority_tx_enable;
 u32 admin_ets_willing;
 u32 admin_ets_reco_valid;
 u32 admin_pfc_willing;
 u32 admin_app_priority_willing;
 u32 admin_configuration_bw_precentage[8];
 u32 admin_configuration_ets_pg[8];
 u32 admin_recommendation_bw_precentage[8];
 u32 admin_recommendation_ets_pg[8];
 u32 admin_pfc_bitmap;
 struct bnx2x_admin_priority_app_table
 admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
 u32 admin_default_priority;
};
/* Generic flag helpers; SET/RESET modify @flags in place. */
#define GET_FLAGS(flags, bits) ((flags) & (bits))
#define SET_FLAGS(flags, bits) ((flags) |= (bits))
#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
/* Which DCBX MIB to read from the device. */
enum {
 DCBX_READ_LOCAL_MIB,
 DCBX_READ_REMOTE_MIB
};
#define ETH_TYPE_FCOE (0x8906)
#define TCP_PORT_ISCSI (0xCBC)
#define PFC_VALUE_FRAME_SIZE (512)
#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
 ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
/* Scratch data for building one COS entry during DCBX processing.
 * Note: spelled "pausable" here vs "pauseable" in bnx2x_dcbx_cos_params —
 * pre-existing naming inconsistency, kept for compatibility.
 */
struct cos_entry_help_data {
 u32 pri_join_mask;
 u32 cos_bw;
 u8 strict;
 bool pausable;
};
/* Scratch array of COS entries plus the count of valid ones. */
struct cos_help_data {
 struct cos_entry_help_data data[DCBX_COS_MAX_NUM];
 u8 num_of_cos;
};
#define DCBX_ILLEGAL_PG (0xFF)
#define DCBX_PFC_PRI_MASK (0xFF)
#define DCBX_STRICT_PRIORITY (15)
#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
/* Mask of priorities that must NOT be paused, from the port PFC config. */
#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
 ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
/* Complement of the above within a u8: the pauseable priorities. */
#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
 ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
 ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
 (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
 (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
/* pg_pri is now parenthesized on the left of '==' as well, matching
 * IS_DCBX_PFC_PRI_ONLY_NON_PAUSE below and standard macro hygiene.
 */
#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
 ((pg_pri) == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
 ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
/* Mixed: the priority set contains both pauseable and non-pauseable bits. */
#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
/* Scratch data for one priority group: how many distinct priorities it
 * holds, the PG number and the priority bitmap.
 */
struct pg_entry_help_data {
 u8 num_of_dif_pri;
 u8 pg;
 u32 pg_priority;
};
/* Scratch array of priority-group entries plus the count of valid ones. */
struct pg_help_data {
 struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
 u8 num_of_pg;
};
/* forward DCB/PFC related declarations */
struct bnx2x;
void bnx2x_dcbx_update(struct work_struct *work);
void bnx2x_dcbx_init_params(struct bnx2x *bp);
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
/* DCBX negotiation state machine values passed to bnx2x_dcbx_set_params(). */
enum {
 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
 BNX2X_DCBX_STATE_TX_PAUSED,
 BNX2X_DCBX_STATE_TX_RELEASED
};
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
/* DCB netlink */
#ifdef BCM_DCBNL
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,396 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNX2X_FW_DEFS_H
#define BNX2X_FW_DEFS_H
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[151].base + ((assertListEntry) * IRO[151].m1))
#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
(IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
IRO[157].m2))
#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
(IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
IRO[158].m2))
#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
(IRO[163].base + ((funcId) * IRO[163].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[153].base + ((funcId) * IRO[153].m1))
#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
(IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
(IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
* IRO[142].m2) + ((sbId) * IRO[142].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[324].base + ((pfId) * IRO[324].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[322].base + ((pfId) * IRO[322].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[312].base + ((pfId) * IRO[312].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
(IRO[146].base + ((pfId) * IRO[146].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
(IRO[147].base + ((pfId) * IRO[147].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
(IRO[145].base + ((pfId) * IRO[145].m1))
#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
(IRO[148].base + ((pfId) * IRO[148].m1))
#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
(IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
(IRO[137].base + ((sbId) * IRO[137].m1))
#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
(IRO[138].base + ((sbId) * IRO[138].m1))
#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
(IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
(IRO[136].base + ((sbId) * IRO[136].m1))
#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[141].base + ((sbId) * IRO[141].m1))
#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
(IRO[159].base + ((vfId) * IRO[159].m1))
#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
(IRO[160].base + ((vfId) * IRO[160].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[154].base + ((funcId) * IRO[154].m1))
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[207].base + ((pfId) * IRO[207].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IRO[205].base + ((pfId) * IRO[205].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IRO[223].base + ((pfId) * IRO[223].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[108].base + ((funcId) * IRO[108].m1))
#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
#define USTORM_AGG_DATA_SIZE (IRO[212].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[325].base + ((pfId) * IRO[325].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
(IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
IRO[215].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
(IRO[216].base + ((qzoneId) * IRO[216].m1))
#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
#define USTORM_TPA_BTR_SIZE (IRO[213].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[50].base + ((assertListEntry) * IRO[50].m1))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
(IRO[43].base + ((portId) * IRO[43].m1))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
(IRO[45].base + ((pfId) * IRO[45].m1))
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[309].base + ((pfId) * IRO[309].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
(IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
(IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[49].base + ((funcId) * IRO[49].m1))
#define XSTORM_SPQ_DATA_OFFSET(funcId) \
(IRO[32].base + ((funcId) * IRO[32].m1))
#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
(IRO[30].base + ((funcId) * IRO[30].m1))
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
(IRO[217].base + ((portId) * IRO[217].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[218].base + ((portId) * IRO[218].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
(IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
IRO[220].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* eth hsi version */
#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
#define PARSE_BD_INDEX 1
#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
#define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3
/* Rx ring params */
#define U_ETH_LOCAL_BD_RING_SIZE 8
#define U_ETH_LOCAL_SGE_RING_SIZE 10
#define U_ETH_SGL_SIZE 8
/* The fw will pad the buffer with this value, so the IP header \
will be aligned to 4 bytes */
#define IP_HEADER_ALIGNMENT_PADDING 2
#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
#define U_ETH_UNDEFINED_Q 0xFF
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY 10
#define ETH_NUM_OF_RSS_ENGINES_E2 72
#define FILTER_RULES_COUNT 16
#define MULTICAST_RULES_COUNT 16
#define CLASSIFY_RULES_COUNT 16
/*The CRC32 seed, that is used for the hash(reduction) multicast address */
#define ETH_CRC32_HASH_SEED 0x00000000
#define ETH_CRC32_HASH_BIT_SIZE (8)
#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
/* Maximal L2 clients supported */
#define ETH_MAX_RX_CLIENTS_E1 18
#define ETH_MAX_RX_CLIENTS_E1H 28
#define ETH_MAX_RX_CLIENTS_E2 152
/* Maximal statistics client Ids */
#define MAX_STAT_COUNTER_ID_E1 36
#define MAX_STAT_COUNTER_ID_E1H 56
#define MAX_STAT_COUNTER_ID_E2 140
#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
#define MAX_MAC_CREDIT_E2 272 /* Per Path */
#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
/* This file defines HSI constants common to all microcode flows */
#define PROTOCOL_STATE_BIT_OFFSET 6
#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
#define HC_SP_SB_MAX_INDICES 16
/* Number of indices per SB */
#define HC_SB_MAX_INDICES_E1X 8
#define HC_SB_MAX_INDICES_E2 8
#define HC_SB_MAX_SB_E1X 32
#define HC_SB_MAX_SB_E2 136
#define HC_SP_SB_ID 0xde
#define HC_SB_MAX_SM 2
#define HC_SB_MAX_DYNAMIC_INDICES 4
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define TIMERS_TICK_SIZE_CHIP (1e-3)
#define TSEMI_CLK1_RESUL_CHIP (1e-3)
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define XSTORM_IP_ID_ROLL_HALF 0x8000
#define XSTORM_IP_ID_ROLL_ALL 0
#define FW_LOG_LIST_SIZE 50
#define NUM_OF_SAFC_BITS 16
#define MAX_COS_NUMBER 4
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
#define STATS_QUERY_CMD_COUNT 16
#define AFEX_LIST_TABLE_SIZE 4096
#define INVALID_VNIC_ID 0xFF
#define UNDEF_IRO 0x80000000
/* used for defining the amount of FCoE tasks supported for PF */
#define MAX_FCOE_FUNCS_PER_ENGINE 2
#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
#endif /* BNX2X_FW_DEFS_H */

View file

@ -0,0 +1,38 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
* Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
#ifndef BNX2X_INIT_FILE_HDR_H
#define BNX2X_INIT_FILE_HDR_H
/* One section of the firmware binary: its length and byte offset within the
 * file. Fields are big-endian as stored in the file.
 */
struct bnx2x_fw_file_section {
 __be32 len;
 __be32 offset;
};
/* Header at the start of the firmware file: a directory of the init-ops,
 * init-data, per-STORM (t/u/c/x-sem) interrupt-table and PRAM sections,
 * the IRO array and the firmware version.
 */
struct bnx2x_fw_file_hdr {
 struct bnx2x_fw_file_section init_ops;
 struct bnx2x_fw_file_section init_ops_offsets;
 struct bnx2x_fw_file_section init_data;
 struct bnx2x_fw_file_section tsem_int_table_data;
 struct bnx2x_fw_file_section tsem_pram_data;
 struct bnx2x_fw_file_section usem_int_table_data;
 struct bnx2x_fw_file_section usem_pram_data;
 struct bnx2x_fw_file_section csem_int_table_data;
 struct bnx2x_fw_file_section csem_pram_data;
 struct bnx2x_fw_file_section xsem_int_table_data;
 struct bnx2x_fw_file_section xsem_pram_data;
 struct bnx2x_fw_file_section iro_arr;
 struct bnx2x_fw_file_section fw_version;
};
#endif /* BNX2X_INIT_FILE_HDR_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,786 @@
/* bnx2x_init.h: Broadcom Everest network driver.
* Structures and macroes needed during the initialization.
*
* Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Modified by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H
/* Init operation types and structures */
enum {
 OP_RD = 0x1, /* read a single register */
 OP_WR, /* write a single register */
 OP_SW, /* copy a string to the device */
 OP_ZR, /* clear memory */
 OP_ZP, /* unzip then copy with DMAE */
 OP_WR_64, /* write 64 bit pattern */
 OP_WB, /* copy a string using DMAE */
 OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
 /* Skip the following ops if all of the init modes don't match */
 OP_IF_MODE_OR,
 /* Skip the following ops if any of the init modes don't match */
 OP_IF_MODE_AND,
 OP_MAX
};
/* Marks the start or end of a block stage; used with BLOCK_OPS_IDX(). */
enum {
 STAGE_START,
 STAGE_END,
};
/* Returns the index of start or end of a specific block stage in ops array*/
#define BLOCK_OPS_IDX(block, stage, end) \
 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
/* structs for the various opcodes */
/* Generic form: 8-bit opcode + 24-bit register offset + raw payload. */
struct raw_op {
 u32 op:8;
 u32 offset:24;
 u32 raw_data;
};
struct op_read {
 u32 op:8;
 u32 offset:24;
 u32 val;
};
struct op_write {
 u32 op:8;
 u32 offset:24;
 u32 val;
};
/* Array write: data_off/data_len locate the payload; the half-word order
 * depends on host endianness so the 32-bit word matches raw_op.raw_data.
 */
struct op_arr_write {
 u32 op:8;
 u32 offset:24;
#ifdef __BIG_ENDIAN
 u16 data_len;
 u16 data_off;
#else /* __LITTLE_ENDIAN */
 u16 data_off;
 u16 data_len;
#endif
};
struct op_zero {
 u32 op:8;
 u32 offset:24;
 u32 len;
};
/* Conditional op: skips ops depending on mode_bit_map (see OP_IF_MODE_*). */
struct op_if_mode {
 u32 op:8;
 u32 cmd_offset:24;
 u32 mode_bit_map;
};
/* Any single init operation, viewed through the opcode-specific layout. */
union init_op {
 struct op_read read;
 struct op_write write;
 struct op_arr_write arr_wr;
 struct op_zero zero;
 struct raw_op raw;
 struct op_if_mode if_mode;
};
/* Init Phases */
/* Init Phases - the 'stage' dimension of BLOCK_OPS_IDX(): one common
 * phase, then per-port and per-PF phases.
 */
enum {
	PHASE_COMMON,
	PHASE_PORT0,
	PHASE_PORT1,
	PHASE_PF0,
	PHASE_PF1,
	PHASE_PF2,
	PHASE_PF3,
	PHASE_PF4,
	PHASE_PF5,
	PHASE_PF6,
	PHASE_PF7,
	NUM_OF_INIT_PHASES /* sentinel - number of phases */
};
/* Init Modes */
/* Init Modes - single-bit flags combined into the INIT_MODE_FLAGS(bp)
 * bit map that the OP_IF_MODE_AND/OP_IF_MODE_OR ops test against.
 */
enum {
	MODE_ASIC = 0x00000001,
	MODE_FPGA = 0x00000002,
	MODE_EMUL = 0x00000004,
	MODE_E2 = 0x00000008,
	MODE_E3 = 0x00000010,
	MODE_PORT2 = 0x00000020,
	MODE_PORT4 = 0x00000040,
	MODE_SF = 0x00000080,
	MODE_MF = 0x00000100,
	MODE_MF_SD = 0x00000200,
	MODE_MF_SI = 0x00000400,
	MODE_MF_AFEX = 0x00000800,
	MODE_E3_A0 = 0x00001000,
	MODE_E3_B0 = 0x00002000,
	MODE_COS3 = 0x00004000,
	MODE_COS6 = 0x00008000,
	MODE_LITTLE_ENDIAN = 0x00010000,
	MODE_BIG_ENDIAN = 0x00020000,
};
/* Init Blocks */
/* Init Blocks - the 'block' dimension of BLOCK_OPS_IDX(); one entry per
 * HW block that has an init-ops slice.
 */
enum {
	BLOCK_ATC,
	BLOCK_BRB1,
	BLOCK_CCM,
	BLOCK_CDU,
	BLOCK_CFC,
	BLOCK_CSDM,
	BLOCK_CSEM,
	BLOCK_DBG,
	BLOCK_DMAE,
	BLOCK_DORQ,
	BLOCK_HC,
	BLOCK_IGU,
	BLOCK_MISC,
	BLOCK_NIG,
	BLOCK_PBF,
	BLOCK_PGLUE_B,
	BLOCK_PRS,
	BLOCK_PXP2,
	BLOCK_PXP,
	BLOCK_QM,
	BLOCK_SRC,
	BLOCK_TCM,
	BLOCK_TM,
	BLOCK_TSDM,
	BLOCK_TSEM,
	BLOCK_UCM,
	BLOCK_UPB,
	BLOCK_USDM,
	BLOCK_USEM,
	BLOCK_XCM,
	BLOCK_XPB,
	BLOCK_XSDM,
	BLOCK_XSEM,
	BLOCK_MISC_AEU,
	NUM_OF_INIT_BLOCKS /* sentinel - number of blocks */
};
/* QM queue numbers */
#define BNX2X_ETH_Q 0
#define BNX2X_TOE_Q 3
#define BNX2X_TOE_ACK_Q 6
#define BNX2X_ISCSI_Q 9
#define BNX2X_ISCSI_ACK_Q 11
#define BNX2X_FCOE_Q 10
/* Vnics per mode */
#define BNX2X_PORT2_MODE_NUM_VNICS 4
#define BNX2X_PORT4_MODE_NUM_VNICS 2
/* COS offset for port1 in E3 B0 4port mode */
#define BNX2X_E3B0_PORT1_COS_OFFSET 3
/* QM Register addresses */
#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
/* extracts the QM queue number for the specified port and vnic */
#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
((((port) << 1) | (vnic)) * 16 + (q_num))
/* Maps the specified queue to the specified COS */
/* Remap QM queue 'q_num' to COS 'new_cos' for every vnic on this port.
 * Reads the current mapping first and touches the registers only when
 * the mapping actually changes.
 */
static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
{
	/* find current COS mapping */
	u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
	/* check if queue->COS mapping has changed */
	if (curr_cos != new_cos) {
		u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
		u32 reg_addr, reg_bit_map, vnic;
		/* update parameters for 4port mode */
		if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
			num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
			/* port1 COS registers are shifted in E3 B0 4port mode */
			if (BP_PORT(bp)) {
				curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
				new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
			}
		}
		/* change queue mapping for each VNIC */
		for (vnic = 0; vnic < num_vnics; vnic++) {
			u32 pf_q_num =
				BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
			/* one bit per queue inside a 32-queue bit-map register */
			u32 q_bit_map = 1 << (pf_q_num & 0x1f);
			/* overwrite queue->VOQ mapping */
			REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
			/* clear queue bit from current COS bit map */
			reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
			reg_bit_map = REG_RD(bp, reg_addr);
			REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
			/* set queue bit in new COS bit map */
			reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
			reg_bit_map = REG_RD(bp, reg_addr);
			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
			/* set/clear queue bit in command-queue bit map
			 * (E2/E3A0 only, valid COS values are 0/1)
			 */
			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
				reg_bit_map = REG_RD(bp, reg_addr);
				/* two bits per queue in the command-queue map */
				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
				reg_bit_map = new_cos ?
					(reg_bit_map | q_bit_map) :
					(reg_bit_map & (~q_bit_map));
				REG_WR(bp, reg_addr, reg_bit_map);
			}
		}
	}
}
/* Configures the QM according to the specified per-traffic-type COSes */
/* Configure the QM according to the specified per-traffic-type COSes:
 * map the storage queues (FCoE/iSCSI) unconditionally, and the
 * network-traffic queues only outside STATIC_COS mode.
 */
static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
				       struct priority_cos *traffic_cos)
{
	bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
	bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
	bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
	if (mode != STATIC_COS) {
		/* required only in backward compatible COS mode */
		bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
		bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
		bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
	}
}
/* congestion management port init api description
* the api works as follows:
* the driver should pass the cmng_init_input struct, the port_init function
* will prepare the required internal ram structure which will be passed back
* to the driver (cmng_init) that will write it into the internal ram.
*
* IMPORTANT REMARKS:
* 1. the cmng_init struct does not represent the contiguous internal ram
* structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
* offset in order to write the port sub struct and the
* PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
* words - don't use memcpy!).
* 2. although the cmng_init struct is filled for the maximal vnic number
* possible, the driver should only write the valid vnics into the internal
* ram according to the appropriate port mode.
*/
#define BITS_TO_BYTES(x) ((x)/8)
/* CMNG constants, as derived from system spec calculations */
/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
#define DEF_MIN_RATE 100
/* resolution of the rate shaping timer - 400 usec */
#define RS_PERIODIC_TIMEOUT_USEC 400
/* number of bytes in single QM arbitration cycle -
* coefficient for calculating the fairness timer
*/
#define QM_ARB_BYTES 160000
/* resolution of Min algorithm 1:100 */
#define MIN_RES 100
/* how many bytes above threshold for
* the minimal credit of Min algorithm
*/
#define MIN_ABOVE_THRESH 32768
/* Fairness algorithm integration time coefficient -
* for calculating the actual Tfair
*/
#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
/* Memory of fairness algorithm - 2 cycles */
#define FAIR_MEM 2
#define SAFC_TIMEOUT_USEC 52
#define SDM_TICKS 4
/* Fill the rate-shaping (max-rate) part of the CMNG ram image:
 * per-port timer parameters plus per-vnic rate and quota.
 */
static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
				  u32 r_param, struct cmng_init *ram_data)
{
	struct cmng_vnic *vnics = &ram_data->vnic;
	struct cmng_struct_per_port *port = &ram_data->port;
	u32 vn;

	/* rate shaping per-port variables:
	 * the periodic timeout is expressed in SDM ticks of 4 usec each,
	 * so 100 usec == 25 ticks
	 */
	port->rs_vars.rs_periodic_timeout =
		RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;

	/* threshold below which no timer arming will occur; the 1.25
	 * coefficient makes it a little bigger than the real time to
	 * compensate for timer inaccuracy
	 */
	port->rs_vars.rs_threshold =
		(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;

	/* rate shaping per-vnic variables */
	for (vn = 0; vn < BNX2X_PORT2_MODE_NUM_VNICS; vn++) {
		/* global vnic counter - maximal Mbps for this vnic */
		vnics->vnic_max_rate[vn].vn_counter.rate =
			input_data->vnic_max_rate[vn];
		/* the quota in each timer period - number of bytes
		 * transmitted in this period
		 */
		vnics->vnic_max_rate[vn].vn_counter.quota =
			RS_PERIODIC_TIMEOUT_USEC *
			(u32)vnics->vnic_max_rate[vn].vn_counter.rate / 8;
	}
}
/* Fill the fairness (min-rate) part of the CMNG ram image: per-port
 * fairness timer parameters plus per-vnic credit deltas (only when at
 * least one vnic has a non-zero minimal rate).
 */
static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
				  u32 r_param, struct cmng_init *ram_data)
{
	u32 vn, fair_timeout_usec, weight_sum, t_fair;
	struct cmng_vnic *vnics = &ram_data->vnic;
	struct cmng_struct_per_port *port = &ram_data->port;
	u32 min_credit;

	/* this is the resolution of the fairness timer */
	fair_timeout_usec = QM_ARB_BYTES / r_param;

	/* fairness per-port variables:
	 * for 10G it is 1000usec. for 1G it is 10000usec.
	 */
	t_fair = T_FAIR_COEF / input_data->port_rate;

	/* this is the threshold below which we won't arm the timer anymore */
	port->fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
	 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
	 */
	port->fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;

	/* since each SDM tick is 4 microSeconds */
	port->fair_vars.fairness_timeout = fair_timeout_usec / SDM_TICKS;

	/* calculate sum of weights */
	weight_sum = 0;
	for (vn = 0; vn < BNX2X_PORT2_MODE_NUM_VNICS; vn++)
		weight_sum += input_data->vnic_min_rate[vn];

	/* global vnic counter */
	if (weight_sum == 0)
		return;

	min_credit = port->fair_vars.fair_threshold + MIN_ABOVE_THRESH;

	/* fairness per-vnic variables */
	for (vn = 0; vn < BNX2X_PORT2_MODE_NUM_VNICS; vn++) {
		/* this is the credit for each period of the fairness
		 * algorithm - number of bytes in T_FAIR (this vnic
		 * share of the port rate)
		 */
		vnics->vnic_min_rate[vn].vn_credit_delta =
			(u32)input_data->vnic_min_rate[vn] * 100 *
			(T_FAIR_COEF / (8 * 100 * weight_sum));
		if (vnics->vnic_min_rate[vn].vn_credit_delta < min_credit)
			vnics->vnic_min_rate[vn].vn_credit_delta = min_credit;
	}
}
/* Fill the per-COS WRR credit deltas of the CMNG ram image (only when
 * at least one COS has a non-zero minimal rate).
 */
static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
				     u32 r_param, struct cmng_init *ram_data)
{
	u32 vn, cos;
	u32 weight_sum = 0;
	struct cmng_vnic *vnics = &ram_data->vnic;
	struct cmng_struct_per_port *port = &ram_data->port;

	/* sum of all per-COS minimal rates */
	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
		weight_sum += input_data->cos_min_rate[cos];

	if (weight_sum == 0)
		return;

	for (vn = 0; vn < BNX2X_PORT2_MODE_NUM_VNICS; vn++) {
		/* Since cos and vnic shouldn't work together the rate
		 * to divide between the coses is the port rate.
		 */
		u32 *credits = vnics->vnic_min_rate[vn].cos_credit_delta;
		u32 min_credit = port->fair_vars.fair_threshold +
				 MIN_ABOVE_THRESH;

		for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
			/* this is the credit for each period of the
			 * fairness algorithm - number of bytes in
			 * T_FAIR (this cos share of the vnic rate)
			 */
			credits[cos] =
				(u32)input_data->cos_min_rate[cos] * 100 *
				(T_FAIR_COEF / (8 * 100 * weight_sum));
			if (credits[cos] < min_credit)
				credits[cos] = min_credit;
		}
	}
}
/* Fill the SAFC part of the CMNG ram image with the fixed timeout.
 * input_data is unused here; kept so all bnx2x_init_* helpers share the
 * same call shape (see bnx2x_init_cmng).
 */
static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
				   struct cmng_init *ram_data)
{
	/* in microSeconds */
	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
}
/* Congestion management port init */
/* Congestion management port init: zero the ram image, then fill its
 * max-rate, min-rate, WRR and SAFC sections from the driver input.
 * The caller writes the resulting cmng_init into internal ram (see the
 * "congestion management port init api description" comment above).
 */
static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
				   struct cmng_init *ram_data)
{
	u32 r_param;
	memset(ram_data, 0, sizeof(struct cmng_init));
	ram_data->port.flags = input_data->flags;
	/* number of bytes transmitted in a rate of 10Gbps
	 * in one usec = 1.25KB.
	 */
	r_param = BITS_TO_BYTES(input_data->port_rate);
	bnx2x_init_max(input_data, r_param, ram_data);
	bnx2x_init_min(input_data, r_param, ram_data);
	bnx2x_init_fw_wrr(input_data, r_param, ram_data);
	bnx2x_init_safc(input_data, ram_data);
}
/* Returns the index of start or end of a specific block stage in ops array */
#define BLOCK_OPS_IDX(block, stage, end) \
(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
#define INITOP_SET 0 /* set the HW directly */
#define INITOP_CLEAR 1 /* clear the HW directly */
#define INITOP_INIT 2 /* set the init-value array */
/****************************************************************************
* ILT management
****************************************************************************/
struct ilt_line {
dma_addr_t page_mapping;
void *page;
u32 size;
};
struct ilt_client_info {
u32 page_size;
u16 start;
u16 end;
u16 client_num;
u16 flags;
#define ILT_CLIENT_SKIP_INIT 0x1
#define ILT_CLIENT_SKIP_MEM 0x2
};
struct bnx2x_ilt {
u32 start_line;
struct ilt_line *lines;
struct ilt_client_info clients[4];
#define ILT_CLIENT_CDU 0
#define ILT_CLIENT_QM 1
#define ILT_CLIENT_SRC 2
#define ILT_CLIENT_TM 3
};
/****************************************************************************
* SRC configuration
****************************************************************************/
struct src_ent {
u8 opaque[56];
u64 next;
};
/****************************************************************************
* Parity configuration
****************************************************************************/
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK, \
block##_REG_##block##_PRTY_STS_CLR, \
en_mask, {m1, m1h, m2, m3}, #block \
}
#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK_0, \
block##_REG_##block##_PRTY_STS_CLR_0, \
en_mask, {m1, m1h, m2, m3}, #block"_0" \
}
#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK_1, \
block##_REG_##block##_PRTY_STS_CLR_1, \
en_mask, {m1, m1h, m2, m3}, #block"_1" \
}
static const struct {
u32 mask_addr;
u32 sts_clr_addr;
u32 en_mask; /* Mask to enable parity attentions */
struct {
u32 e1; /* 57710 */
u32 e1h; /* 57711 */
u32 e2; /* 57712 */
u32 e3; /* 578xx */
} reg_mask; /* Register mask (all valid bits) */
char name[8]; /* Block's longest name is 7 characters long
* (name + suffix)
*/
} bnx2x_blocks_parity_data[] = {
/* bit 19 masked */
/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
/* bit 5,18,20-31 */
/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
/* bit 5 */
/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
* want to handle "system kill" flow at the moment.
*/
BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
0x7ffffff),
BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
{0xf, 0xf, 0xf, 0xf}, "UPB"},
{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
{0xf, 0xf, 0xf, 0xf}, "XPB"},
BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff),
BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};
/* [28] MCP Latched rom_parity
* [29] MCP Latched ump_rx_parity
* [30] MCP Latched ump_tx_parity
* [31] MCP Latched scpad_parity
*/
#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
/* Below registers control the MCP parity attention output. When
* MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
* enabled, when cleared - disabled.
*/
static const struct {
u32 addr;
u32 bits;
} mcp_attn_ctl_regs[] = {
{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
MISC_AEU_ENABLE_MCP_PRTY_BITS },
{ MISC_REG_AEU_ENABLE4_NIG_0,
MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
{ MISC_REG_AEU_ENABLE4_PXP_0,
MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
MISC_AEU_ENABLE_MCP_PRTY_BITS },
{ MISC_REG_AEU_ENABLE4_NIG_1,
MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
{ MISC_REG_AEU_ENABLE4_PXP_1,
MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
/* Enable or disable the MCP parity attention bits in every AEU control
 * register listed in mcp_attn_ctl_regs (read-modify-write each one).
 */
static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(mcp_attn_ctl_regs); idx++) {
		u32 val = REG_RD(bp, mcp_attn_ctl_regs[idx].addr);

		val = enable ? (val | mcp_attn_ctl_regs[idx].bits) :
			       (val & ~mcp_attn_ctl_regs[idx].bits);
		REG_WR(bp, mcp_attn_ctl_regs[idx].addr, val);
	}
}
/* Return the valid-bits register mask of parity table entry 'idx' for
 * the chip family of 'bp' (E1/E1H/E2, E3 otherwise).
 */
static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
{
	if (CHIP_IS_E1(bp))
		return bnx2x_blocks_parity_data[idx].reg_mask.e1;
	else if (CHIP_IS_E1H(bp))
		return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
	else if (CHIP_IS_E2(bp))
		return bnx2x_blocks_parity_data[idx].reg_mask.e2;
	else /* CHIP_IS_E3 */
		return bnx2x_blocks_parity_data[idx].reg_mask.e3;
}
/* Mask (disable) parity attentions of every block that has valid bits
 * on this chip, then disable the MCP parity attentions as well.
 */
static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(bnx2x_blocks_parity_data); idx++) {
		u32 dis_mask = bnx2x_parity_reg_mask(bp, idx);

		if (!dis_mask)
			continue;
		REG_WR(bp, bnx2x_blocks_parity_data[idx].mask_addr,
		       dis_mask);
		DP(NETIF_MSG_HW, "Setting parity mask "
		   "for %s to\t\t0x%x\n",
		   bnx2x_blocks_parity_data[idx].name, dis_mask);
	}
	/* Disable MCP parity attentions */
	bnx2x_set_mcp_parity(bp, false);
}
/* Clear the parity error status registers. */
/* Clear the parity error status registers of all blocks (reporting any
 * latched errors), then clear the latched MCP parity attentions.
 */
static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
{
	int i;
	u32 reg_val, mcp_aeu_bits =
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
	/* Clear SEM_FAST parities */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
		if (reg_mask) {
			/* reading the STS_CLR register clears the status */
			reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
					 sts_clr_addr);
			if (reg_val & reg_mask)
				DP(NETIF_MSG_HW,
					    "Parity errors in %s: 0x%x\n",
					    bnx2x_blocks_parity_data[i].name,
					    reg_val & reg_mask);
		}
	}
	/* Check if there were parity attentions in MCP */
	reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
	if (reg_val & mcp_aeu_bits)
		DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
		   reg_val & mcp_aeu_bits);
	/* Clear parity attentions in MCP:
	 * [7]  clears Latched rom_parity
	 * [8]  clears Latched ump_rx_parity
	 * [9]  clears Latched ump_tx_parity
	 * [10] clears Latched scpad_parity (both ports)
	 */
	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
}
/* Unmask (enable) parity attentions of every block that has valid bits
 * on this chip, then enable the MCP parity attentions as well.
 */
static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(bnx2x_blocks_parity_data); idx++) {
		u32 reg_mask = bnx2x_parity_reg_mask(bp, idx);

		if (!reg_mask)
			continue;
		REG_WR(bp, bnx2x_blocks_parity_data[idx].mask_addr,
		       bnx2x_blocks_parity_data[idx].en_mask & reg_mask);
	}
	/* Enable MCP parity attentions */
	bnx2x_set_mcp_parity(bp, true);
}
#endif /* BNX2X_INIT_H */

View file

@ -0,0 +1,934 @@
/* bnx2x_init_ops.h: Broadcom Everest network driver.
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
* Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_OPS_H
#define BNX2X_INIT_OPS_H
#ifndef BP_ILT
#define BP_ILT(bp) NULL
#endif
#ifndef BP_FUNC
#define BP_FUNC(bp) 0
#endif
#ifndef BP_PORT
#define BP_PORT(bp) 0
#endif
#ifndef BNX2X_ILT_FREE
#define BNX2X_ILT_FREE(x, y, sz)
#endif
#ifndef BNX2X_ILT_ZALLOC
#define BNX2X_ILT_ZALLOC(x, y, sz)
#endif
#ifndef ILOG2
#define ILOG2(x) x
#endif
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
dma_addr_t phys_addr, u32 addr,
u32 len);
/* Write a dword array to consecutive device registers, one REG_WR at a
 * time (no DMAE). 'len' is the dword count.
 */
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
			      const u32 *data, u32 len)
{
	u32 idx;

	for (idx = 0; idx < len; idx++)
		REG_WR(bp, addr + idx * 4, data[idx]);
}
/* Write a dword array to consecutive device addresses using indirect
 * register writes. 'len' is the dword count.
 */
static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
			      const u32 *data, u32 len)
{
	u32 idx;

	for (idx = 0; idx < len; idx++)
		bnx2x_reg_wr_ind(bp, addr + idx * 4, data[idx]);
}
/* Flush 'len' dwords of the GUNZIP staging buffer to device address
 * 'addr', picking DMAE when it is ready, otherwise indirect ('wb' set,
 * E1 only) or plain register writes.
 */
static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
				u8 wb)
{
	if (bp->dmae_ready)
		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
	else if (wb && CHIP_IS_E1(bp))
		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
/* Fill 'len' dwords at device address 'addr' with the byte pattern
 * 'fill', staging through the GUNZIP buffer in FW_BUF_SIZE chunks.
 */
static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
			    u32 len, u8 wb)
{
	u32 byte_len = (len * 4 > FW_BUF_SIZE) ? FW_BUF_SIZE : len * 4;
	u32 dword_len = byte_len / 4;
	u32 pos;

	memset(GUNZIP_BUF(bp), (u8)fill, byte_len);

	for (pos = 0; pos < len; pos += dword_len) {
		u32 cur_len = min(dword_len, len - pos);

		bnx2x_write_big_buf(bp, addr + pos * 4, cur_len, wb);
	}
}
/* Like bnx2x_write_big_buf() with the widebus flag implied: flush
 * 'len' dwords of the GUNZIP staging buffer to device address 'addr'.
 */
static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
{
	if (bp->dmae_ready)
		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
	else if (CHIP_IS_E1(bp))
		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
/* Replicate a 64-bit pattern 'len64' times at device address 'addr':
 * fill the GUNZIP staging buffer with the pattern, then flush it in
 * dword chunks.
 */
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
			     const u32 *data, u32 len64)
{
	u32 buf_len32 = FW_BUF_SIZE/4;
	u32 len = len64*2;
	u64 pattern;
	u32 idx;

	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
	pattern = HILO_U64((*(data + 1)), (*data));

	/* the staging buffer holds at most FW_BUF_SIZE/8 qwords */
	len64 = min((u32)(FW_BUF_SIZE/8), len64);

	for (idx = 0; idx < len64; idx++) {
		u64 *dst = ((u64 *)(GUNZIP_BUF(bp))) + idx;

		*dst = pattern;
	}

	for (idx = 0; idx < len; idx += buf_len32) {
		u32 cur_len = min(buf_len32, len - idx);

		bnx2x_write_big_buf_wb(bp, addr + idx*4, cur_len);
	}
}
/*********************************************************
There are different blobs for each PRAM section.
In addition, each blob write operation is divided into a few operations
in order to decrease the amount of phys. contiguous buffer needed.
Thus, when we select a blob the address may be with some offset
from the beginning of PRAM section.
The same holds for the INT_TABLE sections.
**********************************************************/
#define IF_IS_INT_TABLE_ADDR(base, addr) \
if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
#define IF_IS_PRAM_ADDR(base, addr) \
if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
/* Select the firmware blob that backs device address 'addr': the
 * IF_IS_* macros expand to plain 'if' statements, so this body reads as
 * one if/else chain over the INT_TABLE and PRAM windows of each SEM.
 * Returns 'data' unchanged when 'addr' falls in none of the windows.
 */
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
				const u8 *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
			data = INIT_CSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
			data = INIT_USEM_INT_TABLE_DATA(bp);
	else
		IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
			data = INIT_XSEM_INT_TABLE_DATA(bp);
	else
		IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
			data = INIT_TSEM_PRAM_DATA(bp);
	else
		IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
			data = INIT_CSEM_PRAM_DATA(bp);
	else
		IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
			data = INIT_USEM_PRAM_DATA(bp);
	else
		IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
			data = INIT_XSEM_PRAM_DATA(bp);
	return data;
}
/* Widebus write of a dword array to device address 'addr': DMAE when
 * ready, otherwise indirect (E1) or plain register writes.
 */
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
			     const u32 *data, u32 len)
{
	if (bp->dmae_ready)
		VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
	/* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
	else if (CHIP_IS_E1(bp))
		bnx2x_init_ind_wr(bp, addr, data, len);
	/* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
	else
		bnx2x_init_str_wr(bp, addr, data, len);
}
/* Write a single 64-bit value (low dword first) to register 'reg'
 * using a DMAE length-2 write.
 */
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
			u32 val_hi)
{
	u32 wb_write[2] = { val_lo, val_hi };

	REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
}
/* Unzip 'len' bytes of the compressed blob that backs 'addr' (starting
 * at dword offset 'blob_off') into the GUNZIP buffer, byte-swap each
 * dword to little-endian, and flush the result to the device.
 * Silently returns on gunzip failure.
 */
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
			     u32 blob_off)
{
	const u8 *data = NULL;
	int rc;
	u32 i;
	data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
	rc = bnx2x_gunzip(bp, data, len);
	if (rc)
		return;
	/* gunzip_outlen is in dwords */
	len = GUNZIP_OUTLEN(bp);
	for (i = 0; i < len; i++)
		((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
				cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
	bnx2x_write_big_buf_wb(bp, addr, len);
}
/* Execute the slice of the firmware init-ops array that belongs to
 * ('block', 'stage'): fetch the [start, end) indices via
 * BLOCK_OPS_IDX(), then dispatch each op on its opcode. The
 * OP_IF_MODE_* ops conditionally skip the following cmd_offset ops when
 * the current init mode flags don't match.
 */
static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
{
	u16 op_start =
		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
						     STAGE_START)];
	u16 op_end =
		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
						     STAGE_END)];
	const union init_op *op;
	u32 op_idx, op_type, addr, len;
	const u32 *data, *data_base;
	/* If empty block */
	if (op_start == op_end)
		return;
	data_base = INIT_DATA(bp);
	for (op_idx = op_start; op_idx < op_end; op_idx++) {
		op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
		/* Get generic data */
		op_type = op->raw.op;
		addr = op->raw.offset;
		/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
		 * OP_WR64 (we assume that op_arr_write and op_write have the
		 * same structure).
		 */
		len = op->arr_wr.data_len;
		data = data_base + op->arr_wr.data_off;
		switch (op_type) {
		case OP_RD:
			REG_RD(bp, addr);
			break;
		case OP_WR:
			REG_WR(bp, addr, op->write.val);
			break;
		case OP_SW:
			bnx2x_init_str_wr(bp, addr, data, len);
			break;
		case OP_WB:
			bnx2x_init_wr_wb(bp, addr, data, len);
			break;
		case OP_ZR:
			bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
			break;
		case OP_WB_ZR:
			bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
			break;
		case OP_ZP:
			bnx2x_init_wr_zp(bp, addr, len,
					 op->arr_wr.data_off);
			break;
		case OP_WR_64:
			bnx2x_init_wr_64(bp, addr, data, len);
			break;
		case OP_IF_MODE_AND:
			/* if any of the flags doesn't match, skip the
			 * conditional block.
			 */
			if ((INIT_MODE_FLAGS(bp) &
				op->if_mode.mode_bit_map) !=
				op->if_mode.mode_bit_map)
				op_idx += op->if_mode.cmd_offset;
			break;
		case OP_IF_MODE_OR:
			/* if all the flags don't match, skip the conditional
			 * block.
			 */
			if ((INIT_MODE_FLAGS(bp) &
				op->if_mode.mode_bit_map) == 0)
				op_idx += op->if_mode.cmd_offset;
			break;
		default:
			/* Should never get here! */
			break;
		}
	}
}
/****************************************************************************
* PXP Arbiter
****************************************************************************/
/*
* This code configures the PCI read/write arbiter
* which implements a weighted round robin
* between the virtual queues in the chip.
*
* The values were derived for each PCI max payload and max request size.
* since max payload and max request size are only known at run time,
* this is done as a separate init stage.
*/
#define NUM_WR_Q 13
#define NUM_RD_Q 29
#define MAX_RD_ORD 3
#define MAX_WR_ORD 2
/* configuration for one arbiter queue */
struct arb_line {
int l;
int add;
int ubound;
};
/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
{ {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
{ {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
{ {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};
/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
{ {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
{ {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};
/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
PXP2_REG_RQ_BW_RD_UBOUND0},
{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
PXP2_REG_PSWRQ_BW_UB1},
{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
PXP2_REG_PSWRQ_BW_UB2},
{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
PXP2_REG_PSWRQ_BW_UB3},
{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
PXP2_REG_RQ_BW_RD_UBOUND4},
{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
PXP2_REG_RQ_BW_RD_UBOUND5},
{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
PXP2_REG_PSWRQ_BW_UB6},
{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
PXP2_REG_PSWRQ_BW_UB7},
{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
PXP2_REG_PSWRQ_BW_UB9},
{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
PXP2_REG_PSWRQ_BW_UB10},
{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
PXP2_REG_PSWRQ_BW_UB11},
{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
PXP2_REG_RQ_BW_RD_UBOUND12},
{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
PXP2_REG_RQ_BW_RD_UBOUND13},
{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
PXP2_REG_RQ_BW_RD_UBOUND14},
{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
PXP2_REG_RQ_BW_RD_UBOUND15},
{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
PXP2_REG_RQ_BW_RD_UBOUND16},
{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
PXP2_REG_RQ_BW_RD_UBOUND17},
{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
PXP2_REG_RQ_BW_RD_UBOUND19},
{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
PXP2_REG_RQ_BW_RD_UBOUND20},
{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
PXP2_REG_RQ_BW_RD_UBOUND22},
{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
PXP2_REG_RQ_BW_RD_UBOUND23},
{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
PXP2_REG_RQ_BW_RD_UBOUND24},
{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
PXP2_REG_RQ_BW_RD_UBOUND25},
{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
PXP2_REG_RQ_BW_RD_UBOUND26},
{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
PXP2_REG_RQ_BW_RD_UBOUND27},
{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
PXP2_REG_PSWRQ_BW_UB28}
};
/* register addresses for write queues */
/* Per-queue {limit, add, upper-bound} register triplets for the PXP2
 * write-request arbiter, indexed by write queue (queue 1 first).
 * The final write queue (index NUM_WR_Q-1 of write_arb_data) is not in
 * this table; it is programmed separately in bnx2x_init_pxp_arb().
 */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
	/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};
/* Program the PXP2 read/write request arbiter bandwidth tables and the
 * MPS (max payload size) registers for the various clients.
 *
 * @r_order: requested read order; clamped to MAX_RD_ORD.
 * @w_order: requested write order; clamped to MAX_WR_ORD and forced to 0
 *           on FPGA emulation platforms.
 */
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
			       int w_order)
{
	u32 val, i;

	if (r_order > MAX_RD_ORD) {
		DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
		   r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
		   w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(bp)) {
		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
		w_order = 0;
	}
	DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);

	/* Program the per-queue read arbiter registers from the data table
	 * selected by the (clamped) read order.
	 */
	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(bp, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(bp, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	/* Write queues 29/30 get dedicated registers; all others share
	 * PSWRQ registers with the read side, so read-modify-write the
	 * write fields into the upper bits.
	 */
	for (i = 0; i < NUM_WR_Q-1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
			REG_WR(bp, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);
			REG_WR(bp, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);
			REG_WR(bp, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {
			val = REG_RD(bp, write_arb_addr[i].l);
			REG_WR(bp, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));
			val = REG_RD(bp, write_arb_addr[i].add);
			REG_WR(bp, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));
			val = REG_RD(bp, write_arb_addr[i].ubound);
			REG_WR(bp, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	/* The last read/write queues are packed into single registers.
	 * NOTE(review): the WRITE data is written to PSWRQ_BW_RD and the
	 * READ data to PSWRQ_BW_WR -- looks swapped, but may be a register
	 * naming quirk; confirm against the chip documentation before
	 * "fixing".
	 */
	val = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
	val = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);

	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	/* USDM doorbell threshold scales with write order; base differs
	 * per chip family.
	 */
	if (CHIP_IS_E3(bp))
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
	else
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (!CHIP_IS_E1(bp)) {
		/* MPS     w_order     optimal TH      presently TH
		 * 128         0             0               2
		 * 256         1             1               3
		 * >=512       2             2               3
		 */
		/* DMAE is special */
		if (!CHIP_IS_E1H(bp)) {
			/* E2 can use optimal TH */
			val = w_order;
			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
		} else {
			val = ((w_order == 0) ? 2 : 3);
			/* NOTE(review): writes the constant 2 (not val) for
			 * DMAE while val is reused for the clients below;
			 * the "DMAE is special" comment suggests this is
			 * deliberate -- confirm before changing.
			 */
			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
		}

		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
	}

	/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST		0x2980
	val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
	val &= 0xFF;
	if (val <= 0x20)
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
/****************************************************************************
* ILT management
****************************************************************************/
/*
 * This code hides the low level HW interaction for ILT management and
 * configuration. The API consists of a shadow ILT table which is set by the
 * driver and a set of routines to use it to configure the HW.
 *
 */

/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* The phys address is shifted right 12 bits and has a 1=valid bit added
 * as the 53rd bit (bit 20 of the high dword);
 * then since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))

/* Pack a first/last ILT line pair into one range register value */
#define ILT_RANGE(f, l)		(((l) << 10) | f)
/* Allocate or release the DMA-coherent page backing one ILT line.
 *
 * @memop: ILT_MEMOP_ALLOC or ILT_MEMOP_FREE.
 * Returns 0 on success, -1 when the allocation fails.
 */
static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
				 struct ilt_line *line, u32 size, u8 memop)
{
	if (memop != ILT_MEMOP_FREE) {
		BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
		if (!line->page)
			return -1;
		line->size = size;
		return 0;
	}

	BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
	return 0;
}
/* Run a memory op (alloc/free) over every ILT line owned by one client.
 *
 * Returns 0 on success (or when the client is flagged to skip init/mem),
 * -1 when the shadow ILT is absent or a line operation fails.
 */
static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
				   u8 memop)
{
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	struct ilt_client_info *ilt_cli;
	int line, rc = 0;

	if (!ilt || !ilt->lines)
		return -1;

	ilt_cli = &ilt->clients[cli_num];
	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
		return 0;

	/* Stop at the first failing line */
	for (line = ilt_cli->start; line <= ilt_cli->end && !rc; line++)
		rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[line],
					   ilt_cli->page_size, memop);
	return rc;
}
/* Alloc/free the ILT memory of the CNIC-related clients (SRC only in
 * NIC mode, plus TM).  Returns 0 on success.
 */
static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
{
	int rc = 0;

	if (CONFIGURE_NIC_MODE(bp))
		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
	if (rc == 0)
		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);

	return rc;
}
/* Alloc/free the ILT memory of the L2 clients: CDU, QM and - when CNIC is
 * supported but NIC mode is off - SRC.  Returns 0 on success.
 */
static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
	int rc;

	rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
	if (rc == 0)
		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
	if (rc == 0 && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);

	return rc;
}
/* Write one on-chip address-translation entry for ILT line @abs_idx.
 * E1 and later chips expose the table at different base registers.
 */
static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
			      dma_addr_t page_mapping)
{
	u32 reg = (CHIP_IS_E1(bp) ? PXP2_REG_RQ_ONCHIP_AT :
				    PXP2_REG_RQ_ONCHIP_AT_B0) + abs_idx * 8;

	bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping),
		    ILT_ADDR2(page_mapping));
}
/* Program (INITOP_INIT/INITOP_SET) or clear (INITOP_CLEAR) a single ILT
 * line in hardware; @idx is relative to this function's start_line.
 */
static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
				   struct bnx2x_ilt *ilt, int idx, u8 initop)
{
	int abs_idx = ilt->start_line + idx;

	/* INITOP_INIT behaves like INITOP_SET here */
	if (initop == INITOP_INIT || initop == INITOP_SET)
		bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
	else if (initop == INITOP_CLEAR)
		bnx2x_ilt_line_wr(bp, abs_idx, 0);
}
/* Program the first/last ILT line boundary registers for one client.
 * (Function name keeps the historical "boundry" spelling - part of the
 * interface, so left unchanged.)
 *
 * @ilt_start: absolute ILT line at which this function's range begins.
 */
static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
				      struct ilt_client_info *ilt_cli,
				      u32 ilt_start, u8 initop)
{
	u32 start_reg = 0;
	u32 end_reg = 0;

	/* The boundary is either SET or INIT,
	   CLEAR => SET and for now SET ~~ INIT */

	/* find the appropriate regs */
	if (CHIP_IS_E1(bp)) {
		/* E1 packs both bounds into a single per-function register */
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
			break;
		}
		REG_WR(bp, start_reg + BP_FUNC(bp)*4,
		       ILT_RANGE((ilt_start + ilt_cli->start),
				 (ilt_start + ilt_cli->end)));
	} else {
		/* Later chips use separate FIRST/LAST registers */
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
			break;
		}
		REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
		REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
	}
}
/* Initialize (or clear) every ILT line of one client plus its boundary
 * registers, unless the client is flagged to skip initialization.
 */
static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
					 struct bnx2x_ilt *ilt,
					 struct ilt_client_info *ilt_cli,
					 u8 initop)
{
	int line;

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	for (line = ilt_cli->start; line <= ilt_cli->end; line++)
		bnx2x_ilt_line_init_op(bp, ilt, line, initop);

	/* init/clear the ILT boundaries */
	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
/* Convenience wrapper: init one ILT client using the device's shadow ILT */
static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
				     struct ilt_client_info *ilt_cli, u8 initop)
{
	bnx2x_ilt_client_init_op_ilt(bp, BP_ILT(bp), ilt_cli, initop);
}
/* Convenience wrapper: init the ILT client identified by @cli_num */
static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
					int cli_num, u8 initop)
{
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	bnx2x_ilt_client_init_op(bp, &ilt->clients[cli_num], initop);
}
/* Program the ILT for the CNIC-related clients: SRC (NIC mode only)
 * and TM.
 */
static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
{
	if (CONFIGURE_NIC_MODE(bp))
		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);

	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
}
/* Program the ILT for the L2 clients: CDU, QM and - when CNIC is
 * supported but NIC mode is off - SRC.
 */
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);

	if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
}
/* Program the page-size register of one ILT client.  The hardware takes
 * log2(page_size / 4K), hence the ILOG2 of the 12-bit-shifted size.
 */
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
				      u32 psz_reg, u8 initop)
{
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	/* INITOP_INIT and INITOP_SET both program the register;
	 * INITOP_CLEAR is a no-op here.
	 */
	if (initop == INITOP_INIT || initop == INITOP_SET)
		REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
}
/*
 * Called during the init common stage; the ILT clients must be
 * initialized prior to calling this function.
 */
/* Program the page-size registers of all four ILT clients */
static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
	static const struct {
		int client;
		u32 psz_reg;
	} psz_cfg[] = {
		{ ILT_CLIENT_CDU, PXP2_REG_RQ_CDU_P_SIZE },
		{ ILT_CLIENT_QM,  PXP2_REG_RQ_QM_P_SIZE },
		{ ILT_CLIENT_SRC, PXP2_REG_RQ_SRC_P_SIZE },
		{ ILT_CLIENT_TM,  PXP2_REG_RQ_TM_P_SIZE },
	};
	unsigned int i;

	for (i = 0; i < sizeof(psz_cfg) / sizeof(psz_cfg[0]); i++)
		bnx2x_ilt_init_client_psz(bp, psz_cfg[i].client,
					  psz_cfg[i].psz_reg, initop);
}
/****************************************************************************
* QM initializations
****************************************************************************/
#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT	31
/* QM programming is only performed when the CID count is large enough */
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
/* Called during the init port stage: program the per-port QM connection
 * count register (value is cid_count/16 - 1).
 */
static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
				    u8 initop)
{
	int port = BP_PORT(bp);

	if (!QM_INIT(qm_cid_count))
		return;

	/* INITOP_INIT and INITOP_SET both program the register;
	 * INITOP_CLEAR leaves it untouched.
	 */
	if (initop == INITOP_INIT || initop == INITOP_SET)
		REG_WR(bp, QM_REG_CONNNUM_0 + port * 4,
		       qm_cid_count / 16 - 1);
}
/* Program the QM queue base-address table and zero the pointer table for
 * all queues of all functions.
 */
static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
				   u32 base_reg, u32 reg)
{
	u32 zero_entry[2] = { 0, 0 };
	int q;

	for (q = 0; q < 4 * QM_QUEUES_PER_FUNC; q++) {
		REG_WR(bp, base_reg + q * 4,
		       qm_cid_count * 4 * (q % QM_QUEUES_PER_FUNC));
		bnx2x_init_wr_wb(bp, reg + q * 8, zero_entry, 2);
	}
}
/* Called during the init common stage: set up the QM pointer tables
 * (including the E1H extension table) when the CID count warrants it.
 */
static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
				    u8 initop)
{
	if (!QM_INIT(qm_cid_count))
		return;

	/* Only INITOP_INIT / INITOP_SET do any work; INITOP_CLEAR is a
	 * no-op.
	 */
	if (initop != INITOP_INIT && initop != INITOP_SET)
		return;

	bnx2x_qm_set_ptr_table(bp, qm_cid_count,
			       QM_REG_BASEADDR, QM_REG_PTRTBL);
	if (CHIP_IS_E1H(bp))
		bnx2x_qm_set_ptr_table(bp, qm_cid_count,
				       QM_REG_BASEADDR_EXT_A,
				       QM_REG_PTRTBL_EXT_A);
}
/****************************************************************************
* SRC initializations
****************************************************************************/
/* called during init func stage */
/* Initialize the searcher T2 free-list: chain each entry to the DMA
 * address of its successor, then tell the hardware where the list starts
 * and ends.
 */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
			      dma_addr_t t2_mapping, int src_cid_count)
{
	int port = BP_PORT(bp);
	u64 last_ent = (u64)t2_mapping +
		       (src_cid_count - 1) * sizeof(struct src_ent);
	int i;

	/* Initialize T2: entry i points at entry i+1 */
	for (i = 0; i < src_cid_count - 1; i++)
		t2[i].next = (u64)(t2_mapping +
				   (i + 1) * sizeof(struct src_ent));

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port * 4, src_cid_count);

	bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port * 16,
		    U64_LO(t2_mapping), U64_HI(t2_mapping));

	bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port * 16,
		    U64_LO(last_ent), U64_HI(last_ent));
}
#endif /* BNX2X_INIT_OPS_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,544 @@
/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Written by Yaniv Rosner
*
*/
#ifndef BNX2X_LINK_H
#define BNX2X_LINK_H
/***********************************************************/
/* Defines */
/***********************************************************/
#define DEFAULT_PHY_DEV_ADDR 3
#define E2_DEFAULT_PHY_DEV_ADDR 5
#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
#define NET_SERDES_IF_XFI 1
#define NET_SERDES_IF_SFI 2
#define NET_SERDES_IF_KR 3
#define NET_SERDES_IF_DXGXS 4
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
#define SFP_EEPROM_REVISION_ADDR 0x38
#define SFP_EEPROM_REVISION_SIZE 4
#define SFP_EEPROM_SERIAL_ADDR 0x44
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
#define SFP_EEPROM_DIAG_TYPE_SIZE 1
#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
/* Single Media board contains single external phy */
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
#define FW_PARAM_PHY_ADDR_MASK 0x000000FF
#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00
#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
FW_PARAM_PHY_ADDR_MASK)
#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
FW_PARAM_PHY_TYPE_MASK)
#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
FW_PARAM_MDIO_CTRL_MASK) >> \
FW_PARAM_MDIO_CTRL_OFFSET)
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
#define BMAC_CONTROL_RX_ENABLE 2
/***********************************************************/
/* Structs */
/***********************************************************/
#define INT_PHY 0
#define EXT_PHY1 1
#define EXT_PHY2 2
#define MAX_PHYS 3
/* Same configuration is shared between the XGXS and the first external phy */
#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
0 : (_phy_idx - 1))
/***********************************************************/
/* bnx2x_phy struct */
/* Defines the required arguments and function per phy */
/***********************************************************/
struct link_vars;
struct link_params;
struct bnx2x_phy;
typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
struct link_vars *vars);
typedef void (*link_reset_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
struct link_params *params);
typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
/* One (devad, reg) -> val MDIO write; used for static init sequences */
struct bnx2x_reg_set {
	u8  devad;
	u16 reg;
	u16 val;
};
/* Per-PHY state: identity, flags, MDIO access info and the callback set
 * used by the link code to drive this PHY.
 */
struct bnx2x_phy {
	u32 type;

	/* Loaded during init */
	u8 addr;
	u8 def_md_devad;
	u16 flags;
	/* No Over-Current detection */
#define FLAGS_NOC			(1<<1)
	/* Fan failure detection required */
#define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
	/* Initialize first the XGXS and only then the phy itself */
#define FLAGS_INIT_XGXS_FIRST		(1<<3)
#define FLAGS_WC_DUAL_MODE		(1<<4)
#define FLAGS_4_PORT_MODE		(1<<5)
#define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
#define FLAGS_SFP_NOT_APPROVED		(1<<7)
#define FLAGS_MDC_MDIO_WA		(1<<8)
#define FLAGS_DUMMY_READ		(1<<9)
#define FLAGS_MDC_MDIO_WA_B0		(1<<10)
#define FLAGS_TX_ERROR_CHECK		(1<<12)
#define FLAGS_EEE			(1<<13)
#define FLAGS_MDC_MDIO_WA_G		(1<<15)

	/* preemphasis values for the rx side */
	u16 rx_preemphasis[4];

	/* preemphasis values for the tx side */
	u16 tx_preemphasis[4];

	/* EMAC address used for MDIO access */
	u32 mdio_ctrl;

	u32 supported;

	u32 media_type;
#define	ETH_PHY_UNSPECIFIED	0x0
#define	ETH_PHY_SFPP_10G_FIBER	0x1
#define	ETH_PHY_XFP_FIBER	0x2
#define	ETH_PHY_DA_TWINAX	0x3
#define	ETH_PHY_BASE_T		0x4
#define	ETH_PHY_SFP_1G_FIBER	0x5
#define	ETH_PHY_KR		0xf0
#define	ETH_PHY_CX4		0xf1
#define	ETH_PHY_NOT_PRESENT	0xff

	/* The address in which version is located */
	u32 ver_addr;

	u16 req_flow_ctrl;
	u16 req_line_speed;
	u32 speed_cap_mask;
	u16 req_duplex;
	u16 rsrv;

	/* Called per phy/port init, and it configures LASI, speed, autoneg,
	   duplex, flow control negotiation, etc. */
	config_init_t config_init;

	/* Called due to interrupt. It determines the link, speed */
	read_status_t read_status;

	/* Called when driver is unloading. Should reset the phy */
	link_reset_t link_reset;

	/* Set the loopback configuration for the phy */
	config_loopback_t config_loopback;

	/* Format the given raw number into str up to len */
	format_fw_ver_t format_fw_ver;

	/* Reset the phy (both ports) */
	hw_reset_t hw_reset;

	/* Set link led mode (on/off/oper) */
	set_link_led_t set_link_led;

	/* PHY Specific tasks */
	phy_specific_func_t phy_specific_func;
#define DISABLE_TX	1
#define ENABLE_TX	2
#define PHY_INIT	3
};
/* Input parameters to the CLC (Common Link Code) */
struct link_params {
	u8 port;

	/* Default / User Configuration */
	u8 loopback_mode;
#define LOOPBACK_NONE		0
#define LOOPBACK_EMAC		1
#define LOOPBACK_BMAC		2
#define LOOPBACK_XGXS		3
#define LOOPBACK_EXT_PHY	4
#define LOOPBACK_EXT		5
#define LOOPBACK_UMAC		6
#define LOOPBACK_XMAC		7

	/* Device parameters */
	u8 mac_addr[6];

	u16 req_duplex[LINK_CONFIG_SIZE];
	u16 req_flow_ctrl[LINK_CONFIG_SIZE];

	u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */

	/* shmem parameters */
	u32 shmem_base;
	u32 shmem2_base;
	u32 speed_cap_mask[LINK_CONFIG_SIZE];
	u32 switch_cfg;
#define SWITCH_CFG_1G		PORT_FEATURE_CON_SWITCH_1G_SWITCH
#define SWITCH_CFG_10G		PORT_FEATURE_CON_SWITCH_10G_SWITCH
#define SWITCH_CFG_AUTO_DETECT	PORT_FEATURE_CON_SWITCH_AUTO_DETECT

	u32 lane_config;

	/* Phy register parameter */
	u32 chip_id;

	/* features */
	u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
#define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
#define FEATURE_CONFIG_BC_SUPPORTS_AFEX			(1<<8)
#define FEATURE_CONFIG_AUTOGREEEN_ENABLED		(1<<9)
#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED	(1<<10)
#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET		(1<<11)
#define FEATURE_CONFIG_MT_SUPPORT			(1<<13)
#define FEATURE_CONFIG_BOOT_FROM_SAN			(1<<14)

	/* Will be populated during common init */
	struct bnx2x_phy phy[MAX_PHYS];

	/* Will be populated during common init */
	u8 num_phys;

	u8 rsrv;

	/* Used to configure the EEE Tx LPI timer, has several modes of
	 * operation, according to bits 29:28 -
	 * 2'b00: Timer will be configured by nvram, output will be the value
	 * from nvram.
	 * 2'b01: Timer will be configured by nvram, output will be in
	 * microseconds.
	 * 2'b10: bits 1:0 contain an nvram value which will be used instead
	 * of the one located in the nvram. Output will be that value.
	 * 2'b11: bits 19:0 contain the idle timer in microseconds; output
	 * will be in microseconds.
	 * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
	 */
	u32 eee_mode;
#define EEE_MODE_NVRAM_BALANCED_TIME		(0xa00)
#define EEE_MODE_NVRAM_AGGRESSIVE_TIME		(0x100)
#define EEE_MODE_NVRAM_LATENCY_TIME		(0x6000)
#define EEE_MODE_NVRAM_MASK		(0x3)
#define EEE_MODE_TIMER_MASK		(0xfffff)
#define EEE_MODE_OUTPUT_TIME		(1<<28)
#define EEE_MODE_OVERRIDE_NVRAM		(1<<29)
#define EEE_MODE_ENABLE_LPI		(1<<30)
#define EEE_MODE_ADV_LPI			(1<<31)

	u16 hw_led_mode; /* part of the hw_config read from the shmem */

	u32 multi_phy_config;

	/* Device pointer passed to all callback functions */
	struct bnx2x *bp;

	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
				req_flow_ctrl is set to AUTO */
	u16 link_flags;
#define LINK_FLAGS_INT_DISABLED		(1<<0)
#define PHY_INITIALIZED		(1<<1)
	u32 lfa_base;

	/* The same definitions as the shmem2 parameter */
	u32 link_attr_sync;
};
/* Output parameters: current link state as reported by the link code */
struct link_vars {
	u8 phy_flags;
#define PHY_XGXS_FLAG			(1<<0)
#define PHY_SGMII_FLAG			(1<<1)
#define PHY_PHYSICAL_LINK_FLAG		(1<<2)
#define PHY_HALF_OPEN_CONN_FLAG		(1<<3)
#define PHY_OVER_CURRENT_FLAG		(1<<4)
#define PHY_SFP_TX_FAULT_FLAG		(1<<5)

	u8 mac_type;
#define MAC_TYPE_NONE		0
#define MAC_TYPE_EMAC		1
#define MAC_TYPE_BMAC		2
#define MAC_TYPE_UMAC		3
#define MAC_TYPE_XMAC		4

	u8 phy_link_up; /* internal phy link indication */
	u8 link_up;

	u16 line_speed;
	u16 duplex;

	u16 flow_ctrl;
	u16 ieee_fc;

	/* The same definitions as the shmem parameter */
	u32 link_status;
	u32 eee_status;
	u8 fault_detected;
	u8 check_kr2_recovery_cnt;
#define CHECK_KR2_RECOVERY_CNT	5
	u16 periodic_flags;
#define PERIODIC_FLAGS_LINK_EVENT	0x0001

	u32 aeu_int_mask;
	u8 rx_tx_asic_rst;
	u8 turn_to_run_wc_rt;
	u16 rsrv2;
};
/***********************************************************/
/* Functions */
/***********************************************************/
int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
/* Reset the link. Should be called when driver or interface goes down
Before calling phy firmware upgrade, the reset_ext_phy should be set
to 0 */
int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy);
int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
/* bnx2x_link_update should be called upon link interrupt */
int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
/* use the following phy functions to read/write from external_phy
In order to use it to read/write internal phy registers, use
DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
the register */
int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val);
int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
u16 len);
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off.*/
int bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed);
#define LED_MODE_OFF 0
#define LED_MODE_ON 1
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
/* Get the actual link status. In case it returns 0, link is up,
otherwise link is down*/
int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 is_serdes);
/* One-time initialization for external phy after power up */
int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id);
/* Reset the external PHY using GPIO */
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u8 dev_addr,
u16 addr, u16 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
/* Probe the phys on board, and populate them in "params" */
int bnx2x_phy_probe(struct link_params *params);
/* Checks if fan failure detection is required on one of the phys on board */
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
/* Open / close the gate between the NIG and the BRB */
void bnx2x_set_rx_filter(struct link_params *params, u8 en);
/* DCBX structs */
/* Number of maximum COS per chip */
#define DCBX_E2E3_MAX_NUM_COS (2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
#define DCBX_E3B0_MAX_NUM_COS ( \
MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
DCBX_E3B0_MAX_NUM_COS_PORT1))
#define DCBX_MAX_NUM_COS ( \
MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
DCBX_E2E3_MAX_NUM_COS))
/* PFC port configuration params (NIG/BRB priority flow control setup) */
struct bnx2x_nig_brb_pfc_port_params {
	/* NIG */
	u32 pause_enable;
	u32 llfc_out_en;
	u32 llfc_enable;
	u32 pkt_priority_to_cos;
	u8 num_of_rx_cos_priority_mask;
	u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
	u32 llfc_high_priority_classes;
	u32 llfc_low_priority_classes;
};
/* ETS port configuration params */
struct bnx2x_ets_bw_params {
	u8 bw;	/* bandwidth share for a COS in BW mode */
};
struct bnx2x_ets_sp_params {
	/**
	 * valid values are 0 - 5. 0 is highest strict priority.
	 * There can't be two COS's with the same pri.
	 */
	u8 pri;
};
/* Scheduling mode of a single COS: strict priority or bandwidth share */
enum bnx2x_cos_state {
	bnx2x_cos_state_strict = 0,
	bnx2x_cos_state_bw = 1,
};
/* One COS entry: its scheduling state plus the matching parameter union */
struct bnx2x_ets_cos_params {
	enum bnx2x_cos_state state;
	union {
		struct bnx2x_ets_bw_params bw_params;
		struct bnx2x_ets_sp_params sp_params;
	} params;
};
struct bnx2x_ets_params {
	u8 num_of_cos; /* Number of valid COS entries */
	struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params);
/* Used to configure the ETS to disable */
int bnx2x_ets_disabled(struct link_params *params,
struct link_vars *vars);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw);
/* Used to configure the ETS to strict */
int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Configure the COS to ETS according to BW and SP settings.*/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
struct bnx2x_ets_params *ets_params);
void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
u32 chip_id, u32 shmem_base, u32 shmem2_base,
u8 port);
void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
#endif /* BNX2X_LINK_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,168 @@
/* bnx2x_mfw_req.h: Broadcom Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNX2X_MFW_REQ_H
#define BNX2X_MFW_REQ_H
#define PORT_0 0
#define PORT_1 1
#define PORT_MAX 2
#define NVM_PATH_MAX 2
/* FCoE capabilities required from the driver; each u32 packs two 16-bit
 * fields via the MASK/SHIFT pairs below.
 */
struct fcoe_capabilities {
	u32 capability1;
	/* Maximum number of I/Os per connection */
	#define FCOE_IOS_PER_CONNECTION_MASK    0x0000ffff
	#define FCOE_IOS_PER_CONNECTION_SHIFT   0
	/* Maximum number of Logins per port */
	#define FCOE_LOGINS_PER_PORT_MASK       0xffff0000
	#define FCOE_LOGINS_PER_PORT_SHIFT      16

	u32 capability2;
	/* Maximum number of exchanges */
	#define FCOE_NUMBER_OF_EXCHANGES_MASK   0x0000ffff
	#define FCOE_NUMBER_OF_EXCHANGES_SHIFT  0
	/* Maximum NPIV WWN per port */
	#define FCOE_NPIV_WWN_PER_PORT_MASK     0xffff0000
	#define FCOE_NPIV_WWN_PER_PORT_SHIFT    16

	u32 capability3;
	/* Maximum number of targets supported */
	#define FCOE_TARGETS_SUPPORTED_MASK     0x0000ffff
	#define FCOE_TARGETS_SUPPORTED_SHIFT    0
	/* Maximum number of outstanding commands across all connections */
	#define FCOE_OUTSTANDING_COMMANDS_MASK  0xffff0000
	#define FCOE_OUTSTANDING_COMMANDS_SHIFT 16

	u32 capability4;
	#define FCOE_CAPABILITY4_STATEFUL                       0x00000001
	#define FCOE_CAPABILITY4_STATELESS                      0x00000002
	#define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID    0x00000004
};
/* OEM data exposed to NC-SI: driver version plus per-path/per-port FCoE
 * capability tables.
 */
struct glob_ncsi_oem_data {
	u32 driver_version;
	u32 unused[3];
	struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
};
/* current drv_info version */
#define DRV_INFO_CUR_VER 2

/* drv_info op codes supported (statistics category requested by the MFW) */
enum drv_info_opcode {
	ETH_STATS_OPCODE,
	FCOE_STATS_OPCODE,
	ISCSI_STATS_OPCODE
};
#define ETH_STAT_INFO_VERSION_LEN	12
/* Per PCI Function Ethernet Statistics required from the driver */
struct eth_stats_info {
	/* Function's Driver Version. padded to 12 */
	u8 version[ETH_STAT_INFO_VERSION_LEN];
	/* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
	u8 mac_local[8];
	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
	u8 mac_add2[8];		/* Additional Programmed MAC Addr 2. */
	u32 mtu_size;		/* MTU Size. Note   : Negotiated MTU */
	u32 feature_flags;	/* Feature_Flags. */
#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK		0x01
#define FEATURE_ETH_LSO_MASK			0x02
#define FEATURE_ETH_BOOTMODE_MASK		0x1C
#define FEATURE_ETH_BOOTMODE_SHIFT		2
#define FEATURE_ETH_BOOTMODE_NONE		(0x0 << 2)
#define FEATURE_ETH_BOOTMODE_PXE		(0x1 << 2)
#define FEATURE_ETH_BOOTMODE_ISCSI		(0x2 << 2)
#define FEATURE_ETH_BOOTMODE_FCOE		(0x3 << 2)
#define FEATURE_ETH_TOE_MASK			0x20
	u32 lso_max_size;	/* LSO MaxOffloadSize. */
	u32 lso_min_seg_cnt;	/* LSO MinSegmentCount. */
	/* Num Offloaded Connections TCP_IPv4. */
	u32 ipv4_ofld_cnt;
	/* Num Offloaded Connections TCP_IPv6. */
	u32 ipv6_ofld_cnt;
	u32 promiscuous_mode;	/* Promiscuous Mode. non-zero true */
	u32 txq_size;		/* TX Descriptors Queue Size */
	u32 rxq_size;		/* RX Descriptors Queue Size */
	/* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
	u32 txq_avg_depth;
	/* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
	u32 rxq_avg_depth;
	/* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA */
	u32 iov_offload;
	/* Number of NetQueue/VMQ Config'd. */
	u32 netq_cnt;
	u32 vf_cnt;		/* Num VF assigned to this PF. */
};
/* Per PCI Function FCOE Statistics required from the driver */
struct fcoe_stats_info {
	u8 version[12];		/* Function's Driver Version. */
	u8 mac_local[8];	/* Locally Admin Addr. */
	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
	u8 mac_add2[8];		/* Additional Programmed MAC Addr 2. */
	/* QoS Priority (per 802.1p). 0-7 (3-bit PCP value) */
	u32 qos_priority;
	u32 txq_size;		/* FCoE TX Descriptors Queue Size. */
	u32 rxq_size;		/* FCoE RX Descriptors Queue Size. */
	/* FCoE TX Descriptor Queue Avg Depth. */
	u32 txq_avg_depth;
	/* FCoE RX Descriptors Queue Avg Depth. */
	u32 rxq_avg_depth;
	/* 64-bit counters split into lo/hi 32-bit halves */
	u32 rx_frames_lo;	/* FCoE RX Frames received. */
	u32 rx_frames_hi;	/* FCoE RX Frames received. */
	u32 rx_bytes_lo;	/* FCoE RX Bytes received. */
	u32 rx_bytes_hi;	/* FCoE RX Bytes received. */
	u32 tx_frames_lo;	/* FCoE TX Frames sent. */
	u32 tx_frames_hi;	/* FCoE TX Frames sent. */
	u32 tx_bytes_lo;	/* FCoE TX Bytes sent. */
	u32 tx_bytes_hi;	/* FCoE TX Bytes sent. */
};
/* Per PCI Function iSCSI Statistics required from the driver*/
struct iscsi_stats_info {
	u8 version[12];		/* Function's Driver Version. */
	u8 mac_local[8];	/* Locally Admin iSCSI MAC Addr. */
	u8 mac_add1[8];		/* Additional Programmed MAC Addr 1. */
	/* QoS Priority (per 802.1p). 0-7 (3-bit PCP value) */
	u32 qos_priority;
	u8 initiator_name[64];	/* iSCSI Boot Initiator Node name. */
	u8 ww_port_name[64];	/* iSCSI World wide port name */
	u8 boot_target_name[64];/* iSCSI Boot Target Name. */
	u8 boot_target_ip[16];	/* iSCSI Boot Target IP. */
	u32 boot_target_portal;	/* iSCSI Boot Target Portal. */
	u8 boot_init_ip[16];	/* iSCSI Boot Initiator IP Address. */
	u32 max_frame_size;	/* Max Frame Size. bytes */
	u32 txq_size;		/* PDU TX Descriptors Queue Size. */
	u32 rxq_size;		/* PDU RX Descriptors Queue Size. */
	u32 txq_avg_depth;	/* PDU TX Descriptor Queue Avg Depth. */
	u32 rxq_avg_depth;	/* PDU RX Descriptors Queue Avg Depth. */
	/* 64-bit counters split into lo/hi 32-bit halves */
	u32 rx_pdus_lo;		/* iSCSI PDUs received. */
	u32 rx_pdus_hi;		/* iSCSI PDUs received. */
	u32 rx_bytes_lo;	/* iSCSI RX Bytes received. */
	u32 rx_bytes_hi;	/* iSCSI RX Bytes received. */
	u32 tx_pdus_lo;		/* iSCSI PDUs sent. */
	u32 tx_pdus_hi;		/* iSCSI PDUs sent. */
	u32 tx_bytes_lo;	/* iSCSI PDU TX Bytes sent. */
	u32 tx_bytes_hi;	/* iSCSI PDU TX Bytes sent. */
	u32 pcp_prior_map_tbl;	/* C-PCP to S-PCP Priority MapTable.
				 * 9 nibbles, the position of each nibble
				 * represents the C-PCP value, the value
				 * of the nibble = S-PCP value.
				 */
};
/* Response buffer for a drv_info request; the member in use is selected
 * by the drv_info_opcode the MCP supplied.
 */
union drv_info_to_mcp {
	struct eth_stats_info	ether_stat;
	struct fcoe_stats_info	fcoe_stat;
	struct iscsi_stats_info	iscsi_stat;
};
#endif /* BNX2X_MFW_REQ_H */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,606 @@
/* bnx2x_sriov.h: Broadcom Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Shmulik Ravid
* Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef BNX2X_SRIOV_H
#define BNX2X_SRIOV_H
#include "bnx2x_vfpf.h"
#include "bnx2x.h"
/* Outcome of sampling the PF-to-VF bulletin board. */
enum sample_bulletin_result {
	PFVF_BULLETIN_UNCHANGED,	/* same content as last sample */
	PFVF_BULLETIN_UPDATED,		/* new content was consumed */
	PFVF_BULLETIN_CRC_ERR		/* CRC mismatch, content discarded */
};
#ifdef CONFIG_BNX2X_SRIOV
extern struct workqueue_struct *bnx2x_iov_wq;
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
*/
#define BNX2X_VF_MAX_QUEUES 16
#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
/* Cached copy of the PF's PCI SR-IOV capability, read once at probe. */
struct bnx2x_sriov {
	u32 first_vf_in_pf;
	/* standard SRIOV capability fields, mostly for debugging */
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total;		/* total VFs associated with the PF */
	u16 initial;		/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */
};
/* bars */
/* One VF PCI BAR: bus address and length in bytes. */
struct bnx2x_vf_bar {
	u64 bar;
	u32 size;
};
/* The set of BARs belonging to one VF, nr_bars entries valid. */
struct bnx2x_vf_bar_info {
	struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
	u8 nr_bars;
};
/* vf queue (used both for rx or tx) */
struct bnx2x_vf_queue {
	struct eth_context		*cxt;
	/* MACs object */
	struct bnx2x_vlan_mac_obj	mac_obj;
	/* VLANs object */
	struct bnx2x_vlan_mac_obj	vlan_obj;
	atomic_t vlan_count;		/* 0 means vlan-0 is set  ~ untagged */
	unsigned long accept_flags;	/* last accept flags configured */
	/* Queue Slow-path State object */
	struct bnx2x_queue_sp_obj	sp_obj;
	u32 cid;		/* HW connection id */
	u16 index;		/* index within the VF's queue array */
	u16 sb_idx;		/* status block index serving this queue */
	bool is_leading;
	bool sp_initialized;
};
/* struct bnx2x_vf_queue_construct_params - prepare queue construction
 * parameters: q-init, q-setup and SB index
 */
struct bnx2x_vf_queue_construct_params {
	struct bnx2x_queue_state_params		qstate;
	struct bnx2x_queue_setup_params		prep_qsetup;
};
/* forward */
struct bnx2x_virtf;
/* VFOP definitions */
/* A single MAC or VLAN classification rule to add or remove for a VF. */
struct bnx2x_vf_mac_vlan_filter {
	int type;		/* one of the BNX2X_VF_FILTER_* values */
#define BNX2X_VF_FILTER_MAC	1
#define BNX2X_VF_FILTER_VLAN	2

	bool add;		/* true = add the filter, false = remove */
	u8 *mac;		/* valid for BNX2X_VF_FILTER_MAC */
	u16 vid;		/* valid for BNX2X_VF_FILTER_VLAN */
};
/* Variable-length batch of filters (flexible array member, count entries). */
struct bnx2x_vf_mac_vlan_filters {
	int count;
	struct bnx2x_vf_mac_vlan_filter filters[];
};
/* vf context */
/* Per-VF state kept by the PF: configuration flags, lifecycle state,
 * DMA mappings, allocated resource counters, queue array and the
 * slow-path objects used to configure the VF on its behalf.
 */
struct bnx2x_virtf {
	u16 cfg_flags;
#define VF_CFG_STATS		0x0001
#define VF_CFG_FW_FC		0x0002
#define VF_CFG_TPA		0x0004
#define VF_CFG_INT_SIMD		0x0008
#define VF_CACHE_LINE		0x0010
#define VF_CFG_VLAN		0x0020
#define VF_CFG_STATS_COALESCE	0x0040
#define VF_CFG_EXT_BULLETIN	0x0080
	u8 link_cfg;		/* IFLA_VF_LINK_STATE_AUTO
				 * IFLA_VF_LINK_STATE_ENABLE
				 * IFLA_VF_LINK_STATE_DISABLE
				 */
	u8 state;
#define VF_FREE		0	/* VF ready to be acquired holds no resc */
#define VF_ACQUIRED	1	/* VF acquired, but not initialized */
#define VF_ENABLED	2	/* VF Enabled */
#define VF_RESET	3	/* VF FLR'd, pending cleanup */

	bool flr_clnup_stage;	/* true during flr cleanup */

	/* dma */
	dma_addr_t fw_stat_map;	/* valid iff VF_CFG_STATS */
	u16 stats_stride;
	dma_addr_t spq_map;
	dma_addr_t bulletin_map;

	/* Allocated resources counters. Before the VF is acquired, the
	 * counters hold the following values:
	 *
	 * - xxq_count = 0 as the queues memory is not allocated yet.
	 *
	 * - sb_count  = The number of status blocks configured for this VF in
	 *		 the IGU CAM. Initially read during probe.
	 *
	 * - xx_rules_count = The number of rules statically and equally
	 *		      allocated for each VF, during PF load.
	 */
	struct vf_pf_resc_request	alloc_resc;
#define vf_rxq_count(vf)		((vf)->alloc_resc.num_rxqs)
#define vf_txq_count(vf)		((vf)->alloc_resc.num_txqs)
#define vf_sb_count(vf)			((vf)->alloc_resc.num_sbs)
#define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)
	/* Hide a single vlan filter credit for the hypervisor */
#define vf_vlan_rules_visible_cnt(vf)	(vf_vlan_rules_cnt(vf) - 1)

	u8 sb_count;	/* actual number of SBs */
	u8 igu_base_id;	/* base igu status block id */

	struct bnx2x_vf_queue	*vfqs;
#define LEADING_IDX			0
#define bnx2x_vfq_is_leading(vfq)	((vfq)->index == LEADING_IDX)
#define bnx2x_vfq(vf, nr, var)		((vf)->vfqs[(nr)].var)
#define bnx2x_leading_vfq(vf, var)	((vf)->vfqs[LEADING_IDX].var)

	u8 index;	/* index in the vf array */
	u8 abs_vfid;	/* absolute (device-wide) VF id */
	u8 sp_cl_id;	/* slow-path client id */
	u32 error;	/* 0 means all's-well */

	/* BDF */
	unsigned int bus;
	unsigned int devfn;

	/* bars */
	struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];

	/* set-mac ramrod state 1-pending, 0-done */
	unsigned long	filter_state;

	/* leading rss client id ~~ the client id of the first rxq, must be
	 * set for each txq.
	 */
	int leading_rss;

	/* MCAST object */
	int mcast_list_len;
	struct bnx2x_mcast_obj		mcast_obj;

	/* RSS configuration object */
	struct bnx2x_rss_config_obj     rss_conf_obj;

	/* slow-path operations */
	struct mutex			op_mutex; /* one vfop at a time mutex */
	enum channel_tlvs		op_current;
};
/* Number of VFs currently enabled under this PF. */
#define BNX2X_NR_VIRTFN(bp)	((bp)->vfdb->sriov.nr_virtfn)

/* Iterate over the relative vf ids / queues / status blocks of a VF. */
#define for_each_vf(bp, var) \
		for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)

#define for_each_vfq(vf, var) \
		for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)

#define for_each_vf_sb(vf, var) \
		for ((var) = 0; (var) < vf_sb_count(vf); (var)++)

#define is_vf_multi(vf)	(vf_rxq_count(vf) > 1)

/* HW handle: abs function | VF-valid bit (1<<3) | abs vfid in bits 4+ */
#define HW_VF_HANDLE(bp, abs_vfid) \
	(u16)(BP_ABS_FUNC((bp)) | (1<<3) |  ((u16)(abs_vfid) << 4))

/* FW VF handles start right after the PF handle space. */
#define FW_PF_MAX_HANDLE	8

#define FW_VF_HANDLE(abs_vfid)	\
	(abs_vfid + FW_PF_MAX_HANDLE)
/* locking and unlocking the channel mutex */
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv);
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs expected_tlv);
/* VF mail box (aka vf-pf channel) */
/* a container for the bi-directional vf<-->pf messages.
* The actual response will be placed according to the offset parameter
* provided in the request
*/
#define MBX_MSG_ALIGN 8
#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
MBX_MSG_ALIGN))
/* One VF<->PF mailbox exchange: the VF request followed by the PF reply. */
struct bnx2x_vf_mbx_msg {
	union vfpf_tlvs req;
	union pfvf_tlvs resp;
};
/* Per-VF mailbox bookkeeping on the PF side. */
struct bnx2x_vf_mbx {
	struct bnx2x_vf_mbx_msg *msg;	/* PF-side message buffer */
	dma_addr_t msg_mapping;		/* DMA mapping of *msg */

	/* VF GPA address */
	u32 vf_addr_lo;
	u32 vf_addr_hi;

	struct vfpf_first_tlv first_tlv;	/* saved VF request header */
};
/* Per-VF slow-path ramrod data area (DMA-able), one union per ramrod type. */
struct bnx2x_vf_sp {
	union {
		struct eth_classify_rules_ramrod_data	e2;
	} mac_rdata;

	union {
		struct eth_classify_rules_ramrod_data	e2;
	} vlan_rdata;

	union {
		struct eth_filter_rules_ramrod_data	e2;
	} rx_mode_rdata;

	union {
		struct eth_multicast_rules_ramrod_data  e2;
	} mcast_rdata;

	union {
		struct client_init_ramrod_data  init_data;
		struct client_update_ramrod_data update_data;
	} q_data;

	union {
		struct eth_rss_update_ramrod_data e2;
	} rss_rdata;
};
/* A coherent DMA allocation: CPU address, bus address and length. */
struct hw_dma {
	void *addr;
	dma_addr_t mapping;
	size_t size;
};
/* The PF's VF database: per-VF contexts, queue array, HW contexts,
 * mailboxes, bulletin boards and the DMA areas backing them.
 */
struct bnx2x_vfdb {
#define BP_VFDB(bp)		((bp)->vfdb)
	/* vf array */
	struct bnx2x_virtf	*vfs;
#define BP_VF(bp, idx)		((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
					&((bp)->vfdb->vfs[idx]) : NULL)
#define bnx2x_vf(bp, idx, var)	((bp)->vfdb->vfs[idx].var)

	/* queue array - for all vfs */
	struct bnx2x_vf_queue *vfqs;

	/* vf HW contexts */
	struct hw_dma		context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
#define	BP_VF_CXT_PAGE(bp, i)	(&(bp)->vfdb->context[i])

	/* SR-IOV information */
	struct bnx2x_sriov	sriov;
	struct hw_dma		mbx_dma;
#define BP_VF_MBX_DMA(bp)	(&((bp)->vfdb->mbx_dma))
	struct bnx2x_vf_mbx	mbxs[BNX2X_MAX_NUM_OF_VFS];
#define BP_VF_MBX(bp, vfid)	(&((bp)->vfdb->mbxs[vfid]))

	struct hw_dma		bulletin_dma;
#define BP_VF_BULLETIN_DMA(bp)	(&((bp)->vfdb->bulletin_dma))
#define	BP_VF_BULLETIN(bp, vf) \
	(((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
	 + (vf))

	struct hw_dma		sp_dma;
#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr +		\
		(vf)->index * sizeof(struct bnx2x_vf_sp) +		\
		offsetof(struct bnx2x_vf_sp, field))
#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping +	\
		(vf)->index * sizeof(struct bnx2x_vf_sp) +		\
		offsetof(struct bnx2x_vf_sp, field))

#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
	u32 flrd_vfs[FLRD_VFS_DWORDS];	/* bitmap of FLR'd VFs from the MCP */

	/* the number of msix vectors belonging to this PF designated for VFs */
	u16 vf_sbs_pool;
	u16 first_vf_igu_entry;

	/* sp_rtnl synchronization */
	struct mutex			event_mutex;
	u64				event_occur;

	/* bulletin board update synchronization */
	struct mutex			bulletin_mutex;
};
/* queue access */
/* Return the VF queue object at @index within the VF's queue array. */
static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
{
	return vf->vfqs + index;
}
/* FW ids */
/* IGU status block id backing SB @sb_idx of this VF: the VF's base id
 * plus the SB index.
 */
static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
{
	u8 base = vf->igu_base_id;

	return base + sb_idx;
}
/* HC queue-zone id of SB @sb_idx: maps 1:1 onto the VF's IGU SB id. */
static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
{
	return vf_igu_sb(vf, sb_idx);
}
/* Client id of VF queue @q: the VF's IGU base id offset by the queue index.
 * Marked 'static inline' like every sibling helper here; a plain 'static'
 * function defined in a header is duplicated in each translation unit that
 * includes it and triggers unused-function warnings where it is not called.
 */
static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
	return vf->igu_base_id + q->index;
}
/* Statistics id of VF queue @q: when the VF coalesces stats, all queues
 * report through the leading RSS client; otherwise each queue uses its
 * own client id.
 */
static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
	return (vf->cfg_flags & VF_CFG_STATS_COALESCE) ?
		vf->leading_rss : vfq_cl_id(vf, q);
}
/* Queue-zone id of VF queue @q: identical to its client id. */
static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
	return vfq_cl_id(vf, q);
}
/* global iov routines */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
void bnx2x_iov_remove_one(struct bnx2x *bp);
void bnx2x_iov_free_mem(struct bnx2x *bp);
int bnx2x_iov_alloc_mem(struct bnx2x *bp);
int bnx2x_iov_nic_init(struct bnx2x *bp);
int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
/* global vf mailbox routines */
void bnx2x_vf_mbx(struct bnx2x *bp);
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
/* CORE VF API */
typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
/* acquire */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct vf_pf_resc_request *resc);
/* init */
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
u16 q_idx, u16 sb_idx);
void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
u16 q_idx, u16 sb_idx);
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type);
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mac_vlan_filters *filters,
int qid, bool drv_only);
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
struct bnx2x_vf_queue_construct_params *qctor);
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, unsigned long accept_flags);
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_config_rss_params *rss);
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct vfpf_tpa_tlv *tlv,
struct bnx2x_queue_update_tpa_params *params);
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
/* FLR routines */
/* VF FLR helpers */
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
/* Handles an FLR (or VF_DISABLE) notification form the MCP */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
bool bnx2x_tlv_supported(u16 tlvtype);
u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin);
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
bool support_long);
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
/* VF side vfpf channel functions */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
/* Duplicate prototype removed: bnx2x_vfpf_release() was declared twice. */
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
/* Copy the PF firmware version string (as reported in the VF's acquire
 * response) into @buf, truncating to @buf_len with strlcpy semantics.
 */
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
					size_t buf_len)
{
	strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
/* Byte offset of this fastpath's USTORM queue-zone producers inside the
 * VF's address space: the queue-zone array starts at
 * PXP_VF_ADDR_USDM_QUEUES_START and is indexed by the HW queue id that
 * the PF assigned in the acquire response.
 */
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
					       struct bnx2x_fastpath *fp)
{
	return PXP_VF_ADDR_USDM_QUEUES_START +
		bp->acquire_resp.resc.hw_qid[fp->index] *
		sizeof(struct ustorm_queue_zone_data);
}
/* Duplicate prototype removed: bnx2x_sample_bulletin() is already
 * declared above with the other bulletin helpers.
 */
void bnx2x_timer_sriov(struct bnx2x *bp);
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
void bnx2x_iov_task(struct work_struct *work);
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
void bnx2x_iov_link_update(struct bnx2x *bp);
int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
#else /* CONFIG_BNX2X_SRIOV */
/* CONFIG_BNX2X_SRIOV disabled: no-op stubs so callers need no #ifdefs.
 * Value-returning stubs report success (0), pass-through (line), or
 * "no change" (PFVF_BULLETIN_UNCHANGED / NULL).
 */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
			struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
					union event_ring_elem *elem) {return 1; }
static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
					 struct vf_pf_event_data *vfpf_event) {}
static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
				     int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
				     u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
					u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
					struct bnx2x_config_rss_params *params) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
					size_t buf_len) {}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
					       struct bnx2x_fastpath *fp) {return 0; }
static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	return PFVF_BULLETIN_UNCHANGED;
}
static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}

static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	return NULL;
}

static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}

static inline void bnx2x_iov_task(struct work_struct *work) {}
static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
static inline void bnx2x_iov_link_update(struct bnx2x *bp) {}
static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; }
static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf,
					  int link_state) {return 0; }
/* forward declaration needed so the stub below can name the type */
struct pf_vf_bulletin_content;
static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
					      bool support_long) {}
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,555 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
* Slowpath and fastpath rework by Vladislav Zolotarov
* Statistics and Link management by Yitchak Gertner
*
*/
#ifndef BNX2X_STATS_H
#define BNX2X_STATS_H
#include <linux/types.h>
/* Raw counter snapshot read from the NIG (network interface glue) block. */
struct nig_stats {
	u32 brb_discard;
	u32 brb_packet;
	u32 brb_truncate;
	u32 flow_ctrl_discard;
	u32 flow_ctrl_octets;
	u32 flow_ctrl_packet;
	u32 mng_discard;
	u32 mng_octet_inp;
	u32 mng_octet_out;
	u32 mng_packet_inp;
	u32 mng_packet_out;
	u32 pbf_octets;
	u32 pbf_packet;
	u32 safc_inp;
	/* 64-bit egress packet counters split into lo/hi halves */
	u32 egress_mac_pkt0_lo;
	u32 egress_mac_pkt0_hi;
	u32 egress_mac_pkt1_lo;
	u32 egress_mac_pkt1_hi;
};
/* Events fed into the statistics state machine. */
enum bnx2x_stats_event {
	STATS_EVENT_PMF = 0,	/* this function became the PMF */
	STATS_EVENT_LINK_UP,
	STATS_EVENT_UPDATE,	/* periodic update tick */
	STATS_EVENT_STOP,
	STATS_EVENT_MAX
};
/* States of the statistics state machine. */
enum bnx2x_stats_state {
	STATS_STATE_DISABLED = 0,
	STATS_STATE_ENABLED,
	STATS_STATE_MAX
};
/* Aggregated per-function Ethernet statistics.  64-bit counters are kept
 * as _hi/_lo 32-bit pairs (combined with the ADD_64/DIFF_64 macros below);
 * trailing fields without a pair are plain 32-bit counters.
 */
struct bnx2x_eth_stats {
	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
	u32 total_bytes_transmitted_hi;
	u32 total_bytes_transmitted_lo;
	u32 total_unicast_packets_received_hi;
	u32 total_unicast_packets_received_lo;
	u32 total_multicast_packets_received_hi;
	u32 total_multicast_packets_received_lo;
	u32 total_broadcast_packets_received_hi;
	u32 total_broadcast_packets_received_lo;
	u32 total_unicast_packets_transmitted_hi;
	u32 total_unicast_packets_transmitted_lo;
	u32 total_multicast_packets_transmitted_hi;
	u32 total_multicast_packets_transmitted_lo;
	u32 total_broadcast_packets_transmitted_hi;
	u32 total_broadcast_packets_transmitted_lo;
	u32 valid_bytes_received_hi;
	u32 valid_bytes_received_lo;

	u32 error_bytes_received_hi;
	u32 error_bytes_received_lo;
	u32 etherstatsoverrsizepkts_hi;
	u32 etherstatsoverrsizepkts_lo;
	u32 no_buff_discard_hi;
	u32 no_buff_discard_lo;

	/* MAC-level (MIB) counters */
	u32 rx_stat_ifhcinbadoctets_hi;
	u32 rx_stat_ifhcinbadoctets_lo;
	u32 tx_stat_ifhcoutbadoctets_hi;
	u32 tx_stat_ifhcoutbadoctets_lo;
	u32 rx_stat_dot3statsfcserrors_hi;
	u32 rx_stat_dot3statsfcserrors_lo;
	u32 rx_stat_dot3statsalignmenterrors_hi;
	u32 rx_stat_dot3statsalignmenterrors_lo;
	u32 rx_stat_dot3statscarriersenseerrors_hi;
	u32 rx_stat_dot3statscarriersenseerrors_lo;
	u32 rx_stat_falsecarriererrors_hi;
	u32 rx_stat_falsecarriererrors_lo;
	u32 rx_stat_etherstatsundersizepkts_hi;
	u32 rx_stat_etherstatsundersizepkts_lo;
	u32 rx_stat_dot3statsframestoolong_hi;
	u32 rx_stat_dot3statsframestoolong_lo;
	u32 rx_stat_etherstatsfragments_hi;
	u32 rx_stat_etherstatsfragments_lo;
	u32 rx_stat_etherstatsjabbers_hi;
	u32 rx_stat_etherstatsjabbers_lo;
	u32 rx_stat_maccontrolframesreceived_hi;
	u32 rx_stat_maccontrolframesreceived_lo;
	u32 rx_stat_bmac_xpf_hi;
	u32 rx_stat_bmac_xpf_lo;
	u32 rx_stat_bmac_xcf_hi;
	u32 rx_stat_bmac_xcf_lo;
	u32 rx_stat_xoffstateentered_hi;
	u32 rx_stat_xoffstateentered_lo;
	u32 rx_stat_xonpauseframesreceived_hi;
	u32 rx_stat_xonpauseframesreceived_lo;
	u32 rx_stat_xoffpauseframesreceived_hi;
	u32 rx_stat_xoffpauseframesreceived_lo;
	u32 tx_stat_outxonsent_hi;
	u32 tx_stat_outxonsent_lo;
	u32 tx_stat_outxoffsent_hi;
	u32 tx_stat_outxoffsent_lo;
	u32 tx_stat_flowcontroldone_hi;
	u32 tx_stat_flowcontroldone_lo;
	u32 tx_stat_etherstatscollisions_hi;
	u32 tx_stat_etherstatscollisions_lo;
	u32 tx_stat_dot3statssinglecollisionframes_hi;
	u32 tx_stat_dot3statssinglecollisionframes_lo;
	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
	u32 tx_stat_dot3statsdeferredtransmissions_hi;
	u32 tx_stat_dot3statsdeferredtransmissions_lo;
	u32 tx_stat_dot3statsexcessivecollisions_hi;
	u32 tx_stat_dot3statsexcessivecollisions_lo;
	u32 tx_stat_dot3statslatecollisions_hi;
	u32 tx_stat_dot3statslatecollisions_lo;

	/* TX size histogram */
	u32 tx_stat_etherstatspkts64octets_hi;
	u32 tx_stat_etherstatspkts64octets_lo;
	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
	u32 tx_stat_etherstatspktsover1522octets_hi;
	u32 tx_stat_etherstatspktsover1522octets_lo;
	u32 tx_stat_bmac_2047_hi;
	u32 tx_stat_bmac_2047_lo;
	u32 tx_stat_bmac_4095_hi;
	u32 tx_stat_bmac_4095_lo;
	u32 tx_stat_bmac_9216_hi;
	u32 tx_stat_bmac_9216_lo;
	u32 tx_stat_bmac_16383_hi;
	u32 tx_stat_bmac_16383_lo;

	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
	u32 tx_stat_bmac_ufl_hi;
	u32 tx_stat_bmac_ufl_lo;

	u32 pause_frames_received_hi;
	u32 pause_frames_received_lo;
	u32 pause_frames_sent_hi;
	u32 pause_frames_sent_lo;

	u32 etherstatspkts1024octetsto1522octets_hi;
	u32 etherstatspkts1024octetsto1522octets_lo;
	u32 etherstatspktsover1522octets_hi;
	u32 etherstatspktsover1522octets_lo;

	u32 brb_drop_hi;
	u32 brb_drop_lo;
	u32 brb_truncate_hi;
	u32 brb_truncate_lo;

	/* plain 32-bit counters (no hi/lo pair) */
	u32 mac_filter_discard;
	u32 mf_tag_discard;
	u32 brb_truncate_discard;
	u32 mac_discard;

	u32 driver_xoff;
	u32 rx_err_discard_pkt;
	u32 rx_skb_alloc_failed;
	u32 hw_csum_err;

	u32 nig_timer_max;

	/* TPA */
	u32 total_tpa_aggregations_hi;
	u32 total_tpa_aggregations_lo;
	u32 total_tpa_aggregated_frames_hi;
	u32 total_tpa_aggregated_frames_lo;
	u32 total_tpa_bytes_hi;
	u32 total_tpa_bytes_lo;

	/* PFC */
	u32 pfc_frames_received_hi;
	u32 pfc_frames_received_lo;
	u32 pfc_frames_sent_hi;
	u32 pfc_frames_sent_lo;

	/* Recovery */
	u32 recoverable_error;
	u32 unrecoverable_error;
	u32 driver_filtered_tx_pkt;
	/* src: Clear-on-Read register; Will not survive PMF Migration */
	u32 eee_tx_lpi;
};
/* Per-queue Ethernet statistics; same hi/lo split-counter convention as
 * struct bnx2x_eth_stats.
 */
struct bnx2x_eth_q_stats {
	u32 total_unicast_bytes_received_hi;
	u32 total_unicast_bytes_received_lo;
	u32 total_broadcast_bytes_received_hi;
	u32 total_broadcast_bytes_received_lo;
	u32 total_multicast_bytes_received_hi;
	u32 total_multicast_bytes_received_lo;
	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
	u32 total_unicast_bytes_transmitted_hi;
	u32 total_unicast_bytes_transmitted_lo;
	u32 total_broadcast_bytes_transmitted_hi;
	u32 total_broadcast_bytes_transmitted_lo;
	u32 total_multicast_bytes_transmitted_hi;
	u32 total_multicast_bytes_transmitted_lo;
	u32 total_bytes_transmitted_hi;
	u32 total_bytes_transmitted_lo;
	u32 total_unicast_packets_received_hi;
	u32 total_unicast_packets_received_lo;
	u32 total_multicast_packets_received_hi;
	u32 total_multicast_packets_received_lo;
	u32 total_broadcast_packets_received_hi;
	u32 total_broadcast_packets_received_lo;
	u32 total_unicast_packets_transmitted_hi;
	u32 total_unicast_packets_transmitted_lo;
	u32 total_multicast_packets_transmitted_hi;
	u32 total_multicast_packets_transmitted_lo;
	u32 total_broadcast_packets_transmitted_hi;
	u32 total_broadcast_packets_transmitted_lo;
	u32 valid_bytes_received_hi;
	u32 valid_bytes_received_lo;
	u32 etherstatsoverrsizepkts_hi;
	u32 etherstatsoverrsizepkts_lo;
	u32 no_buff_discard_hi;
	u32 no_buff_discard_lo;

	/* plain 32-bit driver counters */
	u32 driver_xoff;
	u32 rx_err_discard_pkt;
	u32 rx_skb_alloc_failed;
	u32 hw_csum_err;

	u32 total_packets_received_checksum_discarded_hi;
	u32 total_packets_received_checksum_discarded_lo;
	u32 total_packets_received_ttl0_discarded_hi;
	u32 total_packets_received_ttl0_discarded_lo;
	u32 total_transmitted_dropped_packets_error_hi;
	u32 total_transmitted_dropped_packets_error_lo;

	/* TPA */
	u32 total_tpa_aggregations_hi;
	u32 total_tpa_aggregations_lo;
	u32 total_tpa_aggregated_frames_hi;
	u32 total_tpa_aggregated_frames_lo;
	u32 total_tpa_bytes_hi;
	u32 total_tpa_bytes_lo;
	u32 driver_filtered_tx_pkt;
};
/* Function-level counters preserved across a firmware reset. */
struct bnx2x_eth_stats_old {
	u32 rx_stat_dot3statsframestoolong_hi;
	u32 rx_stat_dot3statsframestoolong_lo;
};
/* Per-queue counters preserved across a firmware reset. */
struct bnx2x_eth_q_stats_old {
	/* Fields to preserve over fw reset*/
	u32 total_unicast_bytes_received_hi;
	u32 total_unicast_bytes_received_lo;
	u32 total_broadcast_bytes_received_hi;
	u32 total_broadcast_bytes_received_lo;
	u32 total_multicast_bytes_received_hi;
	u32 total_multicast_bytes_received_lo;
	u32 total_unicast_bytes_transmitted_hi;
	u32 total_unicast_bytes_transmitted_lo;
	u32 total_broadcast_bytes_transmitted_hi;
	u32 total_broadcast_bytes_transmitted_lo;
	u32 total_multicast_bytes_transmitted_hi;
	u32 total_multicast_bytes_transmitted_lo;
	u32 total_tpa_bytes_hi;
	u32 total_tpa_bytes_lo;

	/* Fields to preserve last of */
	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
	u32 total_bytes_transmitted_hi;
	u32 total_bytes_transmitted_lo;
	u32 total_unicast_packets_received_hi;
	u32 total_unicast_packets_received_lo;
	u32 total_multicast_packets_received_hi;
	u32 total_multicast_packets_received_lo;
	u32 total_broadcast_packets_received_hi;
	u32 total_broadcast_packets_received_lo;
	u32 total_unicast_packets_transmitted_hi;
	u32 total_unicast_packets_transmitted_lo;
	u32 total_multicast_packets_transmitted_hi;
	u32 total_multicast_packets_transmitted_lo;
	u32 total_broadcast_packets_transmitted_hi;
	u32 total_broadcast_packets_transmitted_lo;
	u32 valid_bytes_received_hi;
	u32 valid_bytes_received_lo;

	u32 total_tpa_bytes_hi_old;
	u32 total_tpa_bytes_lo_old;

	u32 driver_xoff_old;
	u32 rx_err_discard_pkt_old;
	u32 rx_skb_alloc_failed_old;
	u32 hw_csum_err_old;
	u32 driver_filtered_tx_pkt_old;
};
/* Net-device counters preserved across a firmware reset. */
struct bnx2x_net_stats_old {
	u32 rx_dropped;
};
struct bnx2x_fw_port_stats_old {
u32 mac_filter_discard;
u32 mf_tag_discard;
u32 brb_truncate_discard;
u32 mac_discard;
};
/****************************************************************************
* Macros
****************************************************************************/
/* sum[hi:lo] += add[hi:lo] */
/* 64-bit add on split u32 pairs: the (s_lo < a_lo) test detects
 * unsigned wrap-around of the low word and propagates the carry
 * into the high word. Arguments are evaluated more than once, so
 * callers must pass plain lvalues (as all users in this file do).
 */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0)
/* Typed zero constants for use as the "hi" addend of ADD_64_LE*
 * when only a little-endian low half exists.
 */
#define LE32_0 ((__force __le32) 0)
#define LE16_0 ((__force __le16) 0)
/* The _force is for cases where high value is 0 */
/* ADD_64 on little-endian __le32 addends */
#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
ADD_64(s_hi, le32_to_cpu(a_hi_le), \
s_lo, le32_to_cpu(a_lo_le))
/* ADD_64 on little-endian __le16 addends */
#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
ADD_64(s_hi, le16_to_cpu(a_hi_le), \
s_lo, le16_to_cpu(a_lo_le))
/* difference = minuend - subtrahend */
/* 64-bit subtract on split u32 pairs. Borrows 1 from the high word
 * when the low word would underflow, and clamps the whole result to
 * 0 when the subtrahend exceeds the minuend (no negative results).
 */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \
if (m_lo < s_lo) { \
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
/* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
/* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
} else { \
/* m_lo >= s_lo */ \
if (m_hi < s_hi) { \
d_hi = 0; \
d_lo = 0; \
} else { \
/* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
} \
} while (0)
/* Fold a new HW MAC counter sample into the driver's stats:
 * mac_stx[0] holds the previous raw sample, mac_stx[1] the running
 * total. Relies on 'new', 'pstats' and 'diff' in the caller's scope.
 */
#define UPDATE_STAT64(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
pstats->mac_stx[0].t##_hi = new->s##_hi; \
pstats->mac_stx[0].t##_lo = new->s##_lo; \
ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
pstats->mac_stx[1].t##_lo, diff.lo); \
} while (0)
/* Same idea for NIG counters: accumulate (new - old) into estats.
 * Relies on 'new', 'old', 'estats' and 'diff' in the caller's scope.
 */
#define UPDATE_STAT64_NIG(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
diff.lo, new->s##_lo, old->s##_lo); \
ADD_64(estats->t##_hi, diff.hi, \
estats->t##_lo, diff.lo); \
} while (0)
/* sum[hi:lo] += add */
/* Add a 32-bit value to a split 64-bit counter, with carry. */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
do { \
s_lo += a; \
s_hi += (s_lo < a) ? 1 : 0; \
} while (0)
/* Add a split 64-bit delta from 'new' straight into the running
 * total in mac_stx[1].
 */
#define ADD_STAT64(diff, t) \
do { \
ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
pstats->mac_stx[1].t##_lo, new->diff##_lo); \
} while (0)
/* Extend a 32-bit HW counter into the 64-bit total in mac_stx[1]. */
#define UPDATE_EXTEND_STAT(s) \
do { \
ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
pstats->mac_stx[1].s##_lo, \
new->s); \
} while (0)
/* Accumulate (tclient->s - old_tclient->s) into qstats->t, where the
 * FW counter is little-endian of width 'size' (16 or 32 bits); also
 * refreshes the old sample. Leaves the delta in 'diff' for reuse.
 */
#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
do { \
diff = le##size##_to_cpu(tclient->s) - \
le##size##_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
/* As UPDATE_EXTEND_TSTAT_X, but also folds the delta into estats. */
#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
do { \
UPDATE_EXTEND_TSTAT_X(s, t, size); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
/* USTORM variant of UPDATE_EXTEND_TSTAT (uclient/old_uclient). */
#define UPDATE_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
old_uclient->s = uclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
/* As UPDATE_EXTEND_USTAT, but also folds the delta into estats. */
#define UPDATE_EXTEND_E_USTAT(s, t) \
do { \
UPDATE_EXTEND_USTAT(s, t); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
/* XSTORM variant of UPDATE_EXTEND_TSTAT (xclient/old_xclient). */
#define UPDATE_EXTEND_XSTAT(s, t) \
do { \
diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
old_xclient->s = xclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
/* Rebuild a 64-bit queue counter as (saved-old value + current FW
 * little-endian sample), with manual carry from lo into hi. This is
 * what keeps qstats monotonic across a FW reset (see
 * bnx2x_eth_q_stats_old above).
 */
#define UPDATE_QSTAT(s, t) \
do { \
qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+ ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
} while (0)
/* Snapshot a qstats field into the preserved-old copy. */
#define UPDATE_QSTAT_OLD(f) \
do { \
qstats_old->f = qstats->f; \
} while (0)
/* Accumulate a 64-bit qstats delta into estats: add the new total,
 * subtract the previously-added total, then refresh the "_old" copy.
 */
#define UPDATE_ESTAT_QSTAT_64(s) \
do { \
ADD_64(estats->s##_hi, qstats->s##_hi, \
estats->s##_lo, qstats->s##_lo); \
SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
estats->s##_lo, qstats_old->s##_lo_old); \
qstats_old->s##_hi_old = qstats->s##_hi; \
qstats_old->s##_lo_old = qstats->s##_lo; \
} while (0)
/* 32-bit version of UPDATE_ESTAT_QSTAT_64. */
#define UPDATE_ESTAT_QSTAT(s) \
do { \
estats->s += qstats->s; \
estats->s -= qstats_old->s##_old; \
qstats_old->s##_old = qstats->s; \
} while (0)
/* Accumulate a qstats delta into fstats and mirror the result into
 * estats; refreshes the preserved-old copy afterwards.
 */
#define UPDATE_FSTAT_QSTAT(s) \
do { \
ADD_64(fstats->s##_hi, qstats->s##_hi, \
fstats->s##_lo, qstats->s##_lo); \
SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
fstats->s##_lo, qstats_old->s##_lo); \
estats->s##_hi = fstats->s##_hi; \
estats->s##_lo = fstats->s##_lo; \
qstats_old->s##_hi = qstats->s##_hi; \
qstats_old->s##_lo = qstats->s##_lo; \
} while (0)
/* Rebuild a FW port counter as (current FW sample + saved value). */
#define UPDATE_FW_STAT(s) \
do { \
estats->s = le32_to_cpu(tport->s) + fwstats->s; \
} while (0)
/* Snapshot an estats field back into the FW-stats save area. */
#define UPDATE_FW_STAT_OLD(f) \
do { \
fwstats->f = estats->f; \
} while (0)
/* Replace the previously-added contribution of estats->t inside
 * estats->s with its current value, then refresh the old copy.
 */
#define UPDATE_ESTAT(s, t) \
do { \
SUB_64(estats->s##_hi, estats_old->t##_hi, \
estats->s##_lo, estats_old->t##_lo); \
ADD_64(estats->s##_hi, estats->t##_hi, \
estats->s##_lo, estats->t##_lo); \
estats_old->t##_hi = estats->t##_hi; \
estats_old->t##_lo = estats->t##_lo; \
} while (0)
/* minuend -= subtrahend */
/* In-place 64-bit subtract (clamps at 0, see DIFF_64). */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
} while (0)
/* minuend[hi:lo] -= subtrahend */
/* Subtract a 32-bit value from a split 64-bit counter. */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
do { \
SUB_64(m_hi, 0, m_lo, s); \
} while (0)
/* Subtract the USTORM counter delta from qstats->t (does NOT
 * refresh old_uclient, unlike UPDATE_EXTEND_USTAT).
 */
#define SUB_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
/* forward */
struct bnx2x;
/* Zero all driver statistics storage. */
void bnx2x_memset_stats(struct bnx2x *bp);
/* Initialize the statistics subsystem. */
void bnx2x_stats_init(struct bnx2x *bp);
/* Drive the statistics state machine with the given event. */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/* Run func_to_exec(cookie) at a point where it cannot race with a
 * statistics update (exact exclusion mechanism is in bnx2x_stats.c).
 */
void bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie);
/**
 * bnx2x_save_statistics - save statistics when unloading.
 *
 * @bp: driver handle
 */
void bnx2x_save_statistics(struct bnx2x *bp);
/* Collect AFEX statistics of the requested type into void_afex_stats. */
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
u32 stats_type);
#endif /* BNX2X_STATS_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,455 @@
/* bnx2x_vfpf.h: Broadcom Everest network driver.
*
* Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
#ifdef CONFIG_BNX2X_SRIOV
/* Common definitions for all HVs */
/* Resource counts a VF requests from the PF during ACQUIRE. */
struct vf_pf_resc_request {
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters; /* No limit so superfluous */
};
/* Status-block identifiers handed to the VF by the PF. */
struct hw_sb_info {
u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */
u8 sb_qid; /* used to update DHC for sb */
};
/* HW VF-PF channel definitions
* A.K.A VF-PF mailbox
*/
#define TLV_BUFFER_SIZE 1024
#define PF_VF_BULLETIN_SIZE 512
/* Per-queue feature flags (used in the u16/u32 'flags' fields of
 * vf_pf_rxq_params / vf_pf_txq_params below).
 */
#define VFPF_QUEUE_FLG_TPA 0x0001
#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002
#define VFPF_QUEUE_FLG_TPA_GRO 0x0004
#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008
#define VFPF_QUEUE_FLG_STATS 0x0010
#define VFPF_QUEUE_FLG_OV 0x0020
#define VFPF_QUEUE_FLG_VLAN 0x0040
#define VFPF_QUEUE_FLG_COS 0x0080
#define VFPF_QUEUE_FLG_HC 0x0100
#define VFPF_QUEUE_FLG_DHC 0x0200
#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
/* RX drop filters (vf_pf_rxq_params.drop_flags). */
#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
#define VFPF_QUEUE_DROP_TTL0 (1 << 2)
#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3)
/* RX acceptance-mode bits (vfpf_set_q_filters_tlv.rx_mask). */
#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
#define BULLETIN_CONTENT_LEGACY_SIZE (32)
#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
#define BULLETIN_CRC_SEED 0
/* Status codes the PF returns to the VF (see pfvf_tlv.status). */
enum {
PFVF_STATUS_WAITING = 0,
PFVF_STATUS_SUCCESS,
PFVF_STATUS_FAILURE,
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE
};
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
u16 type; /* presumably an enum channel_tlvs value — see end of file */
u16 length;
};
/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
struct channel_tlv tl;
u32 resp_msg_offset;
};
/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
struct channel_tlv tl;
u8 status; /* PFVF_STATUS_* */
u8 padding[3];
};
/* response tlv used for most tlvs */
struct pfvf_general_resp_tlv {
struct pfvf_tlv hdr;
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
struct channel_tlv tl;
u8 padding[4];
};
/* Acquire */
/* VF->PF acquire request: the VF identifies itself and asks for
 * resources; the PF answers with pfvf_acquire_resp_tlv.
 */
struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
/* the following fields are for debug purposes */
u8 vf_id; /* ME register value */
u8 vf_os; /* e.g. Linux, W2K8 */
#define VF_OS_SUBVERSION_MASK (0x1f)
#define VF_OS_MASK (0xe0)
#define VF_OS_SHIFT (5)
#define VF_OS_UNDEFINED (0 << VF_OS_SHIFT)
#define VF_OS_WINDOWS (1 << VF_OS_SHIFT)
u8 padding;
u8 caps; /* VF_CAP_* capability bits */
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
} vfdev_info;
struct vf_pf_resc_request resc_request;
aligned_u64 bulletin_addr;
};
/* simple operation request on queue */
struct vfpf_q_op_tlv {
struct vfpf_first_tlv first_tlv;
u8 vf_qid;
u8 padding[3];
};
/* receive side scaling tlv */
struct vfpf_rss_tlv {
struct vfpf_first_tlv first_tlv;
u32 rss_flags;
#define VFPF_RSS_MODE_DISABLED (1 << 0)
#define VFPF_RSS_MODE_REGULAR (1 << 1)
#define VFPF_RSS_SET_SRCH (1 << 2)
#define VFPF_RSS_IPV4 (1 << 3)
#define VFPF_RSS_IPV4_TCP (1 << 4)
#define VFPF_RSS_IPV4_UDP (1 << 5)
#define VFPF_RSS_IPV6 (1 << 6)
#define VFPF_RSS_IPV6_TCP (1 << 7)
#define VFPF_RSS_IPV6_UDP (1 << 8)
u8 rss_result_mask;
u8 ind_table_size;
u8 rss_key_size;
u8 padding;
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
};
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
struct pf_vf_pfdev_info {
u32 chip_num;
u32 pf_cap; /* PFVF_CAP_* capability bits */
#define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004
#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
u8 padding;
} pfdev_info;
struct pf_vf_resc {
/* in case of status NO_RESOURCE in message hdr, pf will fill
* this struct with suggested amount of resources for next
* acquire request
*/
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 permanent_mac_addr[ETH_ALEN];
u8 current_mac_addr[ETH_ALEN];
u8 padding[2];
} resc;
};
/* PF->VF response tlv carrying the physical port MAC-based id. */
struct vfpf_port_phys_id_resp_tlv {
struct channel_tlv tl;
u8 id[ETH_ALEN];
u8 padding[2];
};
#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
* stats will be coalesced on
* the leading RSS queue
*/
/* Init VF */
/* VF->PF init request: DMA addresses for status blocks, slow-path
 * queue and statistics, plus VFPF_INIT_FLG_* flags.
 */
struct vfpf_init_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
aligned_u64 spq_addr;
aligned_u64 stats_addr;
u16 stats_stride;
u32 flags; /* VFPF_INIT_FLG_* */
u32 padding[2];
};
/* Setup Queue */
/* VF->PF request to set up one RX and/or TX queue; param_valid says
 * which of the two sub-structures is meaningful.
 */
struct vfpf_setup_q_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_rxq_params {
/* physical addresses */
aligned_u64 rcq_addr;
aligned_u64 rcq_np_addr;
aligned_u64 rxq_addr;
aligned_u64 sge_addr;
/* sb + hc info */
u8 vf_sb; /* index in hw_sbs[] */
u8 sb_index; /* Index in the SB */
u16 hc_rate; /* desired interrupts per sec. */
/* valid iff VFPF_QUEUE_FLG_HC */
/* rx buffer info */
u16 mtu;
u16 buf_sz;
u16 flags; /* VFPF_QUEUE_FLG_X flags */
u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
/* valid iff VFPF_QUEUE_FLG_TPA */
u16 sge_buf_sz;
u16 tpa_agg_sz;
u8 max_sge_pkt;
u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs
* all the flags are turned off
*/
u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */
u8 padding;
} rxq;
struct vf_pf_txq_params {
/* physical addresses */
aligned_u64 txq_addr;
/* sb + hc info */
u8 vf_sb; /* index in hw_sbs[] */
u8 sb_index; /* Index in the SB */
u16 hc_rate; /* desired interrupts per sec. */
/* valid iff VFPF_QUEUE_FLG_HC */
u32 flags; /* VFPF_QUEUE_FLG_X flags */
u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
u8 traffic_type; /* see in setup_context() */
u8 padding;
} txq;
u8 vf_qid; /* index in hw_qid[] */
u8 param_valid; /* which of rxq/txq is populated */
#define VFPF_RXQ_VALID 0x01
#define VFPF_TXQ_VALID 0x02
u8 padding[2];
};
/* Set Queue Filters */
/* One MAC/VLAN filter entry; 'flags' says which fields are valid and
 * whether the filter is being set or cleared.
 */
struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
};
/* configure queue filters */
struct vfpf_set_q_filters_tlv {
struct vfpf_first_tlv first_tlv;
u32 flags; /* which of the sections below changed */
#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01
#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02
#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04
u8 vf_qid; /* index in hw_qid[] */
u8 n_mac_vlan_filters;
u8 n_multicast;
u8 padding;
#define PFVF_MAX_MAC_FILTERS 16
#define PFVF_MAX_VLAN_FILTERS 16
#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
PFVF_MAX_VLAN_FILTERS)
struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
#define PFVF_MAX_MULTICAST_PER_VF 32
u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
u32 rx_mask; /* see mask constants at the top of the file */
};
/* VF->PF request to update TPA (aggregation) parameters. */
struct vfpf_tpa_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_tpa_client_info {
aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
u8 update_ipv4;
u8 update_ipv6;
u8 max_tpa_queues;
u8 max_sges_for_packet;
u8 complete_on_both_clients;
u8 dont_verify_thr;
u8 tpa_mode;
u16 sge_buff_size;
u16 max_agg_size;
u16 sge_pause_thr_low;
u16 sge_pause_thr_high;
} tpa_client_info;
};
/* close VF (disable VF) */
struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv;
u16 vf_id; /* for debug */
u8 padding[2];
};
/* release the VF's acquired resources */
struct vfpf_release_tlv {
struct vfpf_first_tlv first_tlv;
u16 vf_id;
u8 padding[2];
};
/* Pads the message unions below to the full channel buffer size. */
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
/* Every message the VF can send to the PF. */
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
struct vfpf_init_tlv init;
struct vfpf_close_tlv close;
struct vfpf_q_op_tlv q_op;
struct vfpf_setup_q_tlv setup_q;
struct vfpf_set_q_filters_tlv set_q_filters;
struct vfpf_release_tlv release;
struct vfpf_rss_tlv update_rss;
struct vfpf_tpa_tlv update_tpa;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
/* Every message the PF can send back to the VF. */
union pfvf_tlvs {
struct pfvf_general_resp_tlv general_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
/* This is a structure which is allocated in the VF, which the PF may update
* when it deems it necessary to do so. The bulletin board is sampled
* periodically by the VF. A copy per VF is maintained in the PF (to prevent
* loss of data upon multiple updates (or the need for read modify write)).
*/
struct pf_vf_bulletin_size {
u8 size[PF_VF_BULLETIN_SIZE];
};
struct pf_vf_bulletin_content {
u32 crc; /* crc of structure to ensure is not in
* mid-update
*/
u16 version;
u16 length;
aligned_u64 valid_bitmap; /* bitmap indicating which fields
* hold valid values
*/
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
#define VLAN_VALID 1 /* when set, the vf should not access
* the vfpf channel
*/
#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not
* to attempt to send messages on the
* channel after this bit is set
*/
#define LINK_VALID 3 /* alert the VF that a new link status
* update is available for it
*/
u8 mac[ETH_ALEN];
u8 mac_padding[2];
u16 vlan;
u8 vlan_padding[6];
u16 link_speed; /* Effective line speed */
u8 link_speed_padding[6];
u32 link_flags; /* VFPF_LINK_REPORT_XXX flags */
#define VFPF_LINK_REPORT_LINK_DOWN (1 << 0)
#define VFPF_LINK_REPORT_FULL_DUPLEX (1 << 1)
#define VFPF_LINK_REPORT_RX_FC_ON (1 << 2)
#define VFPF_LINK_REPORT_TX_FC_ON (1 << 3)
u8 link_flags_padding[4];
};
union pf_vf_bulletin {
struct pf_vf_bulletin_content content;
struct pf_vf_bulletin_size size;
};
#define MAX_TLVS_IN_LIST 50
/* TLV type ids carried in channel_tlv.type. Existing values are
 * ABI with remote VFs/PFs — append only, never renumber.
 */
enum channel_tlvs {
CHANNEL_TLV_NONE,
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_INIT,
CHANNEL_TLV_SETUP_Q,
CHANNEL_TLV_SET_Q_FILTERS,
CHANNEL_TLV_ACTIVATE_Q,
CHANNEL_TLV_DEACTIVATE_Q,
CHANNEL_TLV_TEARDOWN_Q,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
CHANNEL_TLV_PF_RELEASE_VF,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX
};
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* VF_PF_IF_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,427 @@
/* cnic.h: QLogic CNIC core network driver.
*
* Copyright (c) 2006-2014 Broadcom Corporation
* Copyright (c) 2014 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_H
#define CNIC_H
/* Host-coalescing indices used for the iSCSI/FCoE event queues. */
#define HC_INDEX_ISCSI_EQ_CONS 6
#define HC_INDEX_FCOE_EQ_CONS 3
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
/* Kernel work/completion queue geometry and context ids. */
#define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16
#define KWQ_CID 24
#define KCQ_CID 25
/*
* krnlq_context definition
*/
#define L5_KRNLQ_FLAGS 0x00000000
#define L5_KRNLQ_SIZE 0x00000000
#define L5_KRNLQ_TYPE 0x00000000
#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
#define KRNLQ_TYPE_TYPE (0xf<<28)
#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
/* Byte offsets of fields inside the kernel-queue context. */
#define L5_KRNLQ_HOST_QIDX 0x00000004
#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
#define L5_KRNLQ_NX_PG_QIDX 0x00000018
#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
#define L5_KRNLQ_QIDX_INCR 0x0000001c
#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
#define MAX_ISCSI_TBL_SZ 256
/* TCP source-port range the connection manager allocates from. */
#define CNIC_LOCAL_PORT_MIN 60000
#define CNIC_LOCAL_PORT_MAX 61024
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
/* Queue-element counts per page and index <-> page/offset helpers. */
#define KWQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kwqe))
#define KCQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kcqe))
#define MAX_KWQE_CNT (KWQE_CNT - 1)
#define MAX_KCQE_CNT (KCQE_CNT - 1)
#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BNX2_PAGE_BITS - 5))
#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BNX2_PAGE_BITS - 5))
#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
/* Advance a KCQ index, skipping the last entry of each page
 * (hence +2 at a page boundary).
 */
#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
(MAX_KCQE_CNT - 1)) ? \
(x) + 2 : (x) + 1
/* Locate the 16-byte KWQE data slot for index x. */
#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
/* TCP/offload default parameters (timeouts in ms — TODO confirm). */
#define DEF_IPID_START 0x8000
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
#define DEF_KA_MAX_PROBE_COUNT 3
#define DEF_TOS 0
#define DEF_TTL 0xfe
#define DEF_SND_SEQ_SCALE 0
#define DEF_RCV_BUF 0xffff
#define DEF_SND_BUF 0xffff
#define DEF_SEED 0
#define DEF_MAX_RT_TIME 500
#define DEF_MAX_DA_COUNT 2
#define DEF_SWS_TIMER 1000
#define DEF_MAX_CWND 0xffff
/* One DMA-able context buffer and its bus address. */
struct cnic_ctx {
u32 cid;
void *ctx;
dma_addr_t mapping;
};
#define BNX2_MAX_CID 0x2000
/* A multi-page DMA area plus the page table describing it. */
struct cnic_dma {
int num_pages;
void **pg_arr;
dma_addr_t *pg_map_arr;
int pgtbl_size;
u32 *pgtbl;
dma_addr_t pgtbl_map;
};
/* Lock-protected id allocator over the range [start, start + max). */
struct cnic_id_tbl {
spinlock_t lock;
u32 start;
u32 max;
u32 next;
unsigned long *table;
};
#define CNIC_KWQ16_DATA_SIZE 128
struct kwqe_16_data {
u8 data[CNIC_KWQ16_DATA_SIZE];
};
/* Per-connection iSCSI DMA resources. */
struct cnic_iscsi {
struct cnic_dma task_array_info;
struct cnic_dma r2tq_info;
struct cnic_dma hq_info;
};
/* Per-connection offload context; waitq/wait_cond let the caller
 * sleep until the FW completes an operation on this cid.
 */
struct cnic_context {
u32 cid;
struct kwqe_16_data *kwqe_data;
dma_addr_t kwqe_data_mapping;
wait_queue_head_t waitq;
int wait_cond;
unsigned long timestamp;
unsigned long ctx_flags; /* CTX_FL_* bits */
#define CTX_FL_OFFLD_START 0
#define CTX_FL_DELETE_WAIT 1
#define CTX_FL_CID_ERROR 2
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
} proto;
};
/* Kernel completion queue bookkeeping; next_idx/hw_idx abstract the
 * chip-specific index arithmetic.
 */
struct kcq_info {
struct cnic_dma dma;
struct kcqe **kcq;
u16 *hw_prod_idx_ptr;
u16 sw_prod_idx;
u16 *status_idx_ptr;
u32 io_addr;
u16 (*next_idx)(u16);
u16 (*hw_idx)(u16);
};
#define UIO_USE_TX_DOORBELL 0x017855DB
/* Userspace (UIO) mapping of the L2 ring/buffer used by uio clients. */
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
int l2_ring_size;
void *l2_ring;
dma_addr_t l2_ring_map;
int l2_buf_size;
void *l2_buf;
dma_addr_t l2_buf_map;
struct cnic_dev *dev;
struct pci_dev *pdev;
struct list_head list;
};
/* Per-device private state of the CNIC core: ULP registration,
 * kernel work/completion queues, connection tables and the set of
 * chip-specific operations (bnx2 vs bnx2x) bound at start_hw time.
 */
struct cnic_local {
spinlock_t cnic_ulp_lock;
void *ulp_handle[MAX_CNIC_ULP_TYPE];
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; /* ULP_F_* bits */
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
#define CNIC_LCL_FL_L2_WAIT 0x1
#define CNIC_LCL_FL_RINGS_INITED 0x2
#define CNIC_LCL_FL_STOP_ISCSI 0x4
struct cnic_dev *dev;
struct cnic_eth_dev *ethdev;
struct cnic_uio_dev *udev;
/* L2 ring state shared with the ethernet driver */
int l2_rx_ring_size;
int l2_single_buf_size;
u16 *rx_cons_ptr;
u16 *tx_cons_ptr;
u16 rx_cons;
u16 tx_cons;
/* kernel work queue (driver -> FW) */
struct cnic_dma kwq_info;
struct kwqe **kwq;
struct cnic_dma kwq_16_data_info;
u16 max_kwq_idx;
u16 kwq_prod_idx;
u32 kwq_io_addr;
u16 *kwq_con_idx_ptr;
u16 kwq_con_idx;
/* kernel completion queues (FW -> driver) */
struct kcq_info kcq1;
struct kcq_info kcq2;
union {
void *gen;
struct status_block_msix *bnx2;
struct host_hc_status_block_e1x *bnx2x_e1x;
/* index values - which counter to update */
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk;
struct host_sp_status_block *bnx2x_def_status_blk;
u32 status_blk_num;
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
/* connection-manager and per-protocol tables */
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
struct cnic_context *ctx_tbl;
struct cnic_id_tbl cid_tbl;
atomic_t iscsi_conn;
u32 iscsi_start_cid;
u32 fcoe_init_cid;
u32 fcoe_start_cid;
struct cnic_id_tbl fcoe_cid_tbl;
u32 max_cid_space;
/* per connection parameters */
int num_iscsi_tasks;
int num_ccells;
int task_array_size;
int r2tq_size;
int hq_size;
int num_cqs;
struct delayed_work delete_task;
struct cnic_ctx *ctx_arr;
int ctx_blks;
int ctx_blk_size;
unsigned long ctx_align;
int cids_per_blk;
u32 chip_id;
int func;
u32 shmem_base;
struct cnic_ops *cnic_ops;
/* chip-specific hooks, filled in for bnx2 or bnx2x */
int (*start_hw)(struct cnic_dev *);
void (*stop_hw)(struct cnic_dev *);
void (*setup_pgtbl)(struct cnic_dev *,
struct cnic_dma *);
int (*alloc_resc)(struct cnic_dev *);
void (*free_resc)(struct cnic_dev *);
int (*start_cm)(struct cnic_dev *);
void (*stop_cm)(struct cnic_dev *);
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
void (*arm_int)(struct cnic_dev *, u32 index);
void (*close_conn)(struct cnic_sock *, u32 opcode);
};
/* "Next page" pointer element at the end of a bnx2x BD chain page. */
struct bnx2x_bd_chain_next {
u32 addr_lo;
u32 addr_hi;
u8 reserved[8];
};
#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
/* CDU context-validation helpers: build the validation byte from
 * (cid, region, type) and its CRC8.
 */
#define CDU_REGION_NUMBER_XCM_AG 2
#define CDU_REGION_NUMBER_UCM_AG 4
#define CDU_VALID_DATA(_cid, _region, _type) \
(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
#define CDU_CRC8(_cid, _region, _type) \
(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
/* bnx2x iSCSI/FCoE sizing constants. */
#define BNX2X_CONTEXT_MEM_SIZE 1024
#define BNX2X_FCOE_CID 16
#define BNX2X_ISCSI_START_CID 18
#define BNX2X_ISCSI_NUM_CONNECTIONS 128
#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
#define BNX2X_ISCSI_R2TQE_SIZE 8
#define BNX2X_ISCSI_HQ_BD_SIZE 64
#define BNX2X_ISCSI_GLB_BUF_SIZE 64
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
#define BNX2X_FCOE_NUM_CONNECTIONS 1024
#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
/* RX/RCQ descriptor counts per page; last entries are reserved for
 * the next-page pointer, hence the -2/-1.
 */
#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
sizeof(struct eth_rx_bd))
#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
#define BNX2X_RCQ_DESC_CNT (BNX2_PAGE_SIZE / \
sizeof(union eth_rx_cqe))
#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
(BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
((x) + 2) : ((x) + 1)
#define BNX2X_DEF_SB_ID HC_SP_SB_ID
/* Shared-memory / multi-function config accessors. */
#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
#define BNX2X_SHMEM_ADDR(base, field) (base + \
offsetof(struct shmem_region, field))
#define BNX2X_SHMEM2_ADDR(base, field) (base + \
offsetof(struct shmem2_region, field))
#define BNX2X_SHMEM2_HAS(base, field) \
((base) && \
(CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
offsetof(struct shmem2_region, field)))
#define BNX2X_MF_CFG_ADDR(base, field) \
((base) + offsetof(struct mf_cfg, field))
#ifndef ETH_MAX_RX_CLIENTS_E2
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
#define CNIC_FUNC(cp) ((cp)->func)
/* HW cid = port/vn bits ORed onto the software cid. */
#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
(BP_VN(bp) << 17) | (x))
#define BNX2X_SW_CID(x) (x & 0x1ffff)
#define BNX2X_CL_QZONE_ID(bp, cli) \
(BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
#ifndef MAX_STAT_COUNTER_ID
#define MAX_STAT_COUNTER_ID \
(CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
MAX_STAT_COUNTER_ID_E1))
#endif
#define CNIC_SUPPORTS_FCOE(cp) \
(BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
#define CNIC_RAMROD_TMO (HZ / 4)
#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,371 @@
/* cnic_if.h: QLogic CNIC core network driver.
*
* Copyright (c) 2006-2014 Broadcom Corporation
* Copyright (c) 2014 QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_IF_H
#define CNIC_IF_H
#include "bnx2x/bnx2x_mfw_req.h"
#define CNIC_MODULE_VERSION "2.5.20"
#define CNIC_MODULE_RELDATE "March 14, 2014"
/* Upper-layer-protocol (ULP) type ids. */
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
#define CNIC_ULP_FCOE 2
#define CNIC_ULP_L4 3
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
/* Use CPU native page size up to 16K for cnic ring sizes. */
#if (PAGE_SHIFT > 14)
#define CNIC_PAGE_BITS 14
#else
#define CNIC_PAGE_BITS PAGE_SHIFT
#endif
#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
/* 32-byte kernel work-queue element (driver -> FW). */
struct kwqe {
u32 kwqe_op_flag; /* opcode/qid/layer bit-fields below */
#define KWQE_QID_SHIFT 8
#define KWQE_OPCODE_MASK 0x00ff0000
#define KWQE_OPCODE_SHIFT 16
#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
#define KWQE_LAYER_MASK 0x70000000
#define KWQE_LAYER_SHIFT 28
#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
u32 kwqe_info4;
u32 kwqe_info5;
u32 kwqe_info6;
};
/* 16-byte kernel work-queue element. */
struct kwqe_16 {
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
};
/* 32-byte kernel completion-queue element (FW -> driver). */
struct kcqe {
u32 kcqe_info0;
u32 kcqe_info1;
u32 kcqe_info2;
u32 kcqe_info3;
u32 kcqe_info4;
u32 kcqe_info5;
u32 kcqe_info6;
u32 kcqe_op_flag; /* opcode/layer bit-fields below */
#define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
#define KCQE_FLAGS_LAYER_MASK (0x7<<28)
#define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
#define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
#define KCQE_FLAGS_NEXT (1<<31)
#define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT (16)
#define KCQE_OPCODE(op) \
(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
#define MAX_CNIC_CTL_DATA 64
#define MAX_DRV_CTL_DATA 64
/* Commands the ethernet driver sends to cnic (cnic_ctl_info.cmd). */
#define CNIC_CTL_STOP_CMD 1
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define CNIC_CTL_STOP_ISCSI_CMD 4
#define CNIC_CTL_FCOE_STATS_GET_CMD 5
#define CNIC_CTL_ISCSI_STATS_GET_CMD 6
/* Commands cnic sends to the ethernet driver (drv_ctl_info.cmd). */
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
#define DRV_CTL_CTX_WR_CMD 0x103
#define DRV_CTL_CTXTBL_WR_CMD 0x104
#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105
#define DRV_CTL_START_L2_CMD 0x106
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
#define DRV_CTL_ULP_REGISTER_CMD 0x10e
#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f
/* Payload of CNIC_CTL_COMPLETION_CMD. */
struct cnic_ctl_completion {
u32 cid;
u8 opcode;
u8 error;
};
/* Command + payload container for driver -> cnic control calls. */
struct cnic_ctl_info {
int cmd;
union {
struct cnic_ctl_completion comp;
char bytes[MAX_CNIC_CTL_DATA];
} data;
};
struct drv_ctl_spq_credit {
u32 credit_count;
};
/* Register/context IO request payload. */
struct drv_ctl_io {
u32 cid_addr;
u32 offset;
u32 data;
dma_addr_t dma_addr;
};
struct drv_ctl_l2_ring {
u32 client_id;
u32 cid;
};
struct drv_ctl_register_data {
int ulp_type;
struct fcoe_capabilities fcoe_features;
};
/* Command + payload container for cnic -> driver control calls. */
struct drv_ctl_info {
int cmd;
union {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
int ulp_type;
struct drv_ctl_register_data register_data;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
/* Callbacks a cnic client registers with the ethernet driver. */
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
int (*cnic_handler)(void *, void *);
int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};
#define MAX_CNIC_VEC 8
/* One interrupt vector and its associated status block. */
struct cnic_irq {
unsigned int vector;
void *status_blk;
u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags; /* CNIC_IRQ_FL_* */
#define CNIC_IRQ_FL_MSIX 0x00000001
};
/* Resources and callbacks the underlying ethernet driver hands to the
 * cnic layer when the two are bound together.
 */
struct cnic_eth_dev {
	struct module	*drv_owner;
	u32		drv_state;		/* CNIC_DRV_STATE_* bitmask */
#define CNIC_DRV_STATE_REGD		0x00000001
#define CNIC_DRV_STATE_USING_MSIX	0x00000002
#define CNIC_DRV_STATE_NO_ISCSI_OOO	0x00000004
#define CNIC_DRV_STATE_NO_ISCSI		0x00000008
#define CNIC_DRV_STATE_NO_FCOE		0x00000010
#define CNIC_DRV_STATE_HANDLES_IRQ	0x00000020
	u32		chip_id;
	u32		max_kwqe_pending;
	struct pci_dev	*pdev;
	void __iomem	*io_base;
	void __iomem	*io_base2;
	const void	*iro_arr;
	u32		ctx_tbl_offset;
	u32		ctx_tbl_len;
	int		ctx_blk_size;
	u32		starting_cid;
	/* connection limits advertised by the ethernet driver */
	u32		max_iscsi_conn;
	u32		max_fcoe_conn;
	u32		max_rdma_conn;
	u32		fcoe_init_cid;
	u32		max_fcoe_exchanges;
	u32		fcoe_wwn_port_name_hi;
	u32		fcoe_wwn_port_name_lo;
	u32		fcoe_wwn_node_name_hi;
	u32		fcoe_wwn_node_name_lo;
	u16		iscsi_l2_client_id;
	u16		iscsi_l2_cid;
	u8		iscsi_mac[ETH_ALEN];
	int		num_irq;
	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
	/* register/unregister the cnic layer with the net driver */
	int		(*drv_register_cnic)(struct net_device *,
					     struct cnic_ops *, void *);
	int		(*drv_unregister_cnic)(struct net_device *);
	/* submit kernel work queue entries (32- and 16-byte variants) */
	int		(*drv_submit_kwqes_32)(struct net_device *,
					       struct kwqe *[], u32);
	int		(*drv_submit_kwqes_16)(struct net_device *,
					       struct kwqe_16 *[], u32);
	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
	unsigned long	reserved1[2];
	union drv_info_to_mcp	*addr_drv_info_to_mcp;
};
/* Local/remote address pair for an offloaded connection; each end can
 * be either IPv4 or IPv6.
 */
struct cnic_sockaddr {
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} local;
	union {
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	} remote;
};
/* State for one offloaded TCP connection */
struct cnic_sock {
	struct cnic_dev *dev;
	void	*context;		/* opaque ULP context */
	u32	src_ip[4];		/* one word for v4, four for v6 */
	u32	dst_ip[4];
	u16	src_port;
	u16	dst_port;
	u16	vlan_id;
	unsigned char old_ha[ETH_ALEN];
	unsigned char ha[ETH_ALEN];	/* current destination MAC */
	u32	mtu;
	u32	cid;
	u32	l5_cid;
	u32	pg_cid;
	int	ulp_type;

	/* TCP keep-alive and per-connection tuning parameters */
	u32	ka_timeout;
	u32	ka_interval;
	u8	ka_max_probe_count;
	u8	tos;
	u8	ttl;
	u8	snd_seq_scale;
	u32	rcv_buf;
	u32	snd_buf;
	u32	seed;

	unsigned long	tcp_flags;	/* SK_TCP_* bitmask values */
#define SK_TCP_NO_DELAY_ACK	0x1
#define SK_TCP_KEEP_ALIVE	0x2
#define SK_TCP_NAGLE		0x4
#define SK_TCP_TIMESTAMP	0x8
#define SK_TCP_SACK		0x10
#define SK_TCP_SEG_SCALING	0x20
	unsigned long	flags;		/* SK_F_* bit numbers */
#define SK_F_INUSE		0
#define SK_F_OFFLD_COMPLETE	1
#define SK_F_OFFLD_SCHED	2
#define SK_F_PG_OFFLD_COMPLETE	3
#define SK_F_CONNECT_START	4
#define SK_F_IPV6		5
#define SK_F_CLOSING		7
#define SK_F_HW_ERR		8
	atomic_t ref_count;
	u32 state;
	/* preallocated work queue entries for connection requests */
	struct kwqe kwqe1;
	struct kwqe kwqe2;
	struct kwqe kwqe3;
};
/* Handle representing one cnic-capable device; ULPs drive it through
 * the callbacks below.
 */
struct cnic_dev {
	struct net_device	*netdev;
	struct pci_dev		*pcidev;
	void __iomem		*regview;	/* mapped registers, used by CNIC_RD/CNIC_WR */
	struct list_head	list;

	int (*register_device)(struct cnic_dev *dev, int ulp_type,
			       void *ulp_ctx);
	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
			    u32 num_wqes);
	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
			       u32 num_wqes);

	/* connection-management hooks operating on struct cnic_sock */
	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
			 void *);
	int (*cm_destroy)(struct cnic_sock *);
	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
	int (*cm_abort)(struct cnic_sock *);
	int (*cm_close)(struct cnic_sock *);
	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
				 char *data, u16 data_size);
	unsigned long	flags;		/* CNIC_F_* bit numbers */
#define CNIC_F_CNIC_UP		1
#define CNIC_F_BNX2_CLASS	3
#define CNIC_F_BNX2X_CLASS	4
	atomic_t	ref_count;
	u8		mac_addr[ETH_ALEN];

	int		max_iscsi_conn;
	int		max_fcoe_conn;
	int		max_rdma_conn;
	int		max_fcoe_exchanges;

	union drv_info_to_mcp	*stats_addr;
	struct fcoe_capabilities	*fcoe_cap;

	void	*cnic_priv;
};
/* MMIO accessors for the cnic device register view (dev->regview).
 * All macro arguments are parenthesized so that expression arguments
 * (e.g. "base + idx" passed as @off) expand safely.
 */
#define CNIC_WR(dev, off, val)		writel((val), (dev)->regview + (off))
#define CNIC_WR16(dev, off, val)	writew((val), (dev)->regview + (off))
#define CNIC_WR8(dev, off, val)		writeb((val), (dev)->regview + (off))
#define CNIC_RD(dev, off)		readl((dev)->regview + (off))
#define CNIC_RD16(dev, off)		readw((dev)->regview + (off))
/* Callbacks an upper-layer protocol driver registers with the cnic
 * core via cnic_register_driver().
 */
struct cnic_ulp_ops {
	/* Calls to these functions are protected by RCU.  When
	 * unregistering, we wait for any calls to complete before
	 * continuing.
	 */
	void (*cnic_init)(struct cnic_dev *dev);
	void (*cnic_exit)(struct cnic_dev *dev);
	void (*cnic_start)(void *ulp_ctx);
	void (*cnic_stop)(void *ulp_ctx);
	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
			       u32 num_cqes);
	void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
	/* connection-management completion/notification callbacks */
	void (*cm_connect_complete)(struct cnic_sock *);
	void (*cm_close_complete)(struct cnic_sock *);
	void (*cm_abort_complete)(struct cnic_sock *);
	void (*cm_remote_close)(struct cnic_sock *);
	void (*cm_remote_abort)(struct cnic_sock *);
	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
				 char *data, u16 data_size);
	int (*cnic_get_stats)(void *ulp_ctx);
	struct module *owner;
	atomic_t ref_count;
};

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
int cnic_unregister_driver(int ulp_type);
#endif

View file

@ -0,0 +1,2 @@
# Broadcom GENET (Gigabit Ethernet controller) driver
obj-$(CONFIG_BCMGENET) += genet.o
genet-objs := bcmgenet.o bcmmii.o bcmgenet_wol.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,635 @@
/*
* Copyright (c) 2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __BCMGENET_H__
#define __BCMGENET_H__
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
/* which ring is descriptor based */
#define DESC_INDEX 16
/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
* 1536 is multiple of 256 bytes
*/
#define ENET_BRCM_TAG_LEN 6
#define ENET_PAD 8
#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
#define DMA_MAX_BURST_LENGTH 0x10
/* misc. configuration */
#define CLEAR_ALL_HFB 0xFF
#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
#define DMA_FC_THRESH_LO 5
/* 64B receive/transmit status block that the hardware prepends to the
 * packet data when 64B status blocks are enabled (see RBUF_64B_EN and
 * priv->desc_64b_en).
 */
struct status_64 {
	u32	length_status;		/* length and peripheral status */
	u32	ext_status;		/* Extended status */
	u32	rx_csum;		/* partial rx checksum */
	u32	unused1[9];		/* unused */
	u32	tx_csum_info;		/* Tx checksum info. */
	u32	unused2[3];		/* unused */
};
/* Rx status bits */
#define STATUS_RX_EXT_MASK 0x1FFFFF
#define STATUS_RX_CSUM_MASK 0xFFFF
#define STATUS_RX_CSUM_OK 0x10000
#define STATUS_RX_CSUM_FR 0x20000
#define STATUS_RX_PROTO_TCP 0
#define STATUS_RX_PROTO_UDP 1
#define STATUS_RX_PROTO_ICMP 2
#define STATUS_RX_PROTO_OTHER 3
#define STATUS_RX_PROTO_MASK 3
#define STATUS_RX_PROTO_SHIFT 18
#define STATUS_FILTER_INDEX_MASK 0xFFFF
/* Tx status bits */
#define STATUS_TX_CSUM_START_MASK 0X7FFF
#define STATUS_TX_CSUM_START_SHIFT 16
#define STATUS_TX_CSUM_PROTO_UDP 0x8000
#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
#define STATUS_TX_CSUM_LV 0x80000000
/* DMA Descriptor */
#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
/* Rx/Tx common counter group (packet-size histogram) */
struct bcmgenet_pkt_counters {
	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */
	u32	cnt_127;	/* RO Rx/Tx 127 bytes packet */
	u32	cnt_255;	/* RO Rx/Tx 65-255 bytes packet */
	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */
	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */
	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */
	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */
	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet */
	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet */
	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet */
};
/* RSV, Receive Status Vector (hardware MIB counters, read-only) */
struct bcmgenet_rx_counters {
	struct  bcmgenet_pkt_counters pkt_cnt;
	u32	pkt;		/* RO (0x428) Received pkt count */
	u32	bytes;		/* RO Received byte count */
	u32	mca;		/* RO # of Received multicast pkt */
	u32	bca;		/* RO # of Received broadcast pkt */
	u32	fcs;		/* RO # of Received FCS error */
	u32	cf;		/* RO # of Received control frame pkt */
	u32	pf;		/* RO # of Received pause frame pkt */
	u32	uo;		/* RO # of unknown op code pkt */
	u32	aln;		/* RO # of alignment error count */
	u32	flr;		/* RO # of frame length out of range count */
	u32	cde;		/* RO # of code error pkt */
	u32	fcr;		/* RO # of carrier sense error pkt */
	u32	ovr;		/* RO # of oversize pkt */
	u32	jbr;		/* RO # of jabber count */
	u32	mtue;		/* RO # of MTU error pkt */
	u32	pok;		/* RO # of Received good pkt */
	u32	uc;		/* RO # of unicast pkt */
	u32	ppp;		/* RO # of PPP pkt */
	u32	rcrc;		/* RO (0x470), # of CRC match pkt */
};
/* TSV, Transmit Status Vector (hardware MIB counters, read-only) */
struct bcmgenet_tx_counters {
	struct bcmgenet_pkt_counters pkt_cnt;
	u32	pkts;		/* RO (0x4a8) Transmitted pkt */
	u32	mca;		/* RO # of xmited multicast pkt */
	u32	bca;		/* RO # of xmited broadcast pkt */
	u32	pf;		/* RO # of xmited pause frame count */
	u32	cf;		/* RO # of xmited control frame count */
	u32	fcs;		/* RO # of xmited FCS error count */
	u32	ovr;		/* RO # of xmited oversize pkt */
	u32	drf;		/* RO # of xmited deferral pkt */
	u32	edf;		/* RO # of xmited Excessive deferral pkt */
	u32	scl;		/* RO # of xmited single collision pkt */
	u32	mcl;		/* RO # of xmited multiple collision pkt */
	u32	lcl;		/* RO # of xmited late collision pkt */
	u32	ecl;		/* RO # of xmited excessive collision pkt */
	u32	frg;		/* RO # of xmited fragments pkt */
	u32	ncl;		/* RO # of xmited total collision count */
	u32	jbr;		/* RO # of xmited jabber count */
	u32	bytes;		/* RO # of xmited byte count */
	u32	pok;		/* RO # of xmited good pkt */
	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */
};
/* Aggregate MIB counter snapshot kept in struct bcmgenet_priv (->mib) */
struct bcmgenet_mib_counters {
	struct bcmgenet_rx_counters rx;
	struct bcmgenet_tx_counters tx;
	u32	rx_runt_cnt;
	u32	rx_runt_fcs;
	u32	rx_runt_fcs_align;
	u32	rx_runt_bytes;
	u32	rbuf_ovflow_cnt;	/* from UMAC_RBUF_OVFL_CNT */
	u32	rbuf_err_cnt;		/* from UMAC_RBUF_ERR_CNT */
	u32	mdf_err_cnt;		/* from UMAC_MDF_ERR_CNT */
};
#define UMAC_HD_BKP_CTRL 0x004
#define HD_FC_EN (1 << 0)
#define HD_FC_BKOFF_OK (1 << 1)
#define IPG_CONFIG_RX_SHIFT 2
#define IPG_CONFIG_RX_MASK 0x1F
#define UMAC_CMD 0x008
#define CMD_TX_EN (1 << 0)
#define CMD_RX_EN (1 << 1)
#define UMAC_SPEED_10 0
#define UMAC_SPEED_100 1
#define UMAC_SPEED_1000 2
#define UMAC_SPEED_2500 3
#define CMD_SPEED_SHIFT 2
#define CMD_SPEED_MASK 3
#define CMD_PROMISC (1 << 4)
#define CMD_PAD_EN (1 << 5)
#define CMD_CRC_FWD (1 << 6)
#define CMD_PAUSE_FWD (1 << 7)
#define CMD_RX_PAUSE_IGNORE (1 << 8)
#define CMD_TX_ADDR_INS (1 << 9)
#define CMD_HD_EN (1 << 10)
#define CMD_SW_RESET (1 << 13)
#define CMD_LCL_LOOP_EN (1 << 15)
#define CMD_AUTO_CONFIG (1 << 22)
#define CMD_CNTL_FRM_EN (1 << 23)
#define CMD_NO_LEN_CHK (1 << 24)
#define CMD_RMT_LOOP_EN (1 << 25)
#define CMD_PRBL_EN (1 << 27)
#define CMD_TX_PAUSE_IGNORE (1 << 28)
#define CMD_TX_RX_EN (1 << 29)
#define CMD_RUNT_FILTER_DIS (1 << 30)
#define UMAC_MAC0 0x00C
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
#define UMAC_TX_FLUSH 0x334
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
#define MDIO_START_BUSY (1 << 29)
#define MDIO_READ_FAIL (1 << 28)
#define MDIO_RD (2 << 26)
#define MDIO_WR (1 << 26)
#define MDIO_PMD_SHIFT 21
#define MDIO_PMD_MASK 0x1F
#define MDIO_REG_SHIFT 16
#define MDIO_REG_MASK 0x1F
#define UMAC_RBUF_OVFL_CNT 0x61C
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
#define MPD_PW_EN (1 << 27)
#define MPD_MSEQ_LEN_SHIFT 16
#define MPD_MSEQ_LEN_MASK 0xFF
#define UMAC_MPD_PW_MS 0x624
#define UMAC_MPD_PW_LS 0x628
#define UMAC_RBUF_ERR_CNT 0x634
#define UMAC_MDF_ERR_CNT 0x638
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
#define UMAC_MIB_CTRL 0x580
#define MIB_RESET_RX (1 << 0)
#define MIB_RESET_RUNT (1 << 1)
#define MIB_RESET_TX (1 << 2)
#define RBUF_CTRL 0x00
#define RBUF_64B_EN (1 << 0)
#define RBUF_ALIGN_2B (1 << 1)
#define RBUF_BAD_DIS (1 << 2)
#define RBUF_STATUS 0x0C
#define RBUF_STATUS_WOL (1 << 0)
#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
#define RBUF_CHK_CTRL 0x14
#define RBUF_RXCHK_EN (1 << 0)
#define RBUF_SKIP_FCS (1 << 4)
#define RBUF_TBUF_SIZE_CTRL 0xb4
#define RBUF_HFB_CTRL_V1 0x38
#define RBUF_HFB_FILTER_EN_SHIFT 16
#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
#define RBUF_HFB_EN (1 << 0)
#define RBUF_HFB_256B (1 << 1)
#define RBUF_ACPI_EN (1 << 2)
#define RBUF_HFB_LEN_V1 0x3C
#define RBUF_FLTR_LEN_MASK 0xFF
#define RBUF_FLTR_LEN_SHIFT 8
#define TBUF_CTRL 0x00
#define TBUF_BP_MC 0x0C
#define TBUF_CTRL_V1 0x80
#define TBUF_BP_MC_V1 0xA0
#define HFB_CTRL 0x00
#define HFB_FLT_ENABLE_V3PLUS 0x04
#define HFB_FLT_LEN_V2 0x04
#define HFB_FLT_LEN_V3PLUS 0x1C
/* uniMac intrl2 registers */
#define INTRL2_CPU_STAT 0x00
#define INTRL2_CPU_SET 0x04
#define INTRL2_CPU_CLEAR 0x08
#define INTRL2_CPU_MASK_STATUS 0x0C
#define INTRL2_CPU_MASK_SET 0x10
#define INTRL2_CPU_MASK_CLEAR 0x14
/* INTRL2 instance 0 definitions */
#define UMAC_IRQ_SCB (1 << 0)
#define UMAC_IRQ_EPHY (1 << 1)
#define UMAC_IRQ_PHY_DET_R (1 << 2)
#define UMAC_IRQ_PHY_DET_F (1 << 3)
#define UMAC_IRQ_LINK_UP (1 << 4)
#define UMAC_IRQ_LINK_DOWN (1 << 5)
#define UMAC_IRQ_UMAC (1 << 6)
#define UMAC_IRQ_UMAC_TSV (1 << 7)
#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
#define UMAC_IRQ_HFB_SM (1 << 10)
#define UMAC_IRQ_HFB_MM (1 << 11)
#define UMAC_IRQ_MPD_R (1 << 12)
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
/* Register block offsets */
#define GENET_SYS_OFF 0x0000
#define GENET_GR_BRIDGE_OFF 0x0040
#define GENET_EXT_OFF 0x0080
#define GENET_INTRL2_0_OFF 0x0200
#define GENET_INTRL2_1_OFF 0x0240
#define GENET_RBUF_OFF 0x0300
#define GENET_UMAC_OFF 0x0800
/* SYS block offsets and register definitions */
#define SYS_REV_CTRL 0x00
#define SYS_PORT_CTRL 0x04
#define PORT_MODE_INT_EPHY 0
#define PORT_MODE_INT_GPHY 1
#define PORT_MODE_EXT_EPHY 2
#define PORT_MODE_EXT_GPHY 3
#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
#define PORT_MODE_EXT_RVMII_50 4
#define LED_ACT_SOURCE_MAC (1 << 9)
#define SYS_RBUF_FLUSH_CTRL 0x08
#define SYS_TBUF_FLUSH_CTRL 0x0C
#define RBUF_FLUSH_CTRL_V1 0x04
/* Ext block register offsets and definitions */
#define EXT_EXT_PWR_MGMT 0x00
#define EXT_PWR_DOWN_BIAS (1 << 0)
#define EXT_PWR_DOWN_DLL (1 << 1)
#define EXT_PWR_DOWN_PHY (1 << 2)
#define EXT_PWR_DN_EN_LD (1 << 3)
#define EXT_ENERGY_DET (1 << 4)
#define EXT_IDDQ_FROM_PHY (1 << 5)
#define EXT_PHY_RESET (1 << 8)
#define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
#define ID_MODE_DIS (1 << 16)
#define EXT_GPHY_CTRL 0x1C
#define EXT_CFG_IDDQ_BIAS (1 << 0)
#define EXT_CFG_PWR_DOWN (1 << 1)
#define EXT_GPHY_RESET (1 << 5)
/* DMA rings size */
#define DMA_RING_SIZE (0x40)
#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
/* DMA registers common definitions */
#define DMA_RW_POINTER_MASK 0x1FF
#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
#define DMA_BUFFER_DONE_CNT_SHIFT 16
#define DMA_P_INDEX_MASK 0xFFFF
#define DMA_C_INDEX_MASK 0xFFFF
/* DMA ring size register */
#define DMA_RING_SIZE_MASK 0xFFFF
#define DMA_RING_SIZE_SHIFT 16
#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
/* DMA interrupt threshold register */
#define DMA_INTR_THRESHOLD_MASK 0x00FF
/* DMA XON/XOFF register */
#define DMA_XON_THREHOLD_MASK 0xFFFF
#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
#define DMA_XOFF_THRESHOLD_SHIFT 16
/* DMA flow period register */
#define DMA_FLOW_PERIOD_MASK 0xFFFF
#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
#define DMA_MAX_PKT_SIZE_SHIFT 16
/* DMA control register */
#define DMA_EN (1 << 0)
#define DMA_RING_BUF_EN_SHIFT 0x01
#define DMA_RING_BUF_EN_MASK 0xFFFF
#define DMA_TSB_SWAP_EN (1 << 20)
/* DMA status register */
#define DMA_DISABLED (1 << 0)
#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
/* DMA SCB burst size register */
#define DMA_SCB_BURST_SIZE_MASK 0x1F
/* DMA activity vector register */
#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
/* DMA backpressure mask register */
#define DMA_BACKPRESSURE_MASK 0x1FFFF
#define DMA_PFC_ENABLE (1 << 31)
/* DMA backpressure status register */
#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
/* DMA override register */
#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
#define DMA_REGISTER_MODE (1 << 1)
/* DMA timeout register */
#define DMA_TIMEOUT_MASK 0xFFFF
#define DMA_TIMEOUT_VAL 5000 /* micro seconds */
/* TDMA rate limiting control register */
#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
/* TDMA arbitration control register */
#define DMA_ARBITER_MODE_MASK 0x03
#define DMA_RING_BUF_PRIORITY_MASK 0x1F
#define DMA_RING_BUF_PRIORITY_SHIFT 5
#define DMA_PRIO_REG_INDEX(q) ((q) / 6)
#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT)
#define DMA_RATE_ADJ_MASK 0xFF
/* Tx/Rx Dma Descriptor common bits*/
#define DMA_BUFLENGTH_MASK 0x0fff
#define DMA_BUFLENGTH_SHIFT 16
#define DMA_OWN 0x8000
#define DMA_EOP 0x4000
#define DMA_SOP 0x2000
#define DMA_WRAP 0x1000
/* Tx specific Dma descriptor bits */
#define DMA_TX_UNDERRUN 0x0200
#define DMA_TX_APPEND_CRC 0x0040
#define DMA_TX_OW_CRC 0x0020
#define DMA_TX_DO_CSUM 0x0010
#define DMA_TX_QTAG_SHIFT 7
/* Rx Specific Dma descriptor bits */
#define DMA_RX_CHK_V3PLUS 0x8000
#define DMA_RX_CHK_V12 0x1000
#define DMA_RX_BRDCAST 0x0040
#define DMA_RX_MULT 0x0020
#define DMA_RX_LG 0x0010
#define DMA_RX_NO 0x0008
#define DMA_RX_RXER 0x0004
#define DMA_RX_CRC_ERROR 0x0002
#define DMA_RX_OV 0x0001
#define DMA_RX_FI_MASK 0x001F
#define DMA_RX_FI_SHIFT 0x0007
#define DMA_DESC_ALLOC_MASK 0x00FF
#define DMA_ARBITER_RR 0x00
#define DMA_ARBITER_WRR 0x01
#define DMA_ARBITER_SP 0x02
/* Buffer control block: software state for one hardware DMA buffer
 * descriptor.
 */
struct enet_cb {
	struct sk_buff      *skb;	/* packet attached to this BD, if any */
	void __iomem *bd_addr;		/* address of the HW buffer descriptor */
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};
/* power management mode, passed to the *_power_{down,up}_cfg helpers */
enum bcmgenet_power_mode {
	GENET_POWER_CABLE_SENSE = 0,
	GENET_POWER_PASSIVE,
	GENET_POWER_WOL_MAGIC,		/* magic-packet Wake-on-LAN */
};
struct bcmgenet_priv;
/* We support both runtime GENET detection and compile-time
 * to optimize code-paths for a given hardware
 */
enum bcmgenet_version {
	GENET_V1 = 1,
	GENET_V2,
	GENET_V3,
	GENET_V4
};
#define GENET_IS_V1(p) ((p)->version == GENET_V1)
#define GENET_IS_V2(p) ((p)->version == GENET_V2)
#define GENET_IS_V3(p) ((p)->version == GENET_V3)
#define GENET_IS_V4(p) ((p)->version == GENET_V4)
/* Hardware flags */
#define GENET_HAS_40BITS (1 << 0)
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
/* BCMGENET hardware parameters, keep this structure nicely aligned
 * since it is going to be used in hot paths
 */
struct bcmgenet_hw_params {
	u8		tx_queues;		/* # of hardware Tx queues */
	u8		rx_queues;		/* # of hardware Rx queues */
	u8		bds_cnt;		/* buffer descriptors per queue */
	u8		bp_in_en_shift;
	u32		bp_in_mask;
	u8		hfb_filter_cnt;		/* # of hardware filter block filters */
	u8		qtag_mask;
	u16		tbuf_offset;
	/* register block offsets, version-dependent */
	u32		hfb_offset;
	u32		hfb_reg_offset;
	u32		rdma_offset;
	u32		tdma_offset;
	u32		words_per_bd;		/* 32-bit words per buffer descriptor */
	u32		flags;			/* GENET_HAS_* capability bits */
};
/* Software state for one transmit ring */
struct bcmgenet_tx_ring {
	spinlock_t	lock;		/* ring lock */
	struct napi_struct napi;	/* NAPI per tx queue */
	unsigned int	index;		/* ring index */
	unsigned int	queue;		/* queue index */
	struct enet_cb	*cbs;		/* tx ring buffer control block */
	unsigned int	size;		/* size of each tx ring */
	unsigned int	c_index;	/* last consumer index of each ring */
	unsigned int	free_bds;	/* # of free bds for each ring */
	unsigned int	write_ptr;	/* Tx ring write pointer SW copy */
	unsigned int	prod_index;	/* Tx ring producer index SW copy */
	unsigned int	cb_ptr;		/* Tx ring initial CB ptr */
	unsigned int	end_ptr;	/* Tx ring end CB ptr */
	/* per-ring interrupt mask/unmask hooks */
	void (*int_enable)(struct bcmgenet_priv *priv,
			   struct bcmgenet_tx_ring *);
	void (*int_disable)(struct bcmgenet_priv *priv,
			    struct bcmgenet_tx_ring *);
	struct bcmgenet_priv *priv;	/* back-pointer to device context */
};
/* device context: per-adapter driver state */
struct bcmgenet_priv {
	void __iomem *base;		/* mapped register base (GENET_IO_MACRO) */
	enum bcmgenet_version version;
	struct net_device *dev;
	u32 int0_mask;
	u32 int1_mask;

	/* NAPI for descriptor based rx */
	struct napi_struct napi ____cacheline_aligned;

	/* transmit variables */
	void __iomem *tx_bds;
	struct enet_cb *tx_cbs;
	unsigned int num_tx_bds;

	struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];

	/* receive variables */
	void __iomem *rx_bds;
	void __iomem *rx_bd_assign_ptr;
	int rx_bd_assign_index;
	struct enet_cb *rx_cbs;
	unsigned int num_rx_bds;
	unsigned int rx_buf_len;
	unsigned int rx_read_ptr;
	unsigned int rx_c_index;

	/* other misc variables */
	struct bcmgenet_hw_params *hw_params;

	/* MDIO bus variables */
	wait_queue_head_t wq;		/* MDIO completion waitqueue */
	struct phy_device *phydev;
	struct device_node *phy_dn;
	struct mii_bus *mii_bus;
	u16 gphy_rev;

	/* PHY device variables */
	int old_link;
	int old_speed;
	int old_duplex;
	int old_pause;
	phy_interface_t phy_interface;
	int phy_addr;
	int ext_phy;

	/* Interrupt variables */
	struct work_struct bcmgenet_irq_work;
	int irq0;
	int irq1;
	unsigned int irq0_stat;
	unsigned int irq1_stat;
	int wol_irq;
	bool wol_irq_disabled;		/* keeps enable/disable_irq_wake() balanced */

	/* HW descriptors/checksum variables */
	bool desc_64b_en;		/* 64B status block enabled */
	bool desc_rxchk_en;
	bool crc_fwd_en;
	unsigned int dma_rx_chk_bit;

	u32 msg_enable;

	struct clk *clk;
	struct platform_device *pdev;

	/* WOL */
	struct clk *clk_wol;
	u32 wolopts;			/* currently enabled WAKE_* options */

	struct bcmgenet_mib_counters mib;
};
/* Generate bcmgenet_<name>_readl()/writel() accessors for the register
 * block at @offset from the GENET base.  NOTE: @offset is expanded
 * textually inside the generated function body, so an argument such as
 * priv->hw_params->hfb_offset deliberately refers to the 'priv'
 * parameter of the generated helper.
 */
#define GENET_IO_MACRO(name, offset)					\
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv,	\
					u32 off)			\
{									\
	return __raw_readl(priv->base + offset + off);			\
}									\
static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv,	\
				u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}

GENET_IO_MACRO(ext, GENET_EXT_OFF);
GENET_IO_MACRO(umac, GENET_UMAC_OFF);
GENET_IO_MACRO(sys, GENET_SYS_OFF);

/* interrupt l2 registers accessors */
GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);

/* HFB register accessors (offset depends on the GENET version) */
GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);

/* GENET v2+ HFB control and filter len helpers */
GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);

/* RBUF register accessors */
GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_mii_setup(struct net_device *dev);
/* Wake-on-LAN routines */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
#endif /* __BCMGENET_H__ */

View file

@ -0,0 +1,208 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
* Copyright (c) 2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include "bcmgenet.h"
/* ethtool get_wol hook: report supported and currently enabled
 * Wake-on-LAN modes.  Only magic-packet detection (optionally with a
 * SecureOn password) is supported.
 */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;
	memset(wol->sopass, 0, sizeof(wol->sopass));

	if (!(wol->wolopts & WAKE_MAGICSECURE))
		return;

	/* Recover the SecureOn password from the MPD password registers */
	put_unaligned_be16(bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS),
			   &wol->sopass[0]);
	put_unaligned_be32(bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS),
			   &wol->sopass[2]);
}
/* ethtool function - set WOL (Wake on LAN) settings.
* Only for magic packet detection mode.
*/
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
u32 reg;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
return -EINVAL;
if (wol->wolopts & WAKE_MAGICSECURE) {
bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
UMAC_MPD_PW_MS);
bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
UMAC_MPD_PW_LS);
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_PW_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
/* Avoid unbalanced enable_irq_wake calls */
if (priv->wol_irq_disabled)
enable_irq_wake(priv->wol_irq);
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
/* Avoid unbalanced disable_irq_wake calls */
if (!priv->wol_irq_disabled)
disable_irq_wake(priv->wol_irq);
priv->wol_irq_disabled = true;
}
priv->wolopts = wol->wolopts;
return 0;
}
/* Poll RBUF_STATUS until the hardware reports WOL-ready, sleeping 1ms
 * between attempts.  Returns the number of retries needed (so also the
 * elapsed milliseconds), or -ETIMEDOUT after 5 unsuccessful retries.
 */
static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
{
	struct net_device *dev = priv->dev;
	int retries;

	for (retries = 0; ; ) {
		if (bcmgenet_rbuf_readl(priv, RBUF_STATUS) & RBUF_STATUS_WOL)
			return retries;
		if (++retries > 5)
			break;
		mdelay(1);
	}

	netdev_crit(dev, "polling wol mode timeout\n");
	return -ETIMEDOUT;
}
/* Configure the UniMAC for magic-packet Wake-on-LAN before entering a
 * low-power state.  The register writes below are strictly ordered.
 * Returns 0 on success, -EINVAL for any mode other than
 * GENET_POWER_WOL_MAGIC, or -ETIMEDOUT if the RBUF never reported
 * WOL-ready (MPD mode is backed out in that case).
 */
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	struct net_device *dev = priv->dev;
	u32 cpu_mask_clear;
	int retries = 0;
	u32 reg;

	if (mode != GENET_POWER_WOL_MAGIC) {
		netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	/* disable RX */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_RX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	mdelay(10);

	/* enter magic-packet-detection (MPD) mode */
	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Do not leave UniMAC in MPD mode only */
	retries = bcmgenet_poll_wol_status(priv);
	if (retries < 0) {
		reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
		return retries;
	}

	netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
		  retries);

	/* Enable CRC forward */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = 1;
	reg |= CMD_CRC_FWD;

	/* Receiver must be enabled for WOL MP detection */
	reg |= CMD_RX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	if (priv->hw_params->flags & GENET_HAS_EXT) {
		/* don't let energy detect take the PHY out of WOL mode */
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg &= ~EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Enable the MPD interrupt */
	cpu_mask_clear = UMAC_IRQ_MPD_R;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	return 0;
}
/* Undo bcmgenet_wol_power_down_cfg() after resume: leave MPD mode,
 * restore CRC stripping and stop listening for the magic-packet IRQ.
 */
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	u32 cpu_mask_set;
	u32 reg;

	if (mode != GENET_POWER_WOL_MAGIC) {
		netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
		return;
	}

	/* leave magic-packet-detection mode */
	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Disable CRC Forward */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_CRC_FWD;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	priv->crc_fwd_en = 0;

	/* Stop monitoring magic packet IRQ */
	cpu_mask_set = UMAC_IRQ_MPD_R;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
}

View file

@ -0,0 +1,477 @@
/*
* Broadcom GENET MDIO routines
*
* Copyright (c) 2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/brcmphy.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include "bcmgenet.h"
/* Read PHY register @location on PHY @phy_id over the MII.
 * Returns the 16-bit register value, or -EIO if the hardware flagged
 * the read as failed.
 */
static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
{
	int ret;
	struct net_device *dev = bus->priv;
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;

	bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
			     (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
	/* Start MDIO transaction */
	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
	reg |= MDIO_START_BUSY;
	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
	/* Wait up to 10ms for the hardware to clear the start/busy bit.
	 * NOTE(review): a wait timeout falls through and reads the
	 * command register anyway; presumably MDIO_READ_FAIL is set in
	 * that case - confirm against the GENET documentation.
	 */
	wait_event_timeout(priv->wq,
			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
			   & MDIO_START_BUSY),
			   HZ / 100);
	ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
	if (ret & MDIO_READ_FAIL)
		return -EIO;

	return ret & 0xffff;
}
/* Write @val to PHY register @location on PHY @phy_id over the MII.
 * Always returns 0; write failures are not reported by the hardware
 * path used here.
 */
static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
			      int location, u16 val)
{
	struct net_device *dev = bus->priv;
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;

	bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
			     (location << MDIO_REG_SHIFT) | (0xffff & val)),
			     UMAC_MDIO_CMD);
	/* Start MDIO transaction and wait (up to 10ms) for it to finish */
	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
	reg |= MDIO_START_BUSY;
	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
	wait_event_timeout(priv->wq,
			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
			   MDIO_START_BUSY),
			   HZ / 100);

	return 0;
}
/* PHY link-change callback: update the netdev link state, and program
 * the UMAC and RGMII blocks with the negotiated speed/duplex/pause
 * when the link is up.
 */
void bcmgenet_mii_setup(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	u32 reg, cmd_bits = 0;
	bool status_changed = false;

	if (priv->old_link != phydev->link) {
		status_changed = true;
		priv->old_link = phydev->link;
	}

	if (phydev->link) {
		/* check speed/duplex/pause changes */
		if (priv->old_speed != phydev->speed) {
			status_changed = true;
			priv->old_speed = phydev->speed;
		}

		if (priv->old_duplex != phydev->duplex) {
			status_changed = true;
			priv->old_duplex = phydev->duplex;
		}

		if (priv->old_pause != phydev->pause) {
			status_changed = true;
			priv->old_pause = phydev->pause;
		}

		/* done if nothing has changed */
		if (!status_changed)
			return;

		/* speed */
		if (phydev->speed == SPEED_1000)
			cmd_bits = UMAC_SPEED_1000;
		else if (phydev->speed == SPEED_100)
			cmd_bits = UMAC_SPEED_100;
		else
			cmd_bits = UMAC_SPEED_10;
		cmd_bits <<= CMD_SPEED_SHIFT;

		/* duplex */
		if (phydev->duplex != DUPLEX_FULL)
			cmd_bits |= CMD_HD_EN;

		/* pause capability */
		if (!phydev->pause)
			cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

		/*
		 * Program UMAC and RGMII block based on established
		 * link speed, duplex, and pause. The speed set in
		 * umac->cmd tell RGMII block which clock to use for
		 * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
		 * Receive clock is provided by the PHY.
		 */
		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
		reg &= ~OOB_DISABLE;
		reg |= RGMII_LINK;
		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);

		/* clear the old speed/duplex/pause bits before OR-ing in
		 * the freshly negotiated ones
		 */
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			 CMD_HD_EN |
			 CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	} else {
		/* done if nothing has changed */
		if (!status_changed)
			return;

		/* needed for MoCA fixed PHY to reflect correct link status */
		netif_carrier_off(dev);
	}

	phy_print_status(phydev);
}
/* Re-initialize the attached PHY and restart autonegotiation; a no-op
 * when no PHY device is bound.
 */
void bcmgenet_mii_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (!phydev)
		return;

	phy_init_hw(phydev);
	phy_start_aneg(phydev);
}
/* Power up the internal PHY: clear its IDDQ/power-down bits and pulse
 * EXT_GPHY_RESET (2ms asserted, 20us settle).  The timed sequence below
 * is order-sensitive.
 */
static void bcmgenet_ephy_power_up(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg = 0;

	/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
	if (!GENET_IS_V4(priv))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
	reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
	reg |= EXT_GPHY_RESET;
	bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
	mdelay(2);

	reg &= ~EXT_GPHY_RESET;
	bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
	udelay(20);
}
/* Bring up the internal EPHY, enable auto power-down, then reset the
 * PHY and restart autonegotiation.
 */
static void bcmgenet_internal_phy_setup(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Power up EPHY */
	bcmgenet_ephy_power_up(dev);

	/* enable APD */
	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
	reg |= EXT_PWR_DN_EN_LD;
	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	bcmgenet_mii_reset(dev);
}
/* Configure a MoCA port: route the activity LED to the MAC.  Speed
 * settings are handled in bcmgenet_mii_setup().
 */
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
	u32 port_ctrl;

	port_ctrl = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
	bcmgenet_sys_writel(priv, port_ctrl | LED_ACT_SOURCE_MAC,
			    SYS_PORT_CTRL);
}
/* Configure the GENET port multiplexer and interface blocks according to
 * the PHY connection type (internal EPHY/GPHY, MoCA, MII, Reverse MII,
 * RGMII with or without TX delay).
 *
 * @dev:  network device whose priv holds phydev and phy_interface
 * @init: when true, print a one-time informational message
 *
 * Returns 0 on success or -EINVAL for an unsupported phy-mode.
 *
 * Fix: the local @phydev was declared but the code kept re-reading
 * priv->phydev; use the local consistently (same pointer, no behavior
 * change).
 */
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct device *kdev = &priv->pdev->dev;
	const char *phy_name = NULL;
	u32 id_mode_dis = 0;
	u32 port_ctrl;
	u32 reg;

	priv->ext_phy = !phy_is_internal(phydev) &&
			(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);

	/* The internal PHY is driven the same way regardless of the DT
	 * phy-mode, so normalize it to "NA" here.
	 */
	if (phy_is_internal(phydev))
		priv->phy_interface = PHY_INTERFACE_MODE_NA;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_NA:
	case PHY_INTERFACE_MODE_MOCA:
		/* Irrespective of the actually configured PHY speed (100 or
		 * 1000) GENETv4 only has an internal GPHY so we will just end
		 * up masking the Gigabit features from what we support, not
		 * switching to the EPHY
		 */
		if (GENET_IS_V4(priv))
			port_ctrl = PORT_MODE_INT_GPHY;
		else
			port_ctrl = PORT_MODE_INT_EPHY;

		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);

		if (phy_is_internal(phydev)) {
			phy_name = "internal PHY";
			bcmgenet_internal_phy_setup(dev);
		} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
			phy_name = "MoCA";
			bcmgenet_moca_phy_setup(priv);
		}
		break;

	case PHY_INTERFACE_MODE_MII:
		phy_name = "external MII";
		/* External 10/100 PHY: drop the Gigabit capabilities */
		phydev->supported &= PHY_BASIC_FEATURES;
		bcmgenet_sys_writel(priv,
				    PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
		break;

	case PHY_INTERFACE_MODE_REVMII:
		phy_name = "external RvMII";
		/* of_mdiobus_register took care of reading the 'max-speed'
		 * PHY property for us, effectively limiting the PHY supported
		 * capabilities, use that knowledge to also configure the
		 * Reverse MII interface correctly.
		 */
		if ((phydev->supported & PHY_BASIC_FEATURES) ==
				PHY_BASIC_FEATURES)
			port_ctrl = PORT_MODE_EXT_RVMII_25;
		else
			port_ctrl = PORT_MODE_EXT_RVMII_50;
		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
		break;

	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 * (requires PCB or receiver-side delay)
		 * RGMII: Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = BIT(16);
		/* fall through */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (id_mode_dis)
			phy_name = "external RGMII (no delay)";
		else
			phy_name = "external RGMII (TX delay)";
		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
		reg |= RGMII_MODE_EN | id_mode_dis;
		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
		bcmgenet_sys_writel(priv,
				    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
		break;

	default:
		dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
		return -EINVAL;
	}

	if (init)
		dev_info(kdev, "configuring instance for %s\n", phy_name);

	return 0;
}
/* Attach the GENET MAC to its PHY.
 *
 * Registers a fixed-link "PHY" from the MAC's own DT node when no
 * phy-handle was found, connects to the PHY with bcmgenet_mii_setup()
 * as the link-change callback, then programs the port multiplexer via
 * bcmgenet_mii_config(true).
 *
 * Returns 0 on success (or if a PHY is already attached), -ENODEV if
 * the PHY cannot be connected, or a negative errno from fixed-link
 * registration / bcmgenet_mii_config().
 */
static int bcmgenet_mii_probe(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device_node *dn = priv->pdev->dev.of_node;
	struct phy_device *phydev;
	u32 phy_flags;
	int ret;
	/* Idempotent: a second call (e.g. repeated open) is a no-op */
	if (priv->phydev) {
		pr_info("PHY already attached\n");
		return 0;
	}
	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret)
			return ret;
		priv->phy_dn = of_node_get(dn);
	}
	/* Communicate the integrated PHY revision */
	phy_flags = priv->gphy_rev;
	/* Initialize link state variables that bcmgenet_mii_setup() uses */
	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
	phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
				phy_flags, priv->phy_interface);
	if (!phydev) {
		pr_err("could not attach to PHY\n");
		return -ENODEV;
	}
	priv->phydev = phydev;
	/* Configure port multiplexer based on what the probed PHY device since
	 * reading the 'max-speed' property determines the maximum supported
	 * PHY speed which is needed for bcmgenet_mii_config() to configure
	 * things appropriately.
	 */
	ret = bcmgenet_mii_config(dev, true);
	if (ret) {
		phy_disconnect(priv->phydev);
		return ret;
	}
	/* Advertise everything the (possibly mii_config-masked) PHY supports */
	phydev->advertising = phydev->supported;
	/* The internal PHY has its link interrupts routed to the
	 * Ethernet MAC ISRs
	 */
	if (phy_is_internal(priv->phydev))
		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
	else
		priv->mii_bus->irq[phydev->addr] = PHY_POLL;
	pr_info("attached PHY at address %d [%s]\n",
		phydev->addr, phydev->drv->name);
	return 0;
}
static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
{
struct mii_bus *bus;
if (priv->mii_bus)
return 0;
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus) {
pr_err("failed to allocate\n");
return -ENOMEM;
}
bus = priv->mii_bus;
bus->priv = priv->dev;
bus->name = "bcmgenet MII bus";
bus->parent = &priv->pdev->dev;
bus->read = bcmgenet_mii_read;
bus->write = bcmgenet_mii_write;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
priv->pdev->name, priv->pdev->id);
bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
if (!bus->irq) {
mdiobus_free(priv->mii_bus);
return -ENOMEM;
}
return 0;
}
/* Locate the versioned MDIO bus node in DT, register the MDIO bus on it,
 * and cache the PHY phandle and link mode for later use.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV if the
 * MDIO node is missing, or the error from of_mdiobus_register().
 *
 * Fix: of_find_compatible_node() returns a node with an elevated
 * refcount; the original leaked that reference when registration
 * failed. Drop it with of_node_put() on the error path.
 */
static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct device *kdev = &priv->pdev->dev;
	struct device_node *mdio_dn;
	char *compat;
	int ret;

	compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
	if (!compat)
		return -ENOMEM;

	mdio_dn = of_find_compatible_node(dn, NULL, compat);
	kfree(compat);
	if (!mdio_dn) {
		dev_err(kdev, "unable to find MDIO bus node\n");
		return -ENODEV;
	}

	ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
	if (ret) {
		dev_err(kdev, "failed to register MDIO bus\n");
		/* Drop the reference taken by of_find_compatible_node() */
		of_node_put(mdio_dn);
		return ret;
	}
	/* NOTE(review): on success the mdio_dn reference is kept for the
	 * lifetime of the registered bus — confirm whether a matching
	 * of_node_put() belongs in bcmgenet_mii_exit().
	 */

	/* Fetch the PHY phandle */
	priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);

	/* Get the link mode */
	priv->phy_interface = of_get_phy_mode(dn);

	return 0;
}
int bcmgenet_mii_init(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int ret;
ret = bcmgenet_mii_alloc(priv);
if (ret)
return ret;
ret = bcmgenet_mii_of_init(priv);
if (ret)
goto out_free;
ret = bcmgenet_mii_probe(dev);
if (ret)
goto out;
return 0;
out:
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
out_free:
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
return ret;
}
void bcmgenet_mii_exit(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
}

File diff suppressed because it is too large Load diff