Fixed MTP to work with TWRP

Author: awab228 · 2018-06-19 23:16:04 +02:00 · commit f6dfaef42e

50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,97 @@
#
# Freescale device configuration
#
config NET_VENDOR_FREESCALE
bool "Freescale devices"
default y
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Freescale devices. If you say Y, you will be
asked for your specific card in the following questions.
if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
select PTP_1588_CLOCK
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
depends on PPC_MPC52xx && PPC_BESTCOMM
select CRC32
select PHYLIB
select PPC_BESTCOMM_FEC
---help---
This option enables support for the MPC5200's on-chip
Fast Ethernet Controller.
If compiled as a module, it will be called fec_mpc52xx.
config FEC_MPC52xx_MDIO
bool "FEC MPC52xx MDIO bus driver"
depends on FEC_MPC52xx
default y
---help---
The MPC5200's FEC can connect to the Ethernet either with
an external MII PHY chip or a 10 Mbps 7-wire interface
(Motorola industry standard).
If your board uses an external PHY connected to the FEC, enable this.
If unsure, enable it.
If compiled as a module, it will be called fec_mpc52xx_phy.
source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
depends on FSL_SOC
select PHYLIB
---help---
This driver supports the MDIO bus used by the gianfar and UCC drivers.
config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
depends on FSL_SOC
select PHYLIB
select OF_MDIO
---help---
This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE
select FSL_PQ_MDIO
select PHYLIB
---help---
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
config UGETH_TX_ON_DEMAND
bool "Transmit on Demand support"
depends on UCC_GETH
config GIANFAR
tristate "Gianfar Ethernet"
depends on FSL_SOC
select FSL_PQ_MDIO
select PHYLIB
select CRC32
---help---
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
endif # NET_VENDOR_FREESCALE


@@ -0,0 +1,19 @@
#
# Makefile for the Freescale network device drivers.
#
obj-$(CONFIG_FEC) += fec.o
fec-objs := fec_main.o fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
endif
obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o


@@ -0,0 +1,555 @@
/****************************************************************************/
/*
* fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC
* processors.
*
* (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2000-2001, Lineo (www.lineo.com)
*/
/****************************************************************************/
#ifndef FEC_H
#define FEC_H
/****************************************************************************/
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
* of the ColdFire!
*/
#define FEC_IEVENT 0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
#define FEC_ECNTRL 0x024 /* Ethernet control reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */
#define FEC_R_CNTRL 0x084 /* Receive control reg */
#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */
#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
#define FEC_OPD 0x0ec /* Opcode + Pause duration */
#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */
#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */
#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */
#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */
#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_RACC 0x1C4 /* Receive Accelerator function */
#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */
#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */
#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */
#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
#define BM_MIIGSK_CFGR_MII 0x00
#define BM_MIIGSK_CFGR_RMII 0x01
#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */
#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */
#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
#define RMON_T_COL 0x224 /* RMON TX collision count */
#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */
#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */
#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
#define RMON_T_OCTETS 0x244 /* RMON TX octets */
#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */
#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */
#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */
#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */
#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */
#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
#define RMON_R_RESVD_O 0x2A4 /* Reserved */
#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */
#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */
#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */
#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */
#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */
#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */
#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */
#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */
#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */
#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */
#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */
#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */
#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */
#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */
#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */
#else
#define FEC_ECNTRL 0x000 /* Ethernet control reg */
#define FEC_IEVENT 0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_IVEC 0x00c /* Interrupt vec status reg */
#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0
#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0
#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0
#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
#define FEC_R_FSTART 0x090 /* FIFO receive start reg */
#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */
#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */
#define FEC_R_CNTRL 0x104 /* Receive control reg */
#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */
#define FEC_X_CNTRL 0x144 /* Transmit Control reg */
#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */
#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */
#define FEC_R_DES_START_1 FEC_R_DES_START_0
#define FEC_R_DES_START_2 FEC_R_DES_START_0
#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
#define FEC_X_DES_START_1 FEC_X_DES_START_0
#define FEC_X_DES_START_2 FEC_X_DES_START_0
#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
/* These registers do not exist on the real chip.
* They are defined only so that the driver still builds.
*/
#define FEC_RCMR_1 0xFFF
#define FEC_RCMR_2 0xFFF
#define FEC_DMA_CFG_1 0xFFF
#define FEC_DMA_CFG_2 0xFFF
#define FEC_TXIC0 0xFFF
#define FEC_TXIC1 0xFFF
#define FEC_TXIC2 0xFFF
#define FEC_RXIC0 0xFFF
#define FEC_RXIC1 0xFFF
#define FEC_RXIC2 0xFFF
#endif /* CONFIG_M5272 */
/*
* Define the buffer descriptor structure.
*/
#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
};
#else
struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned short cbd_datlen; /* Data length */
unsigned long cbd_bufaddr; /* Buffer address */
};
#endif
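/* Note (assumption, not stated in this header): the swapped halfword order
* above presumably reflects the little-endian view that the i.MX/MXS cores
* have of the same 32-bit descriptor words that the big-endian ColdFire
* parts see with cbd_sc in the upper half.
*/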
struct bufdesc_ex {
struct bufdesc desc;
unsigned long cbd_esc;
unsigned long cbd_prot;
unsigned long cbd_bdu;
unsigned long ts;
unsigned short res0[4];
};
/*
* The following definitions are courtesy of commproc.h, which were
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
*/
#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
#define BD_SC_BR ((ushort)0x0020) /* Break received */
#define BD_SC_FR ((ushort)0x0010) /* Framing error */
#define BD_SC_PR ((ushort)0x0008) /* Parity error */
#define BD_SC_OV ((ushort)0x0002) /* Overrun */
#define BD_SC_CD ((ushort)0x0001) /* ?? */
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
#define BD_ENET_RX_WRAP ((ushort)0x2000)
#define BD_ENET_RX_INTR ((ushort)0x1000)
#define BD_ENET_RX_LAST ((ushort)0x0800)
#define BD_ENET_RX_FIRST ((ushort)0x0400)
#define BD_ENET_RX_MISS ((ushort)0x0100)
#define BD_ENET_RX_LG ((ushort)0x0020)
#define BD_ENET_RX_NO ((ushort)0x0010)
#define BD_ENET_RX_SH ((ushort)0x0008)
#define BD_ENET_RX_CR ((ushort)0x0004)
#define BD_ENET_RX_OV ((ushort)0x0002)
#define BD_ENET_RX_CL ((ushort)0x0001)
#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
/* Enhanced buffer descriptor control/status used by Ethernet receive */
#define BD_ENET_RX_VLAN 0x00000004
/* Buffer descriptor control/status used by Ethernet transmit.
*/
#define BD_ENET_TX_READY ((ushort)0x8000)
#define BD_ENET_TX_PAD ((ushort)0x4000)
#define BD_ENET_TX_WRAP ((ushort)0x2000)
#define BD_ENET_TX_INTR ((ushort)0x1000)
#define BD_ENET_TX_LAST ((ushort)0x0800)
#define BD_ENET_TX_TC ((ushort)0x0400)
#define BD_ENET_TX_DEF ((ushort)0x0200)
#define BD_ENET_TX_HB ((ushort)0x0100)
#define BD_ENET_TX_LC ((ushort)0x0080)
#define BD_ENET_TX_RL ((ushort)0x0040)
#define BD_ENET_TX_RCMASK ((ushort)0x003c)
#define BD_ENET_TX_UN ((ushort)0x0002)
#define BD_ENET_TX_CSL ((ushort)0x0001)
#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
/*enhanced buffer descriptor control/status used by Ethernet transmit*/
#define BD_ENET_TX_INT 0x40000000
#define BD_ENET_TX_TS 0x20000000
#define BD_ENET_TX_PINS 0x10000000
#define BD_ENET_TX_IINS 0x08000000
/* This device has up to three irqs on some platforms */
#define FEC_IRQ_NUM 3
/* Maximum number of queues supported
* ENET with AVB IP can support up to 3 independent tx queues and rx queues.
* The user can select a queue count that is less than or equal to 3.
*/
#define FEC_ENET_MAX_TX_QS 3
#define FEC_ENET_MAX_RX_QS 3
#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
((X == 2) ? \
FEC_R_DES_START_2 : FEC_R_DES_START_0))
#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
((X == 2) ? \
FEC_X_DES_START_2 : FEC_X_DES_START_0))
#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
((X == 2) ? \
FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
((X == 2) ? \
FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
#define DMA_CLASS_EN (1 << 16)
#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
#define IDLE_SLOPE_MASK 0xFFFF
#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
#define RCMR_MATCHEN (0x1 << 16)
#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2))
#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
pool. The code may assume these are powers of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
*/
#define FEC_ENET_RX_PAGES 256
#define FEC_ENET_RX_FRSIZE 2048
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE 512 /* Must be power of two */
#define TX_RING_MOD_MASK 511 /* for this to work */
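/*
 * Illustrative sketch (not part of this driver): with a power-of-two ring,
 * the next-index computation reduces to a mask instead of a modulo, which
 * is why TX_RING_SIZE must stay a power of two and TX_RING_MOD_MASK must
 * equal TX_RING_SIZE - 1.
 */
static inline unsigned int fec_tx_next_index(unsigned int index)
{
	return (index + 1) & TX_RING_MOD_MASK;	/* same as % TX_RING_SIZE */
}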
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
#define BD_ENET_RX_ICE 0x00000020
#define BD_ENET_RX_PCR 0x00000010
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
/* Interrupt events/masks. */
#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */
#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */
#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */
#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */
#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */
#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */
#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
#define FEC_ITR_CLK_SEL (0x1 << 30)
#define FEC_ITR_EN (0x1 << 31)
#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20)
#define FEC_ITR_ICTT(X) ((X) & 0xFFFF)
#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
#define FEC_VLAN_TAG_LEN 0x04
#define FEC_ETHTYPE_LEN 0x02
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC (1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME (1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET (1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT (1 << 3)
/* Controller has extended buffer descriptors */
#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
/* Controller has hardware checksum support */
#define FEC_QUIRK_HAS_CSUM (1 << 5)
/* Controller has hardware vlan support */
#define FEC_QUIRK_HAS_VLAN (1 << 6)
/* ENET IP errata ERR006358
*
* If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
* detected as not set during a prior frame transmission, then the
* ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
* were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
* frames not being transmitted until there is a 0-to-1 transition on
* ENET_TDAR[TDAR].
*/
#define FEC_QUIRK_ERR006358 (1 << 7)
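/*
 * Illustrative sketch only, not code from this commit: the kind of software
 * workaround a driver gates on FEC_QUIRK_ERR006358 -- after reclaiming
 * completed descriptors, re-arm transmission if ready BDs remain, forcing
 * the 0-to-1 transition on ENET_TDAR[TDAR] described above. The helper name
 * and the ring_nonempty argument are assumptions; the real handling lives in
 * fec_main.c, whose diff is suppressed in this commit view.
 */
static inline void fec_err006358_rearm(void __iomem *hwp, bool ring_nonempty)
{
	if (ring_nonempty)
		writel(0, hwp + FEC_X_DES_ACTIVE_0);	/* any write sets TDAR */
}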
/* ENET IP hw AVB
*
* i.MX6SX ENET IP adds Audio Video Bridging (AVB) feature support.
* - Two class indicators on receive with configurable priority
* - Two class indicators and line speed timer on transmit, allowing
* implementation of credit-based class shapers externally
* - Additional DMA registers provisioned to allow managing up to 3
* independent rings
*/
#define FEC_QUIRK_HAS_AVB (1 << 8)
/* There is a TDAR race condition for multiQ when the software sets TDAR
* and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
* This will cause the udma_tx and udma_tx_arbiter state machines to hang.
* The issue exists in the i.MX6SX ENET IP.
*/
#define FEC_QUIRK_ERR007885 (1 << 9)
/* The ENET Block Guide chapter for the i.MX6SX (PELE) describes one issue:
* after setting ENET_ATCR[Capture], some clock cycles are needed before the
* counter value is captured in the register clock domain.
* The wait time is at least 6 cycles of the slower of the register clock
* and the 1588 clock. The 1588 ts_clk is fixed to 25 MHz and the register
* clock is 66 MHz, so the wait time must be greater than 240 ns
* (40 ns * 6).
*/
#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
struct fec_enet_priv_tx_q {
int index;
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
dma_addr_t bd_dma;
struct bufdesc *tx_bd_base;
uint tx_ring_size;
unsigned short tx_stop_threshold;
unsigned short tx_wake_threshold;
struct bufdesc *cur_tx;
struct bufdesc *dirty_tx;
char *tso_hdrs;
dma_addr_t tso_hdrs_dma;
};
struct fec_enet_priv_rx_q {
int index;
struct sk_buff *rx_skbuff[RX_RING_SIZE];
dma_addr_t bd_dma;
struct bufdesc *rx_bd_base;
uint rx_ring_size;
struct bufdesc *cur_rx;
};
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
* The dirty_tx tracks the current buffer that is being sent by the
* controller. The cur_tx and dirty_tx are equal under both completely
* empty and completely full conditions. The empty/ready indicator in
* the buffer descriptor determines the actual condition.
*/
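/*
 * Illustrative sketch (not part of this driver): when cur_tx == dirty_tx the
 * ring is either completely empty or completely full, and the READY bit of
 * the descriptor at dirty_tx is what tells the two cases apart, as the
 * comment above notes.
 */
static inline bool fec_tx_ring_full(const struct bufdesc *cur_tx,
				    const struct bufdesc *dirty_tx)
{
	return cur_tx == dirty_tx && (dirty_tx->cbd_sc & BD_ENET_TX_READY);
}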
struct fec_enet_private {
/* Hardware registers of the FEC device */
void __iomem *hwp;
struct net_device *netdev;
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_ref;
struct clk *clk_enet_out;
struct clk *clk_ptp;
bool ptp_clk_on;
struct mutex ptp_clk_mutex;
unsigned int num_tx_queues;
unsigned int num_rx_queues;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
unsigned long work_tx;
unsigned long work_rx;
unsigned long work_ts;
unsigned long work_mdio;
unsigned short bufdesc_size;
struct platform_device *pdev;
int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
struct device_node *phy_node;
int link;
int full_duplex;
int speed;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
int bufdesc_ex;
int pause_flag;
struct napi_struct napi;
int csum_flags;
struct work_struct tx_timeout_work;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
int rx_hwtstamp_filter;
u32 base_incval;
u32 cycle_speed;
int hwts_rx_en;
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
unsigned int tx_align;
unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
unsigned int rx_time_itr;
unsigned int tx_pkts_itr;
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
u32 rx_copybreak;
/* ptp clock period in ns*/
unsigned int ptp_inc;
/* pps */
int pps_channel;
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
};
void fec_ptp_init(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
/****************************************************************************/
#endif /* FEC_H */

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,294 @@
/*
* drivers/net/ethernet/freescale/fec_mpc52xx.h
*
* Driver for the MPC5200 Fast Ethernet Controller
*
* Author: Dale Farnsworth <dfarnsworth@mvista.com>
*
* 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#ifndef __DRIVERS_NET_MPC52XX_FEC_H__
#define __DRIVERS_NET_MPC52XX_FEC_H__
#include <linux/phy.h>
/* Tunable constant */
/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */
#define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */
#define FEC_RX_NUM_BD 256
#define FEC_TX_NUM_BD 64
#define FEC_RESET_DELAY 50 /* uS */
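/* (400*HZ)/1000 expresses a 400 ms watchdog timeout in jiffies,
 * independent of the configured HZ value.
 */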
#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000)
/* ======================================================================== */
/* Hardware register sets & bits */
/* ======================================================================== */
struct mpc52xx_fec {
u32 fec_id; /* FEC + 0x000 */
u32 ievent; /* FEC + 0x004 */
u32 imask; /* FEC + 0x008 */
u32 reserved0[1]; /* FEC + 0x00C */
u32 r_des_active; /* FEC + 0x010 */
u32 x_des_active; /* FEC + 0x014 */
u32 r_des_active_cl; /* FEC + 0x018 */
u32 x_des_active_cl; /* FEC + 0x01C */
u32 ivent_set; /* FEC + 0x020 */
u32 ecntrl; /* FEC + 0x024 */
u32 reserved1[6]; /* FEC + 0x028-03C */
u32 mii_data; /* FEC + 0x040 */
u32 mii_speed; /* FEC + 0x044 */
u32 mii_status; /* FEC + 0x048 */
u32 reserved2[5]; /* FEC + 0x04C-05C */
u32 mib_data; /* FEC + 0x060 */
u32 mib_control; /* FEC + 0x064 */
u32 reserved3[6]; /* FEC + 0x068-7C */
u32 r_activate; /* FEC + 0x080 */
u32 r_cntrl; /* FEC + 0x084 */
u32 r_hash; /* FEC + 0x088 */
u32 r_data; /* FEC + 0x08C */
u32 ar_done; /* FEC + 0x090 */
u32 r_test; /* FEC + 0x094 */
u32 r_mib; /* FEC + 0x098 */
u32 r_da_low; /* FEC + 0x09C */
u32 r_da_high; /* FEC + 0x0A0 */
u32 reserved4[7]; /* FEC + 0x0A4-0BC */
u32 x_activate; /* FEC + 0x0C0 */
u32 x_cntrl; /* FEC + 0x0C4 */
u32 backoff; /* FEC + 0x0C8 */
u32 x_data; /* FEC + 0x0CC */
u32 x_status; /* FEC + 0x0D0 */
u32 x_mib; /* FEC + 0x0D4 */
u32 x_test; /* FEC + 0x0D8 */
u32 fdxfc_da1; /* FEC + 0x0DC */
u32 fdxfc_da2; /* FEC + 0x0E0 */
u32 paddr1; /* FEC + 0x0E4 */
u32 paddr2; /* FEC + 0x0E8 */
u32 op_pause; /* FEC + 0x0EC */
u32 reserved5[4]; /* FEC + 0x0F0-0FC */
u32 instr_reg; /* FEC + 0x100 */
u32 context_reg; /* FEC + 0x104 */
u32 test_cntrl; /* FEC + 0x108 */
u32 acc_reg; /* FEC + 0x10C */
u32 ones; /* FEC + 0x110 */
u32 zeros; /* FEC + 0x114 */
u32 iaddr1; /* FEC + 0x118 */
u32 iaddr2; /* FEC + 0x11C */
u32 gaddr1; /* FEC + 0x120 */
u32 gaddr2; /* FEC + 0x124 */
u32 random; /* FEC + 0x128 */
u32 rand1; /* FEC + 0x12C */
u32 tmp; /* FEC + 0x130 */
u32 reserved6[3]; /* FEC + 0x134-13C */
u32 fifo_id; /* FEC + 0x140 */
u32 x_wmrk; /* FEC + 0x144 */
u32 fcntrl; /* FEC + 0x148 */
u32 r_bound; /* FEC + 0x14C */
u32 r_fstart; /* FEC + 0x150 */
u32 r_count; /* FEC + 0x154 */
u32 r_lag; /* FEC + 0x158 */
u32 r_read; /* FEC + 0x15C */
u32 r_write; /* FEC + 0x160 */
u32 x_count; /* FEC + 0x164 */
u32 x_lag; /* FEC + 0x168 */
u32 x_retry; /* FEC + 0x16C */
u32 x_write; /* FEC + 0x170 */
u32 x_read; /* FEC + 0x174 */
u32 reserved7[2]; /* FEC + 0x178-17C */
u32 fm_cntrl; /* FEC + 0x180 */
u32 rfifo_data; /* FEC + 0x184 */
u32 rfifo_status; /* FEC + 0x188 */
u32 rfifo_cntrl; /* FEC + 0x18C */
u32 rfifo_lrf_ptr; /* FEC + 0x190 */
u32 rfifo_lwf_ptr; /* FEC + 0x194 */
u32 rfifo_alarm; /* FEC + 0x198 */
u32 rfifo_rdptr; /* FEC + 0x19C */
u32 rfifo_wrptr; /* FEC + 0x1A0 */
u32 tfifo_data; /* FEC + 0x1A4 */
u32 tfifo_status; /* FEC + 0x1A8 */
u32 tfifo_cntrl; /* FEC + 0x1AC */
u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */
u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */
u32 tfifo_alarm; /* FEC + 0x1B8 */
u32 tfifo_rdptr; /* FEC + 0x1BC */
u32 tfifo_wrptr; /* FEC + 0x1C0 */
u32 reset_cntrl; /* FEC + 0x1C4 */
u32 xmit_fsm; /* FEC + 0x1C8 */
u32 reserved8[3]; /* FEC + 0x1CC-1D4 */
u32 rdes_data0; /* FEC + 0x1D8 */
u32 rdes_data1; /* FEC + 0x1DC */
u32 r_length; /* FEC + 0x1E0 */
u32 x_length; /* FEC + 0x1E4 */
u32 x_addr; /* FEC + 0x1E8 */
u32 cdes_data; /* FEC + 0x1EC */
u32 status; /* FEC + 0x1F0 */
u32 dma_control; /* FEC + 0x1F4 */
u32 des_cmnd; /* FEC + 0x1F8 */
u32 data; /* FEC + 0x1FC */
u32 rmon_t_drop; /* FEC + 0x200 */
u32 rmon_t_packets; /* FEC + 0x204 */
u32 rmon_t_bc_pkt; /* FEC + 0x208 */
u32 rmon_t_mc_pkt; /* FEC + 0x20C */
u32 rmon_t_crc_align; /* FEC + 0x210 */
u32 rmon_t_undersize; /* FEC + 0x214 */
u32 rmon_t_oversize; /* FEC + 0x218 */
u32 rmon_t_frag; /* FEC + 0x21C */
u32 rmon_t_jab; /* FEC + 0x220 */
u32 rmon_t_col; /* FEC + 0x224 */
u32 rmon_t_p64; /* FEC + 0x228 */
u32 rmon_t_p65to127; /* FEC + 0x22C */
u32 rmon_t_p128to255; /* FEC + 0x230 */
u32 rmon_t_p256to511; /* FEC + 0x234 */
u32 rmon_t_p512to1023; /* FEC + 0x238 */
u32 rmon_t_p1024to2047; /* FEC + 0x23C */
u32 rmon_t_p_gte2048; /* FEC + 0x240 */
u32 rmon_t_octets; /* FEC + 0x244 */
u32 ieee_t_drop; /* FEC + 0x248 */
u32 ieee_t_frame_ok; /* FEC + 0x24C */
u32 ieee_t_1col; /* FEC + 0x250 */
u32 ieee_t_mcol; /* FEC + 0x254 */
u32 ieee_t_def; /* FEC + 0x258 */
u32 ieee_t_lcol; /* FEC + 0x25C */
u32 ieee_t_excol; /* FEC + 0x260 */
u32 ieee_t_macerr; /* FEC + 0x264 */
u32 ieee_t_cserr; /* FEC + 0x268 */
u32 ieee_t_sqe; /* FEC + 0x26C */
u32 t_fdxfc; /* FEC + 0x270 */
u32 ieee_t_octets_ok; /* FEC + 0x274 */
u32 reserved9[2]; /* FEC + 0x278-27C */
u32 rmon_r_drop; /* FEC + 0x280 */
u32 rmon_r_packets; /* FEC + 0x284 */
u32 rmon_r_bc_pkt; /* FEC + 0x288 */
u32 rmon_r_mc_pkt; /* FEC + 0x28C */
u32 rmon_r_crc_align; /* FEC + 0x290 */
u32 rmon_r_undersize; /* FEC + 0x294 */
u32 rmon_r_oversize; /* FEC + 0x298 */
u32 rmon_r_frag; /* FEC + 0x29C */
u32 rmon_r_jab; /* FEC + 0x2A0 */
u32 rmon_r_resvd_0; /* FEC + 0x2A4 */
u32 rmon_r_p64; /* FEC + 0x2A8 */
u32 rmon_r_p65to127; /* FEC + 0x2AC */
u32 rmon_r_p128to255; /* FEC + 0x2B0 */
u32 rmon_r_p256to511; /* FEC + 0x2B4 */
u32 rmon_r_p512to1023; /* FEC + 0x2B8 */
u32 rmon_r_p1024to2047; /* FEC + 0x2BC */
u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */
u32 rmon_r_octets; /* FEC + 0x2C4 */
u32 ieee_r_drop; /* FEC + 0x2C8 */
u32 ieee_r_frame_ok; /* FEC + 0x2CC */
u32 ieee_r_crc; /* FEC + 0x2D0 */
u32 ieee_r_align; /* FEC + 0x2D4 */
u32 r_macerr; /* FEC + 0x2D8 */
u32 r_fdxfc; /* FEC + 0x2DC */
u32 ieee_r_octets_ok; /* FEC + 0x2E0 */
u32 reserved10[7]; /* FEC + 0x2E4-2FC */
u32 reserved11[64]; /* FEC + 0x300-3FF */
};
#define FEC_MIB_DISABLE 0x80000000
#define FEC_IEVENT_HBERR 0x80000000
#define FEC_IEVENT_BABR 0x40000000
#define FEC_IEVENT_BABT 0x20000000
#define FEC_IEVENT_GRA 0x10000000
#define FEC_IEVENT_TFINT 0x08000000
#define FEC_IEVENT_MII 0x00800000
#define FEC_IEVENT_LATE_COL 0x00200000
#define FEC_IEVENT_COL_RETRY_LIM 0x00100000
#define FEC_IEVENT_XFIFO_UN 0x00080000
#define FEC_IEVENT_XFIFO_ERROR 0x00040000
#define FEC_IEVENT_RFIFO_ERROR 0x00020000
#define FEC_IMASK_HBERR 0x80000000
#define FEC_IMASK_BABR 0x40000000
#define FEC_IMASK_BABT 0x20000000
#define FEC_IMASK_GRA 0x10000000
#define FEC_IMASK_MII 0x00800000
#define FEC_IMASK_LATE_COL 0x00200000
#define FEC_IMASK_COL_RETRY_LIM 0x00100000
#define FEC_IMASK_XFIFO_UN 0x00080000
#define FEC_IMASK_XFIFO_ERROR 0x00040000
#define FEC_IMASK_RFIFO_ERROR 0x00020000
/* all but MII, which is enabled separately */
#define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \
FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \
FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \
FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR)
#define FEC_RCNTRL_MAX_FL_SHIFT 16
#define FEC_RCNTRL_LOOP 0x01
#define FEC_RCNTRL_DRT 0x02
#define FEC_RCNTRL_MII_MODE 0x04
#define FEC_RCNTRL_PROM 0x08
#define FEC_RCNTRL_BC_REJ 0x10
#define FEC_RCNTRL_FCE 0x20
#define FEC_TCNTRL_GTS 0x00000001
#define FEC_TCNTRL_HBC 0x00000002
#define FEC_TCNTRL_FDEN 0x00000004
#define FEC_TCNTRL_TFC_PAUSE 0x00000008
#define FEC_TCNTRL_RFC_PAUSE 0x00000010
#define FEC_ECNTRL_RESET 0x00000001
#define FEC_ECNTRL_ETHER_EN 0x00000002
#define FEC_MII_DATA_ST 0x40000000 /* Start frame */
#define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */
#define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */
#define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */
#define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */
#define FEC_MII_DATA_TA 0x00020000 /* Turnaround */
#define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */
#define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)
#define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA)
#define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */
#define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */
#define FEC_PADDR2_TYPE 0x8808
#define FEC_OP_PAUSE_OPCODE 0x00010000
#define FEC_FIFO_WMRK_256B 0x3
#define FEC_FIFO_STATUS_ERR 0x00400000
#define FEC_FIFO_STATUS_UF 0x00200000
#define FEC_FIFO_STATUS_OF 0x00100000
#define FEC_FIFO_CNTRL_FRAME 0x08000000
#define FEC_FIFO_CNTRL_LTG_7 0x07000000
#define FEC_RESET_CNTRL_RESET_FIFO 0x02000000
#define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000
#define FEC_XMIT_FSM_APPEND_CRC 0x02000000
#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
extern struct platform_driver mpc52xx_fec_mdio_driver;
#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */


@@ -0,0 +1,158 @@
/*
* Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
*
* Copyright (C) 2007 Domen Puncer, Telargo, Inc.
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include "fec_mpc52xx.h"
struct mpc52xx_fec_mdio_priv {
struct mpc52xx_fec __iomem *regs;
int mdio_irqs[PHY_MAX_ADDR];
};
static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
int reg, u32 value)
{
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
struct mpc52xx_fec __iomem *fec = priv->regs;
int tries = 3;
value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
out_be32(&fec->ievent, FEC_IEVENT_MII);
out_be32(&fec->mii_data, value);
/* wait for it to finish, this takes about 23 us on lite5200b */
while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
msleep(1);
if (!tries)
return -ETIMEDOUT;
return value & FEC_MII_DATA_OP_RD ?
in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
}
static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
}
static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 data)
{
return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
data | FEC_MII_WRITE_FRAME);
}
static int mpc52xx_fec_mdio_probe(struct platform_device *of)
{
struct device *dev = &of->dev;
struct device_node *np = of->dev.of_node;
struct mii_bus *bus;
struct mpc52xx_fec_mdio_priv *priv;
struct resource res;
int err;
bus = mdiobus_alloc();
if (bus == NULL)
return -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
err = -ENOMEM;
goto out_free;
}
bus->name = "mpc52xx MII bus";
bus->read = mpc52xx_fec_mdio_read;
bus->write = mpc52xx_fec_mdio_write;
/* setup irqs */
bus->irq = priv->mdio_irqs;
/* setup registers */
err = of_address_to_resource(np, 0, &res);
if (err)
goto out_free;
priv->regs = ioremap(res.start, resource_size(&res));
if (priv->regs == NULL) {
err = -ENOMEM;
goto out_free;
}
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
bus->priv = priv;
bus->parent = dev;
dev_set_drvdata(dev, bus);
/* set MII speed */
out_be32(&priv->regs->mii_speed,
((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
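/* Worked example for the expression above (illustrative numbers, an
 * assumption rather than anything stated in this commit): with a 66 MHz
 * IPB bus clock, 66000000 >> 20 = 62 (roughly the frequency in MHz),
 * 62 / 5 = 12, and 12 << 1 = 24 is the value programmed into mii_speed
 * to derive the MDIO management clock from the bus clock.
 */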
err = of_mdiobus_register(bus, np);
if (err)
goto out_unmap;
return 0;
out_unmap:
iounmap(priv->regs);
out_free:
kfree(priv);
mdiobus_free(bus);
return err;
}
static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
return 0;
}
static struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "fsl,mpc5200b-mdio", },
{ .compatible = "fsl,mpc5200-mdio", },
{ .compatible = "mpc5200b-fec-phy", },
{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
struct platform_driver mpc52xx_fec_mdio_driver = {
.driver = {
.name = "mpc5200b-fec-phy",
.owner = THIS_MODULE,
.of_match_table = mpc52xx_fec_mdio_match,
},
.probe = mpc52xx_fec_mdio_probe,
.remove = mpc52xx_fec_mdio_remove,
};
/* let fec driver call it, since this has to be registered before it */
EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
MODULE_LICENSE("Dual BSD/GPL");


@@ -0,0 +1,654 @@
/*
* Fast Ethernet Controller (ENET) PTP driver for MX6x.
*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "fec.h"
/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE 0x00002000
#define FEC_T_CTRL_CAPTURE 0x00000800
#define FEC_T_CTRL_RESTART 0x00000200
#define FEC_T_CTRL_PERIOD_RST 0x00000030
#define FEC_T_CTRL_PERIOD_EN 0x00000010
#define FEC_T_CTRL_ENABLE 0x00000001
#define FEC_T_INC_MASK 0x0000007f
#define FEC_T_INC_OFFSET 0
#define FEC_T_INC_CORR_MASK 0x00007f00
#define FEC_T_INC_CORR_OFFSET 8
#define FEC_T_CTRL_PINPER 0x00000080
#define FEC_T_TF0_MASK 0x00000001
#define FEC_T_TF0_OFFSET 0
#define FEC_T_TF1_MASK 0x00000002
#define FEC_T_TF1_OFFSET 1
#define FEC_T_TF2_MASK 0x00000004
#define FEC_T_TF2_OFFSET 2
#define FEC_T_TF3_MASK 0x00000008
#define FEC_T_TF3_OFFSET 3
#define FEC_T_TDRE_MASK 0x00000001
#define FEC_T_TDRE_OFFSET 0
#define FEC_T_TMODE_MASK 0x0000003C
#define FEC_T_TMODE_OFFSET 2
#define FEC_T_TIE_MASK 0x00000040
#define FEC_T_TIE_OFFSET 6
#define FEC_T_TF_MASK 0x00000080
#define FEC_T_TF_OFFSET 7
#define FEC_ATIME_CTRL 0x400
#define FEC_ATIME 0x404
#define FEC_ATIME_EVT_OFFSET 0x408
#define FEC_ATIME_EVT_PERIOD 0x40c
#define FEC_ATIME_CORR 0x410
#define FEC_ATIME_INC 0x414
#define FEC_TS_TIMESTAMP 0x418
#define FEC_TGSR 0x604
#define FEC_TCSR(n) (0x608 + n * 0x08)
#define FEC_TCCR(n) (0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL 3
#define FEC_TMODE_TOGGLE 0x05
#define FEC_HIGH_PULSE 0x0F
#define FEC_CC_MULT (1 << 31)
#define FEC_COUNTER_PERIOD (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
#define FEC_CHANNLE_0 0
#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
* This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
unsigned long flags;
u32 val, tempval;
int inc;
struct timespec ts;
u64 ns;
u32 remainder;
val = 0;
if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
dev_err(&fep->pdev->dev, "No ptp stack is running\n");
return -EINVAL;
}
if (fep->pps_enable == enable)
return 0;
fep->pps_channel = DEFAULT_PPS_CHANNEL;
fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
inc = fep->ptp_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
if (enable) {
/* Clear the capture or output compare interrupt status, if any.
*/
writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
/* It is recommended to double-check that the TMODE field in the
* TCSR register is cleared before the first compare counter
* is written into the TCCR register. Just add a double check.
*/
val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
do {
val &= ~(FEC_T_TMODE_MASK);
writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
} while (val & FEC_T_TMODE_MASK);
/* Dummy read counter to update the counter */
timecounter_read(&fep->tc);
/* We want to place the first compare event at the next
* second boundary, so we need to know the current ptp time
* and how many nanoseconds remain until the next second.
* The time remaining before the next second is
* NSEC_PER_SEC - ts.tv_nsec; adding that to the current timer
* value lands exactly on the next second.
*/
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
tempval = readl(fep->hwp + FEC_ATIME);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
ts.tv_nsec = remainder;
/* The tempval is less than 3 seconds, and so val is less than
* 4 seconds. No overflow for 32bit calculation.
*/
val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
/* We also need to consider the case where the current time is
* very close to the second boundary, i.e. NSEC_PER_SEC
* - ts.tv_nsec is close to zero (for example 20 ns). Since the timer
* is still running while we calculate the first compare event, it is
* possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into the TCCR register. To avoid
* this, we set the compare event to the second after next. The
* timer is 31 bits wide and wraps after more than 2 seconds, so it
* is safe to target the second after next.
*/
val += NSEC_PER_SEC;
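/* A worked example of the case described above (illustrative numbers):
* if ts.tv_nsec is 999999980, only 20 ns remain before the second
* boundary, so aiming at tempval + 20 could be missed while this code
* is still running; adding another NSEC_PER_SEC schedules the first
* pulse for the second after next instead.
*/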
/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
* ptp counter, which may cause a 32-bit wrap. Since the amount
* added is less than 2 seconds, the wrap will not cause a
* problem. An offset bigger than fep->cc.mask would be an
* error.
*/
val &= fep->cc.mask;
writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
/* Calculate the second compare event timestamp */
fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
/* Enable the compare event when the counter overflows */
val = readl(fep->hwp + FEC_ATIME_CTRL);
val |= FEC_T_CTRL_PINPER;
writel(val, fep->hwp + FEC_ATIME_CTRL);
/* Compare channel setting. */
val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
val &= ~(1 << FEC_T_TDRE_OFFSET);
val &= ~(FEC_T_TMODE_MASK);
val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
/* Write the second compare event timestamp and calculate
* the third timestamp. Refer the TCCR register detail in the spec.
*/
writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
} else {
writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
}
fep->pps_enable = enable;
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
}
/**
* fec_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
static cycle_t fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
}
/**
* fec_ptp_start_cyclecounter - create the cycle counter from hw
* @ndev: network device
*
* this function initializes the timecounter and cyclecounter
* structures for use in generating a ns counter from the arbitrary
* fixed point cycles registers in the hardware.
*/
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned long flags;
int inc;
inc = 1000000000 / fep->cycle_speed;
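/* For example, with the 25 MHz 1588 ts_clk mentioned in fec.h this gives
* inc = 1000000000 / 25000000 = 40, i.e. the counter is advanced by
* 40 ns on every timestamping clock tick.
*/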
/* grab the ptp lock */
spin_lock_irqsave(&fep->tmreg_lock, flags);
/* 1ns counter */
writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
/* use 31-bit timer counter */
writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
fep->hwp + FEC_ATIME_CTRL);
memset(&fep->cc, 0, sizeof(fep->cc));
fep->cc.read = fec_ptp_read;
fep->cc.mask = CLOCKSOURCE_MASK(31);
fep->cc.shift = 31;
fep->cc.mult = FEC_CC_MULT;
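/* With mult = 1 << 31 and shift = 31, the timecounter conversion
* ns = (cycles * mult) >> shift reduces to ns = cycles: a 1:1 mapping,
* since the hardware counter programmed above already counts nanoseconds.
*/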
/* reset the ns time counter */
timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
/**
* fec_ptp_adjfreq - adjust ptp cycle frequency
* @ptp: the ptp clock structure
* @ppb: parts per billion adjustment from base
*
* Adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
*
* Because adjusting the ENET hardware frequency is complex,
* a software method is used to do it.
*/
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
unsigned long flags;
int neg_adj = 0;
u32 i, tmp;
u32 corr_inc, corr_period;
u32 corr_ns;
u64 lhs, rhs;
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
if (ppb == 0)
return 0;
if (ppb < 0) {
ppb = -ppb;
neg_adj = 1;
}
/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
* Try to find the corr_inc between 1 to fep->ptp_inc to
* meet adjustment requirement.
*/
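/* Worked example (illustrative numbers): for ppb = 100 and ptp_inc = 40,
* rhs = 100 * 40 = 4000, so the first iteration already has lhs >= rhs and
* picks corr_inc = 1, corr_period = 1000000000 / 4000 = 250000: one extra
* (or one missing) nanosecond every 250000 ticks of 40 ns, i.e. 100 ns per
* second, which is the requested 100 ppb.
*/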
lhs = NSEC_PER_SEC;
rhs = (u64)ppb * (u64)fep->ptp_inc;
for (i = 1; i <= fep->ptp_inc; i++) {
if (lhs >= rhs) {
corr_inc = i;
corr_period = div_u64(lhs, rhs);
break;
}
lhs += NSEC_PER_SEC;
}
/* Not found? Use the maximum correction - double speed,
* i.e. correct on every clock step.
*/
if (i > fep->ptp_inc) {
corr_inc = fep->ptp_inc;
corr_period = 1;
}
if (neg_adj)
corr_ns = fep->ptp_inc - corr_inc;
else
corr_ns = fep->ptp_inc + corr_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
writel(tmp, fep->hwp + FEC_ATIME_INC);
writel(corr_period, fep->hwp + FEC_ATIME_CORR);
/* dummy read to update the timer. */
timecounter_read(&fep->tc);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
}
/**
* fec_ptp_adjtime
* @ptp: the ptp clock structure
* @delta: offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
unsigned long flags;
u64 now;
u32 counter;
spin_lock_irqsave(&fep->tmreg_lock, flags);
now = timecounter_read(&fep->tc);
now += delta;
/* Get the timer value based on adjusted timestamp.
* Update the counter with the masked value.
*/
counter = now & fep->cc.mask;
writel(counter, fep->hwp + FEC_ATIME);
/* reset the timecounter */
timecounter_init(&fep->tc, &fep->cc, now);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
}
/**
* fec_ptp_gettime
* @ptp: the ptp clock structure
* @ts: timespec structure to hold the current time value
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec.
*/
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct fec_enet_private *adapter =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
u32 remainder;
unsigned long flags;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
ts->tv_nsec = remainder;
return 0;
}
/**
* fec_ptp_settime
* @ptp: the ptp clock structure
* @ts: the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
*/
static int fec_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
unsigned long flags;
u32 counter;
mutex_lock(&fep->ptp_clk_mutex);
/* Check the ptp clock */
if (!fep->ptp_clk_on) {
mutex_unlock(&fep->ptp_clk_mutex);
return -EINVAL;
}
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
/* Get the timer value based on timestamp.
* Update the counter with the masked value.
*/
counter = ns & fep->cc.mask;
spin_lock_irqsave(&fep->tmreg_lock, flags);
writel(counter, fep->hwp + FEC_ATIME);
timecounter_init(&fep->tc, &fep->cc, ns);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
mutex_unlock(&fep->ptp_clk_mutex);
return 0;
}
/**
* fec_ptp_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
*
*/
static int fec_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
ret = fec_ptp_enable_pps(fep, on);
return ret;
}
return -EOPNOTSUPP;
}
/**
* fec_ptp_set - control hardware time stamping
* @ndev: pointer to net_device
* @ifr: ioctl data
*/
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
break;
case HWTSTAMP_TX_ON:
fep->hwts_tx_en = 1;
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (fep->hwts_rx_en)
fep->hwts_rx_en = 0;
config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/*
* register RXMTRL must be set in order to do V1 packets,
* therefore it is not possible to time stamp both V1 Sync and
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
fep->hwts_rx_en = 1;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct hwtstamp_config config;
config.flags = 0;
config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
config.rx_filter = (fep->hwts_rx_en ?
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun,
* because the ENET hardware counter is only 32 bits wide and would wrap
* after roughly 4 s (2^32 ns) without an intermediate read
*/
static void fec_time_keep(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
u64 ns;
unsigned long flags;
mutex_lock(&fep->ptp_clk_mutex);
if (fep->ptp_clk_on) {
spin_lock_irqsave(&fep->tmreg_lock, flags);
ns = timecounter_read(&fep->tc);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
mutex_unlock(&fep->ptp_clk_mutex);
schedule_delayed_work(&fep->time_keep, HZ);
}
/**
* fec_ptp_init
* @pdev: the FEC platform device
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
* cyclecounter init routine and exits.
*/
void fec_ptp_init(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
fep->ptp_caps.owner = THIS_MODULE;
snprintf(fep->ptp_caps.name, 16, "fec ptp");
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
fep->ptp_caps.n_per_out = 0;
fep->ptp_caps.n_pins = 0;
fep->ptp_caps.pps = 1;
fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
fep->ptp_caps.gettime = fec_ptp_gettime;
fep->ptp_caps.settime = fec_ptp_settime;
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
spin_lock_init(&fep->tmreg_lock);
fec_ptp_start_cyclecounter(ndev);
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
if (IS_ERR(fep->ptp_clock)) {
fep->ptp_clock = NULL;
pr_err("ptp_clock_register failed\n");
}
schedule_delayed_work(&fep->time_keep, HZ);
}
/**
* fec_ptp_check_pps_event
* @fep: the fec_enet_private structure handle
*
* This function checks the pps event and reloads the timer compare counter.
*/
uint fec_ptp_check_pps_event(struct fec_enet_private *fep)
{
u32 val;
u8 channel = fep->pps_channel;
struct ptp_clock_event event;
val = readl(fep->hwp + FEC_TCSR(channel));
if (val & FEC_T_TF_MASK) {
/* Write the next-next compare value (not the next one, according
* to the spec) to the register
*/
writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
do {
writel(val, fep->hwp + FEC_TCSR(channel));
} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
/* Update the counter; */
fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
event.type = PTP_CLOCK_PPS;
ptp_clock_event(fep->ptp_clock, &event);
return 1;
}
return 0;
}


@@ -0,0 +1,34 @@
config FS_ENET
tristate "Freescale Ethernet Driver"
depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
select MII
select PHYLIB
config FS_ENET_MPC5121_FEC
def_bool y if (FS_ENET && PPC_MPC512x)
select FS_ENET_HAS_FEC
config FS_ENET_HAS_SCC
bool "Chip has an SCC usable for ethernet"
depends on FS_ENET && (CPM1 || CPM2)
default y
config FS_ENET_HAS_FCC
bool "Chip has an FCC usable for ethernet"
depends on FS_ENET && CPM2
default y
config FS_ENET_HAS_FEC
bool "Chip has an FEC usable for ethernet"
depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
select FS_ENET_MDIO_FEC
default y
config FS_ENET_MDIO_FEC
tristate "MDIO driver for FEC"
depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
config FS_ENET_MDIO_FCC
tristate "MDIO driver for FCC"
depends on FS_ENET && CPM2
select MDIO_BITBANG

View file

@@ -0,0 +1,14 @@
#
# Makefile for the Freescale Ethernet controllers
#
obj-$(CONFIG_FS_ENET) += fs_enet.o
fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
fs_enet-objs := fs_enet-main.o $(fs_enet-m)

View file

@@ -0,0 +1,44 @@
#ifndef FS_ENET_FEC_H
#define FS_ENET_FEC_H
/* CRC polynomium used by the FEC for the multicast group filtering */
#define FEC_CRC_POLY 0x04C11DB7
#define FEC_MAX_MULTICAST_ADDRS 64
/* Interrupt events/masks.
*/
#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
#define FEC_ENET_RXF 0x02000000U /* Full frame received */
#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
#define FEC_ENET_MII 0x00800000U /* MII interrupt */
#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
#define FEC_ECNTRL_PINMUX 0x00000004
#define FEC_ECNTRL_ETHER_EN 0x00000002
#define FEC_ECNTRL_RESET 0x00000001
/* RMII mode enabled only when MII_MODE bit is set too. */
#define FEC_RCNTRL_RMII_MODE (0x00000100 | \
FEC_RCNTRL_MII_MODE | FEC_RCNTRL_FCE)
#define FEC_RCNTRL_FCE 0x00000020
#define FEC_RCNTRL_BC_REJ 0x00000010
#define FEC_RCNTRL_PROM 0x00000008
#define FEC_RCNTRL_MII_MODE 0x00000004
#define FEC_RCNTRL_DRT 0x00000002
#define FEC_RCNTRL_LOOP 0x00000001
#define FEC_TCNTRL_FDEN 0x00000004
#define FEC_TCNTRL_HBC 0x00000002
#define FEC_TCNTRL_GTS 0x00000001
/*
* Delay to wait for FEC reset command to complete (in us)
*/
#define FEC_RESET_DELAY 50
#endif

File diff suppressed because it is too large

View file

@@ -0,0 +1,249 @@
#ifndef FS_ENET_H
#define FS_ENET_H
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/phy.h>
#include <linux/dma-mapping.h>
#include <linux/fs_enet_pd.h>
#include <asm/fs_pd.h>
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#endif
#if defined(CONFIG_FS_ENET_HAS_FEC)
#include <asm/cpm.h>
#if defined(CONFIG_FS_ENET_MPC5121_FEC)
/* MPC5121 FEC has different register layout */
struct fec {
u32 fec_reserved0;
u32 fec_ievent; /* Interrupt event reg */
u32 fec_imask; /* Interrupt mask reg */
u32 fec_reserved1;
u32 fec_r_des_active; /* Receive descriptor reg */
u32 fec_x_des_active; /* Transmit descriptor reg */
u32 fec_reserved2[3];
u32 fec_ecntrl; /* Ethernet control reg */
u32 fec_reserved3[6];
u32 fec_mii_data; /* MII manage frame reg */
u32 fec_mii_speed; /* MII speed control reg */
u32 fec_reserved4[7];
u32 fec_mib_ctrlstat; /* MIB control/status reg */
u32 fec_reserved5[7];
u32 fec_r_cntrl; /* Receive control reg */
u32 fec_reserved6[15];
u32 fec_x_cntrl; /* Transmit Control reg */
u32 fec_reserved7[7];
u32 fec_addr_low; /* Low 32bits MAC address */
u32 fec_addr_high; /* High 16bits MAC address */
u32 fec_opd; /* Opcode + Pause duration */
u32 fec_reserved8[10];
u32 fec_hash_table_high; /* High 32bits hash table */
u32 fec_hash_table_low; /* Low 32bits hash table */
u32 fec_grp_hash_table_high; /* High 32bits hash table */
u32 fec_grp_hash_table_low; /* Low 32bits hash table */
u32 fec_reserved9[7];
u32 fec_x_wmrk; /* FIFO transmit water mark */
u32 fec_reserved10;
u32 fec_r_bound; /* FIFO receive bound reg */
u32 fec_r_fstart; /* FIFO receive start reg */
u32 fec_reserved11[11];
u32 fec_r_des_start; /* Receive descriptor ring */
u32 fec_x_des_start; /* Transmit descriptor ring */
u32 fec_r_buff_size; /* Maximum receive buff size */
u32 fec_reserved12[26];
u32 fec_dma_control; /* DMA Endian and other ctrl */
};
#endif
struct fec_info {
struct fec __iomem *fecp;
u32 mii_speed;
};
#endif
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#endif
/* hw driver ops */
struct fs_ops {
int (*setup_data)(struct net_device *dev);
int (*allocate_bd)(struct net_device *dev);
void (*free_bd)(struct net_device *dev);
void (*cleanup_data)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
void (*adjust_link)(struct net_device *dev);
void (*restart)(struct net_device *dev);
void (*stop)(struct net_device *dev);
void (*napi_clear_rx_event)(struct net_device *dev);
void (*napi_enable_rx)(struct net_device *dev);
void (*napi_disable_rx)(struct net_device *dev);
void (*napi_clear_tx_event)(struct net_device *dev);
void (*napi_enable_tx)(struct net_device *dev);
void (*napi_disable_tx)(struct net_device *dev);
void (*rx_bd_done)(struct net_device *dev);
void (*tx_kickstart)(struct net_device *dev);
u32 (*get_int_events)(struct net_device *dev);
void (*clear_int_events)(struct net_device *dev, u32 int_events);
void (*ev_error)(struct net_device *dev, u32 int_events);
int (*get_regs)(struct net_device *dev, void *p, int *sizep);
int (*get_regs_len)(struct net_device *dev);
void (*tx_restart)(struct net_device *dev);
};
struct phy_info {
unsigned int id;
const char *name;
void (*startup) (struct net_device * dev);
void (*shutdown) (struct net_device * dev);
void (*ack_int) (struct net_device * dev);
};
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
#define MIN_MTU 46 /* this is data size */
#define CRC_LEN 4
#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
/* Must be a multiple of 32 (to cover both FEC & FCC) */
#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
/* This is needed so that invalidate_xxx won't invalidate too much */
#define ENET_RX_ALIGN 16
#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
struct fs_enet_private {
struct napi_struct napi;
struct napi_struct napi_tx;
struct device *dev; /* pointer back to the device (must be initialized first) */
struct net_device *ndev;
spinlock_t lock; /* during all ops except TX pckt processing */
spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
struct fs_platform_info *fpi;
const struct fs_ops *ops;
int rx_ring, tx_ring;
dma_addr_t ring_mem_addr;
void __iomem *ring_base;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
cbd_t __iomem *tx_bd_base;
cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
cbd_t __iomem *cur_rx;
cbd_t __iomem *cur_tx;
int tx_free;
struct net_device_stats stats;
struct timer_list phy_timer_list;
const struct phy_info *phy;
u32 msg_enable;
struct mii_if_info mii_if;
unsigned int last_mii_status;
int interrupt;
struct phy_device *phydev;
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
u32 ev_napi_rx; /* mask of NAPI rx events */
u32 ev_napi_tx; /* mask of NAPI tx events */
u32 ev_rx; /* rx event mask */
u32 ev_tx; /* tx event mask */
u32 ev_err; /* error event mask */
u16 bd_rx_empty; /* mask of BD rx empty */
u16 bd_rx_err; /* mask of BD rx errors */
union {
struct {
int idx; /* FEC1 = 0, FEC2 = 1 */
void __iomem *fecp; /* hw registers */
u32 hthi, htlo; /* state for multicast */
} fec;
struct {
int idx; /* FCC1-3 = 0-2 */
void __iomem *fccp; /* hw registers */
void __iomem *ep; /* parameter ram */
void __iomem *fcccp; /* hw registers cont. */
void __iomem *mem; /* FCC DPRAM */
u32 gaddrh, gaddrl; /* group address */
} fcc;
struct {
int idx; /* SCC1 = 0, SCC2 = 1 */
void __iomem *sccp; /* hw registers */
void __iomem *ep; /* parameter ram */
u32 hthi, htlo; /* state for multicast */
} scc;
};
};
/***************************************************************************/
void fs_init_bds(struct net_device *dev);
void fs_cleanup_bds(struct net_device *dev);
/***************************************************************************/
#define DRV_MODULE_NAME "fs_enet"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "Sep 22, 2014"
/***************************************************************************/
int fs_enet_platform_init(void);
void fs_enet_platform_cleanup(void);
/***************************************************************************/
/* buffer descriptor access macros */
/* access macros */
#if defined(CONFIG_CPM1)
/* for a CPM1 __raw_xxx's are sufficient */
#define __cbd_out32(addr, x) __raw_writel(x, addr)
#define __cbd_out16(addr, x) __raw_writew(x, addr)
#define __cbd_in32(addr) __raw_readl(addr)
#define __cbd_in16(addr) __raw_readw(addr)
#else
/* for others play it safe */
#define __cbd_out32(addr, x) out_be32(addr, x)
#define __cbd_out16(addr, x) out_be16(addr, x)
#define __cbd_in32(addr) in_be32(addr)
#define __cbd_in16(addr) in_be16(addr)
#endif
/* write */
#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
/* read */
#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
/* set bits */
#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
/* clear bits */
#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
/*******************************************************************/
extern const struct fs_ops fs_fec_ops;
extern const struct fs_ops fs_fcc_ops;
extern const struct fs_ops fs_scc_ops;
/*******************************************************************/
#endif

View file

@@ -0,0 +1,614 @@
/*
* FCC driver for Motorola MPC82xx (PQ2).
*
* Copyright (c) 2003 Intracom S.A.
* by Pantelis Antoniou <panto@intracom.gr>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/cpm2.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include "fs_enet.h"
/*************************************************/
/* FCC access macros */
/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v))
#define R32(_p, _m) in_be32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v))
#define R16(_p, _m) in_be16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v))
#define R8(_p, _m) in_8(&(_p)->_m)
#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
/*************************************************/
#define FCC_MAX_MULTICAST_ADDRS 64
#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
#define mk_mii_end 0
#define MAX_CR_CMD_LOOPS 10000
static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
const struct fs_platform_info *fpi = fep->fpi;
return cpm_command(fpi->cp_command, op);
}
static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
goto out;
fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fcc.fccp)
goto out;
fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->fcc.ep)
goto out_fccp;
fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
if (!fep->fcc.fcccp)
goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr;
fpi->dpram_offset = cpm_dpalloc(128, 32);
if (IS_ERR_VALUE(fpi->dpram_offset)) {
ret = fpi->dpram_offset;
goto out_fcccp;
}
return 0;
out_fcccp:
iounmap(fep->fcc.fcccp);
out_ep:
iounmap(fep->fcc.ep);
out_fccp:
iounmap(fep->fcc.fccp);
out:
return ret;
}
#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
#define FCC_RX_EVENT (FCC_ENET_RXF)
#define FCC_TX_EVENT (FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
static int setup_data(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (do_pd_setup(fep) != 0)
return -EINVAL;
fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
fep->ev_rx = FCC_RX_EVENT;
fep->ev_tx = FCC_TX_EVENT;
fep->ev_err = FCC_ERR_EVENT_MSK;
return 0;
}
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev,
(fpi->tx_ring + fpi->rx_ring) *
sizeof(cbd_t), &fep->ring_mem_addr,
GFP_KERNEL);
if (fep->ring_base == NULL)
return -ENOMEM;
return 0;
}
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
dma_free_coherent(fep->dev,
(fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
(void __force *)fep->ring_base, fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
/* nothing */
}
static void set_promiscuous_mode(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
}
static void set_multicast_start(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_enet_t __iomem *ep = fep->fcc.ep;
W32(ep, fen_gaddrh, 0);
W32(ep, fen_gaddrl, 0);
}
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_enet_t __iomem *ep = fep->fcc.ep;
u16 taddrh, taddrm, taddrl;
taddrh = ((u16)mac[5] << 8) | mac[4];
taddrm = ((u16)mac[3] << 8) | mac[2];
taddrl = ((u16)mac[1] << 8) | mac[0];
W16(ep, fen_taddrh, taddrh);
W16(ep, fen_taddrm, taddrm);
W16(ep, fen_taddrl, taddrl);
fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
}
static void set_multicast_finish(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
fcc_enet_t __iomem *ep = fep->fcc.ep;
/* clear promiscuous always */
C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
}
/* read back */
fep->fcc.gaddrh = R32(ep, fen_gaddrh);
fep->fcc.gaddrl = R32(ep, fen_gaddrl);
}
static void set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
netdev_for_each_mc_addr(ha, dev)
set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
}
static void restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
fcc_t __iomem *fccp = fep->fcc.fccp;
fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
fcc_enet_t __iomem *ep = fep->fcc.ep;
dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
u16 paddrh, paddrm, paddrl;
const unsigned char *mac;
int i;
C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
/* clear everything (slow & steady does it) */
for (i = 0; i < sizeof(*ep); i++)
out_8((u8 __iomem *)ep + i, 0);
/* get physical address */
rx_bd_base_phys = fep->ring_mem_addr;
tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
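/* The Rx and Tx buffer descriptor rings live in one contiguous DMA
* allocation (see allocate_bd()); the Tx ring starts right after the
* fpi->rx_ring Rx descriptors.
*/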
/* point to bds */
W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
/* Set maximum bytes per receive buffer.
* It must be a multiple of 32.
*/
W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
/* Allocate space in the reserved FCC area of DPRAM for the
* internal buffers. No one uses this space (yet), so we
* can do this. Later, we will add resource management for
* this area.
*/
W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);
W16(ep, fen_padptr, fpi->dpram_offset + 64);
/* fill with special symbol... */
memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
W32(ep, fen_genfcc.fcc_rbptr, 0);
W32(ep, fen_genfcc.fcc_tbptr, 0);
W32(ep, fen_genfcc.fcc_rcrc, 0);
W32(ep, fen_genfcc.fcc_tcrc, 0);
W16(ep, fen_genfcc.fcc_res1, 0);
W32(ep, fen_genfcc.fcc_res2, 0);
/* no CAM */
W32(ep, fen_camptr, 0);
/* Set CRC preset and mask */
W32(ep, fen_cmask, 0xdebb20e3);
W32(ep, fen_cpres, 0xffffffff);
W32(ep, fen_crcec, 0); /* CRC Error counter */
W32(ep, fen_alec, 0); /* alignment error counter */
W32(ep, fen_disfc, 0); /* discard frame counter */
W16(ep, fen_retlim, 15); /* Retry limit threshold */
W16(ep, fen_pper, 0); /* Normal persistence */
/* set group address */
W32(ep, fen_gaddrh, fep->fcc.gaddrh);
W32(ep, fen_gaddrl, fep->fcc.gaddrl);
/* Clear hash filter tables */
W32(ep, fen_iaddrh, 0);
W32(ep, fen_iaddrl, 0);
/* Clear the Out-of-sequence TxBD */
W16(ep, fen_tfcstat, 0);
W16(ep, fen_tfclen, 0);
W32(ep, fen_tfcptr, 0);
W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
/* set address */
mac = dev->dev_addr;
paddrh = ((u16)mac[5] << 8) | mac[4];
paddrm = ((u16)mac[3] << 8) | mac[2];
paddrl = ((u16)mac[1] << 8) | mac[0];
W16(ep, fen_paddrh, paddrh);
W16(ep, fen_paddrm, paddrm);
W16(ep, fen_paddrl, paddrl);
W16(ep, fen_taddrh, 0);
W16(ep, fen_taddrm, 0);
W16(ep, fen_taddrl, 0);
W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
/* Clear stat counters, in case we ever enable RMON */
W32(ep, fen_octc, 0);
W32(ep, fen_colc, 0);
W32(ep, fen_broc, 0);
W32(ep, fen_mulc, 0);
W32(ep, fen_uspc, 0);
W32(ep, fen_frgc, 0);
W32(ep, fen_ospc, 0);
W32(ep, fen_jbrc, 0);
W32(ep, fen_p64c, 0);
W32(ep, fen_p65c, 0);
W32(ep, fen_p128c, 0);
W32(ep, fen_p256c, 0);
W32(ep, fen_p512c, 0);
W32(ep, fen_p1024c, 0);
W16(ep, fen_rfthr, 0); /* Suggested by manual */
W16(ep, fen_rfcnt, 0);
W16(ep, fen_cftype, 0);
fs_init_bds(dev);
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
if (fep->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
}
fcc_cr_cmd(fep, CPM_CR_INIT_TRX);
/* clear events */
W16(fccp, fcc_fcce, 0xffff);
/* Enable interrupts we wish to service */
W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
/* Set GFMR to enable Ethernet operating mode */
W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
/* set sync/delimiters */
W16(fccp, fcc_fdsr, 0xd555);
W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
if (fpi->use_rmii)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
if (fep->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
/* Restore multicast and promiscuous settings */
set_multicast_list(dev);
S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
}
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
/* stop ethernet */
C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
/* clear events */
W16(fccp, fcc_fcce, 0xffff);
/* clear interrupt mask */
W16(fccp, fcc_fccm, 0);
fs_cleanup_bds(dev);
}
static void napi_clear_rx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_enable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_disable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
static void napi_clear_tx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
}
static void napi_enable_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
}
static void napi_disable_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
/* nothing */
}
static void tx_kickstart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
S16(fccp, fcc_ftodr, 0x8000);
}
static u32 get_int_events(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
return (u32)R16(fccp, fcc_fcce);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
W16(fccp, fcc_fcce, int_events & 0xffff);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
return -EINVAL;
memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
p = (char *)p + sizeof(fcc_t);
memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
p = (char *)p + sizeof(fcc_enet_t);
memcpy_fromio(p, fep->fcc.fcccp, 1);
return 0;
}
static int get_regs_len(struct net_device *dev)
{
return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
}
/* Some transmit errors cause the transmitter to shut
* down. We now issue a restart transmit.
* Also, to work around 8260 device erratum CPM37, we must
* disable and then re-enable the transmitter following a
* Late Collision, Underrun, or Retry Limit error.
* In addition, tbptr may point beyond BDs still marked
* as ready due to internal pipelining, so we need to look back
* through the BDs and adjust tbptr to point to the last BD
* marked as ready. This may result in some buffers being
* retransmitted.
*/
static void tx_restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
const struct fs_platform_info *fpi = fep->fpi;
fcc_enet_t __iomem *ep = fep->fcc.ep;
cbd_t __iomem *curr_tbptr;
cbd_t __iomem *recheck_bd;
cbd_t __iomem *prev_bd;
cbd_t __iomem *last_tx_bd;
last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
/* get the current bd held in TBPTR and scan back from this point */
recheck_bd = curr_tbptr = (cbd_t __iomem *)
((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
fep->ring_base);
prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
/* Move through the bds in reverse, look for the earliest buffer
* that is not ready. Adjust TBPTR to the following buffer */
while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
/* Go back one buffer */
recheck_bd = prev_bd;
/* update the previous buffer */
prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
/* We should never see all bds marked as ready, check anyway */
if (recheck_bd == curr_tbptr)
break;
}
/* Now update the TBPTR and dirty flag to the current buffer */
W32(ep, fen_genfcc.fcc_tbptr,
(uint) (((void *)recheck_bd - fep->ring_base) +
fep->ring_mem_addr));
fep->dirty_tx = recheck_bd;
C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
udelay(10);
S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
/*************************************************************************/
const struct fs_ops fs_fcc_ops = {
.setup_data = setup_data,
.cleanup_data = cleanup_data,
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
.napi_clear_tx_event = napi_clear_tx_event,
.napi_enable_tx = napi_enable_tx,
.napi_disable_tx = napi_disable_tx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
.clear_int_events = clear_int_events,
.ev_error = ev_error,
.get_regs = get_regs,
.get_regs_len = get_regs_len,
.tx_restart = tx_restart,
.allocate_bd = allocate_bd,
.free_bd = free_bd,
};

View file

@@ -0,0 +1,533 @@
/*
* Freescale Ethernet controllers
*
* Copyright (c) 2005 Intracom S.A.
* by Pantelis Antoniou <panto@intracom.gr>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/cpm1.h>
#endif
#include "fs_enet.h"
#include "fec.h"
/*************************************************/
#if defined(CONFIG_CPM1)
/* for a CPM1 __raw_xxx's are sufficient */
#define __fs_out32(addr, x) __raw_writel(x, addr)
#define __fs_out16(addr, x) __raw_writew(x, addr)
#define __fs_in32(addr) __raw_readl(addr)
#define __fs_in16(addr) __raw_readw(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x) out_be32(addr, x)
#define __fs_out16(addr, x) out_be16(addr, x)
#define __fs_in32(addr) in_be32(addr)
#define __fs_in16(addr) in_be16(addr)
#endif
/* write */
#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
/* read */
#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
/* set bits */
#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
/* clear bits */
#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
/*
* Delay to wait for FEC reset command to complete (in us)
*/
#define FEC_RESET_DELAY 50
static int whack_reset(struct fec __iomem *fecp)
{
int i;
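/* Assert the reset bit, then poll until the controller clears it,
* giving up after FEC_RESET_DELAY microseconds.
*/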
FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
for (i = 0; i < FEC_RESET_DELAY; i++) {
if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
return 0; /* OK */
udelay(1);
}
return -1;
}
static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fec.fecp)
return -EINVAL;
return 0;
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
FEC_ENET_BABT | FEC_ENET_EBERR)
static int setup_data(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (do_pd_setup(fep) != 0)
return -EINVAL;
fep->fec.hthi = 0;
fep->fec.htlo = 0;
fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
fep->ev_rx = FEC_RX_EVENT;
fep->ev_tx = FEC_TX_EVENT;
fep->ev_err = FEC_ERR_EVENT_MSK;
return 0;
}
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
(fpi->tx_ring + fpi->rx_ring) *
sizeof(cbd_t), &fep->ring_mem_addr,
GFP_KERNEL);
if (fep->ring_base == NULL)
return -ENOMEM;
return 0;
}
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
* sizeof(cbd_t),
(void __force *)fep->ring_base,
fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
/* nothing */
}
static void set_promiscuous_mode(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}
static void set_multicast_start(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fep->fec.hthi = 0;
fep->fec.htlo = 0;
}
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
int temp, hash_index, i, j;
u32 crc, csrVal;
u8 byte, msb;
crc = 0xffffffff;
for (i = 0; i < 6; i++) {
byte = mac[i];
for (j = 0; j < 8; j++) {
msb = crc >> 31;
crc <<= 1;
if (msb ^ (byte & 0x1))
crc ^= FEC_CRC_POLY;
byte >>= 1;
}
}
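/* The low six bits of the CRC select one of the 64 group-hash bits:
* bit 0 picks the high or low hash register, and bits 1-5 (bit-reversed
* below) give the bit position within that register.
*/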
temp = (crc & 0x3f) >> 1;
hash_index = ((temp & 0x01) << 4) |
((temp & 0x02) << 2) |
((temp & 0x04)) |
((temp & 0x08) >> 2) |
((temp & 0x10) >> 4);
csrVal = 1 << hash_index;
if (crc & 1)
fep->fec.hthi |= csrVal;
else
fep->fec.htlo |= csrVal;
}
static void set_multicast_finish(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
fep->fec.hthi = 0xffffffffU;
fep->fec.htlo = 0xffffffffU;
}
FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
FW(fecp, grp_hash_table_high, fep->fec.hthi);
FW(fecp, grp_hash_table_low, fep->fec.htlo);
}
static void set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
netdev_for_each_mc_addr(ha, dev)
set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
}
static void restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
const struct fs_platform_info *fpi = fep->fpi;
dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
int r;
u32 addrhi, addrlo;
struct mii_bus* mii = fep->phydev->bus;
struct fec_info* fec_inf = mii->priv;
r = whack_reset(fep->fec.fecp);
if (r != 0)
dev_err(fep->dev, "FEC Reset FAILED!\n");
/*
* Set station address.
*/
addrhi = ((u32) dev->dev_addr[0] << 24) |
((u32) dev->dev_addr[1] << 16) |
((u32) dev->dev_addr[2] << 8) |
(u32) dev->dev_addr[3];
addrlo = ((u32) dev->dev_addr[4] << 24) |
((u32) dev->dev_addr[5] << 16);
FW(fecp, addr_low, addrhi);
FW(fecp, addr_high, addrlo);
/*
* Reset all multicast.
*/
FW(fecp, grp_hash_table_high, fep->fec.hthi);
FW(fecp, grp_hash_table_low, fep->fec.htlo);
/*
* Set maximum receive buffer size.
*/
FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
#ifdef CONFIG_FS_ENET_MPC5121_FEC
FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
#else
FW(fecp, r_hash, PKT_MAXBUF_SIZE);
#endif
/* get physical address */
rx_bd_base_phys = fep->ring_mem_addr;
tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
/*
* Set receive and transmit descriptor base.
*/
FW(fecp, r_des_start, rx_bd_base_phys);
FW(fecp, x_des_start, tx_bd_base_phys);
fs_init_bds(dev);
/*
* Enable big endian and don't care about SDMA FC.
*/
#ifdef CONFIG_FS_ENET_MPC5121_FEC
FS(fecp, dma_control, 0xC0000000);
#else
FW(fecp, fun_code, 0x78000000);
#endif
/*
* Set MII speed.
*/
FW(fecp, mii_speed, fec_inf->mii_speed);
/*
* Clear any outstanding interrupt.
*/
FW(fecp, ievent, 0xffc0);
#ifndef CONFIG_FS_ENET_MPC5121_FEC
FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
#else
/*
* Only set MII/RMII mode - do not touch maximum frame length
* configured before.
*/
FS(fecp, r_cntrl, fpi->use_rmii ?
FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
/*
* adjust to duplex mode
*/
if (fep->phydev->duplex) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
}
/* Restore multicast and promiscuous settings */
set_multicast_list(dev);
/*
* Enable interrupts we wish to service.
*/
FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
FEC_ENET_RXF | FEC_ENET_RXB);
/*
* And last, enable the transmit and receive processing.
*/
FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
FW(fecp, r_des_active, 0x01000000);
}
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
struct fec __iomem *fecp = fep->fec.fecp;
struct fec_info *feci = fep->phydev->bus->priv;
int i;
if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
return; /* already down */
FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
i < FEC_RESET_DELAY; i++)
udelay(1);
if (i == FEC_RESET_DELAY)
dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
/*
* Disable FEC. Let only MII interrupts.
*/
FW(fecp, imask, 0);
FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
fs_cleanup_bds(dev);
/* shut down FEC1? that's where the mii bus is */
if (fpi->has_phy) {
FS(fecp, r_cntrl, fpi->use_rmii ?
FEC_RCNTRL_RMII_MODE :
FEC_RCNTRL_MII_MODE); /* MII/RMII enable */
FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
FW(fecp, ievent, FEC_ENET_MII);
FW(fecp, mii_speed, feci->mii_speed);
}
}
static void napi_clear_rx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
}
static void napi_enable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
static void napi_disable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
static void napi_clear_tx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
}
static void napi_enable_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
}
static void napi_disable_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, r_des_active, 0x01000000);
}
static void tx_kickstart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, x_des_active, 0x01000000);
}
static u32 get_int_events(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
return FR(fecp, ievent) & FR(fecp, imask);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, int_events);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (*sizep < sizeof(struct fec))
return -EINVAL;
memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
return 0;
}
static int get_regs_len(struct net_device *dev)
{
return sizeof(struct fec);
}
static void tx_restart(struct net_device *dev)
{
/* nothing */
}
/*************************************************************************/
const struct fs_ops fs_fec_ops = {
.setup_data = setup_data,
.cleanup_data = cleanup_data,
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
.napi_clear_tx_event = napi_clear_tx_event,
.napi_enable_tx = napi_enable_tx,
.napi_disable_tx = napi_disable_tx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
.clear_int_events = clear_int_events,
.ev_error = ev_error,
.get_regs = get_regs,
.get_regs_len = get_regs_len,
.tx_restart = tx_restart,
.allocate_bd = allocate_bd,
.free_bd = free_bd,
};

View file

@@ -0,0 +1,516 @@
/*
* Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
*
* Copyright (c) 2003 Intracom S.A.
* by Pantelis Antoniou <panto@intracom.gr>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/cpm1.h>
#endif
#include "fs_enet.h"
/*************************************************/
#if defined(CONFIG_CPM1)
/* for an 8xx __raw_xxx's are sufficient */
#define __fs_out32(addr, x) __raw_writel(x, addr)
#define __fs_out16(addr, x) __raw_writew(x, addr)
#define __fs_out8(addr, x) __raw_writeb(x, addr)
#define __fs_in32(addr) __raw_readl(addr)
#define __fs_in16(addr) __raw_readw(addr)
#define __fs_in8(addr) __raw_readb(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x) out_be32(addr, x)
#define __fs_out16(addr, x) out_be16(addr, x)
#define __fs_in32(addr) in_be32(addr)
#define __fs_in16(addr) in_be16(addr)
#define __fs_out8(addr, x) out_8(addr, x)
#define __fs_in8(addr) in_8(addr)
#endif
/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m) __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m) __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m) __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
#define SCC_MAX_MULTICAST_ADDRS 64
/*
* Delay to wait for SCC reset command to complete (in us)
*/
#define SCC_RESET_DELAY 50
static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
const struct fs_platform_info *fpi = fep->fpi;
return cpm_command(fpi->cp_command, op);
}
static int do_pd_setup(struct fs_enet_private *fep)
{
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->scc.sccp)
return -EINVAL;
fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->scc.ep) {
iounmap(fep->scc.sccp);
return -EINVAL;
}
return 0;
}
#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
#define SCC_RX_EVENT (SCCE_ENET_RXF)
#define SCC_TX_EVENT (SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
static int setup_data(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
do_pd_setup(fep);
fep->scc.hthi = 0;
fep->scc.htlo = 0;
fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
fep->ev_rx = SCC_RX_EVENT;
fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
fep->ev_err = SCC_ERR_EVENT_MSK;
return 0;
}
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
sizeof(cbd_t), 8);
if (IS_ERR_VALUE(fep->ring_mem_addr))
return -ENOMEM;
fep->ring_base = (void __iomem __force*)
cpm_dpram_addr(fep->ring_mem_addr);
return 0;
}
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (fep->ring_base)
cpm_dpfree(fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
{
/* nothing */
}
static void set_promiscuous_mode(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
S16(sccp, scc_psmr, SCC_PSMR_PRO);
}
static void set_multicast_start(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_enet_t __iomem *ep = fep->scc.ep;
W16(ep, sen_gaddr1, 0);
W16(ep, sen_gaddr2, 0);
W16(ep, sen_gaddr3, 0);
W16(ep, sen_gaddr4, 0);
}
static void set_multicast_one(struct net_device *dev, const u8 * mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_enet_t __iomem *ep = fep->scc.ep;
u16 taddrh, taddrm, taddrl;
taddrh = ((u16) mac[5] << 8) | mac[4];
taddrm = ((u16) mac[3] << 8) | mac[2];
taddrl = ((u16) mac[1] << 8) | mac[0];
W16(ep, sen_taddrh, taddrh);
W16(ep, sen_taddrm, taddrm);
W16(ep, sen_taddrl, taddrl);
scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}
static void set_multicast_finish(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
scc_enet_t __iomem *ep = fep->scc.ep;
/* clear promiscuous always */
C16(sccp, scc_psmr, SCC_PSMR_PRO);
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
W16(ep, sen_gaddr1, 0xffff);
W16(ep, sen_gaddr2, 0xffff);
W16(ep, sen_gaddr3, 0xffff);
W16(ep, sen_gaddr4, 0xffff);
}
}
static void set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
netdev_for_each_mc_addr(ha, dev)
set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
}
/*
* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
* duplex.
*/
static void restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
scc_enet_t __iomem *ep = fep->scc.ep;
const struct fs_platform_info *fpi = fep->fpi;
u16 paddrh, paddrm, paddrl;
const unsigned char *mac;
int i;
C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
/* clear everything (slow & steady does it) */
for (i = 0; i < sizeof(*ep); i++)
__fs_out8((u8 __iomem *)ep + i, 0);
/* point to bds */
W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
W16(ep, sen_genscc.scc_tbase,
fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
/* Initialize function code registers for big-endian.
*/
#ifndef CONFIG_NOT_COHERENT_CACHE
W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
W8(ep, sen_genscc.scc_rfcr, SCC_EB);
W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif
/* Set maximum bytes per receive buffer.
* This appears to be an Ethernet frame size, not the buffer
* fragment size. It must be a multiple of four.
*/
W16(ep, sen_genscc.scc_mrblr, 0x5f0);
/* Set CRC preset and mask.
*/
W32(ep, sen_cpres, 0xffffffff);
W32(ep, sen_cmask, 0xdebb20e3);
W32(ep, sen_crcec, 0); /* CRC Error counter */
W32(ep, sen_alec, 0); /* alignment error counter */
W32(ep, sen_disfc, 0); /* discard frame counter */
W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
W16(ep, sen_retlim, 15); /* Retry limit threshold */
W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
/* Clear hash tables.
*/
W16(ep, sen_gaddr1, 0);
W16(ep, sen_gaddr2, 0);
W16(ep, sen_gaddr3, 0);
W16(ep, sen_gaddr4, 0);
W16(ep, sen_iaddr1, 0);
W16(ep, sen_iaddr2, 0);
W16(ep, sen_iaddr3, 0);
W16(ep, sen_iaddr4, 0);
/* set address
*/
mac = dev->dev_addr;
paddrh = ((u16) mac[5] << 8) | mac[4];
paddrm = ((u16) mac[3] << 8) | mac[2];
paddrl = ((u16) mac[1] << 8) | mac[0];
W16(ep, sen_paddrh, paddrh);
W16(ep, sen_paddrm, paddrm);
W16(ep, sen_paddrl, paddrl);
W16(ep, sen_pper, 0);
W16(ep, sen_taddrl, 0);
W16(ep, sen_taddrm, 0);
W16(ep, sen_taddrh, 0);
fs_init_bds(dev);
scc_cr_cmd(fep, CPM_CR_INIT_TRX);
W16(sccp, scc_scce, 0xffff);
/* Enable interrupts we wish to service.
*/
W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
/* Set GSMR_H to enable all normal operating modes.
* Set GSMR_L to enable Ethernet to MC68160.
*/
W32(sccp, scc_gsmrh, 0);
W32(sccp, scc_gsmrl,
SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
SCC_GSMRL_MODE_ENET);
/* Set sync/delimiters.
*/
W16(sccp, scc_dsr, 0xd555);
/* Set processing mode. Use Ethernet CRC, catch broadcast, and
* start frame search 22 bit times after RENA.
*/
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
if (fep->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
set_multicast_list(dev);
S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
int i;
for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
udelay(1);
if (i == SCC_RESET_DELAY)
dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
W16(sccp, scc_sccm, 0);
C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
fs_cleanup_bds(dev);
}
static void napi_clear_rx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
}
static void napi_enable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
static void napi_disable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
static void napi_clear_tx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
}
static void napi_enable_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
}
static void napi_disable_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
{
/* nothing */
}
static void tx_kickstart(struct net_device *dev)
{
/* nothing */
}
static u32 get_int_events(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
return (u32) R16(sccp, scc_scce);
}
static void clear_int_events(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
W16(sccp, scc_scce, int_events & 0xffff);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
struct fs_enet_private *fep = netdev_priv(dev);
if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
return -EINVAL;
memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
p = (char *)p + sizeof(scc_t);
memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));
return 0;
}
static int get_regs_len(struct net_device *dev)
{
return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}
static void tx_restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
/*************************************************************************/
const struct fs_ops fs_scc_ops = {
.setup_data = setup_data,
.cleanup_data = cleanup_data,
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
.napi_clear_tx_event = napi_clear_tx_event,
.napi_enable_tx = napi_enable_tx,
.napi_disable_tx = napi_disable_tx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
.clear_int_events = clear_int_events,
.ev_error = ev_error,
.get_regs = get_regs,
.get_regs_len = get_regs_len,
.tx_restart = tx_restart,
.allocate_bd = allocate_bd,
.free_bd = free_bd,
};

View file

@@ -0,0 +1,234 @@
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
* Copyright (c) 2003 Intracom S.A.
* by Pantelis Antoniou <panto@intracom.gr>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include "fs_enet.h"
struct bb_info {
struct mdiobb_ctrl ctrl;
__be32 __iomem *dir;
__be32 __iomem *dat;
u32 mdio_msk;
u32 mdc_msk;
};
/* FIXME: If any other users of GPIO crop up, then these will have to
* have some sort of global synchronization to avoid races with other
* pins on the same port. The ideal solution would probably be to
* bind the ports to a GPIO driver, and have this be a client of it.
*/
static inline void bb_set(u32 __iomem *p, u32 m)
{
out_be32(p, in_be32(p) | m);
}
static inline void bb_clr(u32 __iomem *p, u32 m)
{
out_be32(p, in_be32(p) & ~m);
}
static inline int bb_read(u32 __iomem *p, u32 m)
{
return (in_be32(p) & m) != 0;
}
static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (dir)
bb_set(bitbang->dir, bitbang->mdio_msk);
else
bb_clr(bitbang->dir, bitbang->mdio_msk);
/* Read back to flush the write. */
in_be32(bitbang->dir);
}
static inline int mdio_read(struct mdiobb_ctrl *ctrl)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
return bb_read(bitbang->dat, bitbang->mdio_msk);
}
static inline void mdio(struct mdiobb_ctrl *ctrl, int what)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (what)
bb_set(bitbang->dat, bitbang->mdio_msk);
else
bb_clr(bitbang->dat, bitbang->mdio_msk);
/* Read back to flush the write. */
in_be32(bitbang->dat);
}
static inline void mdc(struct mdiobb_ctrl *ctrl, int what)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
if (what)
bb_set(bitbang->dat, bitbang->mdc_msk);
else
bb_clr(bitbang->dat, bitbang->mdc_msk);
/* Read back to flush the write. */
in_be32(bitbang->dat);
}
static struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = mdc,
.set_mdio_dir = mdio_dir,
.set_mdio_data = mdio,
.get_mdio_data = mdio_read,
};
static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
{
struct resource res;
const u32 *data;
int mdio_pin, mdc_pin, len;
struct bb_info *bitbang = bus->priv;
int ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
if (resource_size(&res) <= 13)
return -ENODEV;
/* This should really encode the pin number as well, but all
* we get is an int, and the odds of multiple bitbang mdio buses
* are low enough that it's not worth going too crazy.
*/
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
data = of_get_property(np, "fsl,mdio-pin", &len);
if (!data || len != 4)
return -ENODEV;
mdio_pin = *data;
data = of_get_property(np, "fsl,mdc-pin", &len);
if (!data || len != 4)
return -ENODEV;
mdc_pin = *data;
bitbang->dir = ioremap(res.start, resource_size(&res));
if (!bitbang->dir)
return -ENOMEM;
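/* The port data register sits four 32-bit registers (16 bytes) past
* the direction register in the mapped register block.
*/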
bitbang->dat = bitbang->dir + 4;
bitbang->mdio_msk = 1 << (31 - mdio_pin);
bitbang->mdc_msk = 1 << (31 - mdc_pin);
return 0;
}
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct mii_bus *new_bus;
struct bb_info *bitbang;
int ret = -ENOMEM;
bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
if (!bitbang)
goto out;
bitbang->ctrl.ops = &bb_ops;
new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!new_bus)
goto out_free_priv;
new_bus->name = "CPM2 Bitbanged MII",
ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node);
if (ret)
goto out_free_bus;
new_bus->phy_mask = ~0;
new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!new_bus->irq) {
ret = -ENOMEM;
goto out_unmap_regs;
}
new_bus->parent = &ofdev->dev;
platform_set_drvdata(ofdev, new_bus);
ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
goto out_free_irqs;
return 0;
out_free_irqs:
kfree(new_bus->irq);
out_unmap_regs:
iounmap(bitbang->dir);
out_free_bus:
free_mdio_bitbang(new_bus);
out_free_priv:
kfree(bitbang);
out:
return ret;
}
static int fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct bb_info *bitbang = bus->priv;
mdiobus_unregister(bus);
kfree(bus->irq);
free_mdio_bitbang(bus);
iounmap(bitbang->dir);
kfree(bitbang);
return 0;
}
static struct of_device_id fs_enet_mdio_bb_match[] = {
{
.compatible = "fsl,cpm2-mdio-bitbang",
},
{},
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
static struct platform_driver fs_enet_bb_mdio_driver = {
.driver = {
.name = "fsl-bb-mdio",
.owner = THIS_MODULE,
.of_match_table = fs_enet_mdio_bb_match,
},
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_bb_mdio_driver);

View file

@@ -0,0 +1,235 @@
/*
* Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
*
* Copyright (c) 2003 Intracom S.A.
* by Pantelis Antoniou <panto@intracom.gr>
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/mpc5xxx.h>
#include "fs_enet.h"
#include "fec.h"
/* Make MII read/write commands for the FEC.
*/
#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
#define mk_mii_end 0
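/*
 * Worked example (illustrative): mk_mii_read(MII_BMSR), with MII_BMSR == 1,
 * evaluates to 0x60020000 | (1 << 18) == 0x60060000. The read/write helpers
 * below then OR in the PHY address shifted into bits 27:23, so reading BMSR
 * from the PHY at address 2 writes 0x61060000 to fec_mii_data.
 */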
#define FEC_MII_LOOPS 10000
static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
{
struct fec_info *fec = bus->priv;
struct fec __iomem *fecp = fec->fecp;
int i, ret = -1;
BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
/* Add PHY address to register command. */
out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
for (i = 0; i < FEC_MII_LOOPS; i++)
if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
break;
if (i < FEC_MII_LOOPS) {
out_be32(&fecp->fec_ievent, FEC_ENET_MII);
ret = in_be32(&fecp->fec_mii_data) & 0xffff;
}
return ret;
}
static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
struct fec_info *fec = bus->priv;
struct fec __iomem *fecp = fec->fecp;
int i;
/* this must never happen */
BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
/* Add PHY address to register command. */
out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
for (i = 0; i < FEC_MII_LOOPS; i++)
if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
break;
if (i < FEC_MII_LOOPS)
out_be32(&fecp->fec_ievent, FEC_ENET_MII);
return 0;
}
static struct of_device_id fs_enet_mdio_fec_match[];
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
int (*get_bus_freq)(struct device_node *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
if (!match)
return -EINVAL;
get_bus_freq = match->data;
new_bus = mdiobus_alloc();
if (!new_bus)
goto out;
fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
if (!fec)
goto out_mii;
new_bus->priv = fec;
new_bus->name = "FEC MII Bus";
new_bus->read = &fs_enet_fec_mii_read;
new_bus->write = &fs_enet_fec_mii_write;
ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
goto out_res;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
fec->fecp = ioremap(res.start, resource_size(&res));
if (!fec->fecp) {
ret = -ENOMEM;
goto out_fec;
}
if (get_bus_freq) {
clock = get_bus_freq(ofdev->dev.of_node);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
clock = 0x3F * 5000000;
}
} else
clock = ppc_proc_freq;
/*
* Scale for a MII clock <= 2.5 MHz
* Note that only 6 bits (25:30) are available for MII speed.
*/
speed = (clock + 4999999) / 5000000;
if (speed > 0x3F) {
speed = 0x3F;
dev_err(&ofdev->dev,
"MII clock (%d Hz) exceeds max (2.5 MHz)\n",
clock / speed);
}
fec->mii_speed = speed << 1;
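/*
 * Illustrative numbers: clock = 66 MHz gives speed = (66000000 + 4999999) /
 * 5000000 = 14, so mii_speed is written as 14 << 1 = 0x1c and the resulting
 * MII clock is roughly 66 MHz / (2 * 14), about 2.36 MHz, just under the
 * 2.5 MHz limit.
 */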
setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX |
FEC_ECNTRL_ETHER_EN);
out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII);
clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
new_bus->phy_mask = ~0;
new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!new_bus->irq) {
ret = -ENOMEM;
goto out_unmap_regs;
}
new_bus->parent = &ofdev->dev;
platform_set_drvdata(ofdev, new_bus);
ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
goto out_free_irqs;
return 0;
out_free_irqs:
kfree(new_bus->irq);
out_unmap_regs:
iounmap(fec->fecp);
out_res:
out_fec:
kfree(fec);
out_mii:
mdiobus_free(new_bus);
out:
return ret;
}
static int fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct fec_info *fec = bus->priv;
mdiobus_unregister(bus);
kfree(bus->irq);
iounmap(fec->fecp);
kfree(fec);
mdiobus_free(bus);
return 0;
}
static struct of_device_id fs_enet_mdio_fec_match[] = {
{
.compatible = "fsl,pq1-fec-mdio",
},
#if defined(CONFIG_PPC_MPC512x)
{
.compatible = "fsl,mpc5121-fec-mdio",
.data = mpc5xxx_get_bus_frequency,
},
#endif
{},
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct platform_driver fs_enet_fec_mdio_driver = {
.driver = {
.name = "fsl-fec-mdio",
.owner = THIS_MODULE,
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_fec_mdio_driver);

View file

@@ -0,0 +1,500 @@
/*
* Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
* Provides Bus interface for MIIM regs
*
* Author: Andy Fleming <afleming@freescale.com>
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
*
* Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <asm/io.h>
#if IS_ENABLED(CONFIG_UCC_GETH)
#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
#endif
#include "gianfar.h"
#define MIIMIND_BUSY 0x00000001
#define MIIMIND_NOTVALID 0x00000004
#define MIIMCFG_INIT_VALUE 0x00000007
#define MIIMCFG_RESET 0x80000000
#define MII_READ_COMMAND 0x00000001
struct fsl_pq_mii {
u32 miimcfg; /* MII management configuration reg */
u32 miimcom; /* MII management command reg */
u32 miimadd; /* MII management address reg */
u32 miimcon; /* MII management control reg */
u32 miimstat; /* MII management status reg */
u32 miimind; /* MII management indication reg */
};
struct fsl_pq_mdio {
u8 res1[16];
u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
u8 res2[4];
u32 emapm; /* MDIO Event mapping register (for etsec2)*/
u8 res3[1280];
struct fsl_pq_mii mii;
u8 res4[28];
u32 utbipar; /* TBI phy address reg (only on UCC) */
u8 res5[2728];
} __packed;
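/*
 * Layout check, derived purely from the reserved fields above: the embedded
 * fsl_pq_mii block starts at 16 + 4 + 4 + 4 + 4 + 1280 = 0x520 (the
 * mii_offset used by the etsec2 entries in fsl_pq_mdio_match below),
 * utbipar lands at 0x520 + 24 + 28 = 0x554, and the trailing res5 pads the
 * structure out to 4096 bytes.
 */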
/* Number of microseconds to wait for an MII register to respond */
#define MII_TIMEOUT 1000
struct fsl_pq_mdio_priv {
void __iomem *map;
struct fsl_pq_mii __iomem *regs;
int irqs[PHY_MAX_ADDR];
};
/*
* Per-device-type data. Each type of device tree node that we support gets
* one of these.
*
* @mii_offset: the offset of the MII registers within the memory map of the
* node. Some nodes define only the MII registers, and some define the whole
* MAC (which includes the MII registers).
*
* @get_tbipa: determines the address of the TBIPA register
*
* @ucc_configure: a special function for extra QE configuration
*/
struct fsl_pq_mdio_data {
unsigned int mii_offset; /* offset of the MII registers */
uint32_t __iomem * (*get_tbipa)(void __iomem *p);
void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
};
/*
* Write value to the PHY at mii_id at register regnum, on the bus attached
* to the local interface, which may be different from the generic mdio bus
* (tied to a single interface), waiting until the write is done before
* returning. This is helpful in programming interfaces like the TBI which
* control interfaces like on-chip SERDES and are always tied to the local
* mdio pins, which may not be the same as the system mdio bus used for
* controlling the external PHYs, for example.
*/
static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
unsigned int timeout;
/* Set the PHY address and the register address we want to write */
iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
/* Write out the value we want */
iowrite32be(value, &regs->miimcon);
/* Wait for the transaction to finish */
timeout = MII_TIMEOUT;
while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
cpu_relax();
timeout--;
}
return timeout ? 0 : -ETIMEDOUT;
}
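/*
 * Illustrative example (values assumed, not taken from a caller in this
 * file): restarting autonegotiation on the PHY at address 2 sets
 * miimadd = (2 << 8) | MII_BMCR = 0x0200 and writes
 * BMCR_ANENABLE | BMCR_ANRESTART (0x1200) to miimcon, after which the loop
 * above polls MIIMIND_BUSY until the management interface goes idle.
 */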
/*
* Read the bus for PHY at addr mii_id, register regnum, and return the value.
* Clears miimcom first.
*
* All PHY operations are done on the bus attached to the local interface, which
* may be different from the generic mdio bus. This is helpful in programming
* interfaces like the TBI which, in turn, control interfaces like on-chip
* SERDES and are always tied to the local mdio pins, which may not be the
* same as the system mdio bus used for controlling the external PHYs.
*/
static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
unsigned int timeout;
u16 value;
/* Set the PHY address and the register address we want to read */
iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
/* Clear miimcom, and then initiate a read */
iowrite32be(0, &regs->miimcom);
iowrite32be(MII_READ_COMMAND, &regs->miimcom);
/* Wait for the transaction to finish, normally less than 100us */
timeout = MII_TIMEOUT;
while ((ioread32be(&regs->miimind) &
(MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) {
cpu_relax();
timeout--;
}
if (!timeout)
return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
value = ioread32be(&regs->miimstat);
dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
return value;
}
/* Reset the MIIM registers, and wait for the bus to free */
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
unsigned int timeout;
mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
iowrite32be(MIIMCFG_RESET, &regs->miimcfg);
/* Setup the MII Mgmt clock speed */
iowrite32be(MIIMCFG_INIT_VALUE, &regs->miimcfg);
/* Wait until the bus is free */
timeout = MII_TIMEOUT;
while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
cpu_relax();
timeout--;
}
mutex_unlock(&bus->mdio_lock);
if (!timeout) {
dev_err(&bus->dev, "timeout waiting for MII bus\n");
return -EBUSY;
}
return 0;
}
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
/*
* This is mildly evil, but so is our hardware for doing this.
* Also, we have to cast back to struct gfar because of
* definition weirdness done in gianfar.h.
*/
static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
{
struct gfar __iomem *enet_regs = p;
return &enet_regs->tbipa;
}
/*
* Return the TBIPAR address for an eTSEC2 node
*/
static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
{
return p;
}
#endif
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
/*
* Return the TBIPAR address for a QE MDIO node
*/
static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
{
struct fsl_pq_mdio __iomem *mdio = p;
return &mdio->utbipar;
}
/*
* Find the UCC node that controls the given MDIO node
*
* For some reason, the QE MDIO nodes are not children of the UCC devices
* that control them. Therefore, we need to scan all UCC nodes looking for
* the one that encompasses the given MDIO node. We do this by comparing
* physical addresses. The 'start' and 'end' addresses of the MDIO node are
* passed, and the correct UCC node will cover the entire address range.
*
* This assumes that there is only one QE MDIO node in the entire device tree.
*/
static void ucc_configure(phys_addr_t start, phys_addr_t end)
{
static bool found_mii_master;
struct device_node *np = NULL;
if (found_mii_master)
return;
for_each_compatible_node(np, NULL, "ucc_geth") {
struct resource res;
const uint32_t *iprop;
uint32_t id;
int ret;
ret = of_address_to_resource(np, 0, &res);
if (ret < 0) {
pr_debug("fsl-pq-mdio: no address range in node %s\n",
np->full_name);
continue;
}
/* if our mdio regs fall within this UCC regs range */
if ((start < res.start) || (end > res.end))
continue;
iprop = of_get_property(np, "cell-index", NULL);
if (!iprop) {
iprop = of_get_property(np, "device-id", NULL);
if (!iprop) {
pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
np->full_name);
continue;
}
}
id = be32_to_cpup(iprop);
/*
* cell-index and device-id for QE nodes are
* numbered from 1, not 0.
*/
if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
np->full_name);
continue;
}
pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
found_mii_master = true;
}
}
#endif
static struct of_device_id fsl_pq_mdio_match[] = {
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
{
.compatible = "fsl,gianfar-tbi",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_gfar_tbipa,
},
},
{
.compatible = "fsl,gianfar-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_gfar_tbipa,
},
},
{
.type = "mdio",
.compatible = "gianfar",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
.get_tbipa = get_gfar_tbipa,
},
},
{
.compatible = "fsl,etsec2-tbi",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
.get_tbipa = get_etsec_tbipa,
},
},
{
.compatible = "fsl,etsec2-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
.get_tbipa = get_etsec_tbipa,
},
},
#endif
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
{
.compatible = "fsl,ucc-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_ucc_tbipa,
.ucc_configure = ucc_configure,
},
},
{
/* Legacy UCC MDIO node */
.type = "mdio",
.compatible = "ucc_geth_phy",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
.get_tbipa = get_ucc_tbipa,
.ucc_configure = ucc_configure,
},
},
#endif
/* No Kconfig option for Fman support yet */
{
.compatible = "fsl,fman-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
/* Fman TBI operations are handled elsewhere */
},
},
{},
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
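/*
 * A rough sketch of a node matched by the "fsl,gianfar-mdio" entry above;
 * the addresses and PHY register values are hypothetical:
 *
 *   mdio@24520 {
 *           compatible = "fsl,gianfar-mdio";
 *           reg = <0x24520 0x20>;
 *           #address-cells = <1>;
 *           #size-cells = <0>;
 *
 *           phy0: ethernet-phy@0 {
 *                   reg = <0x0>;
 *           };
 *
 *           tbi0: tbi-phy@11 {
 *                   reg = <0x11>;
 *                   device_type = "tbi-phy";
 *           };
 *   };
 *
 * The tbi-phy child (matched by device_type) is what the probe routine
 * scans for when it programs TBIPA.
 */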
static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
const struct fsl_pq_mdio_data *data = id->data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
struct fsl_pq_mdio_priv *priv;
struct mii_bus *new_bus;
int err;
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
new_bus = mdiobus_alloc_size(sizeof(*priv));
if (!new_bus)
return -ENOMEM;
priv = new_bus->priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read;
new_bus->write = &fsl_pq_mdio_write;
new_bus->reset = &fsl_pq_mdio_reset;
new_bus->irq = priv->irqs;
err = of_address_to_resource(np, 0, &res);
if (err < 0) {
dev_err(&pdev->dev, "could not obtain address information\n");
goto error;
}
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
(unsigned long long)res.start);
priv->map = of_iomap(np, 0);
if (!priv->map) {
err = -ENOMEM;
goto error;
}
/*
* Some device tree nodes represent only the MII registers, and
* others represent the MAC and MII registers. The 'mii_offset' field
* contains the offset of the MII registers inside the mapped register
* space.
*/
if (data->mii_offset > resource_size(&res)) {
dev_err(&pdev->dev, "invalid register map\n");
err = -EINVAL;
goto error;
}
priv->regs = priv->map + data->mii_offset;
new_bus->parent = &pdev->dev;
platform_set_drvdata(pdev, new_bus);
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
if (strcmp(tbi->type, "tbi-phy") == 0) {
dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
strrchr(tbi->full_name, '/') + 1);
break;
}
}
if (tbi) {
const u32 *prop = of_get_property(tbi, "reg", NULL);
uint32_t __iomem *tbipa;
if (!prop) {
dev_err(&pdev->dev,
"missing 'reg' property in node %s\n",
tbi->full_name);
err = -EBUSY;
goto error;
}
tbipa = data->get_tbipa(priv->map);
iowrite32be(be32_to_cpup(prop), tbipa);
}
}
if (data->ucc_configure)
data->ucc_configure(res.start, res.end);
err = of_mdiobus_register(new_bus, np);
if (err) {
dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
new_bus->name);
goto error;
}
return 0;
error:
if (priv->map)
iounmap(priv->map);
kfree(new_bus);
return err;
}
static int fsl_pq_mdio_remove(struct platform_device *pdev)
{
struct device *device = &pdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
struct fsl_pq_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
iounmap(priv->map);
mdiobus_free(bus);
return 0;
}
static struct platform_driver fsl_pq_mdio_driver = {
.driver = {
.name = "fsl-pq_mdio",
.owner = THIS_MODULE,
.of_match_table = fsl_pq_mdio_match,
},
.probe = fsl_pq_mdio_probe,
.remove = fsl_pq_mdio_remove,
};
module_platform_driver(fsl_pq_mdio_driver);
MODULE_LICENSE("GPL");

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,576 @@
/*
* PTP 1588 clock using the eTSEC
*
* Copyright (C) 2010 OMICRON electronics GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/ptp_clock_kernel.h>
#include "gianfar.h"
/*
* gianfar ptp registers
* Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
*/
struct gianfar_ptp_registers {
u32 tmr_ctrl; /* Timer control register */
u32 tmr_tevent; /* Timestamp event register */
u32 tmr_temask; /* Timer event mask register */
u32 tmr_pevent; /* Timestamp event register */
u32 tmr_pemask; /* Timer event mask register */
u32 tmr_stat; /* Timestamp status register */
u32 tmr_cnt_h; /* Timer counter high register */
u32 tmr_cnt_l; /* Timer counter low register */
u32 tmr_add; /* Timer drift compensation addend register */
u32 tmr_acc; /* Timer accumulator register */
u32 tmr_prsc; /* Timer prescale */
u8 res1[4];
u32 tmroff_h; /* Timer offset high */
u32 tmroff_l; /* Timer offset low */
u8 res2[8];
u32 tmr_alarm1_h; /* Timer alarm 1 high register */
u32 tmr_alarm1_l; /* Timer alarm 1 low register */
u32 tmr_alarm2_h; /* Timer alarm 2 high register */
u32 tmr_alarm2_l; /* Timer alarm 2 low register */
u8 res3[48];
u32 tmr_fiper1; /* Timer fixed period interval */
u32 tmr_fiper2; /* Timer fixed period interval */
u32 tmr_fiper3; /* Timer fixed period interval */
u8 res4[20];
u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
};
/* Bit definitions for the TMR_CTRL register */
#define ALM1P (1<<31) /* Alarm1 output polarity */
#define ALM2P (1<<30) /* Alarm2 output polarity */
#define FS (1<<28) /* FIPER start indication */
#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
#define TCLK_PERIOD_MASK (0x3ff)
#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
#define FRD (1<<14) /* FIPER Realignment Disable */
#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
#define COPH (1<<7) /* Generated clock output phase. */
#define CIPH (1<<6) /* External oscillator input clock phase */
#define TMSR (1<<5) /* Timer soft reset. */
#define BYP (1<<3) /* Bypass drift compensated clock */
#define TE (1<<2) /* 1588 timer enable. */
#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
#define CKSEL_MASK (0x3)
/* Bit definitions for the TMR_TEVENT register */
#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
#define ALM2 (1<<17) /* Current time = alarm time register 2 */
#define ALM1 (1<<16) /* Current time = alarm time register 1 */
#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
/* Bit definitions for the TMR_TEMASK register */
#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
#define ALM2EN (1<<17) /* Timer ALM2 event enable */
#define ALM1EN (1<<16) /* Timer ALM1 event enable */
#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
/* Bit definitions for the TMR_PEVENT register */
#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
#define RXP (1<<0) /* PTP frame has been received */
/* Bit definitions for the TMR_PEMASK register */
#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
#define RXPEN (1<<0) /* Receive PTP packet event enable */
/* Bit definitions for the TMR_STAT register */
#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
#define STAT_VEC_MASK (0x3f)
/* Bit definitions for the TMR_PRSC register */
#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
#define PRSC_OCK_MASK (0xffff)
#define DRIVER "gianfar_ptp"
#define DEFAULT_CKSEL 1
#define N_EXT_TS 2
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
struct etsects {
struct gianfar_ptp_registers __iomem *regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
int irq;
u64 alarm_interval; /* for periodic alarm */
u64 alarm_value;
u32 tclk_period; /* nanoseconds */
u32 tmr_prsc;
u32 tmr_add;
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
};
/*
* Register access functions
*/
/* Caller must hold etsects->lock. */
static u64 tmr_cnt_read(struct etsects *etsects)
{
u64 ns;
u32 lo, hi;
lo = gfar_read(&etsects->regs->tmr_cnt_l);
hi = gfar_read(&etsects->regs->tmr_cnt_h);
ns = ((u64) hi) << 32;
ns |= lo;
return ns;
}
/* Caller must hold etsects->lock. */
static void tmr_cnt_write(struct etsects *etsects, u64 ns)
{
u32 hi = ns >> 32;
u32 lo = ns & 0xffffffff;
gfar_write(&etsects->regs->tmr_cnt_l, lo);
gfar_write(&etsects->regs->tmr_cnt_h, hi);
}
/* Caller must hold etsects->lock. */
static void set_alarm(struct etsects *etsects)
{
u64 ns;
u32 lo, hi;
ns = tmr_cnt_read(etsects) + 1500000000ULL;
ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
ns -= etsects->tclk_period;
hi = ns >> 32;
lo = ns & 0xffffffff;
gfar_write(&etsects->regs->tmr_alarm1_l, lo);
gfar_write(&etsects->regs->tmr_alarm1_h, hi);
}
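/*
 * Worked example with illustrative numbers: with tclk_period = 10 ns and a
 * current counter value of 2.3 s, set_alarm() computes 2.3 s + 1.5 s =
 * 3.8 s, truncates that to the 3 s boundary, and programs the alarm for
 * 3 s - 10 ns, so FIPER pulses restarted from the alarm stay aligned to
 * whole seconds.
 */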
/* Caller must hold etsects->lock. */
static void set_fipers(struct etsects *etsects)
{
set_alarm(etsects);
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
}
/*
* Interrupt service routine
*/
static irqreturn_t isr(int irq, void *priv)
{
struct etsects *etsects = priv;
struct ptp_clock_event event;
u64 ns;
u32 ack = 0, lo, hi, mask, val;
val = gfar_read(&etsects->regs->tmr_tevent);
if (val & ETS1) {
ack |= ETS1;
hi = gfar_read(&etsects->regs->tmr_etts1_h);
lo = gfar_read(&etsects->regs->tmr_etts1_l);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ((u64) hi) << 32;
event.timestamp |= lo;
ptp_clock_event(etsects->clock, &event);
}
if (val & ETS2) {
ack |= ETS2;
hi = gfar_read(&etsects->regs->tmr_etts2_h);
lo = gfar_read(&etsects->regs->tmr_etts2_l);
event.type = PTP_CLOCK_EXTTS;
event.index = 1;
event.timestamp = ((u64) hi) << 32;
event.timestamp |= lo;
ptp_clock_event(etsects->clock, &event);
}
if (val & ALM2) {
ack |= ALM2;
if (etsects->alarm_value) {
event.type = PTP_CLOCK_ALARM;
event.index = 0;
event.timestamp = etsects->alarm_value;
ptp_clock_event(etsects->clock, &event);
}
if (etsects->alarm_interval) {
ns = etsects->alarm_value + etsects->alarm_interval;
hi = ns >> 32;
lo = ns & 0xffffffff;
spin_lock(&etsects->lock);
gfar_write(&etsects->regs->tmr_alarm2_l, lo);
gfar_write(&etsects->regs->tmr_alarm2_h, hi);
spin_unlock(&etsects->lock);
etsects->alarm_value = ns;
} else {
gfar_write(&etsects->regs->tmr_tevent, ALM2);
spin_lock(&etsects->lock);
mask = gfar_read(&etsects->regs->tmr_temask);
mask &= ~ALM2EN;
gfar_write(&etsects->regs->tmr_temask, mask);
spin_unlock(&etsects->lock);
etsects->alarm_value = 0;
etsects->alarm_interval = 0;
}
}
if (val & PP1) {
ack |= PP1;
event.type = PTP_CLOCK_PPS;
ptp_clock_event(etsects->clock, &event);
}
if (ack) {
gfar_write(&etsects->regs->tmr_tevent, ack);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
/*
* PTP clock operations
*/
static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
u64 adj;
u32 diff, tmr_add;
int neg_adj = 0;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
if (ppb < 0) {
neg_adj = 1;
ppb = -ppb;
}
tmr_add = etsects->tmr_add;
adj = tmr_add;
adj *= ppb;
diff = div_u64(adj, 1000000000ULL);
tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
gfar_write(&etsects->regs->tmr_add, tmr_add);
return 0;
}
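/*
 * Illustrative numbers: with tmr_add = 0x80000000 and ppb = +100, the
 * correction is div_u64(0x80000000 * 100, 1000000000) = 214, so the addend
 * written back is 0x80000000 + 214; a negative ppb subtracts the same
 * amount.
 */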
static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
s64 now;
unsigned long flags;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
spin_lock_irqsave(&etsects->lock, flags);
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
spin_unlock_irqrestore(&etsects->lock, flags);
set_fipers(etsects);
return 0;
}
static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
u64 ns;
u32 remainder;
unsigned long flags;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
spin_lock_irqsave(&etsects->lock, flags);
ns = tmr_cnt_read(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
ts->tv_nsec = remainder;
return 0;
}
static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
u64 ns;
unsigned long flags;
struct etsects *etsects = container_of(ptp, struct etsects, caps);
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
spin_lock_irqsave(&etsects->lock, flags);
tmr_cnt_write(etsects, ns);
set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
return 0;
}
static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct etsects *etsects = container_of(ptp, struct etsects, caps);
unsigned long flags;
u32 bit, mask;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
switch (rq->extts.index) {
case 0:
bit = ETS1EN;
break;
case 1:
bit = ETS2EN;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&etsects->lock, flags);
mask = gfar_read(&etsects->regs->tmr_temask);
if (on)
mask |= bit;
else
mask &= ~bit;
gfar_write(&etsects->regs->tmr_temask, mask);
spin_unlock_irqrestore(&etsects->lock, flags);
return 0;
case PTP_CLK_REQ_PPS:
spin_lock_irqsave(&etsects->lock, flags);
mask = gfar_read(&etsects->regs->tmr_temask);
if (on)
mask |= PP1EN;
else
mask &= ~PP1EN;
gfar_write(&etsects->regs->tmr_temask, mask);
spin_unlock_irqrestore(&etsects->lock, flags);
return 0;
default:
break;
}
return -EOPNOTSUPP;
}
static struct ptp_clock_info ptp_gianfar_caps = {
.owner = THIS_MODULE,
.name = "gianfar clock",
.max_adj = 512000,
.n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
.n_pins = 0,
.pps = 1,
.adjfreq = ptp_gianfar_adjfreq,
.adjtime = ptp_gianfar_adjtime,
.gettime = ptp_gianfar_gettime,
.settime = ptp_gianfar_settime,
.enable = ptp_gianfar_enable,
};
/* OF device tree */
static int get_of_u32(struct device_node *node, char *str, u32 *val)
{
int plen;
const u32 *prop = of_get_property(node, str, &plen);
if (!prop || plen != sizeof(*prop))
return -1;
*val = *prop;
return 0;
}
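/*
 * A minimal sketch of the device tree node consumed below; the property
 * values are hypothetical and not tuned for real silicon (fiper1 is chosen
 * as 1 s minus one tclk period to match the alignment logic above):
 *
 *   ptp_clock@24e00 {
 *           compatible = "fsl,etsec-ptp";
 *           reg = <0x24e00 0xb0>;
 *           interrupts = <12 0x8>;
 *           fsl,cksel = <1>;
 *           fsl,tclk-period = <10>;
 *           fsl,tmr-prsc = <100>;
 *           fsl,tmr-add = <0x999999a4>;
 *           fsl,tmr-fiper1 = <999999990>;
 *           fsl,tmr-fiper2 = <99990>;
 *           fsl,max-adj = <659999998>;
 *   };
 */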
static int gianfar_ptp_probe(struct platform_device *dev)
{
struct device_node *node = dev->dev.of_node;
struct etsects *etsects;
struct timespec now;
int err = -ENOMEM;
u32 tmr_ctrl;
unsigned long flags;
etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
if (!etsects)
goto no_memory;
err = -ENODEV;
etsects->caps = ptp_gianfar_caps;
if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
etsects->cksel = DEFAULT_CKSEL;
if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
pr_err("device tree node missing required elements\n");
goto no_node;
}
etsects->irq = platform_get_irq(dev, 0);
if (etsects->irq == NO_IRQ) {
pr_err("irq not in device tree\n");
goto no_node;
}
if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
pr_err("request_irq failed\n");
goto no_node;
}
etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!etsects->rsrc) {
pr_err("no resource\n");
goto no_resource;
}
if (request_resource(&iomem_resource, etsects->rsrc)) {
pr_err("resource busy\n");
goto no_resource;
}
spin_lock_init(&etsects->lock);
etsects->regs = ioremap(etsects->rsrc->start,
resource_size(etsects->rsrc));
if (!etsects->regs) {
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
getnstimeofday(&now);
ptp_gianfar_settime(&etsects->caps, &now);
tmr_ctrl =
(etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
(etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
spin_lock_irqsave(&etsects->lock, flags);
gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
set_alarm(etsects);
gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
spin_unlock_irqrestore(&etsects->lock, flags);
etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
if (IS_ERR(etsects->clock)) {
err = PTR_ERR(etsects->clock);
goto no_clock;
}
gfar_phc_index = ptp_clock_index(etsects->clock);
platform_set_drvdata(dev, etsects);
return 0;
no_clock:
iounmap(etsects->regs);
no_ioremap:
release_resource(etsects->rsrc);
no_resource:
free_irq(etsects->irq, etsects);
no_node:
kfree(etsects);
no_memory:
return err;
}
static int gianfar_ptp_remove(struct platform_device *dev)
{
struct etsects *etsects = platform_get_drvdata(dev);
gfar_write(&etsects->regs->tmr_temask, 0);
gfar_write(&etsects->regs->tmr_ctrl, 0);
gfar_phc_index = -1;
ptp_clock_unregister(etsects->clock);
iounmap(etsects->regs);
release_resource(etsects->rsrc);
free_irq(etsects->irq, etsects);
kfree(etsects);
return 0;
}
static struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
{},
};
static struct platform_driver gianfar_ptp_driver = {
.driver = {
.name = "gianfar_ptp",
.of_match_table = match_table,
.owner = THIS_MODULE,
},
.probe = gianfar_ptp_probe,
.remove = gianfar_ptp_remove,
};
module_platform_driver(gianfar_ptp_driver);
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
MODULE_LICENSE("GPL");

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,421 @@
/*
* Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* Description: QE UCC Gigabit Ethernet Ethtool API Set
*
* Author: Li Yang <leoli@freescale.com>
*
* Limitation:
* Can only get/set settings of the first queue.
* Need to re-open the interface manually after changing some parameters.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include "ucc_geth.h"
static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
"tx-64-frames",
"tx-65-127-frames",
"tx-128-255-frames",
"rx-64-frames",
"rx-65-127-frames",
"rx-128-255-frames",
"tx-bytes-ok",
"tx-pause-frames",
"tx-multicast-frames",
"tx-broadcast-frames",
"rx-frames",
"rx-bytes-ok",
"rx-bytes-all",
"rx-multicast-frames",
"rx-broadcast-frames",
"stats-counter-carry",
"stats-counter-mask",
"rx-dropped-frames",
};
static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
"tx-single-collision",
"tx-multiple-collision",
"tx-late-collsion",
"tx-aborted-frames",
"tx-lost-frames",
"tx-carrier-sense-errors",
"tx-frames-ok",
"tx-excessive-differ-frames",
"tx-256-511-frames",
"tx-512-1023-frames",
"tx-1024-1518-frames",
"tx-jumbo-frames",
};
static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-crc-errors",
"rx-alignment-errors",
"rx-in-range-length-errors",
"rx-out-of-range-length-errors",
"rx-too-long-frames",
"rx-runt",
"rx-very-long-event",
"rx-symbol-errors",
"rx-busy-drop-frames",
"reserved",
"reserved",
"rx-mismatch-drop-frames",
"rx-small-than-64",
"rx-256-511-frames",
"rx-512-1023-frames",
"rx-1024-1518-frames",
"rx-jumbo-frames",
"rx-mac-error-loss",
"rx-pause-frames",
"reserved",
"rx-vlan-removed",
"rx-vlan-replaced",
"rx-vlan-inserted",
"rx-ip-checksum-errors",
};
#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings)
#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
static int
uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
struct ucc_geth_info *ug_info = ugeth->ug_info;
if (!phydev)
return -ENODEV;
ecmd->maxtxpkt = 1;
ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
return phy_ethtool_gset(phydev, ecmd);
}
static int
uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
if (!phydev)
return -ENODEV;
return phy_ethtool_sset(phydev, ecmd);
}
static void
uec_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
pause->autoneg = ugeth->phydev->autoneg;
if (ugeth->ug_info->receiveFlowControl)
pause->rx_pause = 1;
if (ugeth->ug_info->transmitFlowControl)
pause->tx_pause = 1;
}
static int
uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
if (ugeth->phydev->autoneg) {
if (netif_running(netdev)) {
/* FIXME: automatically restart */
netdev_info(netdev, "Please re-open the interface\n");
}
} else {
struct ucc_geth_info *ug_info = ugeth->ug_info;
ret = init_flow_control_params(ug_info->aufc,
ug_info->receiveFlowControl,
ug_info->transmitFlowControl,
ug_info->pausePeriod,
ug_info->extensionField,
&ugeth->uccf->uf_regs->upsmr,
&ugeth->ug_regs->uempr,
&ugeth->ug_regs->maccfg1);
}
return ret;
}
static uint32_t
uec_get_msglevel(struct net_device *netdev)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
return ugeth->msg_enable;
}
static void
uec_set_msglevel(struct net_device *netdev, uint32_t data)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
ugeth->msg_enable = data;
}
static int
uec_get_regs_len(struct net_device *netdev)
{
return sizeof(struct ucc_geth);
}
static void
uec_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
int i;
struct ucc_geth_private *ugeth = netdev_priv(netdev);
u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs;
u32 *buff = p;
for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++)
buff[i] = in_be32(&ug_regs[i]);
}
static void
uec_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
int queue = 0;
ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring->rx_pending = ug_info->bdRingLenRx[queue];
ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
ring->tx_pending = ug_info->bdRingLenTx[queue];
}
static int
uec_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
int queue = 0, ret = 0;
if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
netdev_info(netdev, "RxBD ring size must be no smaller than %d\n",
UCC_GETH_RX_BD_RING_SIZE_MIN);
return -EINVAL;
}
if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
netdev_info(netdev, "RxBD ring size must be multiple of %d\n",
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
return -EINVAL;
}
if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
netdev_info(netdev, "TxBD ring size must be no smaller than %d\n",
UCC_GETH_TX_BD_RING_SIZE_MIN);
return -EINVAL;
}
ug_info->bdRingLenRx[queue] = ring->rx_pending;
ug_info->bdRingLenTx[queue] = ring->tx_pending;
if (netif_running(netdev)) {
/* FIXME: restart automatically */
netdev_info(netdev, "Please re-open the interface\n");
}
return ret;
}
static int uec_get_sset_count(struct net_device *netdev, int sset)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
u32 stats_mode = ugeth->ug_info->statisticsMode;
int len = 0;
switch (sset) {
case ETH_SS_STATS:
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
len += UEC_HW_STATS_LEN;
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
len += UEC_TX_FW_STATS_LEN;
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
len += UEC_RX_FW_STATS_LEN;
return len;
default:
return -EOPNOTSUPP;
}
}
static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
u32 stats_mode = ugeth->ug_info->statisticsMode;
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
ETH_GSTRING_LEN);
buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
}
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
ETH_GSTRING_LEN);
buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
}
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
ETH_GSTRING_LEN);
}
static void uec_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
u32 stats_mode = ugeth->ug_info->statisticsMode;
u32 __iomem *base;
int i, j = 0;
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
if (ugeth->ug_regs)
base = (u32 __iomem *)&ugeth->ug_regs->tx64;
else
base = NULL;
for (i = 0; i < UEC_HW_STATS_LEN; i++)
data[j++] = base ? in_be32(&base[i]) : 0;
}
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
data[j++] = base ? in_be32(&base[i]) : 0;
}
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
data[j++] = base ? in_be32(&base[i]) : 0;
}
}
static int uec_nway_reset(struct net_device *netdev)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
return phy_start_aneg(ugeth->phydev);
}
/* Report driver information */
static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
drvinfo->eedump_len = 0;
drvinfo->regdump_len = uec_get_regs_len(netdev);
}
#ifdef CONFIG_PM
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
if (phydev && phydev->irq)
wol->supported |= WAKE_PHY;
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
wol->wolopts = ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;
else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
return -EINVAL;
else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
return -EINVAL;
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
return 0;
}
#else
#define uec_get_wol NULL
#define uec_set_wol NULL
#endif /* CONFIG_PM */
static const struct ethtool_ops uec_ethtool_ops = {
.get_settings = uec_get_settings,
.set_settings = uec_set_settings,
.get_drvinfo = uec_get_drvinfo,
.get_regs_len = uec_get_regs_len,
.get_regs = uec_get_regs,
.get_msglevel = uec_get_msglevel,
.set_msglevel = uec_set_msglevel,
.nway_reset = uec_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = uec_get_ringparam,
.set_ringparam = uec_set_ringparam,
.get_pauseparam = uec_get_pauseparam,
.set_pauseparam = uec_set_pauseparam,
.get_sset_count = uec_get_sset_count,
.get_strings = uec_get_strings,
.get_ethtool_stats = uec_get_ethtool_stats,
.get_wol = uec_get_wol,
.set_wol = uec_set_wol,
.get_ts_info = ethtool_op_get_ts_info,
};
void uec_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &uec_ethtool_ops;
}
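/*
 * Illustrative usage from user space (standard ethtool invocations, not
 * specific to this driver): "ethtool -S ethX" dumps the statistics strings
 * declared above, "ethtool -G ethX rx 64 tx 64" exercises the ringparam
 * handlers, and "ethtool -A ethX rx on tx on" the pauseparam handlers.
 */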

View file

@@ -0,0 +1,277 @@
/*
* QorIQ 10G MDIO Controller
*
* Copyright 2012 Freescale Semiconductor, Inc.
*
* Authors: Andy Fleming <afleming@freescale.com>
* Timur Tabi <timur@freescale.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
/* Number of microseconds to wait for a register to respond */
#define TIMEOUT 1000
struct tgec_mdio_controller {
__be32 reserved[12];
__be32 mdio_stat; /* MDIO configuration and status */
__be32 mdio_ctl; /* MDIO control */
__be32 mdio_data; /* MDIO data */
__be32 mdio_addr; /* MDIO address */
} __packed;
#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
#define MDIO_STAT_BSY (1 << 0)
#define MDIO_STAT_RD_ER (1 << 1)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS (1 << 10)
#define MDIO_CTL_SCAN_EN (1 << 11)
#define MDIO_CTL_POST_INC (1 << 14)
#define MDIO_CTL_READ (1 << 15)
#define MDIO_DATA(x) (x & 0xffff)
#define MDIO_DATA_BSY (1 << 31)
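/*
 * Note on addressing (illustrative): callers are expected to encode the
 * clause-45 MMD in the upper half of regnum. A read of register 7 in MMD 3
 * on port address 4 would pass regnum = (3 << 16) | 7, giving dev_addr = 3
 * and mdio_addr = 0x0007, with MDIO_CTL_PORT_ADDR(4) | MDIO_CTL_DEV_ADDR(3)
 * in mdio_ctl. For the clock setup, MDIO_STAT_CLKDIV(100) expands to
 * ((100 >> 1) & 0xff) << 8 = 0x3200.
 */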
/*
* Wait until the MDIO bus is free
*/
static int xgmac_wait_until_free(struct device *dev,
struct tgec_mdio_controller __iomem *regs)
{
uint32_t status;
/* Wait till the bus is free */
status = spin_event_timeout(
!((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
if (!status) {
dev_err(dev, "timeout waiting for bus to be free\n");
return -ETIMEDOUT;
}
return 0;
}
/*
* Wait till the MDIO read or write operation is complete
*/
static int xgmac_wait_until_done(struct device *dev,
struct tgec_mdio_controller __iomem *regs)
{
uint32_t status;
/* Wait till the MDIO write is complete */
status = spin_event_timeout(
!((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
if (!status) {
dev_err(dev, "timeout waiting for operation to complete\n");
return -ETIMEDOUT;
}
return 0;
}
/*
* Write value to the PHY for this device to the register at regnum, waiting
* until the write is done before it returns. All PHY configuration has to be
* done through the TSEC1 MIIM regs.
*/
static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
uint16_t dev_addr = regnum >> 16;
int ret;
/* Setup the MII Mgmt clock speed */
out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Set the port and dev addr */
out_be32(&regs->mdio_ctl,
MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
/* Set the register address */
out_be32(&regs->mdio_addr, regnum & 0xffff);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Write the value to the register */
out_be32(&regs->mdio_data, MDIO_DATA(value));
ret = xgmac_wait_until_done(&bus->dev, regs);
if (ret)
return ret;
return 0;
}
/*
* Reads from register regnum in the PHY for device dev, returning the value.
* Clears miimcom first. All PHY configuration has to be done through the
* TSEC1 MIIM regs.
*/
static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
uint16_t dev_addr = regnum >> 16;
uint32_t mdio_ctl;
uint16_t value;
int ret;
/* Setup the MII Mgmt clock speed */
out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Set the Port and Device Addrs */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
out_be32(&regs->mdio_ctl, mdio_ctl);
/* Set the register address */
out_be32(&regs->mdio_addr, regnum & 0xffff);
ret = xgmac_wait_until_free(&bus->dev, regs);
if (ret)
return ret;
/* Initiate the read */
out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
ret = xgmac_wait_until_done(&bus->dev, regs);
if (ret)
return ret;
/* Return all Fs if nothing was there */
if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
value = in_be32(&regs->mdio_data) & 0xffff;
dev_dbg(&bus->dev, "read %04x\n", value);
return value;
}
/* Reset the MIIM registers, and wait for the bus to free */
static int xgmac_mdio_reset(struct mii_bus *bus)
{
struct tgec_mdio_controller __iomem *regs = bus->priv;
int ret;
mutex_lock(&bus->mdio_lock);
/* Setup the MII Mgmt clock speed */
out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
ret = xgmac_wait_until_free(&bus->dev, regs);
mutex_unlock(&bus->mdio_lock);
return ret;
}
static int xgmac_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct resource res;
int ret;
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(&pdev->dev, "could not obtain address\n");
return ret;
}
bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
if (!bus)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
bus->read = xgmac_mdio_read;
bus->write = xgmac_mdio_write;
bus->reset = xgmac_mdio_reset;
bus->irq = bus->priv;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
/* Set the PHY base address */
bus->priv = of_iomap(np, 0);
if (!bus->priv) {
ret = -ENOMEM;
goto err_ioremap;
}
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
goto err_registration;
}
platform_set_drvdata(pdev, bus);
return 0;
err_registration:
iounmap(bus->priv);
err_ioremap:
mdiobus_free(bus);
return ret;
}
static int xgmac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus);
iounmap(bus->priv);
mdiobus_free(bus);
return 0;
}
static struct of_device_id xgmac_mdio_match[] = {
{
.compatible = "fsl,fman-xmdio",
},
{},
};
MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
static struct platform_driver xgmac_mdio_driver = {
.driver = {
.name = "fsl-fman_xmdio",
.of_match_table = xgmac_mdio_match,
},
.probe = xgmac_mdio_probe,
.remove = xgmac_mdio_remove,
};
module_platform_driver(xgmac_mdio_driver);
MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
MODULE_LICENSE("GPL v2");