mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-11-01 08:38:52 +01:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
38
drivers/net/ethernet/sfc/Kconfig
Normal file
38
drivers/net/ethernet/sfc/Kconfig
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
config SFC
|
||||
tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
|
||||
depends on PCI
|
||||
select MDIO
|
||||
select CRC32
|
||||
select I2C
|
||||
select I2C_ALGOBIT
|
||||
select PTP_1588_CLOCK
|
||||
---help---
|
||||
This driver supports 10/40-gigabit Ethernet cards based on
|
||||
the Solarflare SFC4000, SFC9000-family and SFC9100-family
|
||||
controllers.
|
||||
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called sfc.
|
||||
config SFC_MTD
|
||||
bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
|
||||
depends on SFC && MTD && !(SFC=y && MTD=m)
|
||||
default y
|
||||
---help---
|
||||
This exposes the on-board flash and/or EEPROM as MTD devices
|
||||
(e.g. /dev/mtd1). This is required to update the firmware or
|
||||
the boot configuration under Linux.
|
||||
config SFC_MCDI_MON
|
||||
bool "Solarflare SFC9000/SFC9100-family hwmon support"
|
||||
depends on SFC && HWMON && !(SFC=y && HWMON=m)
|
||||
default y
|
||||
---help---
|
||||
This exposes the on-board firmware-managed sensors as a
|
||||
hardware monitor device.
|
||||
config SFC_SRIOV
|
||||
bool "Solarflare SFC9000-family SR-IOV support"
|
||||
depends on SFC && PCI_IOV
|
||||
default y
|
||||
---help---
|
||||
This enables support for the SFC9000 I/O Virtualization
|
||||
features, allowing accelerated network performance in
|
||||
virtualized environments.
|
||||
8
drivers/net/ethernet/sfc/Makefile
Normal file
8
drivers/net/ethernet/sfc/Makefile
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
|
||||
rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
|
||||
tenxpress.o txc43128_phy.o falcon_boards.o \
|
||||
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
|
||||
sfc-$(CONFIG_SFC_MTD) += mtd.o
|
||||
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
|
||||
|
||||
obj-$(CONFIG_SFC) += sfc.o
|
||||
542
drivers/net/ethernet/sfc/bitfield.h
Normal file
542
drivers/net/ethernet/sfc/bitfield.h
Normal file
|
|
@ -0,0 +1,542 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_BITFIELD_H
|
||||
#define EFX_BITFIELD_H
|
||||
|
||||
/*
|
||||
* Efx bitfield access
|
||||
*
|
||||
* Efx NICs make extensive use of bitfields up to 128 bits
|
||||
* wide. Since there is no native 128-bit datatype on most systems,
|
||||
* and since 64-bit datatypes are inefficient on 32-bit systems and
|
||||
* vice versa, we wrap accesses in a way that uses the most efficient
|
||||
* datatype.
|
||||
*
|
||||
* The NICs are PCI devices and therefore little-endian. Since most
|
||||
* of the quantities that we deal with are DMAed to/from host memory,
|
||||
* we define our datatypes (efx_oword_t, efx_qword_t and
|
||||
* efx_dword_t) to be little-endian.
|
||||
*/
|
||||
|
||||
/* Lowest bit numbers and widths */
|
||||
#define EFX_DUMMY_FIELD_LBN 0
|
||||
#define EFX_DUMMY_FIELD_WIDTH 0
|
||||
#define EFX_WORD_0_LBN 0
|
||||
#define EFX_WORD_0_WIDTH 16
|
||||
#define EFX_WORD_1_LBN 16
|
||||
#define EFX_WORD_1_WIDTH 16
|
||||
#define EFX_DWORD_0_LBN 0
|
||||
#define EFX_DWORD_0_WIDTH 32
|
||||
#define EFX_DWORD_1_LBN 32
|
||||
#define EFX_DWORD_1_WIDTH 32
|
||||
#define EFX_DWORD_2_LBN 64
|
||||
#define EFX_DWORD_2_WIDTH 32
|
||||
#define EFX_DWORD_3_LBN 96
|
||||
#define EFX_DWORD_3_WIDTH 32
|
||||
#define EFX_QWORD_0_LBN 0
|
||||
#define EFX_QWORD_0_WIDTH 64
|
||||
|
||||
/* Specified attribute (e.g. LBN) of the specified field */
|
||||
#define EFX_VAL(field, attribute) field ## _ ## attribute
|
||||
/* Low bit number of the specified field */
|
||||
#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
|
||||
/* Bit width of the specified field */
|
||||
#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
|
||||
/* High bit number of the specified field */
|
||||
#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
|
||||
/* Mask equal in width to the specified field.
|
||||
*
|
||||
* For example, a field with width 5 would have a mask of 0x1f.
|
||||
*
|
||||
* The maximum width mask that can be generated is 64 bits.
|
||||
*/
|
||||
#define EFX_MASK64(width) \
|
||||
((width) == 64 ? ~((u64) 0) : \
|
||||
(((((u64) 1) << (width))) - 1))
|
||||
|
||||
/* Mask equal in width to the specified field.
|
||||
*
|
||||
* For example, a field with width 5 would have a mask of 0x1f.
|
||||
*
|
||||
* The maximum width mask that can be generated is 32 bits. Use
|
||||
* EFX_MASK64 for higher width fields.
|
||||
*/
|
||||
#define EFX_MASK32(width) \
|
||||
((width) == 32 ? ~((u32) 0) : \
|
||||
(((((u32) 1) << (width))) - 1))
|
||||
|
||||
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
|
||||
typedef union efx_dword {
|
||||
__le32 u32[1];
|
||||
} efx_dword_t;
|
||||
|
||||
/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
|
||||
typedef union efx_qword {
|
||||
__le64 u64[1];
|
||||
__le32 u32[2];
|
||||
efx_dword_t dword[2];
|
||||
} efx_qword_t;
|
||||
|
||||
/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
|
||||
typedef union efx_oword {
|
||||
__le64 u64[2];
|
||||
efx_qword_t qword[2];
|
||||
__le32 u32[4];
|
||||
efx_dword_t dword[4];
|
||||
} efx_oword_t;
|
||||
|
||||
/* Format string and value expanders for printk */
|
||||
#define EFX_DWORD_FMT "%08x"
|
||||
#define EFX_QWORD_FMT "%08x:%08x"
|
||||
#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
|
||||
#define EFX_DWORD_VAL(dword) \
|
||||
((unsigned int) le32_to_cpu((dword).u32[0]))
|
||||
#define EFX_QWORD_VAL(qword) \
|
||||
((unsigned int) le32_to_cpu((qword).u32[1])), \
|
||||
((unsigned int) le32_to_cpu((qword).u32[0]))
|
||||
#define EFX_OWORD_VAL(oword) \
|
||||
((unsigned int) le32_to_cpu((oword).u32[3])), \
|
||||
((unsigned int) le32_to_cpu((oword).u32[2])), \
|
||||
((unsigned int) le32_to_cpu((oword).u32[1])), \
|
||||
((unsigned int) le32_to_cpu((oword).u32[0]))
|
||||
|
||||
/*
|
||||
* Extract bit field portion [low,high) from the native-endian element
|
||||
* which contains bits [min,max).
|
||||
*
|
||||
* For example, suppose "element" represents the high 32 bits of a
|
||||
* 64-bit value, and we wish to extract the bits belonging to the bit
|
||||
* field occupying bits 28-45 of this 64-bit value.
|
||||
*
|
||||
* Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
|
||||
*
|
||||
* ( element ) << 4
|
||||
*
|
||||
* The result will contain the relevant bits filled in in the range
|
||||
* [0,high-low), with garbage in bits [high-low+1,...).
|
||||
*/
|
||||
#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
|
||||
((low) > (max) || (high) < (min) ? 0 : \
|
||||
(low) > (min) ? \
|
||||
(native_element) >> ((low) - (min)) : \
|
||||
(native_element) << ((min) - (low)))
|
||||
|
||||
/*
|
||||
* Extract bit field portion [low,high) from the 64-bit little-endian
|
||||
* element which contains bits [min,max)
|
||||
*/
|
||||
#define EFX_EXTRACT64(element, min, max, low, high) \
|
||||
EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
|
||||
|
||||
/*
|
||||
* Extract bit field portion [low,high) from the 32-bit little-endian
|
||||
* element which contains bits [min,max)
|
||||
*/
|
||||
#define EFX_EXTRACT32(element, min, max, low, high) \
|
||||
EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
|
||||
|
||||
#define EFX_EXTRACT_OWORD64(oword, low, high) \
|
||||
((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
|
||||
EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
|
||||
EFX_MASK64((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_QWORD64(qword, low, high) \
|
||||
(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
|
||||
EFX_MASK64((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_OWORD32(oword, low, high) \
|
||||
((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
|
||||
EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
|
||||
EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
|
||||
EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
|
||||
EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_QWORD32(qword, low, high) \
|
||||
((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
|
||||
EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
|
||||
EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_DWORD(dword, low, high) \
|
||||
(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
|
||||
EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_OWORD_FIELD64(oword, field) \
|
||||
EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_QWORD_FIELD64(qword, field) \
|
||||
EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_OWORD_FIELD32(oword, field) \
|
||||
EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_QWORD_FIELD32(qword, field) \
|
||||
EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_DWORD_FIELD(dword, field) \
|
||||
EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_OWORD_IS_ZERO64(oword) \
|
||||
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
|
||||
|
||||
#define EFX_QWORD_IS_ZERO64(qword) \
|
||||
(((qword).u64[0]) == (__force __le64) 0)
|
||||
|
||||
#define EFX_OWORD_IS_ZERO32(oword) \
|
||||
(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
|
||||
== (__force __le32) 0)
|
||||
|
||||
#define EFX_QWORD_IS_ZERO32(qword) \
|
||||
(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
|
||||
|
||||
#define EFX_DWORD_IS_ZERO(dword) \
|
||||
(((dword).u32[0]) == (__force __le32) 0)
|
||||
|
||||
#define EFX_OWORD_IS_ALL_ONES64(oword) \
|
||||
(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
|
||||
|
||||
#define EFX_QWORD_IS_ALL_ONES64(qword) \
|
||||
((qword).u64[0] == ~((__force __le64) 0))
|
||||
|
||||
#define EFX_OWORD_IS_ALL_ONES32(oword) \
|
||||
(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
|
||||
== ~((__force __le32) 0))
|
||||
|
||||
#define EFX_QWORD_IS_ALL_ONES32(qword) \
|
||||
(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
|
||||
|
||||
#define EFX_DWORD_IS_ALL_ONES(dword) \
|
||||
((dword).u32[0] == ~((__force __le32) 0))
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
|
||||
#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
|
||||
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
|
||||
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
|
||||
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
|
||||
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
|
||||
#else
|
||||
#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
|
||||
#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
|
||||
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
|
||||
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
|
||||
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
|
||||
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Construct bit field portion
|
||||
*
|
||||
* Creates the portion of the bit field [low,high) that lies within
|
||||
* the range [min,max).
|
||||
*/
|
||||
#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
|
||||
(((low > max) || (high < min)) ? 0 : \
|
||||
((low > min) ? \
|
||||
(((u64) (value)) << (low - min)) : \
|
||||
(((u64) (value)) >> (min - low))))
|
||||
|
||||
#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
|
||||
(((low > max) || (high < min)) ? 0 : \
|
||||
((low > min) ? \
|
||||
(((u32) (value)) << (low - min)) : \
|
||||
(((u32) (value)) >> (min - low))))
|
||||
|
||||
#define EFX_INSERT_NATIVE(min, max, low, high, value) \
|
||||
((((max - min) >= 32) || ((high - low) >= 32)) ? \
|
||||
EFX_INSERT_NATIVE64(min, max, low, high, value) : \
|
||||
EFX_INSERT_NATIVE32(min, max, low, high, value))
|
||||
|
||||
/*
|
||||
* Construct bit field portion
|
||||
*
|
||||
* Creates the portion of the named bit field that lies within the
|
||||
* range [min,max).
|
||||
*/
|
||||
#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
|
||||
EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
/*
|
||||
* Construct bit field
|
||||
*
|
||||
* Creates the portion of the named bit fields that lie within the
|
||||
* range [min,max).
|
||||
*/
|
||||
#define EFX_INSERT_FIELDS_NATIVE(min, max, \
|
||||
field1, value1, \
|
||||
field2, value2, \
|
||||
field3, value3, \
|
||||
field4, value4, \
|
||||
field5, value5, \
|
||||
field6, value6, \
|
||||
field7, value7, \
|
||||
field8, value8, \
|
||||
field9, value9, \
|
||||
field10, value10) \
|
||||
(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
|
||||
|
||||
#define EFX_INSERT_FIELDS64(...) \
|
||||
cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
|
||||
|
||||
#define EFX_INSERT_FIELDS32(...) \
|
||||
cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
|
||||
|
||||
#define EFX_POPULATE_OWORD64(oword, ...) do { \
|
||||
(oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
|
||||
(oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_QWORD64(qword, ...) do { \
|
||||
(qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_OWORD32(oword, ...) do { \
|
||||
(oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
|
||||
(oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
|
||||
(oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
|
||||
(oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_QWORD32(qword, ...) do { \
|
||||
(qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
|
||||
(qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_DWORD(dword, ...) do { \
|
||||
(dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
|
||||
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
|
||||
#else
|
||||
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
|
||||
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
|
||||
#endif
|
||||
|
||||
/* Populate an octword field with various numbers of arguments */
|
||||
#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
|
||||
#define EFX_POPULATE_OWORD_9(oword, ...) \
|
||||
EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_8(oword, ...) \
|
||||
EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_7(oword, ...) \
|
||||
EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_6(oword, ...) \
|
||||
EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_5(oword, ...) \
|
||||
EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_4(oword, ...) \
|
||||
EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_3(oword, ...) \
|
||||
EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_2(oword, ...) \
|
||||
EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_1(oword, ...) \
|
||||
EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_ZERO_OWORD(oword) \
|
||||
EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
|
||||
#define EFX_SET_OWORD(oword) \
|
||||
EFX_POPULATE_OWORD_4(oword, \
|
||||
EFX_DWORD_0, 0xffffffff, \
|
||||
EFX_DWORD_1, 0xffffffff, \
|
||||
EFX_DWORD_2, 0xffffffff, \
|
||||
EFX_DWORD_3, 0xffffffff)
|
||||
|
||||
/* Populate a quadword field with various numbers of arguments */
|
||||
#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
|
||||
#define EFX_POPULATE_QWORD_9(qword, ...) \
|
||||
EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_8(qword, ...) \
|
||||
EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_7(qword, ...) \
|
||||
EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_6(qword, ...) \
|
||||
EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_5(qword, ...) \
|
||||
EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_4(qword, ...) \
|
||||
EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_3(qword, ...) \
|
||||
EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_2(qword, ...) \
|
||||
EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_1(qword, ...) \
|
||||
EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_ZERO_QWORD(qword) \
|
||||
EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
|
||||
#define EFX_SET_QWORD(qword) \
|
||||
EFX_POPULATE_QWORD_2(qword, \
|
||||
EFX_DWORD_0, 0xffffffff, \
|
||||
EFX_DWORD_1, 0xffffffff)
|
||||
|
||||
/* Populate a dword field with various numbers of arguments */
|
||||
#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
|
||||
#define EFX_POPULATE_DWORD_9(dword, ...) \
|
||||
EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_8(dword, ...) \
|
||||
EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_7(dword, ...) \
|
||||
EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_6(dword, ...) \
|
||||
EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_5(dword, ...) \
|
||||
EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_4(dword, ...) \
|
||||
EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_3(dword, ...) \
|
||||
EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_2(dword, ...) \
|
||||
EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_1(dword, ...) \
|
||||
EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_ZERO_DWORD(dword) \
|
||||
EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
|
||||
#define EFX_SET_DWORD(dword) \
|
||||
EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
|
||||
|
||||
/*
|
||||
* Modify a named field within an already-populated structure. Used
|
||||
* for read-modify-write operations.
|
||||
*
|
||||
*/
|
||||
#define EFX_INVERT_OWORD(oword) do { \
|
||||
(oword).u64[0] = ~((oword).u64[0]); \
|
||||
(oword).u64[1] = ~((oword).u64[1]); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_AND_OWORD(oword, from, mask) \
|
||||
do { \
|
||||
(oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
|
||||
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
|
||||
} while (0)
|
||||
|
||||
#define EFX_OR_OWORD(oword, from, mask) \
|
||||
do { \
|
||||
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
|
||||
(oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
|
||||
} while (0)
|
||||
|
||||
#define EFX_INSERT64(min, max, low, high, value) \
|
||||
cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
|
||||
|
||||
#define EFX_INSERT32(min, max, low, high, value) \
|
||||
cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
|
||||
|
||||
#define EFX_INPLACE_MASK64(min, max, low, high) \
|
||||
EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
|
||||
|
||||
#define EFX_INPLACE_MASK32(min, max, low, high) \
|
||||
EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_SET_OWORD64(oword, low, high, value) do { \
|
||||
(oword).u64[0] = (((oword).u64[0] \
|
||||
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
|
||||
| EFX_INSERT64(0, 63, low, high, value)); \
|
||||
(oword).u64[1] = (((oword).u64[1] \
|
||||
& ~EFX_INPLACE_MASK64(64, 127, low, high)) \
|
||||
| EFX_INSERT64(64, 127, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_QWORD64(qword, low, high, value) do { \
|
||||
(qword).u64[0] = (((qword).u64[0] \
|
||||
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
|
||||
| EFX_INSERT64(0, 63, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_OWORD32(oword, low, high, value) do { \
|
||||
(oword).u32[0] = (((oword).u32[0] \
|
||||
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
|
||||
| EFX_INSERT32(0, 31, low, high, value)); \
|
||||
(oword).u32[1] = (((oword).u32[1] \
|
||||
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
|
||||
| EFX_INSERT32(32, 63, low, high, value)); \
|
||||
(oword).u32[2] = (((oword).u32[2] \
|
||||
& ~EFX_INPLACE_MASK32(64, 95, low, high)) \
|
||||
| EFX_INSERT32(64, 95, low, high, value)); \
|
||||
(oword).u32[3] = (((oword).u32[3] \
|
||||
& ~EFX_INPLACE_MASK32(96, 127, low, high)) \
|
||||
| EFX_INSERT32(96, 127, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_QWORD32(qword, low, high, value) do { \
|
||||
(qword).u32[0] = (((qword).u32[0] \
|
||||
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
|
||||
| EFX_INSERT32(0, 31, low, high, value)); \
|
||||
(qword).u32[1] = (((qword).u32[1] \
|
||||
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
|
||||
| EFX_INSERT32(32, 63, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_DWORD32(dword, low, high, value) do { \
|
||||
(dword).u32[0] = (((dword).u32[0] \
|
||||
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
|
||||
| EFX_INSERT32(0, 31, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_OWORD_FIELD64(oword, field, value) \
|
||||
EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_QWORD_FIELD64(qword, field, value) \
|
||||
EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_OWORD_FIELD32(oword, field, value) \
|
||||
EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_QWORD_FIELD32(qword, field, value) \
|
||||
EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_DWORD_FIELD(dword, field, value) \
|
||||
EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
|
||||
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
|
||||
#else
|
||||
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
|
||||
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
|
||||
#endif
|
||||
|
||||
/* Used to avoid compiler warnings about shift range exceeding width
|
||||
* of the data types when dma_addr_t is only 32 bits wide.
|
||||
*/
|
||||
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
|
||||
#define EFX_DMA_TYPE_WIDTH(width) \
|
||||
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
|
||||
|
||||
|
||||
/* Static initialiser */
|
||||
#define EFX_OWORD32(a, b, c, d) \
|
||||
{ .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
|
||||
cpu_to_le32(c), cpu_to_le32(d) } }
|
||||
|
||||
#endif /* EFX_BITFIELD_H */
|
||||
3708
drivers/net/ethernet/sfc/ef10.c
Normal file
3708
drivers/net/ethernet/sfc/ef10.c
Normal file
File diff suppressed because it is too large
Load diff
355
drivers/net/ethernet/sfc/ef10_regs.h
Normal file
355
drivers/net/ethernet/sfc/ef10_regs.h
Normal file
|
|
@ -0,0 +1,355 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2012-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_EF10_REGS_H
|
||||
#define EFX_EF10_REGS_H
|
||||
|
||||
/* EF10 hardware architecture definitions have a name prefix following
|
||||
* the format:
|
||||
*
|
||||
* E<type>_<min-rev><max-rev>_
|
||||
*
|
||||
* The following <type> strings are used:
|
||||
*
|
||||
* MMIO register Host memory structure
|
||||
* -------------------------------------------------------------
|
||||
* Address R
|
||||
* Bitfield RF SF
|
||||
* Enumerator FE SE
|
||||
*
|
||||
* <min-rev> is the first revision to which the definition applies:
|
||||
*
|
||||
* D: Huntington A0
|
||||
*
|
||||
* If the definition has been changed or removed in later revisions
|
||||
* then <max-rev> is the last revision to which the definition applies;
|
||||
* otherwise it is "Z".
|
||||
*/
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* EF10 registers and descriptors
|
||||
*
|
||||
**************************************************************************
|
||||
*/
|
||||
|
||||
/* BIU_HW_REV_ID_REG: */
|
||||
#define ER_DZ_BIU_HW_REV_ID 0x00000000
|
||||
#define ERF_DZ_HW_REV_ID_LBN 0
|
||||
#define ERF_DZ_HW_REV_ID_WIDTH 32
|
||||
|
||||
/* BIU_MC_SFT_STATUS_REG: */
|
||||
#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
|
||||
#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
|
||||
#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
|
||||
#define ERF_DZ_MC_SFT_STATUS_LBN 0
|
||||
#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
|
||||
|
||||
/* BIU_INT_ISR_REG: */
|
||||
#define ER_DZ_BIU_INT_ISR 0x00000090
|
||||
#define ERF_DZ_ISR_REG_LBN 0
|
||||
#define ERF_DZ_ISR_REG_WIDTH 32
|
||||
|
||||
/* MC_DB_LWRD_REG: */
|
||||
#define ER_DZ_MC_DB_LWRD 0x00000200
|
||||
#define ERF_DZ_MC_DOORBELL_L_LBN 0
|
||||
#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
|
||||
|
||||
/* MC_DB_HWRD_REG: */
|
||||
#define ER_DZ_MC_DB_HWRD 0x00000204
|
||||
#define ERF_DZ_MC_DOORBELL_H_LBN 0
|
||||
#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
|
||||
|
||||
/* EVQ_RPTR_REG: */
|
||||
#define ER_DZ_EVQ_RPTR 0x00000400
|
||||
#define ER_DZ_EVQ_RPTR_STEP 8192
|
||||
#define ER_DZ_EVQ_RPTR_ROWS 2048
|
||||
#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
|
||||
#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
|
||||
#define ERF_DZ_EVQ_RPTR_LBN 0
|
||||
#define ERF_DZ_EVQ_RPTR_WIDTH 15
|
||||
|
||||
/* EVQ_TMR_REG: */
|
||||
#define ER_DZ_EVQ_TMR 0x00000420
|
||||
#define ER_DZ_EVQ_TMR_STEP 8192
|
||||
#define ER_DZ_EVQ_TMR_ROWS 2048
|
||||
#define ERF_DZ_TC_TIMER_MODE_LBN 14
|
||||
#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
|
||||
#define ERF_DZ_TC_TIMER_VAL_LBN 0
|
||||
#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
|
||||
|
||||
/* RX_DESC_UPD_REG: */
|
||||
#define ER_DZ_RX_DESC_UPD 0x00000830
|
||||
#define ER_DZ_RX_DESC_UPD_STEP 8192
|
||||
#define ER_DZ_RX_DESC_UPD_ROWS 2048
|
||||
#define ERF_DZ_RX_DESC_WPTR_LBN 0
|
||||
#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
|
||||
|
||||
/* TX_DESC_UPD_REG: */
|
||||
#define ER_DZ_TX_DESC_UPD 0x00000a10
|
||||
#define ER_DZ_TX_DESC_UPD_STEP 8192
|
||||
#define ER_DZ_TX_DESC_UPD_ROWS 2048
|
||||
#define ERF_DZ_RSVD_LBN 76
|
||||
#define ERF_DZ_RSVD_WIDTH 20
|
||||
#define ERF_DZ_TX_DESC_WPTR_LBN 64
|
||||
#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
|
||||
#define ERF_DZ_TX_DESC_HWORD_LBN 32
|
||||
#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
|
||||
#define ERF_DZ_TX_DESC_LWORD_LBN 0
|
||||
#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
|
||||
|
||||
/* DRIVER_EV */
|
||||
#define ESF_DZ_DRV_CODE_LBN 60
|
||||
#define ESF_DZ_DRV_CODE_WIDTH 4
|
||||
#define ESF_DZ_DRV_SUB_CODE_LBN 56
|
||||
#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
|
||||
#define ESE_DZ_DRV_TIMER_EV 3
|
||||
#define ESE_DZ_DRV_START_UP_EV 2
|
||||
#define ESE_DZ_DRV_WAKE_UP_EV 1
|
||||
#define ESF_DZ_DRV_SUB_DATA_LBN 0
|
||||
#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
|
||||
#define ESF_DZ_DRV_EVQ_ID_LBN 0
|
||||
#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
|
||||
#define ESF_DZ_DRV_TMR_ID_LBN 0
|
||||
#define ESF_DZ_DRV_TMR_ID_WIDTH 14
|
||||
|
||||
/* EVENT_ENTRY */
|
||||
#define ESF_DZ_EV_CODE_LBN 60
|
||||
#define ESF_DZ_EV_CODE_WIDTH 4
|
||||
#define ESE_DZ_EV_CODE_MCDI_EV 12
|
||||
#define ESE_DZ_EV_CODE_DRIVER_EV 5
|
||||
#define ESE_DZ_EV_CODE_TX_EV 2
|
||||
#define ESE_DZ_EV_CODE_RX_EV 0
|
||||
#define ESE_DZ_OTHER other
|
||||
#define ESF_DZ_EV_DATA_LBN 0
|
||||
#define ESF_DZ_EV_DATA_WIDTH 60
|
||||
|
||||
/* MC_EVENT */
|
||||
#define ESF_DZ_MC_CODE_LBN 60
|
||||
#define ESF_DZ_MC_CODE_WIDTH 4
|
||||
#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
|
||||
#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
|
||||
#define ESF_DZ_MC_DROP_EVENT_LBN 58
|
||||
#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
|
||||
#define ESF_DZ_MC_SOFT_LBN 0
|
||||
#define ESF_DZ_MC_SOFT_WIDTH 58
|
||||
|
||||
/* RX_EVENT */
|
||||
#define ESF_DZ_RX_CODE_LBN 60
|
||||
#define ESF_DZ_RX_CODE_WIDTH 4
|
||||
#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
|
||||
#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
|
||||
#define ESF_DZ_RX_DROP_EVENT_LBN 58
|
||||
#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
|
||||
#define ESF_DZ_RX_EV_RSVD2_LBN 54
|
||||
#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
|
||||
#define ESF_DZ_RX_EV_SOFT2_LBN 52
|
||||
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
|
||||
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
|
||||
#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
|
||||
#define ESF_DZ_RX_L4_CLASS_LBN 45
|
||||
#define ESF_DZ_RX_L4_CLASS_WIDTH 3
|
||||
#define ESE_DZ_L4_CLASS_RSVD7 7
|
||||
#define ESE_DZ_L4_CLASS_RSVD6 6
|
||||
#define ESE_DZ_L4_CLASS_RSVD5 5
|
||||
#define ESE_DZ_L4_CLASS_RSVD4 4
|
||||
#define ESE_DZ_L4_CLASS_RSVD3 3
|
||||
#define ESE_DZ_L4_CLASS_UDP 2
|
||||
#define ESE_DZ_L4_CLASS_TCP 1
|
||||
#define ESE_DZ_L4_CLASS_UNKNOWN 0
|
||||
#define ESF_DZ_RX_L3_CLASS_LBN 42
|
||||
#define ESF_DZ_RX_L3_CLASS_WIDTH 3
|
||||
#define ESE_DZ_L3_CLASS_RSVD7 7
|
||||
#define ESE_DZ_L3_CLASS_IP6_FRAG 6
|
||||
#define ESE_DZ_L3_CLASS_ARP 5
|
||||
#define ESE_DZ_L3_CLASS_IP4_FRAG 4
|
||||
#define ESE_DZ_L3_CLASS_FCOE 3
|
||||
#define ESE_DZ_L3_CLASS_IP6 2
|
||||
#define ESE_DZ_L3_CLASS_IP4 1
|
||||
#define ESE_DZ_L3_CLASS_UNKNOWN 0
|
||||
#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
|
||||
#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
|
||||
#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
|
||||
#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
|
||||
#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
|
||||
#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
|
||||
#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
|
||||
#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
|
||||
#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
|
||||
#define ESE_DZ_ETH_TAG_CLASS_NONE 0
|
||||
#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
|
||||
#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
|
||||
#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
|
||||
#define ESE_DZ_ETH_BASE_CLASS_LLC 1
|
||||
#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
|
||||
#define ESF_DZ_RX_MAC_CLASS_LBN 35
|
||||
#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
|
||||
#define ESE_DZ_MAC_CLASS_MCAST 1
|
||||
#define ESE_DZ_MAC_CLASS_UCAST 0
|
||||
#define ESF_DZ_RX_EV_SOFT1_LBN 32
|
||||
#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
|
||||
#define ESF_DZ_RX_EV_RSVD1_LBN 31
|
||||
#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
|
||||
#define ESF_DZ_RX_ABORT_LBN 30
|
||||
#define ESF_DZ_RX_ABORT_WIDTH 1
|
||||
#define ESF_DZ_RX_ECC_ERR_LBN 29
|
||||
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
|
||||
#define ESF_DZ_RX_CRC1_ERR_LBN 28
|
||||
#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
|
||||
#define ESF_DZ_RX_CRC0_ERR_LBN 27
|
||||
#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
|
||||
#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
|
||||
#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
|
||||
#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
|
||||
#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
|
||||
#define ESF_DZ_RX_ECRC_ERR_LBN 24
|
||||
#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
|
||||
#define ESF_DZ_RX_QLABEL_LBN 16
|
||||
#define ESF_DZ_RX_QLABEL_WIDTH 5
|
||||
#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
|
||||
#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
|
||||
#define ESF_DZ_RX_CONT_LBN 14
|
||||
#define ESF_DZ_RX_CONT_WIDTH 1
|
||||
#define ESF_DZ_RX_BYTES_LBN 0
|
||||
#define ESF_DZ_RX_BYTES_WIDTH 14
|
||||
|
||||
/* RX_KER_DESC */
|
||||
#define ESF_DZ_RX_KER_RESERVED_LBN 62
|
||||
#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
|
||||
#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
|
||||
#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
|
||||
#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
|
||||
#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
|
||||
|
||||
/* TX_CSUM_TSTAMP_DESC */
|
||||
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
|
||||
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
|
||||
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
|
||||
#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
|
||||
#define ESE_DZ_TX_OPTION_DESC_TSO 7
|
||||
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
|
||||
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
|
||||
#define ESF_DZ_TX_TIMESTAMP_LBN 5
|
||||
#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
|
||||
#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
|
||||
#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
|
||||
#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
|
||||
#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
|
||||
#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
|
||||
#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
|
||||
#define ESE_DZ_TX_OPTION_CRC_FCOE 1
|
||||
#define ESE_DZ_TX_OPTION_CRC_OFF 0
|
||||
#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
|
||||
#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
|
||||
#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
|
||||
#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
|
||||
|
||||
/* TX_EVENT */
|
||||
#define ESF_DZ_TX_CODE_LBN 60
|
||||
#define ESF_DZ_TX_CODE_WIDTH 4
|
||||
#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
|
||||
#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
|
||||
#define ESF_DZ_TX_DROP_EVENT_LBN 58
|
||||
#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
|
||||
#define ESF_DZ_TX_EV_RSVD_LBN 48
|
||||
#define ESF_DZ_TX_EV_RSVD_WIDTH 10
|
||||
#define ESF_DZ_TX_SOFT2_LBN 32
|
||||
#define ESF_DZ_TX_SOFT2_WIDTH 16
|
||||
#define ESF_DZ_TX_CAN_MERGE_LBN 31
|
||||
#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
|
||||
#define ESF_DZ_TX_SOFT1_LBN 24
|
||||
#define ESF_DZ_TX_SOFT1_WIDTH 7
|
||||
#define ESF_DZ_TX_QLABEL_LBN 16
|
||||
#define ESF_DZ_TX_QLABEL_WIDTH 5
|
||||
#define ESF_DZ_TX_DESCR_INDX_LBN 0
|
||||
#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
|
||||
|
||||
/* TX_KER_DESC */
|
||||
#define ESF_DZ_TX_KER_TYPE_LBN 63
|
||||
#define ESF_DZ_TX_KER_TYPE_WIDTH 1
|
||||
#define ESF_DZ_TX_KER_CONT_LBN 62
|
||||
#define ESF_DZ_TX_KER_CONT_WIDTH 1
|
||||
#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
|
||||
#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
|
||||
#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
|
||||
#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
|
||||
|
||||
/* TX_PIO_DESC */
|
||||
#define ESF_DZ_TX_PIO_TYPE_LBN 63
|
||||
#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
|
||||
#define ESF_DZ_TX_PIO_OPT_LBN 60
|
||||
#define ESF_DZ_TX_PIO_OPT_WIDTH 3
|
||||
#define ESE_DZ_TX_OPTION_DESC_PIO 1
|
||||
#define ESF_DZ_TX_PIO_CONT_LBN 59
|
||||
#define ESF_DZ_TX_PIO_CONT_WIDTH 1
|
||||
#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
|
||||
#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
|
||||
#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
|
||||
#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
|
||||
|
||||
/* TX_TSO_DESC */
|
||||
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
|
||||
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
|
||||
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
|
||||
#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
|
||||
#define ESE_DZ_TX_OPTION_DESC_TSO 7
|
||||
#define ESE_DZ_TX_OPTION_DESC_VLAN 6
|
||||
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
|
||||
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
|
||||
#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
|
||||
#define ESF_DZ_TX_TSO_IP_ID_LBN 32
|
||||
#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
|
||||
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
|
||||
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
|
||||
|
||||
/*************************************************************************/
|
||||
|
||||
/* TX_DESC_UPD_REG: Transmit descriptor update register.
|
||||
* We may write just one dword of these registers.
|
||||
*/
|
||||
#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
|
||||
#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
|
||||
#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
|
||||
|
||||
/* The workaround for bug 35388 requires multiplexing writes through
|
||||
* the TX_DESC_UPD_DWORD address.
|
||||
* TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
|
||||
* EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
|
||||
* EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
|
||||
*/
|
||||
#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
|
||||
#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
|
||||
#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
|
||||
#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
|
||||
#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
|
||||
#define ERF_DD_EVQ_IND_RPTR_LBN 0
|
||||
#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
|
||||
#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
|
||||
#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
|
||||
#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
|
||||
#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
|
||||
#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
|
||||
#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
|
||||
#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
|
||||
|
||||
/* TX_PIOBUF
|
||||
* PIO buffer aperture (paged)
|
||||
*/
|
||||
#define ER_DZ_TX_PIOBUF 4096
|
||||
#define ER_DZ_TX_PIOBUF_SIZE 2048
|
||||
|
||||
/* RX packet prefix */
|
||||
#define ES_DZ_RX_PREFIX_HASH_OFST 0
|
||||
#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
|
||||
#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
|
||||
#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
|
||||
#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
|
||||
#define ES_DZ_RX_PREFIX_SIZE 14
|
||||
|
||||
#endif /* EFX_EF10_REGS_H */
|
||||
3349
drivers/net/ethernet/sfc/efx.c
Normal file
3349
drivers/net/ethernet/sfc/efx.c
Normal file
File diff suppressed because it is too large
Load diff
255
drivers/net/ethernet/sfc/efx.h
Normal file
255
drivers/net/ethernet/sfc/efx.h
Normal file
|
|
@ -0,0 +1,255 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_EFX_H
|
||||
#define EFX_EFX_H
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "filter.h"
|
||||
|
||||
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
|
||||
#define EFX_MEM_BAR 2
|
||||
|
||||
/* TX */
|
||||
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
|
||||
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
|
||||
struct net_device *net_dev);
|
||||
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
|
||||
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
|
||||
int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
|
||||
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
|
||||
extern unsigned int efx_piobuf_size;
|
||||
|
||||
/* RX */
|
||||
void efx_rx_config_page_split(struct efx_nic *efx);
|
||||
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
|
||||
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
|
||||
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
|
||||
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
|
||||
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
|
||||
void efx_rx_slow_fill(unsigned long context);
|
||||
void __efx_rx_packet(struct efx_channel *channel);
|
||||
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
|
||||
unsigned int n_frags, unsigned int len, u16 flags);
|
||||
static inline void efx_rx_flush_packet(struct efx_channel *channel)
|
||||
{
|
||||
if (channel->rx_pkt_n_frags)
|
||||
__efx_rx_packet(channel);
|
||||
}
|
||||
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
|
||||
|
||||
#define EFX_MAX_DMAQ_SIZE 4096UL
|
||||
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
|
||||
#define EFX_MIN_DMAQ_SIZE 512UL
|
||||
|
||||
#define EFX_MAX_EVQ_SIZE 16384UL
|
||||
#define EFX_MIN_EVQ_SIZE 512UL
|
||||
|
||||
/* Maximum number of TCP segments we support for soft-TSO */
|
||||
#define EFX_TSO_MAX_SEGS 100
|
||||
|
||||
/* The smallest [rt]xq_entries that the driver supports. RX minimum
|
||||
* is a bit arbitrary. For TX, we must have space for at least 2
|
||||
* TSO skbs.
|
||||
*/
|
||||
#define EFX_RXQ_MIN_ENT 128U
|
||||
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
|
||||
|
||||
#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
|
||||
EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
|
||||
|
||||
/* Filters */
|
||||
|
||||
/**
|
||||
* efx_filter_insert_filter - add or replace a filter
|
||||
* @efx: NIC in which to insert the filter
|
||||
* @spec: Specification for the filter
|
||||
* @replace_equal: Flag for whether the specified filter may replace an
|
||||
* existing filter with equal priority
|
||||
*
|
||||
* On success, return the filter ID.
|
||||
* On failure, return a negative error code.
|
||||
*
|
||||
* If existing filters have equal match values to the new filter spec,
|
||||
* then the new filter might replace them or the function might fail,
|
||||
* as follows.
|
||||
*
|
||||
* 1. If the existing filters have lower priority, or @replace_equal
|
||||
* is set and they have equal priority, replace them.
|
||||
*
|
||||
* 2. If the existing filters have higher priority, return -%EPERM.
|
||||
*
|
||||
* 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
|
||||
* support delivery to multiple recipients, return -%EEXIST.
|
||||
*
|
||||
* This implies that filters for multiple multicast recipients must
|
||||
* all be inserted with the same priority and @replace_equal = %false.
|
||||
*/
|
||||
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
|
||||
struct efx_filter_spec *spec,
|
||||
bool replace_equal)
|
||||
{
|
||||
return efx->type->filter_insert(efx, spec, replace_equal);
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_remove_id_safe - remove a filter by ID, carefully
|
||||
* @efx: NIC from which to remove the filter
|
||||
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
|
||||
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
|
||||
*
|
||||
* This function will range-check @filter_id, so it is safe to call
|
||||
* with a value passed from userland.
|
||||
*/
|
||||
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 filter_id)
|
||||
{
|
||||
return efx->type->filter_remove_safe(efx, priority, filter_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_get_filter_safe - retrieve a filter by ID, carefully
|
||||
* @efx: NIC from which to remove the filter
|
||||
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
|
||||
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
|
||||
* @spec: Buffer in which to store filter specification
|
||||
*
|
||||
* This function will range-check @filter_id, so it is safe to call
|
||||
* with a value passed from userland.
|
||||
*/
|
||||
static inline int
|
||||
efx_filter_get_filter_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 filter_id, struct efx_filter_spec *spec)
|
||||
{
|
||||
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
|
||||
}
|
||||
|
||||
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority)
|
||||
{
|
||||
return efx->type->filter_count_rx_used(efx, priority);
|
||||
}
|
||||
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
|
||||
{
|
||||
return efx->type->filter_get_rx_id_limit(efx);
|
||||
}
|
||||
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 *buf, u32 size)
|
||||
{
|
||||
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
|
||||
}
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
|
||||
u16 rxq_index, u32 flow_id);
|
||||
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
|
||||
static inline void efx_filter_rfs_expire(struct efx_channel *channel)
|
||||
{
|
||||
if (channel->rfs_filters_added >= 60 &&
|
||||
__efx_filter_rfs_expire(channel->efx, 100))
|
||||
channel->rfs_filters_added -= 60;
|
||||
}
|
||||
#define efx_filter_rfs_enabled() 1
|
||||
#else
|
||||
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
|
||||
#define efx_filter_rfs_enabled() 0
|
||||
#endif
|
||||
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
|
||||
|
||||
/* Channels */
|
||||
int efx_channel_dummy_op_int(struct efx_channel *channel);
|
||||
void efx_channel_dummy_op_void(struct efx_channel *channel);
|
||||
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
|
||||
|
||||
/* Ports */
|
||||
int efx_reconfigure_port(struct efx_nic *efx);
|
||||
int __efx_reconfigure_port(struct efx_nic *efx);
|
||||
|
||||
/* Ethtool support */
|
||||
extern const struct ethtool_ops efx_ethtool_ops;
|
||||
|
||||
/* Reset handling */
|
||||
int efx_reset(struct efx_nic *efx, enum reset_type method);
|
||||
void efx_reset_down(struct efx_nic *efx, enum reset_type method);
|
||||
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
|
||||
int efx_try_recovery(struct efx_nic *efx);
|
||||
|
||||
/* Global */
|
||||
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
|
||||
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
|
||||
unsigned int rx_usecs, bool rx_adaptive,
|
||||
bool rx_may_override_tx);
|
||||
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
|
||||
unsigned int *rx_usecs, bool *rx_adaptive);
|
||||
void efx_stop_eventq(struct efx_channel *channel);
|
||||
void efx_start_eventq(struct efx_channel *channel);
|
||||
|
||||
/* Dummy PHY ops for PHY drivers */
|
||||
int efx_port_dummy_op_int(struct efx_nic *efx);
|
||||
void efx_port_dummy_op_void(struct efx_nic *efx);
|
||||
|
||||
/* Update the generic software stats in the passed stats array */
|
||||
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
|
||||
|
||||
/* MTD */
|
||||
#ifdef CONFIG_SFC_MTD
|
||||
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
|
||||
size_t n_parts, size_t sizeof_part);
|
||||
static inline int efx_mtd_probe(struct efx_nic *efx)
|
||||
{
|
||||
return efx->type->mtd_probe(efx);
|
||||
}
|
||||
void efx_mtd_rename(struct efx_nic *efx);
|
||||
void efx_mtd_remove(struct efx_nic *efx);
|
||||
#else
|
||||
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
|
||||
static inline void efx_mtd_rename(struct efx_nic *efx) {}
|
||||
static inline void efx_mtd_remove(struct efx_nic *efx) {}
|
||||
#endif
|
||||
|
||||
static inline void efx_schedule_channel(struct efx_channel *channel)
|
||||
{
|
||||
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
|
||||
"channel %d scheduling NAPI poll on CPU%d\n",
|
||||
channel->channel, raw_smp_processor_id());
|
||||
|
||||
napi_schedule(&channel->napi_str);
|
||||
}
|
||||
|
||||
static inline void efx_schedule_channel_irq(struct efx_channel *channel)
|
||||
{
|
||||
channel->event_test_cpu = raw_smp_processor_id();
|
||||
efx_schedule_channel(channel);
|
||||
}
|
||||
|
||||
void efx_link_status_changed(struct efx_nic *efx);
|
||||
void efx_link_set_advertising(struct efx_nic *efx, u32);
|
||||
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
|
||||
|
||||
static inline void efx_device_detach_sync(struct efx_nic *efx)
|
||||
{
|
||||
struct net_device *dev = efx->net_dev;
|
||||
|
||||
/* Lock/freeze all TX queues so that we can be sure the
|
||||
* TX scheduler is stopped when we're done and before
|
||||
* netif_device_present() becomes false.
|
||||
*/
|
||||
netif_tx_lock_bh(dev);
|
||||
netif_device_detach(dev);
|
||||
netif_tx_unlock_bh(dev);
|
||||
}
|
||||
|
||||
#endif /* EFX_EFX_H */
|
||||
181
drivers/net/ethernet/sfc/enum.h
Normal file
181
drivers/net/ethernet/sfc/enum.h
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2007-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_ENUM_H
|
||||
#define EFX_ENUM_H
|
||||
|
||||
/**
|
||||
* enum efx_loopback_mode - loopback modes
|
||||
* @LOOPBACK_NONE: no loopback
|
||||
* @LOOPBACK_DATA: data path loopback
|
||||
* @LOOPBACK_GMAC: loopback within GMAC
|
||||
* @LOOPBACK_XGMII: loopback after XMAC
|
||||
* @LOOPBACK_XGXS: loopback within BPX after XGXS
|
||||
* @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
|
||||
* @LOOPBACK_GMII: loopback within BPX after GMAC
|
||||
* @LOOPBACK_SGMII: loopback within BPX within SGMII
|
||||
* @LOOPBACK_XGBR: loopback within BPX within XGBR
|
||||
* @LOOPBACK_XFI: loopback within BPX before XFI serdes
|
||||
* @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
|
||||
* @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
|
||||
* @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
|
||||
* @LOOPBACK_XFI_FAR: loopback after XFI serdes
|
||||
* @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
|
||||
* @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
|
||||
* @LOOPBACK_PCS: loopback within 10G PHY at PCS level
|
||||
* @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
|
||||
* @LOOPBACK_XPORT: cross port loopback
|
||||
* @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
|
||||
* @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
|
||||
* @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
|
||||
* @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
|
||||
* @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
|
||||
* @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
|
||||
* @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
|
||||
* @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
|
||||
*/
|
||||
/* Please keep up-to-date w.r.t the following two #defines */
|
||||
enum efx_loopback_mode {
|
||||
LOOPBACK_NONE = 0,
|
||||
LOOPBACK_DATA = 1,
|
||||
LOOPBACK_GMAC = 2,
|
||||
LOOPBACK_XGMII = 3,
|
||||
LOOPBACK_XGXS = 4,
|
||||
LOOPBACK_XAUI = 5,
|
||||
LOOPBACK_GMII = 6,
|
||||
LOOPBACK_SGMII = 7,
|
||||
LOOPBACK_XGBR = 8,
|
||||
LOOPBACK_XFI = 9,
|
||||
LOOPBACK_XAUI_FAR = 10,
|
||||
LOOPBACK_GMII_FAR = 11,
|
||||
LOOPBACK_SGMII_FAR = 12,
|
||||
LOOPBACK_XFI_FAR = 13,
|
||||
LOOPBACK_GPHY = 14,
|
||||
LOOPBACK_PHYXS = 15,
|
||||
LOOPBACK_PCS = 16,
|
||||
LOOPBACK_PMAPMD = 17,
|
||||
LOOPBACK_XPORT = 18,
|
||||
LOOPBACK_XGMII_WS = 19,
|
||||
LOOPBACK_XAUI_WS = 20,
|
||||
LOOPBACK_XAUI_WS_FAR = 21,
|
||||
LOOPBACK_XAUI_WS_NEAR = 22,
|
||||
LOOPBACK_GMII_WS = 23,
|
||||
LOOPBACK_XFI_WS = 24,
|
||||
LOOPBACK_XFI_WS_FAR = 25,
|
||||
LOOPBACK_PHYXS_WS = 26,
|
||||
LOOPBACK_MAX
|
||||
};
|
||||
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
|
||||
|
||||
/* These loopbacks occur within the controller */
|
||||
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
|
||||
(1 << LOOPBACK_GMAC) | \
|
||||
(1 << LOOPBACK_XGMII)| \
|
||||
(1 << LOOPBACK_XGXS) | \
|
||||
(1 << LOOPBACK_XAUI) | \
|
||||
(1 << LOOPBACK_GMII) | \
|
||||
(1 << LOOPBACK_SGMII) | \
|
||||
(1 << LOOPBACK_SGMII) | \
|
||||
(1 << LOOPBACK_XGBR) | \
|
||||
(1 << LOOPBACK_XFI) | \
|
||||
(1 << LOOPBACK_XAUI_FAR) | \
|
||||
(1 << LOOPBACK_GMII_FAR) | \
|
||||
(1 << LOOPBACK_SGMII_FAR) | \
|
||||
(1 << LOOPBACK_XFI_FAR) | \
|
||||
(1 << LOOPBACK_XGMII_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS_FAR) | \
|
||||
(1 << LOOPBACK_XAUI_WS_NEAR) | \
|
||||
(1 << LOOPBACK_GMII_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS_FAR))
|
||||
|
||||
#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS_FAR) | \
|
||||
(1 << LOOPBACK_XAUI_WS_NEAR) | \
|
||||
(1 << LOOPBACK_GMII_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS_FAR) | \
|
||||
(1 << LOOPBACK_PHYXS_WS))
|
||||
|
||||
#define LOOPBACKS_EXTERNAL(_efx) \
|
||||
((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
|
||||
~(1 << LOOPBACK_NONE))
|
||||
|
||||
#define LOOPBACK_MASK(_efx) \
|
||||
(1 << (_efx)->loopback_mode)
|
||||
|
||||
#define LOOPBACK_INTERNAL(_efx) \
|
||||
(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
|
||||
|
||||
#define LOOPBACK_EXTERNAL(_efx) \
|
||||
(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
|
||||
|
||||
#define LOOPBACK_CHANGED(_from, _to, _mask) \
|
||||
(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
|
||||
|
||||
#define LOOPBACK_OUT_OF(_from, _to, _mask) \
|
||||
((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
|
||||
|
||||
/*****************************************************************************/
|
||||
|
||||
/**
|
||||
* enum reset_type - reset types
|
||||
*
|
||||
* %RESET_TYPE_INVSIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
|
||||
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
|
||||
* other valuesspecify reasons, which efx_schedule_reset() will choose
|
||||
* a method for.
|
||||
*
|
||||
* Reset methods are numbered in order of increasing scope.
|
||||
*
|
||||
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
|
||||
* @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
|
||||
* if unsuccessful.
|
||||
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
|
||||
* @RESET_TYPE_WORLD: Reset as much as possible
|
||||
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
|
||||
* unsuccessful.
|
||||
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
|
||||
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
|
||||
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
|
||||
* @RESET_TYPE_INT_ERROR: reset due to internal error
|
||||
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
|
||||
* @RESET_TYPE_DMA_ERROR: DMA error
|
||||
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
|
||||
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
|
||||
* @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
|
||||
*/
|
||||
enum reset_type {
|
||||
RESET_TYPE_INVISIBLE,
|
||||
RESET_TYPE_RECOVER_OR_ALL,
|
||||
RESET_TYPE_ALL,
|
||||
RESET_TYPE_WORLD,
|
||||
RESET_TYPE_RECOVER_OR_DISABLE,
|
||||
RESET_TYPE_MC_BIST,
|
||||
RESET_TYPE_DISABLE,
|
||||
RESET_TYPE_MAX_METHOD,
|
||||
RESET_TYPE_TX_WATCHDOG,
|
||||
RESET_TYPE_INT_ERROR,
|
||||
RESET_TYPE_RX_RECOVERY,
|
||||
RESET_TYPE_DMA_ERROR,
|
||||
RESET_TYPE_TX_SKIP,
|
||||
RESET_TYPE_MC_FAILURE,
|
||||
/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
|
||||
* it doesn't fit the scope hierarchy (not well-ordered by inclusion).
|
||||
* We encode this by having its enum value be greater than
|
||||
* RESET_TYPE_MAX_METHOD. This also prevents issuing it with
|
||||
* efx_ioctl_reset.
|
||||
*/
|
||||
RESET_TYPE_MCDI_TIMEOUT,
|
||||
RESET_TYPE_MAX,
|
||||
};
|
||||
|
||||
#endif /* EFX_ENUM_H */
|
||||
1186
drivers/net/ethernet/sfc/ethtool.c
Normal file
1186
drivers/net/ethernet/sfc/ethtool.c
Normal file
File diff suppressed because it is too large
Load diff
2882
drivers/net/ethernet/sfc/falcon.c
Normal file
2882
drivers/net/ethernet/sfc/falcon.c
Normal file
File diff suppressed because it is too large
Load diff
764
drivers/net/ethernet/sfc/falcon_boards.c
Normal file
764
drivers/net/ethernet/sfc/falcon_boards.c
Normal file
|
|
@ -0,0 +1,764 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2007-2012 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "phy.h"
|
||||
#include "efx.h"
|
||||
#include "nic.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* Macros for unpacking the board revision */
|
||||
/* The revision info is in host byte order. */
|
||||
#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
|
||||
#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
|
||||
#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
|
||||
|
||||
/* Board types */
|
||||
#define FALCON_BOARD_SFE4001 0x01
|
||||
#define FALCON_BOARD_SFE4002 0x02
|
||||
#define FALCON_BOARD_SFE4003 0x03
|
||||
#define FALCON_BOARD_SFN4112F 0x52
|
||||
|
||||
/* Board temperature is about 15°C above ambient when air flow is
|
||||
* limited. The maximum acceptable ambient temperature varies
|
||||
* depending on the PHY specifications but the critical temperature
|
||||
* above which we should shut down to avoid damage is 80°C. */
|
||||
#define FALCON_BOARD_TEMP_BIAS 15
|
||||
#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
|
||||
|
||||
/* SFC4000 datasheet says: 'The maximum permitted junction temperature
|
||||
* is 125°C; the thermal design of the environment for the SFC4000
|
||||
* should aim to keep this well below 100°C.' */
|
||||
#define FALCON_JUNC_TEMP_MIN 0
|
||||
#define FALCON_JUNC_TEMP_MAX 90
|
||||
#define FALCON_JUNC_TEMP_CRIT 125
|
||||
|
||||
/*****************************************************************************
|
||||
* Support for LM87 sensor chip used on several boards
|
||||
*/
|
||||
#define LM87_REG_TEMP_HW_INT_LOCK 0x13
|
||||
#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
|
||||
#define LM87_REG_TEMP_HW_INT 0x17
|
||||
#define LM87_REG_TEMP_HW_EXT 0x18
|
||||
#define LM87_REG_TEMP_EXT1 0x26
|
||||
#define LM87_REG_TEMP_INT 0x27
|
||||
#define LM87_REG_ALARMS1 0x41
|
||||
#define LM87_REG_ALARMS2 0x42
|
||||
#define LM87_IN_LIMITS(nr, _min, _max) \
|
||||
0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
|
||||
#define LM87_AIN_LIMITS(nr, _min, _max) \
|
||||
0x3B + (nr), _max, 0x1A + (nr), _min
|
||||
#define LM87_TEMP_INT_LIMITS(_min, _max) \
|
||||
0x39, _max, 0x3A, _min
|
||||
#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
|
||||
0x37, _max, 0x38, _min
|
||||
|
||||
#define LM87_ALARM_TEMP_INT 0x10
|
||||
#define LM87_ALARM_TEMP_EXT1 0x20
|
||||
|
||||
#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
|
||||
|
||||
static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
|
||||
{
|
||||
while (*reg_values) {
|
||||
u8 reg = *reg_values++;
|
||||
u8 value = *reg_values++;
|
||||
int rc = i2c_smbus_write_byte_data(client, reg, value);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const u8 falcon_lm87_common_regs[] = {
|
||||
LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
|
||||
LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
|
||||
LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
|
||||
LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
|
||||
LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
|
||||
0
|
||||
};
|
||||
|
||||
static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
|
||||
const u8 *reg_values)
|
||||
{
|
||||
struct falcon_board *board = falcon_board(efx);
|
||||
struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
|
||||
int rc;
|
||||
|
||||
if (!client)
|
||||
return -EIO;
|
||||
|
||||
/* Read-to-clear alarm/interrupt status */
|
||||
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
|
||||
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
|
||||
|
||||
rc = efx_poke_lm87(client, reg_values);
|
||||
if (rc)
|
||||
goto err;
|
||||
rc = efx_poke_lm87(client, falcon_lm87_common_regs);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
board->hwmon_client = client;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
i2c_unregister_device(client);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void efx_fini_lm87(struct efx_nic *efx)
|
||||
{
|
||||
i2c_unregister_device(falcon_board(efx)->hwmon_client);
|
||||
}
|
||||
|
||||
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
|
||||
{
|
||||
struct i2c_client *client = falcon_board(efx)->hwmon_client;
|
||||
bool temp_crit, elec_fault, is_failure;
|
||||
u16 alarms;
|
||||
s32 reg;
|
||||
|
||||
/* If link is up then do not monitor temperature */
|
||||
if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
|
||||
return 0;
|
||||
|
||||
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
|
||||
if (reg < 0)
|
||||
return reg;
|
||||
alarms = reg;
|
||||
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
|
||||
if (reg < 0)
|
||||
return reg;
|
||||
alarms |= reg << 8;
|
||||
alarms &= mask;
|
||||
|
||||
temp_crit = false;
|
||||
if (alarms & LM87_ALARM_TEMP_INT) {
|
||||
reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
|
||||
if (reg < 0)
|
||||
return reg;
|
||||
if (reg > FALCON_BOARD_TEMP_CRIT)
|
||||
temp_crit = true;
|
||||
}
|
||||
if (alarms & LM87_ALARM_TEMP_EXT1) {
|
||||
reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
|
||||
if (reg < 0)
|
||||
return reg;
|
||||
if (reg > FALCON_JUNC_TEMP_CRIT)
|
||||
temp_crit = true;
|
||||
}
|
||||
elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
|
||||
is_failure = temp_crit || elec_fault;
|
||||
|
||||
if (alarms)
|
||||
netif_err(efx, hw, efx->net_dev,
|
||||
"LM87 detected a hardware %s (status %02x:%02x)"
|
||||
"%s%s%s%s\n",
|
||||
is_failure ? "failure" : "problem",
|
||||
alarms & 0xff, alarms >> 8,
|
||||
(alarms & LM87_ALARM_TEMP_INT) ?
|
||||
"; board is overheating" : "",
|
||||
(alarms & LM87_ALARM_TEMP_EXT1) ?
|
||||
"; controller is overheating" : "",
|
||||
temp_crit ? "; reached critical temperature" : "",
|
||||
elec_fault ? "; electrical fault" : "");
|
||||
|
||||
return is_failure ? -ERANGE : 0;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SENSORS_LM87 */
|
||||
|
||||
/* Stub when the lm87 driver is not built: succeed with no monitoring. */
static inline int
efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
	      const u8 *reg_values)
{
	return 0;
}
|
||||
/* Stub when the lm87 driver is not built: nothing to release. */
static inline void efx_fini_lm87(struct efx_nic *efx)
{
}
|
||||
/* Stub when the lm87 driver is not built: always report healthy. */
static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
	return 0;
}
|
||||
|
||||
#endif /* CONFIG_SENSORS_LM87 */
|
||||
|
||||
/*****************************************************************************
|
||||
* Support for the SFE4001 NIC.
|
||||
*
|
||||
* The SFE4001 does not power-up fully at reset due to its high power
|
||||
* consumption. We control its power via a PCA9539 I/O expander.
|
||||
* It also has a MAX6647 temperature monitor which we expose to
|
||||
* the lm90 driver.
|
||||
*
|
||||
* This also provides minimal support for reflashing the PHY, which is
|
||||
* initiated by resetting it with the FLASH_CFG_1 pin pulled down.
|
||||
* On SFE4001 rev A2 and later this is connected to the 3V3X output of
|
||||
* the IO-expander.
|
||||
* We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
|
||||
* exclusive with the network device being open.
|
||||
*/
|
||||
|
||||
/**************************************************************************
|
||||
* Support for I2C IO Expander device on SFE4001
|
||||
*/
|
||||
#define PCA9539 0x74
|
||||
|
||||
#define P0_IN 0x00
|
||||
#define P0_OUT 0x02
|
||||
#define P0_INVERT 0x04
|
||||
#define P0_CONFIG 0x06
|
||||
|
||||
#define P0_EN_1V0X_LBN 0
|
||||
#define P0_EN_1V0X_WIDTH 1
|
||||
#define P0_EN_1V2_LBN 1
|
||||
#define P0_EN_1V2_WIDTH 1
|
||||
#define P0_EN_2V5_LBN 2
|
||||
#define P0_EN_2V5_WIDTH 1
|
||||
#define P0_EN_3V3X_LBN 3
|
||||
#define P0_EN_3V3X_WIDTH 1
|
||||
#define P0_EN_5V_LBN 4
|
||||
#define P0_EN_5V_WIDTH 1
|
||||
#define P0_SHORTEN_JTAG_LBN 5
|
||||
#define P0_SHORTEN_JTAG_WIDTH 1
|
||||
#define P0_X_TRST_LBN 6
|
||||
#define P0_X_TRST_WIDTH 1
|
||||
#define P0_DSP_RESET_LBN 7
|
||||
#define P0_DSP_RESET_WIDTH 1
|
||||
|
||||
#define P1_IN 0x01
|
||||
#define P1_OUT 0x03
|
||||
#define P1_INVERT 0x05
|
||||
#define P1_CONFIG 0x07
|
||||
|
||||
#define P1_AFE_PWD_LBN 0
|
||||
#define P1_AFE_PWD_WIDTH 1
|
||||
#define P1_DSP_PWD25_LBN 1
|
||||
#define P1_DSP_PWD25_WIDTH 1
|
||||
#define P1_RESERVED_LBN 2
|
||||
#define P1_RESERVED_WIDTH 2
|
||||
#define P1_SPARE_LBN 4
|
||||
#define P1_SPARE_WIDTH 4
|
||||
|
||||
/* Temperature Sensor */
|
||||
#define MAX664X_REG_RSL 0x02
|
||||
#define MAX664X_REG_WLHO 0x0B
|
||||
|
||||
/* Power down the SFE4001 PHY daughter board via the PCA9539 expander.
 * Best-effort cleanup: return values are deliberately ignored, as this
 * is also called from error paths where the bus may already be unhappy.
 */
static void sfe4001_poweroff(struct efx_nic *efx)
{
	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;

	/* Turn off all power rails and disable outputs */
	i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
	i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
	i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);

	/* Clear any over-temperature alert (MAX664X status is
	 * read-to-clear) so a later power-on starts clean */
	i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
}
|
||||
|
||||
/* Power up the SFE4001 PHY daughter board.
 * Brings up the supply rails in sequence via the PCA9539 IO expander,
 * then waits for the PHY DSP to boot, signalled by it asserting the
 * AFE power line.  On any failure the board is powered off again.
 * Returns 0 on success or a negative errno.
 */
static int sfe4001_poweron(struct efx_nic *efx)
{
	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
	unsigned int i, j;
	int rc;
	u8 out;

	/* Clear any previous over-temperature alert (read-to-clear) */
	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
	if (rc < 0)
		return rc;

	/* Enable port 0 and port 1 outputs on IO expander */
	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
	if (rc)
		return rc;
	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
				       0xff & ~(1 << P1_SPARE_LBN));
	if (rc)
		goto fail_on;

	/* If PHY power is on, turn it all off and wait 1 second to
	 * ensure a full reset.
	 */
	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
	if (rc < 0)
		goto fail_on;
	/* All-rails-off value (enables appear to be active-low; the
	 * explicit zero shifts document which bits are held low) */
	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
		       (0 << P0_EN_1V0X_LBN));
	if (rc != out) {
		netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;
		schedule_timeout_uninterruptible(HZ);
	}

	for (i = 0; i < 20; ++i) {
		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
			       (1 << P0_X_TRST_LBN));
		/* In reflash mode leave the 3V3X rail down; it drives the
		 * PHY's FLASH_CFG_1 pin (see block comment above) */
		if (efx->phy_mode & PHY_MODE_SPECIAL)
			out |= 1 << P0_EN_3V3X_LBN;

		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;
		msleep(10);

		/* Turn on 1V power rail */
		out &= ~(1 << P0_EN_1V0X_LBN);
		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;

		netif_info(efx, hw, efx->net_dev,
			   "waiting for DSP boot (attempt %d)...\n", i);

		/* In flash config mode, DSP does not turn on AFE, so
		 * just wait 1 second.
		 */
		if (efx->phy_mode & PHY_MODE_SPECIAL) {
			schedule_timeout_uninterruptible(HZ);
			return 0;
		}

		/* Poll for up to 1 second per attempt */
		for (j = 0; j < 10; ++j) {
			msleep(100);

			/* Check DSP has asserted AFE power line */
			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
			if (rc < 0)
				goto fail_on;
			if (rc & (1 << P1_AFE_PWD_LBN))
				return 0;
		}
	}

	netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
	rc = -ETIMEDOUT;
fail_on:
	sfe4001_poweroff(efx);
	return rc;
}
|
||||
|
||||
static ssize_t show_phy_flash_cfg(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
|
||||
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
|
||||
}
|
||||
|
||||
/* sysfs store handler for phy_flash_cfg: enter or leave PHY reflash
 * mode (PHY_MODE_SPECIAL).  The mode may only be changed while the
 * interface is down; changing it power-cycles the PHY.
 * Returns @count on success or a negative errno.
 */
static ssize_t set_phy_flash_cfg(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	enum efx_phy_mode old_mode, new_mode;
	int err;

	rtnl_lock();
	old_mode = efx->phy_mode;
	/* Any value other than an empty write or "0..." enables reflash */
	if (count == 0 || *buf == '0')
		new_mode = old_mode & ~PHY_MODE_SPECIAL;
	else
		new_mode = PHY_MODE_SPECIAL;
	if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
		err = 0;	/* no change requested */
	} else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
		err = -EBUSY;	/* reflash is exclusive with the device being up */
	} else {
		/* Reset the PHY, reconfigure the MAC and enable/disable
		 * MAC stats accordingly. */
		efx->phy_mode = new_mode;
		if (new_mode & PHY_MODE_SPECIAL)
			falcon_stop_nic_stats(efx);
		err = sfe4001_poweron(efx);
		if (!err)
			err = efx_reconfigure_port(efx);
		if (!(new_mode & PHY_MODE_SPECIAL))
			falcon_start_nic_stats(efx);
	}
	rtnl_unlock();

	return err ? err : count;
}
|
||||
|
||||
static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
|
||||
|
||||
/* Tear down everything sfe4001_init() set up, in reverse order:
 * sysfs attribute, board power, IO-expander client, hwmon client. */
static void sfe4001_fini(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);

	netif_info(efx, drv, efx->net_dev, "%s\n", __func__);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	sfe4001_poweroff(efx);
	i2c_unregister_device(board->ioexp_client);
	i2c_unregister_device(board->hwmon_client);
}
|
||||
|
||||
/* Periodic health check for the SFE4001.  Returns 0 if the PHY is
 * powered, -EIO if the IO expander could not be read, or -ERANGE if
 * the PHY has been powered down (e.g. by a temperature alarm).
 */
static int sfe4001_check_hw(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	s32 status;

	/* If XAUI link is up then do not monitor */
	if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
		return 0;

	/* Check the powered status of the PHY. Lack of power implies that
	 * the MAX6647 has shut down power to it, probably due to a temp.
	 * alarm. Reading the power status rather than the MAX6647 status
	 * directly because the latter is read-to-clear and would thus
	 * start to power up the PHY again when polled, causing us to blip
	 * the power undesirably.
	 * We know we can read from the IO expander because we did
	 * it during power-on. Assume failure now is bad news. */
	status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
	if (status >= 0 &&
	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
		return 0;

	/* Use board power control, not PHY power control */
	sfe4001_poweroff(efx);
	efx->phy_mode = PHY_MODE_OFF;

	return (status < 0) ? -EIO : -ERANGE;
}
|
||||
|
||||
/* The SFE4001's MAX6647 temperature monitor, exposed via the lm90 driver */
static const struct i2c_board_info sfe4001_hwmon_info = {
	I2C_BOARD_INFO("max6647", 0x4e),
};
|
||||
|
||||
/* This board uses an I2C expander to provide power to the PHY, which needs to
 * be turned on before the PHY can be used.
 * Context: Process context, rtnl lock held
 */
|
||||
/* One-time initialisation for the SFE4001: register the MAX6647 hwmon
 * client and the PCA9539 IO expander, power the PHY on, and create the
 * phy_flash_cfg sysfs attribute.  Uses a goto ladder to unwind each
 * acquired resource on failure.  Returns 0 or a negative errno.
 */
static int sfe4001_init(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);
	int rc;

#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
	/* Let the lm90 driver bind to the MAX6647 when it is available */
	board->hwmon_client =
		i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
	/* Otherwise just hold the address with a dummy client */
	board->hwmon_client =
		i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
	if (!board->hwmon_client)
		return -EIO;

	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
	rc = i2c_smbus_write_byte_data(board->hwmon_client,
				       MAX664X_REG_WLHO, 90);
	if (rc)
		goto fail_hwmon;

	board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
	if (!board->ioexp_client) {
		rc = -EIO;
		goto fail_hwmon;
	}

	if (efx->phy_mode & PHY_MODE_SPECIAL) {
		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
		 * will fail. */
		falcon_stop_nic_stats(efx);
	}
	rc = sfe4001_poweron(efx);
	if (rc)
		goto fail_ioexp;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	if (rc)
		goto fail_on;

	netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
	return 0;

fail_on:
	sfe4001_poweroff(efx);
fail_ioexp:
	i2c_unregister_device(board->ioexp_client);
fail_hwmon:
	i2c_unregister_device(board->hwmon_client);
	return rc;
}
|
||||
|
||||
/*****************************************************************************
|
||||
* Support for the SFE4002
|
||||
*
|
||||
*/
|
||||
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
|
||||
|
||||
static const u8 sfe4002_lm87_regs[] = {
|
||||
LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
|
||||
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
|
||||
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
|
||||
LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
|
||||
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
|
||||
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
|
||||
LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
|
||||
LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
|
||||
LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
|
||||
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
|
||||
0
|
||||
};
|
||||
|
||||
static const struct i2c_board_info sfe4002_hwmon_info = {
|
||||
I2C_BOARD_INFO("lm87", 0x2e),
|
||||
.platform_data = &sfe4002_lm87_channel,
|
||||
};
|
||||
|
||||
/****************************************************************************/
|
||||
/* LED allocations. Note that on rev A0 boards the schematic and the reality
|
||||
* differ: red and green are swapped. Below is the fixed (A1) layout (there
|
||||
* are only 3 A0 boards in existence, so no real reason to make this
|
||||
* conditional).
|
||||
*/
|
||||
#define SFE4002_FAULT_LED (2) /* Red */
|
||||
#define SFE4002_RX_LED (0) /* Green */
|
||||
#define SFE4002_TX_LED (1) /* Amber */
|
||||
|
||||
/* Configure the SFE4002's QT202x LEDs for normal operation. */
static void sfe4002_init_phy(struct efx_nic *efx)
{
	/* Set the TX and RX LEDs to reflect status and activity, and the
	 * fault LED off */
	falcon_qt202x_set_led(efx, SFE4002_TX_LED,
			      QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
	falcon_qt202x_set_led(efx, SFE4002_RX_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
	falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
|
||||
|
||||
static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
|
||||
{
|
||||
falcon_qt202x_set_led(
|
||||
efx, SFE4002_FAULT_LED,
|
||||
(mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
|
||||
}
|
||||
|
||||
static int sfe4002_check_hw(struct efx_nic *efx)
|
||||
{
|
||||
struct falcon_board *board = falcon_board(efx);
|
||||
|
||||
/* A0 board rev. 4002s report a temperature fault the whole time
|
||||
* (bad sensor) so we mask it out. */
|
||||
unsigned alarm_mask =
|
||||
(board->major == 0 && board->minor == 0) ?
|
||||
~LM87_ALARM_TEMP_EXT1 : ~0;
|
||||
|
||||
return efx_check_lm87(efx, alarm_mask);
|
||||
}
|
||||
|
||||
/* Board init for the SFE4002: just bring up the LM87 sensor. */
static int sfe4002_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
|
||||
|
||||
/*****************************************************************************
|
||||
* Support for the SFN4112F
|
||||
*
|
||||
*/
|
||||
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
|
||||
|
||||
static const u8 sfn4112f_lm87_regs[] = {
|
||||
LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
|
||||
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
|
||||
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
|
||||
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
|
||||
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
|
||||
LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
|
||||
LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
|
||||
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
|
||||
0
|
||||
};
|
||||
|
||||
static const struct i2c_board_info sfn4112f_hwmon_info = {
|
||||
I2C_BOARD_INFO("lm87", 0x2e),
|
||||
.platform_data = &sfn4112f_lm87_channel,
|
||||
};
|
||||
|
||||
#define SFN4112F_ACT_LED 0
|
||||
#define SFN4112F_LINK_LED 1
|
||||
|
||||
/* Configure the SFN4112F's QT202x LEDs: one for activity, one for link. */
static void sfn4112f_init_phy(struct efx_nic *efx)
{
	falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
|
||||
|
||||
static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
|
||||
{
|
||||
int reg;
|
||||
|
||||
switch (mode) {
|
||||
case EFX_LED_OFF:
|
||||
reg = QUAKE_LED_OFF;
|
||||
break;
|
||||
case EFX_LED_ON:
|
||||
reg = QUAKE_LED_ON;
|
||||
break;
|
||||
default:
|
||||
reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
|
||||
break;
|
||||
}
|
||||
|
||||
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
|
||||
}
|
||||
|
||||
/* Poll the SFN4112F's LM87 sensor. */
static int sfn4112f_check_hw(struct efx_nic *efx)
{
	/* Mask out unused sensors - presumably the alarm bits for the
	 * inputs this board leaves unconfigured (cf. sfn4112f_lm87_regs,
	 * which sets no limits for IN3 or AIN1); verify 0x48 against the
	 * LM87 datasheet's alarm-register bit layout */
	return efx_check_lm87(efx, ~0x48);
}
|
||||
|
||||
/* Board init for the SFN4112F: just bring up the LM87 sensor. */
static int sfn4112f_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
|
||||
|
||||
/*****************************************************************************
|
||||
* Support for the SFE4003
|
||||
*
|
||||
*/
|
||||
static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
|
||||
|
||||
static const u8 sfe4003_lm87_regs[] = {
|
||||
LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
|
||||
LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
|
||||
LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
|
||||
LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
|
||||
LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
|
||||
LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
|
||||
0
|
||||
};
|
||||
|
||||
static const struct i2c_board_info sfe4003_hwmon_info = {
|
||||
I2C_BOARD_INFO("lm87", 0x2e),
|
||||
.platform_data = &sfe4003_lm87_channel,
|
||||
};
|
||||
|
||||
/* Board-specific LED info. */
|
||||
#define SFE4003_RED_LED_GPIO 11
|
||||
#define SFE4003_LED_ON 1
|
||||
#define SFE4003_LED_OFF 0
|
||||
|
||||
static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
|
||||
{
|
||||
struct falcon_board *board = falcon_board(efx);
|
||||
|
||||
/* The LEDs were not wired to GPIOs before A3 */
|
||||
if (board->minor < 3 && board->major == 0)
|
||||
return;
|
||||
|
||||
falcon_txc_set_gpio_val(
|
||||
efx, SFE4003_RED_LED_GPIO,
|
||||
(mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
|
||||
}
|
||||
|
||||
/* Set up the SFE4003's red LED GPIO as an output, initially off. */
static void sfe4003_init_phy(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);

	/* The LEDs were not wired to GPIOs before A3 */
	if (board->minor < 3 && board->major == 0)
		return;

	falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
	falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
}
|
||||
|
||||
static int sfe4003_check_hw(struct efx_nic *efx)
|
||||
{
|
||||
struct falcon_board *board = falcon_board(efx);
|
||||
|
||||
/* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
|
||||
* (bad sensor) so we mask it out. */
|
||||
unsigned alarm_mask =
|
||||
(board->major == 0 && board->minor <= 2) ?
|
||||
~LM87_ALARM_TEMP_EXT1 : ~0;
|
||||
|
||||
return efx_check_lm87(efx, alarm_mask);
|
||||
}
|
||||
|
||||
/* Board init for the SFE4003: just bring up the LM87 sensor. */
static int sfe4003_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
}
|
||||
|
||||
/* Dispatch table mapping board-type IDs to per-board operations.
 * The SFE4001 needs no PHY-specific init; the LM87-monitored boards
 * share efx_fini_lm87() for teardown. */
static const struct falcon_board_type board_types[] = {
	{
		.id = FALCON_BOARD_SFE4001,
		.init = sfe4001_init,
		.init_phy = efx_port_dummy_op_void,
		.fini = sfe4001_fini,
		.set_id_led = tenxpress_set_id_led,
		.monitor = sfe4001_check_hw,
	},
	{
		.id = FALCON_BOARD_SFE4002,
		.init = sfe4002_init,
		.init_phy = sfe4002_init_phy,
		.fini = efx_fini_lm87,
		.set_id_led = sfe4002_set_id_led,
		.monitor = sfe4002_check_hw,
	},
	{
		.id = FALCON_BOARD_SFE4003,
		.init = sfe4003_init,
		.init_phy = sfe4003_init_phy,
		.fini = efx_fini_lm87,
		.set_id_led = sfe4003_set_id_led,
		.monitor = sfe4003_check_hw,
	},
	{
		.id = FALCON_BOARD_SFN4112F,
		.init = sfn4112f_init,
		.init_phy = sfn4112f_init_phy,
		.fini = efx_fini_lm87,
		.set_id_led = sfn4112f_set_id_led,
		.monitor = sfn4112f_check_hw,
	},
};
|
||||
|
||||
/* Decode the board revision word and bind the matching entry from
 * board_types[].  Returns 0 on success or -ENODEV for an unknown
 * board type.
 */
int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
	struct falcon_board *board = falcon_board(efx);
	u8 type_id = FALCON_BOARD_TYPE(revision_info);
	int i;

	board->major = FALCON_BOARD_MAJOR(revision_info);
	board->minor = FALCON_BOARD_MINOR(revision_info);

	/* Scan the dispatch table for a matching board type */
	for (i = 0; i < ARRAY_SIZE(board_types); i++)
		if (board_types[i].id == type_id)
			board->type = &board_types[i];

	if (!board->type) {
		netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
			  type_id);
		return -ENODEV;
	}

	return 0;
}
|
||||
2962
drivers/net/ethernet/sfc/farch.c
Normal file
2962
drivers/net/ethernet/sfc/farch.c
Normal file
File diff suppressed because it is too large
Load diff
2932
drivers/net/ethernet/sfc/farch_regs.h
Normal file
2932
drivers/net/ethernet/sfc/farch_regs.h
Normal file
File diff suppressed because it is too large
Load diff
272
drivers/net/ethernet/sfc/filter.h
Normal file
272
drivers/net/ethernet/sfc/filter.h
Normal file
|
|
@ -0,0 +1,272 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_FILTER_H
|
||||
#define EFX_FILTER_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
/**
|
||||
* enum efx_filter_match_flags - Flags for hardware filter match type
|
||||
* @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
|
||||
* @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
|
||||
* @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
|
||||
* @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
|
||||
* @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
|
||||
* @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
|
||||
* @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
|
||||
* @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
|
||||
* @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
|
||||
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
|
||||
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
|
||||
* Used for RX default unicast and multicast/broadcast filters.
|
||||
*
|
||||
* Only some combinations are supported, depending on NIC type:
|
||||
*
|
||||
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
|
||||
* local 2-tuple (only implemented for Falcon B0)
|
||||
*
|
||||
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
|
||||
* or local 2-tuple, or local MAC with or without outer VID, and RX
|
||||
* default filters
|
||||
*
|
||||
* - Huntington supports filter matching controlled by firmware, potentially
|
||||
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
|
||||
* with or without outer and inner VID
|
||||
*/
|
||||
enum efx_filter_match_flags {
|
||||
EFX_FILTER_MATCH_REM_HOST = 0x0001,
|
||||
EFX_FILTER_MATCH_LOC_HOST = 0x0002,
|
||||
EFX_FILTER_MATCH_REM_MAC = 0x0004,
|
||||
EFX_FILTER_MATCH_REM_PORT = 0x0008,
|
||||
EFX_FILTER_MATCH_LOC_MAC = 0x0010,
|
||||
EFX_FILTER_MATCH_LOC_PORT = 0x0020,
|
||||
EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
|
||||
EFX_FILTER_MATCH_INNER_VID = 0x0080,
|
||||
EFX_FILTER_MATCH_OUTER_VID = 0x0100,
|
||||
EFX_FILTER_MATCH_IP_PROTO = 0x0200,
|
||||
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum efx_filter_priority - priority of a hardware filter specification
|
||||
* @EFX_FILTER_PRI_HINT: Performance hint
|
||||
* @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
|
||||
* or hardware requirements. This may only be used by the filter
|
||||
* implementation for each NIC type.
|
||||
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
|
||||
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
|
||||
* networking and SR-IOV)
|
||||
*/
|
||||
enum efx_filter_priority {
|
||||
EFX_FILTER_PRI_HINT = 0,
|
||||
EFX_FILTER_PRI_AUTO,
|
||||
EFX_FILTER_PRI_MANUAL,
|
||||
EFX_FILTER_PRI_REQUIRED,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum efx_filter_flags - flags for hardware filter specifications
|
||||
* @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
|
||||
* By default, matching packets will be delivered only to the
|
||||
* specified queue. If this flag is set, they will be delivered
|
||||
* to a range of queues offset from the specified queue number
|
||||
* according to the indirection table.
|
||||
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
|
||||
* queue.
|
||||
* @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
|
||||
* overriding an automatic filter (priority
|
||||
* %EFX_FILTER_PRI_AUTO). This may only be set by the filter
|
||||
* implementation for each type. A removal request will restore
|
||||
* the automatic filter in its place.
|
||||
* @EFX_FILTER_FLAG_RX: Filter is for RX
|
||||
* @EFX_FILTER_FLAG_TX: Filter is for TX
|
||||
*/
|
||||
enum efx_filter_flags {
|
||||
EFX_FILTER_FLAG_RX_RSS = 0x01,
|
||||
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
|
||||
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
|
||||
EFX_FILTER_FLAG_RX = 0x08,
|
||||
EFX_FILTER_FLAG_TX = 0x10,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct efx_filter_spec - specification for a hardware filter
|
||||
* @match_flags: Match type flags, from &enum efx_filter_match_flags
|
||||
* @priority: Priority of the filter, from &enum efx_filter_priority
|
||||
* @flags: Miscellaneous flags, from &enum efx_filter_flags
|
||||
* @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
|
||||
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
|
||||
* an RX drop filter
|
||||
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
|
||||
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
|
||||
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
|
||||
* %EFX_FILTER_MATCH_LOC_MAC_IG is set
|
||||
* @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
|
||||
* @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
|
||||
* @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
|
||||
* is set
|
||||
* @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
|
||||
* @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
|
||||
* @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
|
||||
* @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
|
||||
*
|
||||
* The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
|
||||
* used to initialise the structure. The efx_filter_set_*() functions
|
||||
* may then be used to set @rss_context, @match_flags and related
|
||||
* fields.
|
||||
*
|
||||
* The @priority field is used by software to determine whether a new
|
||||
* filter may replace an old one. The hardware priority of a filter
|
||||
* depends on which fields are matched.
|
||||
*/
|
||||
struct efx_filter_spec {
|
||||
u32 match_flags:12;
|
||||
u32 priority:2;
|
||||
u32 flags:6;
|
||||
u32 dmaq_id:12;
|
||||
u32 rss_context;
|
||||
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
|
||||
__be16 inner_vid;
|
||||
u8 loc_mac[ETH_ALEN];
|
||||
u8 rem_mac[ETH_ALEN];
|
||||
__be16 ether_type;
|
||||
u8 ip_proto;
|
||||
__be32 loc_host[4];
|
||||
__be32 rem_host[4];
|
||||
__be16 loc_port;
|
||||
__be16 rem_port;
|
||||
/* total 64 bytes */
|
||||
};
|
||||
|
||||
enum {
|
||||
EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
|
||||
EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
|
||||
};
|
||||
|
||||
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
|
||||
enum efx_filter_priority priority,
|
||||
enum efx_filter_flags flags,
|
||||
unsigned rxq_id)
|
||||
{
|
||||
memset(spec, 0, sizeof(*spec));
|
||||
spec->priority = priority;
|
||||
spec->flags = EFX_FILTER_FLAG_RX | flags;
|
||||
spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
|
||||
spec->dmaq_id = rxq_id;
|
||||
}
|
||||
|
||||
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
|
||||
unsigned txq_id)
|
||||
{
|
||||
memset(spec, 0, sizeof(*spec));
|
||||
spec->priority = EFX_FILTER_PRI_REQUIRED;
|
||||
spec->flags = EFX_FILTER_FLAG_TX;
|
||||
spec->dmaq_id = txq_id;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
|
||||
* @spec: Specification to initialise
|
||||
* @proto: Transport layer protocol number
|
||||
* @host: Local host address (network byte order)
|
||||
* @port: Local port (network byte order)
|
||||
*/
|
||||
static inline int
|
||||
efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
|
||||
__be32 host, __be16 port)
|
||||
{
|
||||
spec->match_flags |=
|
||||
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
|
||||
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
|
||||
spec->ether_type = htons(ETH_P_IP);
|
||||
spec->ip_proto = proto;
|
||||
spec->loc_host[0] = host;
|
||||
spec->loc_port = port;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
|
||||
* @spec: Specification to initialise
|
||||
* @proto: Transport layer protocol number
|
||||
* @lhost: Local host address (network byte order)
|
||||
* @lport: Local port (network byte order)
|
||||
* @rhost: Remote host address (network byte order)
|
||||
* @rport: Remote port (network byte order)
|
||||
*/
|
||||
static inline int
|
||||
efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
|
||||
__be32 lhost, __be16 lport,
|
||||
__be32 rhost, __be16 rport)
|
||||
{
|
||||
spec->match_flags |=
|
||||
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
|
||||
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
|
||||
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
|
||||
spec->ether_type = htons(ETH_P_IP);
|
||||
spec->ip_proto = proto;
|
||||
spec->loc_host[0] = lhost;
|
||||
spec->loc_port = lport;
|
||||
spec->rem_host[0] = rhost;
|
||||
spec->rem_port = rport;
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum {
|
||||
EFX_FILTER_VID_UNSPEC = 0xffff,
|
||||
};
|
||||
|
||||
/**
|
||||
* efx_filter_set_eth_local - specify local Ethernet address and/or VID
|
||||
* @spec: Specification to initialise
|
||||
* @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
|
||||
* @addr: Local Ethernet MAC address, or %NULL
|
||||
*/
|
||||
static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
|
||||
u16 vid, const u8 *addr)
|
||||
{
|
||||
if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (vid != EFX_FILTER_VID_UNSPEC) {
|
||||
spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
|
||||
spec->outer_vid = htons(vid);
|
||||
}
|
||||
if (addr != NULL) {
|
||||
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
|
||||
ether_addr_copy(spec->loc_mac, addr);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
|
||||
* @spec: Specification to initialise
|
||||
*/
|
||||
static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
{
	/* loc_mac is left all-zero, so the I/G bit is clear:
	 * matches individual (unicast) addresses */
	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
	return 0;
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
|
||||
* @spec: Specification to initialise
|
||||
*/
|
||||
static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
{
	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
	/* Set the I/G bit (LSB of the first MAC octet) to match
	 * group (multicast/broadcast) addresses */
	spec->loc_mac[0] = 1;
	return 0;
}
|
||||
|
||||
#endif /* EFX_FILTER_H */
|
||||
302
drivers/net/ethernet/sfc/io.h
Normal file
302
drivers/net/ethernet/sfc/io.h
Normal file
|
|
@ -0,0 +1,302 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_IO_H
|
||||
#define EFX_IO_H
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* NIC register I/O
|
||||
*
|
||||
**************************************************************************
|
||||
*
|
||||
* Notes on locking strategy for the Falcon architecture:
|
||||
*
|
||||
* Many CSRs are very wide and cannot be read or written atomically.
|
||||
* Writes from the host are buffered by the Bus Interface Unit (BIU)
|
||||
* up to 128 bits. Whenever the host writes part of such a register,
|
||||
* the BIU collects the written value and does not write to the
|
||||
* underlying register until all 4 dwords have been written. A
|
||||
* similar buffering scheme applies to host access to the NIC's 64-bit
|
||||
* SRAM.
|
||||
*
|
||||
* Writes to different CSRs and 64-bit SRAM words must be serialised,
|
||||
* since interleaved access can result in lost writes. We use
|
||||
* efx_nic::biu_lock for this.
|
||||
*
|
||||
* We also serialise reads from 128-bit CSRs and SRAM with the same
|
||||
* spinlock. This may not be necessary, but it doesn't really matter
|
||||
* as there are no such reads on the fast path.
|
||||
*
|
||||
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
|
||||
* 128-bit but are special-cased in the BIU to avoid the need for
|
||||
* locking in the host:
|
||||
*
|
||||
* - They are write-only.
|
||||
* - The semantics of writing to these registers are such that
|
||||
* replacing the low 96 bits with zero does not affect functionality.
|
||||
* - If the host writes to the last dword address of such a register
|
||||
* (i.e. the high 32 bits) the underlying register will always be
|
||||
* written. If the collector and the current write together do not
|
||||
* provide values for all 128 bits of the register, the low 96 bits
|
||||
* will be written as zero.
|
||||
* - If the host writes to the address of any other part of such a
|
||||
* register while the collector already holds values for some other
|
||||
* register, the write is discarded and the collector maintains its
|
||||
* current state.
|
||||
*
|
||||
* The EF10 architecture exposes very few registers to the host and
|
||||
* most of them are only 32 bits wide. The only exceptions are the MC
|
||||
* doorbell register pair, which has its own latching, and
|
||||
* TX_DESC_UPD, which works in a similar way to the Falcon
|
||||
* architecture.
|
||||
*/
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_USE_QWORD_IO 1
|
||||
#endif
|
||||
|
||||
/* Hardware issue requires that only 64-bit naturally aligned writes
|
||||
* are seen by hardware. Its not strictly necessary to restrict to
|
||||
* x86_64 arch, but done for safety since unusual write combining behaviour
|
||||
* can break PIO.
|
||||
*/
|
||||
#ifdef CONFIG_X86_64
|
||||
/* PIO is a win only if write-combining is possible */
|
||||
#ifdef ARCH_HAS_IOREMAP_WC
|
||||
#define EFX_USE_PIO 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
|
||||
unsigned int reg)
|
||||
{
|
||||
__raw_writeq((__force u64)value, efx->membase + reg);
|
||||
}
|
||||
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
|
||||
{
|
||||
return (__force __le64)__raw_readq(efx->membase + reg);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
|
||||
unsigned int reg)
|
||||
{
|
||||
__raw_writel((__force u32)value, efx->membase + reg);
|
||||
}
|
||||
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
|
||||
{
|
||||
return (__force __le32)__raw_readl(efx->membase + reg);
|
||||
}
|
||||
|
||||
/* Write a normal 128-bit CSR, locking as appropriate. */
|
||||
static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
|
||||
unsigned int reg)
|
||||
{
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing register %x with " EFX_OWORD_FMT "\n", reg,
|
||||
EFX_OWORD_VAL(*value));
|
||||
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
_efx_writeq(efx, value->u64[0], reg + 0);
|
||||
_efx_writeq(efx, value->u64[1], reg + 8);
|
||||
#else
|
||||
_efx_writed(efx, value->u32[0], reg + 0);
|
||||
_efx_writed(efx, value->u32[1], reg + 4);
|
||||
_efx_writed(efx, value->u32[2], reg + 8);
|
||||
_efx_writed(efx, value->u32[3], reg + 12);
|
||||
#endif
|
||||
mmiowb();
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
}
|
||||
|
||||
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
|
||||
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
|
||||
const efx_qword_t *value, unsigned int index)
|
||||
{
|
||||
unsigned int addr = index * sizeof(*value);
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing SRAM address %x with " EFX_QWORD_FMT "\n",
|
||||
addr, EFX_QWORD_VAL(*value));
|
||||
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
__raw_writeq((__force u64)value->u64[0], membase + addr);
|
||||
#else
|
||||
__raw_writel((__force u32)value->u32[0], membase + addr);
|
||||
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
|
||||
#endif
|
||||
mmiowb();
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
}
|
||||
|
||||
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
|
||||
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
|
||||
unsigned int reg)
|
||||
{
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing register %x with "EFX_DWORD_FMT"\n",
|
||||
reg, EFX_DWORD_VAL(*value));
|
||||
|
||||
/* No lock required */
|
||||
_efx_writed(efx, value->u32[0], reg);
|
||||
}
|
||||
|
||||
/* Read a 128-bit CSR, locking as appropriate. */
|
||||
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
|
||||
unsigned int reg)
|
||||
{
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
value->u32[0] = _efx_readd(efx, reg + 0);
|
||||
value->u32[1] = _efx_readd(efx, reg + 4);
|
||||
value->u32[2] = _efx_readd(efx, reg + 8);
|
||||
value->u32[3] = _efx_readd(efx, reg + 12);
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"read from register %x, got " EFX_OWORD_FMT "\n", reg,
|
||||
EFX_OWORD_VAL(*value));
|
||||
}
|
||||
|
||||
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
|
||||
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
|
||||
efx_qword_t *value, unsigned int index)
|
||||
{
|
||||
unsigned int addr = index * sizeof(*value);
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
|
||||
#else
|
||||
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
|
||||
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
|
||||
#endif
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"read from SRAM address %x, got "EFX_QWORD_FMT"\n",
|
||||
addr, EFX_QWORD_VAL(*value));
|
||||
}
|
||||
|
||||
/* Read a 32-bit CSR or SRAM */
|
||||
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
|
||||
unsigned int reg)
|
||||
{
|
||||
value->u32[0] = _efx_readd(efx, reg);
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"read from register %x, got "EFX_DWORD_FMT"\n",
|
||||
reg, EFX_DWORD_VAL(*value));
|
||||
}
|
||||
|
||||
/* Write a 128-bit CSR forming part of a table */
|
||||
static inline void
|
||||
efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
|
||||
unsigned int reg, unsigned int index)
|
||||
{
|
||||
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
|
||||
}
|
||||
|
||||
/* Read a 128-bit CSR forming part of a table */
|
||||
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
|
||||
unsigned int reg, unsigned int index)
|
||||
{
|
||||
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
|
||||
}
|
||||
|
||||
/* Page size used as step between per-VI registers */
|
||||
#define EFX_VI_PAGE_SIZE 0x2000
|
||||
|
||||
/* Calculate offset to page-mapped register */
|
||||
#define EFX_PAGED_REG(page, reg) \
|
||||
((page) * EFX_VI_PAGE_SIZE + (reg))
|
||||
|
||||
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
|
||||
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
|
||||
unsigned int reg, unsigned int page)
|
||||
{
|
||||
reg = EFX_PAGED_REG(page, reg);
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing register %x with " EFX_OWORD_FMT "\n", reg,
|
||||
EFX_OWORD_VAL(*value));
|
||||
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
_efx_writeq(efx, value->u64[0], reg + 0);
|
||||
_efx_writeq(efx, value->u64[1], reg + 8);
|
||||
#else
|
||||
_efx_writed(efx, value->u32[0], reg + 0);
|
||||
_efx_writed(efx, value->u32[1], reg + 4);
|
||||
_efx_writed(efx, value->u32[2], reg + 8);
|
||||
_efx_writed(efx, value->u32[3], reg + 12);
|
||||
#endif
|
||||
}
|
||||
#define efx_writeo_page(efx, value, reg, page) \
|
||||
_efx_writeo_page(efx, value, \
|
||||
reg + \
|
||||
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
|
||||
page)
|
||||
|
||||
/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
|
||||
* high bits of RX_DESC_UPD or TX_DESC_UPD)
|
||||
*/
|
||||
static inline void
|
||||
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
|
||||
unsigned int reg, unsigned int page)
|
||||
{
|
||||
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
|
||||
}
|
||||
#define efx_writed_page(efx, value, reg, page) \
|
||||
_efx_writed_page(efx, value, \
|
||||
reg + \
|
||||
BUILD_BUG_ON_ZERO((reg) != 0x400 && \
|
||||
(reg) != 0x420 && \
|
||||
(reg) != 0x830 && \
|
||||
(reg) != 0x83c && \
|
||||
(reg) != 0xa18 && \
|
||||
(reg) != 0xa1c), \
|
||||
page)
|
||||
|
||||
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
|
||||
* in the BIU means that writes to TIMER_COMMAND[0] invalidate the
|
||||
* collector register.
|
||||
*/
|
||||
static inline void _efx_writed_page_locked(struct efx_nic *efx,
|
||||
const efx_dword_t *value,
|
||||
unsigned int reg,
|
||||
unsigned int page)
|
||||
{
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
if (page == 0) {
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
} else {
|
||||
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
|
||||
}
|
||||
}
|
||||
#define efx_writed_page_locked(efx, value, reg, page) \
|
||||
_efx_writed_page_locked(efx, value, \
|
||||
reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
|
||||
page)
|
||||
|
||||
#endif /* EFX_IO_H */
|
||||
1891
drivers/net/ethernet/sfc/mcdi.c
Normal file
1891
drivers/net/ethernet/sfc/mcdi.c
Normal file
File diff suppressed because it is too large
Load diff
361
drivers/net/ethernet/sfc/mcdi.h
Normal file
361
drivers/net/ethernet/sfc/mcdi.h
Normal file
|
|
@ -0,0 +1,361 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2008-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_MCDI_H
|
||||
#define EFX_MCDI_H
|
||||
|
||||
/**
|
||||
* enum efx_mcdi_state - MCDI request handling state
|
||||
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
|
||||
* mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
|
||||
* @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
|
||||
* Only the thread that moved into this state is allowed to move out of it.
|
||||
* @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
|
||||
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
|
||||
* has not yet consumed the result. For all other threads, equivalent to
|
||||
* %MCDI_STATE_RUNNING.
|
||||
*/
|
||||
enum efx_mcdi_state {
|
||||
MCDI_STATE_QUIESCENT,
|
||||
MCDI_STATE_RUNNING_SYNC,
|
||||
MCDI_STATE_RUNNING_ASYNC,
|
||||
MCDI_STATE_COMPLETED,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum efx_mcdi_mode - MCDI transaction mode
|
||||
* @MCDI_MODE_POLL: poll for MCDI completion, until timeout
|
||||
* @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
|
||||
* @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
|
||||
*/
|
||||
enum efx_mcdi_mode {
|
||||
MCDI_MODE_POLL,
|
||||
MCDI_MODE_EVENTS,
|
||||
MCDI_MODE_FAIL,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct efx_mcdi_iface - MCDI protocol context
|
||||
* @efx: The associated NIC.
|
||||
* @state: Request handling state. Waited for by @wq.
|
||||
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
|
||||
* @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
|
||||
* @new_epoch: Indicates start of day or start of MC reboot recovery
|
||||
* @iface_lock: Serialises access to @seqno, @credits and response metadata
|
||||
* @seqno: The next sequence number to use for mcdi requests.
|
||||
* @credits: Number of spurious MCDI completion events allowed before we
|
||||
* trigger a fatal error
|
||||
* @resprc: Response error/success code (Linux numbering)
|
||||
* @resp_hdr_len: Response header length
|
||||
* @resp_data_len: Response data (SDU or error) length
|
||||
* @async_lock: Serialises access to @async_list while event processing is
|
||||
* enabled
|
||||
* @async_list: Queue of asynchronous requests
|
||||
* @async_timer: Timer for asynchronous request timeout
|
||||
*/
|
||||
struct efx_mcdi_iface {
|
||||
struct efx_nic *efx;
|
||||
enum efx_mcdi_state state;
|
||||
enum efx_mcdi_mode mode;
|
||||
wait_queue_head_t wq;
|
||||
spinlock_t iface_lock;
|
||||
bool new_epoch;
|
||||
unsigned int credits;
|
||||
unsigned int seqno;
|
||||
int resprc;
|
||||
size_t resp_hdr_len;
|
||||
size_t resp_data_len;
|
||||
spinlock_t async_lock;
|
||||
struct list_head async_list;
|
||||
struct timer_list async_timer;
|
||||
};
|
||||
|
||||
struct efx_mcdi_mon {
|
||||
struct efx_buffer dma_buf;
|
||||
struct mutex update_lock;
|
||||
unsigned long last_update;
|
||||
struct device *device;
|
||||
struct efx_mcdi_mon_attribute *attrs;
|
||||
struct attribute_group group;
|
||||
const struct attribute_group *groups[2];
|
||||
unsigned int n_attrs;
|
||||
};
|
||||
|
||||
struct efx_mcdi_mtd_partition {
|
||||
struct efx_mtd_partition common;
|
||||
bool updating;
|
||||
u16 nvram_type;
|
||||
u16 fw_subtype;
|
||||
};
|
||||
|
||||
#define to_efx_mcdi_mtd_partition(mtd) \
|
||||
container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
|
||||
|
||||
/**
|
||||
* struct efx_mcdi_data - extra state for NICs that implement MCDI
|
||||
* @iface: Interface/protocol state
|
||||
* @hwmon: Hardware monitor state
|
||||
* @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
|
||||
*/
|
||||
struct efx_mcdi_data {
|
||||
struct efx_mcdi_iface iface;
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
struct efx_mcdi_mon hwmon;
|
||||
#endif
|
||||
u32 fn_flags;
|
||||
};
|
||||
|
||||
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
|
||||
{
|
||||
EFX_BUG_ON_PARANOID(!efx->mcdi);
|
||||
return &efx->mcdi->iface;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
|
||||
{
|
||||
EFX_BUG_ON_PARANOID(!efx->mcdi);
|
||||
return &efx->mcdi->hwmon;
|
||||
}
|
||||
#endif
|
||||
|
||||
int efx_mcdi_init(struct efx_nic *efx);
|
||||
void efx_mcdi_fini(struct efx_nic *efx);
|
||||
|
||||
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
|
||||
size_t inlen, efx_dword_t *outbuf, size_t outlen,
|
||||
size_t *outlen_actual);
|
||||
int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen,
|
||||
efx_dword_t *outbuf, size_t outlen,
|
||||
size_t *outlen_actual);
|
||||
|
||||
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen);
|
||||
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
|
||||
efx_dword_t *outbuf, size_t outlen,
|
||||
size_t *outlen_actual);
|
||||
int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
|
||||
size_t inlen, efx_dword_t *outbuf,
|
||||
size_t outlen, size_t *outlen_actual);
|
||||
|
||||
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
|
||||
unsigned long cookie, int rc,
|
||||
efx_dword_t *outbuf,
|
||||
size_t outlen_actual);
|
||||
int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
|
||||
efx_mcdi_async_completer *complete,
|
||||
unsigned long cookie);
|
||||
int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen,
|
||||
size_t outlen,
|
||||
efx_mcdi_async_completer *complete,
|
||||
unsigned long cookie);
|
||||
|
||||
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
|
||||
size_t inlen, efx_dword_t *outbuf,
|
||||
size_t outlen, int rc);
|
||||
|
||||
int efx_mcdi_poll_reboot(struct efx_nic *efx);
|
||||
void efx_mcdi_mode_poll(struct efx_nic *efx);
|
||||
void efx_mcdi_mode_event(struct efx_nic *efx);
|
||||
void efx_mcdi_flush_async(struct efx_nic *efx);
|
||||
|
||||
void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
|
||||
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
|
||||
|
||||
/* We expect that 16- and 32-bit fields in MCDI requests and responses
|
||||
* are appropriately aligned, but 64-bit fields are only
|
||||
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
|
||||
* memory strictly 32 bits at a time, so add any necessary padding.
|
||||
*/
|
||||
#define MCDI_DECLARE_BUF(_name, _len) \
|
||||
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
|
||||
#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
|
||||
MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
|
||||
#define _MCDI_PTR(_buf, _offset) \
|
||||
((u8 *)(_buf) + (_offset))
|
||||
#define MCDI_PTR(_buf, _field) \
|
||||
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
|
||||
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
|
||||
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
|
||||
#define _MCDI_DWORD(_buf, _field) \
|
||||
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
|
||||
|
||||
#define MCDI_WORD(_buf, _field) \
|
||||
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
|
||||
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
|
||||
#define MCDI_SET_DWORD(_buf, _field, _value) \
|
||||
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
|
||||
#define MCDI_DWORD(_buf, _field) \
|
||||
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
|
||||
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
|
||||
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1)
|
||||
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2) \
|
||||
EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2)
|
||||
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3) \
|
||||
EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3)
|
||||
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4) \
|
||||
EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4)
|
||||
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4, _name5, _value5) \
|
||||
EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4, \
|
||||
MC_CMD_ ## _name5, _value5)
|
||||
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4, _name5, _value5, \
|
||||
_name6, _value6) \
|
||||
EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4, \
|
||||
MC_CMD_ ## _name5, _value5, \
|
||||
MC_CMD_ ## _name6, _value6)
|
||||
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4, _name5, _value5, \
|
||||
_name6, _value6, _name7, _value7) \
|
||||
EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4, \
|
||||
MC_CMD_ ## _name5, _value5, \
|
||||
MC_CMD_ ## _name6, _value6, \
|
||||
MC_CMD_ ## _name7, _value7)
|
||||
#define MCDI_SET_QWORD(_buf, _field, _value) \
|
||||
do { \
|
||||
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
|
||||
EFX_DWORD_0, (u32)(_value)); \
|
||||
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
|
||||
EFX_DWORD_0, (u64)(_value) >> 32); \
|
||||
} while (0)
|
||||
#define MCDI_QWORD(_buf, _field) \
|
||||
(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
|
||||
(u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
|
||||
#define MCDI_FIELD(_ptr, _type, _field) \
|
||||
EFX_EXTRACT_DWORD( \
|
||||
*(efx_dword_t *) \
|
||||
_MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
|
||||
MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
|
||||
(MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
|
||||
MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
|
||||
|
||||
#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
|
||||
(_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
|
||||
+ (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
|
||||
#define MCDI_DECLARE_STRUCT_PTR(_name) \
|
||||
efx_dword_t *_name
|
||||
#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
|
||||
((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
|
||||
#define MCDI_VAR_ARRAY_LEN(_len, _field) \
|
||||
min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
|
||||
((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
|
||||
#define MCDI_ARRAY_WORD(_buf, _field, _index) \
|
||||
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
|
||||
le16_to_cpu(*(__force const __le16 *) \
|
||||
_MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
|
||||
#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
|
||||
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
|
||||
(efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
|
||||
#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
|
||||
EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
|
||||
EFX_DWORD_0, _value)
|
||||
#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
|
||||
EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
|
||||
#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
|
||||
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
|
||||
(efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
|
||||
#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
|
||||
do { \
|
||||
EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
|
||||
EFX_DWORD_0, (u32)(_value)); \
|
||||
EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
|
||||
EFX_DWORD_0, (u64)(_value) >> 32); \
|
||||
} while (0)
|
||||
#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
|
||||
MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
|
||||
_type ## _TYPEDEF, _field2)
|
||||
|
||||
#define MCDI_EVENT_FIELD(_ev, _field) \
|
||||
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
|
||||
|
||||
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
|
||||
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
|
||||
u16 *fw_subtype_list, u32 *capabilities);
|
||||
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
|
||||
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
|
||||
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
|
||||
size_t *size_out, size_t *erase_size_out,
|
||||
bool *protected_out);
|
||||
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
|
||||
int efx_mcdi_handle_assertion(struct efx_nic *efx);
|
||||
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
|
||||
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
|
||||
int *id_out);
|
||||
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
|
||||
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
|
||||
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
|
||||
int efx_mcdi_flush_rxqs(struct efx_nic *efx);
|
||||
int efx_mcdi_port_probe(struct efx_nic *efx);
|
||||
void efx_mcdi_port_remove(struct efx_nic *efx);
|
||||
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
|
||||
int efx_mcdi_port_get_number(struct efx_nic *efx);
|
||||
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
|
||||
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
|
||||
int efx_mcdi_set_mac(struct efx_nic *efx);
|
||||
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
|
||||
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
|
||||
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
|
||||
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
|
||||
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
|
||||
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
|
||||
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
|
||||
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
int efx_mcdi_mon_probe(struct efx_nic *efx);
|
||||
void efx_mcdi_mon_remove(struct efx_nic *efx);
|
||||
#else
|
||||
static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
|
||||
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SFC_MTD
|
||||
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
|
||||
size_t *retlen, u8 *buffer);
|
||||
int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
|
||||
int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
|
||||
size_t *retlen, const u8 *buffer);
|
||||
int efx_mcdi_mtd_sync(struct mtd_info *mtd);
|
||||
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
|
||||
#endif
|
||||
|
||||
#endif /* EFX_MCDI_H */
|
||||
534
drivers/net/ethernet/sfc/mcdi_mon.c
Normal file
534
drivers/net/ethernet/sfc/mcdi_mon.c
Normal file
|
|
@ -0,0 +1,534 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2011-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/hwmon.h>
|
||||
#include <linux/stat.h>
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "mcdi.h"
|
||||
#include "mcdi_pcol.h"
|
||||
#include "nic.h"
|
||||
|
||||
enum efx_hwmon_type {
|
||||
EFX_HWMON_UNKNOWN,
|
||||
EFX_HWMON_TEMP, /* temperature */
|
||||
EFX_HWMON_COOL, /* cooling device, probably a heatsink */
|
||||
EFX_HWMON_IN, /* voltage */
|
||||
EFX_HWMON_CURR, /* current */
|
||||
EFX_HWMON_POWER, /* power */
|
||||
EFX_HWMON_TYPES_COUNT
|
||||
};
|
||||
|
||||
static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
|
||||
[EFX_HWMON_TEMP] = " degC",
|
||||
[EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
|
||||
[EFX_HWMON_IN] = " mV",
|
||||
[EFX_HWMON_CURR] = " mA",
|
||||
[EFX_HWMON_POWER] = " W",
|
||||
};
|
||||
|
||||
static const struct {
|
||||
const char *label;
|
||||
enum efx_hwmon_type hwmon_type;
|
||||
int port;
|
||||
} efx_mcdi_sensor_type[] = {
|
||||
#define SENSOR(name, label, hwmon_type, port) \
|
||||
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
|
||||
SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
|
||||
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
|
||||
SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
|
||||
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
|
||||
SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
|
||||
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
|
||||
SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
|
||||
SENSOR(IN_1V0, "1.0V supply", IN, -1),
|
||||
SENSOR(IN_1V2, "1.2V supply", IN, -1),
|
||||
SENSOR(IN_1V8, "1.8V supply", IN, -1),
|
||||
SENSOR(IN_2V5, "2.5V supply", IN, -1),
|
||||
SENSOR(IN_3V3, "3.3V supply", IN, -1),
|
||||
SENSOR(IN_12V0, "12.0V supply", IN, -1),
|
||||
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
|
||||
SENSOR(IN_VREF, "Ref. voltage", IN, -1),
|
||||
SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
|
||||
SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
|
||||
SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
|
||||
SENSOR(PSU_TEMP, "Controller regulator temp.",
|
||||
TEMP, -1),
|
||||
SENSOR(FAN_0, "Fan 0", COOL, -1),
|
||||
SENSOR(FAN_1, "Fan 1", COOL, -1),
|
||||
SENSOR(FAN_2, "Fan 2", COOL, -1),
|
||||
SENSOR(FAN_3, "Fan 3", COOL, -1),
|
||||
SENSOR(FAN_4, "Fan 4", COOL, -1),
|
||||
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
|
||||
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
|
||||
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
|
||||
SENSOR(NIC_POWER, "Board power use", POWER, -1),
|
||||
SENSOR(IN_0V9, "0.9V supply", IN, -1),
|
||||
SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
|
||||
SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
|
||||
SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
|
||||
SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
|
||||
SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
|
||||
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
|
||||
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
|
||||
SENSOR(CONTROLLER_VPTAT,
|
||||
"Controller PTAT voltage (int. ADC)", IN, -1),
|
||||
SENSOR(CONTROLLER_INTERNAL_TEMP,
|
||||
"Controller die temp. (int. ADC)", TEMP, -1),
|
||||
SENSOR(CONTROLLER_VPTAT_EXTADC,
|
||||
"Controller PTAT voltage (ext. ADC)", IN, -1),
|
||||
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
|
||||
"Controller die temp. (ext. ADC)", TEMP, -1),
|
||||
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
|
||||
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
|
||||
SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
|
||||
SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
|
||||
SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
|
||||
#undef SENSOR
|
||||
};
|
||||
|
||||
/* Human-readable descriptions of the MCDI sensor states, indexed by
 * the MC_CMD_SENSOR_STATE_* value reported by the firmware.
 */
static const char *const sensor_status_names[] = {
	[MC_CMD_SENSOR_STATE_OK] = "OK",
	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
	[MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
|
||||
|
||||
/* Handle an MCDI SENSOREVT event from the MC: decode the sensor number,
 * new state and reading from @ev, then log a human-readable description.
 * @efx: NIC that raised the event
 * @ev:  raw event qword from the MC event queue
 */
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	unsigned int type, state, value;
	enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
	const char *name = NULL, *state_txt, *unit;

	type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
	value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);

	/* Deal gracefully with the board having more drivers than we
	 * know about, but do not expect new sensor states. */
	if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
		name = efx_mcdi_sensor_type[type].label;
		hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
	}
	if (!name)
		name = "No sensor name available";
	/* States outside sensor_status_names would index past the table */
	EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
	state_txt = sensor_status_names[state];
	EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
	unit = efx_hwmon_unit[hwmon_type];
	if (!unit)
		unit = "";

	netif_err(efx, hw, efx->net_dev,
		  "Sensor %d (%s) reports condition '%s' for value %d%s\n",
		  type, name, state_txt, value, unit);
}
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
|
||||
/* State backing one hwmon sysfs attribute (e.g. temp1_input) */
struct efx_mcdi_mon_attribute {
	struct device_attribute dev_attr;
	unsigned int index;		/* sensor index into the DMA buffer */
	unsigned int type;		/* MC_CMD_SENSOR_* type number */
	enum efx_hwmon_type hwmon_type;	/* hwmon class used for unit scaling */
	unsigned int limit_value;	/* fixed limit for _min/_max/_crit files */
	char name[12];			/* storage for the sysfs file name */
};
|
||||
|
||||
/* Ask the MC to DMA a fresh snapshot of all sensor readings into
 * hwmon->dma_buf.  On success, record the refresh time so readers can
 * serve cached values for a while.  Returns 0 or a negative MCDI error.
 */
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
	int rc;

	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
		       hwmon->dma_buf.dma_addr);
	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);

	/* The readings arrive by DMA, so there is no MCDI response payload */
	rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);
	if (rc == 0)
		hwmon->last_update = jiffies;
	return rc;
}
|
||||
|
||||
/* Copy one sensor entry out of the DMA buffer, refreshing the whole
 * buffer from the MC first if the cached copy is at least 1s old.
 * @dev:   hwmon device (its parent holds the efx_nic drvdata)
 * @index: sensor index into the DMA buffer
 * @entry: output reading
 * Returns 0, or a negative error from the refresh; *entry is written
 * unconditionally but is only meaningful when 0 is returned.
 */
static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
				  efx_dword_t *entry)
{
	struct efx_nic *efx = dev_get_drvdata(dev->parent);
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);

	/* Serialise refresh and read of the shared DMA buffer */
	mutex_lock(&hwmon->update_lock);

	/* Use cached value if last update was < 1 s ago */
	if (time_before(jiffies, hwmon->last_update + HZ))
		rc = 0;
	else
		rc = efx_mcdi_mon_update(efx);

	/* Copy out the requested entry */
	*entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];

	mutex_unlock(&hwmon->update_lock);

	return rc;
}
|
||||
|
||||
/* sysfs show handler for <prefix>N_input: fetch the current reading and
 * print it scaled to the units the hwmon ABI expects.  Returns -EBUSY
 * while the sensor has no reading available.
 */
static ssize_t efx_mcdi_mon_show_value(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct efx_mcdi_mon_attribute *mon_attr =
		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
	efx_dword_t entry;
	unsigned int value, state;
	int rc;

	rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
	if (rc)
		return rc;

	state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
	if (state == MC_CMD_SENSOR_STATE_NO_READING)
		return -EBUSY;

	value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);

	switch (mon_attr->hwmon_type) {
	case EFX_HWMON_TEMP:
		/* Convert temperature from degrees to milli-degrees Celsius */
		value *= 1000;
		break;
	case EFX_HWMON_POWER:
		/* Convert power from watts to microwatts */
		value *= 1000000;
		break;
	default:
		/* No conversion needed */
		break;
	}

	return sprintf(buf, "%u\n", value);
}
|
||||
|
||||
static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct efx_mcdi_mon_attribute *mon_attr =
|
||||
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
|
||||
unsigned int value;
|
||||
|
||||
value = mon_attr->limit_value;
|
||||
|
||||
switch (mon_attr->hwmon_type) {
|
||||
case EFX_HWMON_TEMP:
|
||||
/* Convert temperature from degrees to milli-degrees Celsius */
|
||||
value *= 1000;
|
||||
break;
|
||||
case EFX_HWMON_POWER:
|
||||
/* Convert power from watts to microwatts */
|
||||
value *= 1000000;
|
||||
break;
|
||||
default:
|
||||
/* No conversion needed */
|
||||
break;
|
||||
}
|
||||
|
||||
return sprintf(buf, "%u\n", value);
|
||||
}
|
||||
|
||||
static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct efx_mcdi_mon_attribute *mon_attr =
|
||||
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
|
||||
efx_dword_t entry;
|
||||
int state;
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
|
||||
return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
|
||||
}
|
||||
|
||||
static ssize_t efx_mcdi_mon_show_label(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct efx_mcdi_mon_attribute *mon_attr =
|
||||
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
|
||||
return sprintf(buf, "%s\n",
|
||||
efx_mcdi_sensor_type[mon_attr->type].label);
|
||||
}
|
||||
|
||||
/* Append one sysfs attribute to the hwmon attribute group being built
 * by efx_mcdi_mon_probe().
 * @name:        sysfs file name (copied; must fit in attr->name)
 * @reader:      show callback for the file
 * @index:       sensor index into the DMA buffer
 * @type:        MC_CMD_SENSOR_* type number
 * @limit_value: fixed limit reported by _min/_max/_crit readers
 * The caller guarantees hwmon->attrs and hwmon->group.attrs have room.
 */
static void
efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
		      ssize_t (*reader)(struct device *,
					struct device_attribute *, char *),
		      unsigned int index, unsigned int type,
		      unsigned int limit_value)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];

	strlcpy(attr->name, name, sizeof(attr->name));
	attr->index = index;
	attr->type = type;
	/* Types beyond the table are treated as unknown (unscaled) */
	if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
		attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
	else
		attr->hwmon_type = EFX_HWMON_UNKNOWN;
	attr->limit_value = limit_value;
	sysfs_attr_init(&attr->dev_attr.attr);
	attr->dev_attr.attr.name = attr->name;
	attr->dev_attr.attr.mode = S_IRUGO;
	attr->dev_attr.show = reader;
	hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
}
|
||||
|
||||
/* Probe the MC's sensors and register a hwmon device exposing them.
 * Sensor masks are reported 32 per "page"; the PAGE0_NEXT bit in each
 * mask indicates another page follows.  For each present sensor we
 * create _input/_min/_max/_crit/_alarm/_label attributes as applicable.
 * Returns 0 (including when no sensors exist) or a negative error.
 */
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
	unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
	unsigned int n_pages, n_sensors, n_attrs, page;
	size_t outlen;
	char name[12];
	u32 mask;
	int rc, i, j, type;

	/* Find out how many sensors are present */
	n_sensors = 0;
	page = 0;
	do {
		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);

		rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
			return -EIO;

		mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
		/* PAGE0_NEXT is a continuation flag, not a sensor */
		n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
		++page;
	} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
	n_pages = page;

	/* Don't create a device if there are none */
	if (n_sensors == 0)
		return 0;

	rc = efx_nic_alloc_buffer(
		efx, &hwmon->dma_buf,
		n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
		GFP_KERNEL);
	if (rc)
		return rc;

	mutex_init(&hwmon->update_lock);
	/* Prime the cache; failure here is tolerated (readers retry) */
	efx_mcdi_mon_update(efx);

	/* Allocate space for the maximum possible number of
	 * attributes for this set of sensors:
	 * value, min, max, crit, alarm and label for each sensor.
	 */
	n_attrs = 6 * n_sensors;
	hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
	if (!hwmon->attrs) {
		rc = -ENOMEM;
		goto fail;
	}
	/* +1 for the NULL terminator required by sysfs attribute groups */
	hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
				     GFP_KERNEL);
	if (!hwmon->group.attrs) {
		rc = -ENOMEM;
		goto fail;
	}

	/* i = DMA-buffer index, j = index within the current page's
	 * SENSOR_INFO response, type = global sensor type number.
	 */
	for (i = 0, j = -1, type = -1; ; i++) {
		enum efx_hwmon_type hwmon_type;
		const char *hwmon_prefix;
		unsigned hwmon_index;
		u16 min1, max1, min2, max2;

		/* Find next sensor type or exit if there is none */
		do {
			type++;

			/* Crossed into a new page: re-fetch SENSOR_INFO */
			if ((type % 32) == 0) {
				page = type / 32;
				j = -1;
				if (page == n_pages)
					goto hwmon_register;

				MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
					       page);
				rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
						  inbuf, sizeof(inbuf),
						  outbuf, sizeof(outbuf),
						  &outlen);
				if (rc)
					goto fail;
				if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
					rc = -EIO;
					goto fail;
				}

				mask = (MCDI_DWORD(outbuf,
						   SENSOR_INFO_OUT_MASK) &
					~(1 << MC_CMD_SENSOR_PAGE0_NEXT));

				/* Check again for short response */
				if (outlen <
				    MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
					rc = -EIO;
					goto fail;
				}
			}
		} while (!(mask & (1 << type % 32)));
		j++;

		if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
			hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;

			/* Skip sensors specific to a different port */
			if (hwmon_type != EFX_HWMON_UNKNOWN &&
			    efx_mcdi_sensor_type[type].port >= 0 &&
			    efx_mcdi_sensor_type[type].port !=
			    efx_port_num(efx))
				continue;
		} else {
			hwmon_type = EFX_HWMON_UNKNOWN;
		}

		/* NB: the mid-switch "default:" is deliberate and legal C;
		 * unknown types are exposed with the "in" prefix.
		 */
		switch (hwmon_type) {
		case EFX_HWMON_TEMP:
			hwmon_prefix = "temp";
			hwmon_index = ++n_temp; /* 1-based */
			break;
		case EFX_HWMON_COOL:
			/* This is likely to be a heatsink, but there
			 * is no convention for representing cooling
			 * devices other than fans.
			 */
			hwmon_prefix = "fan";
			hwmon_index = ++n_cool; /* 1-based */
			break;
		default:
			hwmon_prefix = "in";
			hwmon_index = n_in++; /* 0-based */
			break;
		case EFX_HWMON_CURR:
			hwmon_prefix = "curr";
			hwmon_index = ++n_curr; /* 1-based */
			break;
		case EFX_HWMON_POWER:
			hwmon_prefix = "power";
			hwmon_index = ++n_power; /* 1-based */
			break;
		}

		min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MIN1);
		max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MAX1);
		min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MIN2);
		max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MAX2);

		/* min1 == max1 apparently marks a sensor with no usable
		 * analogue reading; only _alarm/_label are created then.
		 */
		if (min1 != max1) {
			snprintf(name, sizeof(name), "%s%u_input",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_value, i, type, 0);

			if (hwmon_type != EFX_HWMON_POWER) {
				snprintf(name, sizeof(name), "%s%u_min",
					 hwmon_prefix, hwmon_index);
				efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, min1);
			}

			snprintf(name, sizeof(name), "%s%u_max",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_limit,
				i, type, max1);

			if (min2 != max2) {
				/* Assume max2 is critical value.
				 * But we have no good way to expose min2.
				 */
				snprintf(name, sizeof(name), "%s%u_crit",
					 hwmon_prefix, hwmon_index);
				efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, max2);
			}
		}

		snprintf(name, sizeof(name), "%s%u_alarm",
			 hwmon_prefix, hwmon_index);
		efx_mcdi_mon_add_attr(
			efx, name, efx_mcdi_mon_show_alarm, i, type, 0);

		if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
		    efx_mcdi_sensor_type[type].label) {
			snprintf(name, sizeof(name), "%s%u_label",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_label, i, type, 0);
		}
	}

hwmon_register:
	hwmon->groups[0] = &hwmon->group;
	hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
							  KBUILD_MODNAME, NULL,
							  hwmon->groups);
	if (IS_ERR(hwmon->device)) {
		rc = PTR_ERR(hwmon->device);
		goto fail;
	}

	return 0;

fail:
	efx_mcdi_mon_remove(efx);
	return rc;
}
|
||||
|
||||
/* Tear down the hwmon device and free everything probe allocated.
 * Safe to call on a partially-initialised hwmon state (used as the
 * probe failure path): kfree(NULL) and a NULL device are handled.
 */
void efx_mcdi_mon_remove(struct efx_nic *efx)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);

	if (hwmon->device)
		hwmon_device_unregister(hwmon->device);
	kfree(hwmon->attrs);
	kfree(hwmon->group.attrs);
	efx_nic_free_buffer(efx, &hwmon->dma_buf);
}
|
||||
|
||||
#endif /* CONFIG_SFC_MCDI_MON */
|
||||
7907
drivers/net/ethernet/sfc/mcdi_pcol.h
Normal file
7907
drivers/net/ethernet/sfc/mcdi_pcol.h
Normal file
File diff suppressed because it is too large
Load diff
1035
drivers/net/ethernet/sfc/mcdi_port.c
Normal file
1035
drivers/net/ethernet/sfc/mcdi_port.c
Normal file
File diff suppressed because it is too large
Load diff
323
drivers/net/ethernet/sfc/mdio_10g.c
Normal file
323
drivers/net/ethernet/sfc/mdio_10g.c
Normal file
|
|
@ -0,0 +1,323 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2006-2011 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
/*
|
||||
* Useful functions for working with MDIO clause 45 PHYs
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/delay.h>
|
||||
#include "net_driver.h"
|
||||
#include "mdio_10g.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* Extract the OUI from a clause 45 PHY identifier register pair.
 *
 * The bits of the OUI are designated a..x, with a=0 and b variable.
 * In the id register c is the MSB but the OUI is conventionally
 * written as bytes h..a, p..i, x..q; each OUI bit therefore lands at
 * the byte-reversed position (bit ^ 7).
 */
unsigned efx_mdio_id_oui(u32 id)
{
	unsigned int oui = 0;
	int bit;

	/* OUI occupies ID bits 10..31 */
	for (bit = 10; bit < 32; bit++)
		if (id & (1u << bit))
			oui |= 1u << ((bit - 10) ^ 7);

	return oui;
}
|
||||
|
||||
int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
|
||||
int spins, int spintime)
|
||||
{
|
||||
u32 ctrl;
|
||||
|
||||
/* Catch callers passing values in the wrong units (or just silly) */
|
||||
EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
|
||||
|
||||
efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
|
||||
/* Wait for the reset bit to clear. */
|
||||
do {
|
||||
msleep(spintime);
|
||||
ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
|
||||
spins--;
|
||||
|
||||
} while (spins && (ctrl & MDIO_CTRL1_RESET));
|
||||
|
||||
return spins ? spins : -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/* Verify that one MMD is responding by reading its STATUS2 device-
 * present field.  The AN MMD is exempt (not checked).  Returns 0 if
 * responding, -EIO otherwise.
 */
static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
{
	int status;

	if (mmd != MDIO_MMD_AN) {
		/* Read MMD STATUS2 to check it is responding. */
		status = efx_mdio_read(efx, mmd, MDIO_STAT2);
		if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
			netif_err(efx, hw, efx->net_dev,
				  "PHY MMD %d not responding.\n", mmd);
			return -EIO;
		}
	}

	return 0;
}
|
||||
|
||||
/* This ought to be ridiculous overkill. We expect it to fail rarely */
|
||||
#define MDIO45_RESET_TIME 1000 /* ms */
|
||||
#define MDIO45_RESET_ITERS 100
|
||||
|
||||
/* Poll all MMDs in @mmd_mask until none has its CTRL1 reset bit set,
 * sleeping between iterations, up to MDIO45_RESET_TIME total.
 * Returns 0 on success, -EIO on a read failure, -ETIMEDOUT if some
 * MMDs were still in reset when the time ran out.  Sleeps.
 */
int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
	const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
	int tries = MDIO45_RESET_ITERS;
	int rc = 0;
	int in_reset;

	while (tries) {
		int mask = mmd_mask;
		int mmd = 0;
		int stat;
		in_reset = 0;	/* bitmask of MMDs still resetting */
		while (mask) {
			if (mask & 1) {
				stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
				if (stat < 0) {
					netif_err(efx, hw, efx->net_dev,
						  "failed to read status of"
						  " MMD %d\n", mmd);
					return -EIO;
				}
				if (stat & MDIO_CTRL1_RESET)
					in_reset |= (1 << mmd);
			}
			mask = mask >> 1;
			mmd++;
		}
		if (!in_reset)
			break;
		tries--;
		msleep(spintime);
	}
	if (in_reset != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "not all MMDs came out of reset in time."
			  " MMDs still in reset: %x\n", in_reset);
		rc = -ETIMEDOUT;
	}
	return rc;
}
|
||||
|
||||
/* Check that every MMD in @mmd_mask is both advertised as present by
 * the PHY (via the DEVS1/DEVS2 registers) and responding.
 * Returns 0, -EIO on read/response failure, or -ENODEV if an expected
 * MMD is absent.
 */
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
	int mmd = 0, probe_mmd, devs1, devs2;
	u32 devices;

	/* Historically we have probed the PHYXS to find out what devices are
	 * present,but that doesn't work so well if the PHYXS isn't expected
	 * to exist, if so just find the first item in the list supplied. */
	probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
		    __ffs(mmd_mask);

	/* Check all the expected MMDs are present */
	devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
	devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
	if (devs1 < 0 || devs2 < 0) {
		netif_err(efx, hw, efx->net_dev,
			  "failed to read devices present\n");
		return -EIO;
	}
	/* DEVS1 holds the low 16 bits, DEVS2 the high 16 bits */
	devices = devs1 | (devs2 << 16);
	if ((devices & mmd_mask) != mmd_mask) {
		netif_err(efx, hw, efx->net_dev,
			  "required MMDs not present: got %x, wanted %x\n",
			  devices, mmd_mask);
		return -ENODEV;
	}
	netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);

	/* Check all required MMDs are responding and happy. */
	while (mmd_mask) {
		if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
			return -EIO;
		mmd_mask = mmd_mask >> 1;
		mmd++;
	}

	return 0;
}
|
||||
|
||||
/* Report whether the link is up at all MMDs that matter for the
 * current loopback/PHY mode.  Layers at or above an active loopback
 * point are excluded from the check, since loopback bypasses them.
 */
bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
{
	/* If the port is in loopback, then we should only consider a subset
	 * of mmd's */
	if (LOOPBACK_INTERNAL(efx))
		return true;
	else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
		return false;
	else if (efx_phy_mode_disabled(efx->phy_mode))
		return false;
	else if (efx->loopback_mode == LOOPBACK_PHYXS)
		mmd_mask &= ~(MDIO_DEVS_PHYXS |
			      MDIO_DEVS_PCS |
			      MDIO_DEVS_PMAPMD |
			      MDIO_DEVS_AN);
	else if (efx->loopback_mode == LOOPBACK_PCS)
		mmd_mask &= ~(MDIO_DEVS_PCS |
			      MDIO_DEVS_PMAPMD |
			      MDIO_DEVS_AN);
	else if (efx->loopback_mode == LOOPBACK_PMAPMD)
		mmd_mask &= ~(MDIO_DEVS_PMAPMD |
			      MDIO_DEVS_AN);

	return mdio45_links_ok(&efx->mdio, mmd_mask);
}
|
||||
|
||||
void efx_mdio_transmit_disable(struct efx_nic *efx)
|
||||
{
|
||||
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
|
||||
MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
|
||||
efx->phy_mode & PHY_MODE_TX_DISABLED);
|
||||
}
|
||||
|
||||
/* Generic part of PHY reconfiguration: set or clear the loopback bit
 * of each layer (PMA/PMD, PCS, PHYXS) so that exactly the layer
 * matching the current loopback mode, if any, is looped back.
 */
void efx_mdio_phy_reconfigure(struct efx_nic *efx)
{
	efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
			  MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
			  efx->loopback_mode == LOOPBACK_PMAPMD);
	efx_mdio_set_flag(efx, MDIO_MMD_PCS,
			  MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
			  efx->loopback_mode == LOOPBACK_PCS);
	efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
			  MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
			  efx->loopback_mode == LOOPBACK_PHYXS_WS);
}
|
||||
|
||||
/* Set or clear the low-power bit of one MMD, but only if its STAT1
 * register advertises low-power capability.
 */
static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
				    int lpower, int mmd)
{
	int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);

	netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
		   mmd, lpower);

	if (stat & MDIO_STAT1_LPOWERABLE) {
		efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
				  MDIO_CTRL1_LPOWER, lpower);
	}
}
|
||||
|
||||
void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
|
||||
int low_power, unsigned int mmd_mask)
|
||||
{
|
||||
int mmd = 0;
|
||||
mmd_mask &= ~MDIO_DEVS_AN;
|
||||
while (mmd_mask) {
|
||||
if (mmd_mask & 1)
|
||||
efx_mdio_set_mmd_lpower(efx, low_power, mmd);
|
||||
mmd_mask = (mmd_mask >> 1);
|
||||
mmd++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
|
||||
* @efx: Efx NIC
|
||||
* @ecmd: New settings
|
||||
*/
|
||||
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };

	efx->phy_op->get_settings(efx, &prev);

	/* Nothing to do if the requested settings match the current ones */
	if (ecmd->advertising == prev.advertising &&
	    ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
	    ecmd->duplex == prev.duplex &&
	    ecmd->port == prev.port &&
	    ecmd->autoneg == prev.autoneg)
		return 0;

	/* We can only change these settings for -T PHYs */
	if (prev.port != PORT_TP || ecmd->port != PORT_TP)
		return -EINVAL;

	/* Check that PHY supports these settings: autoneg must be
	 * requested, and everything advertised must be supported.
	 */
	if (!ecmd->autoneg ||
	    (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
		return -EINVAL;

	efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
	efx_mdio_an_reconfigure(efx);
	return 0;
}
|
||||
|
||||
/**
|
||||
* efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
|
||||
* @efx: Efx NIC
|
||||
*/
|
||||
void efx_mdio_an_reconfigure(struct efx_nic *efx)
{
	int reg;

	/* Caller must only use this on PHYs with an AN MMD */
	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));

	/* Set up the base page */
	reg = ADVERTISE_CSMA | ADVERTISE_RESV;
	if (efx->link_advertising & ADVERTISED_Pause)
		reg |= ADVERTISE_PAUSE_CAP;
	if (efx->link_advertising & ADVERTISED_Asym_Pause)
		reg |= ADVERTISE_PAUSE_ASYM;
	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);

	/* Set up the (extended) next page */
	efx->phy_op->set_npage_adv(efx, efx->link_advertising);

	/* Enable and restart AN */
	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
	reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
|
||||
|
||||
/* Return the flow-control settings in effect: either the fixed
 * configuration, or (when EFX_FC_AUTO is set) the result of resolving
 * our advertised pause capabilities against the link partner's.
 */
u8 efx_mdio_get_pause(struct efx_nic *efx)
{
	/* EFX_FC_AUTO must not alias the RX/TX bits we may return below */
	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));

	if (!(efx->wanted_fc & EFX_FC_AUTO))
		return efx->wanted_fc;

	/* Auto-negotiated pause requires an AN MMD */
	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));

	return mii_resolve_flowctrl_fdx(
		mii_advertise_flowctrl(efx->wanted_fc),
		efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
|
||||
|
||||
/* Liveness self-test: read the PHY ID registers of the first present
 * MMD and reject all-zeros/all-ones (bus floating or stuck), then
 * check every expected MMD is responding.  Takes mac_lock to serialise
 * against reconfiguration.  Returns 0 or a negative error.
 */
int efx_mdio_test_alive(struct efx_nic *efx)
{
	int rc;
	int devad = __ffs(efx->mdio.mmds);
	u16 physid1, physid2;

	mutex_lock(&efx->mac_lock);

	physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
	physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);

	/* 0x0000/0xffff indicate no device driving the bus */
	if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
	    (physid2 == 0x0000) || (physid2 == 0xffff)) {
		netif_err(efx, hw, efx->net_dev,
			  "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
		rc = -EINVAL;
	} else {
		rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
	}

	mutex_unlock(&efx->mac_lock);
	return rc;
}
|
||||
110
drivers/net/ethernet/sfc/mdio_10g.h
Normal file
110
drivers/net/ethernet/sfc/mdio_10g.h
Normal file
|
|
@ -0,0 +1,110 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2006-2011 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_MDIO_10G_H
|
||||
#define EFX_MDIO_10G_H
|
||||
|
||||
#include <linux/mdio.h>
|
||||
|
||||
/*
|
||||
* Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
|
||||
*/
|
||||
|
||||
#include "efx.h"
|
||||
|
||||
/* Extract the 4-bit revision number from a clause 45 PHY ID */
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
/* Extract the 6-bit model number from a clause 45 PHY ID */
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
|
||||
unsigned efx_mdio_id_oui(u32 id);
|
||||
|
||||
/* Read a clause 45 register; returns the 16-bit value or a negative
 * error from the underlying mdio_read implementation.
 */
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
{
	return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}
|
||||
|
||||
/* Write a 16-bit value to a clause 45 register of this NIC's PHY */
static inline void
efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
{
	efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}
|
||||
|
||||
/* Read the combined 32-bit PHY identifier (DEVID1 in the high half,
 * DEVID2 in the low half) of the given MMD.
 */
static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
{
	u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
	u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
	return (id_hi << 16) | (id_low);
}
|
||||
|
||||
/* Return true if the XGXS lanes are aligned.  The lane status register
 * is read twice; presumably the first read clears a latched value so
 * the second reflects current state -- TODO confirm against the PHY
 * register specification.
 */
static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
{
	int i, lane_status;
	bool sync;

	for (i = 0; i < 2; ++i)
		lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
					    MDIO_PHYXS_LNSTAT);

	sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
	if (!sync)
		netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
			  lane_status);
	return sync;
}
|
||||
|
||||
const char *efx_mdio_mmd_name(int mmd);
|
||||
|
||||
/*
|
||||
* Reset a specific MMD and wait for reset to clear.
|
||||
* Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
|
||||
*
|
||||
* This function will sleep
|
||||
*/
|
||||
int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
|
||||
|
||||
/* As efx_mdio_check_mmd but for multiple MMDs */
|
||||
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
|
||||
|
||||
/* Check the link status of specified mmds in bit mask */
|
||||
bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
|
||||
|
||||
/* Generic transmit disable support though PMAPMD */
|
||||
void efx_mdio_transmit_disable(struct efx_nic *efx);
|
||||
|
||||
/* Generic part of reconfigure: set/clear loopback bits */
|
||||
void efx_mdio_phy_reconfigure(struct efx_nic *efx);
|
||||
|
||||
/* Set the power state of the specified MMDs */
|
||||
void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
|
||||
unsigned int mmd_mask);
|
||||
|
||||
/* Set (some of) the PHY settings over MDIO */
|
||||
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
|
||||
|
||||
/* Push advertising flags and restart autonegotiation */
|
||||
void efx_mdio_an_reconfigure(struct efx_nic *efx);
|
||||
|
||||
/* Get pause parameters from AN if available (otherwise return
|
||||
* requested pause parameters)
|
||||
*/
|
||||
u8 efx_mdio_get_pause(struct efx_nic *efx);
|
||||
|
||||
/* Wait for specified MMDs to exit reset within a timeout */
|
||||
int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
|
||||
|
||||
/* Set or clear flag, debouncing */
|
||||
/* Thin wrapper around mdio_set_flag() that fills in this NIC's MDIO
 * bus info and port address.
 */
static inline void
efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
		  int mask, bool state)
{
	mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
|
||||
|
||||
/* Liveness self-test for MDIO PHYs */
|
||||
int efx_mdio_test_alive(struct efx_nic *efx);
|
||||
|
||||
#endif /* EFX_MDIO_10G_H */
|
||||
133
drivers/net/ethernet/sfc/mtd.c
Normal file
133
drivers/net/ethernet/sfc/mtd.c
Normal file
|
|
@ -0,0 +1,133 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
|
||||
/* Map an mtd_info embedded in an efx_mtd_partition back to its parent */
#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)
|
||||
|
||||
/* MTD interface */
|
||||
|
||||
/* MTD _erase hook: delegate to the NIC-type-specific erase, record the
 * outcome in the erase_info state, and fire the completion callback.
 */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_nic *efx = mtd->priv;
	int rc;

	rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
	if (rc == 0) {
		erase->state = MTD_ERASE_DONE;
	} else {
		erase->state = MTD_ERASE_FAILED;
		/* We cannot tell which address failed */
		erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	}
	mtd_erase_callback(erase);
	return rc;
}
|
||||
|
||||
static void efx_mtd_sync(struct mtd_info *mtd)
|
||||
{
|
||||
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
|
||||
struct efx_nic *efx = mtd->priv;
|
||||
int rc;
|
||||
|
||||
rc = efx->type->mtd_sync(mtd);
|
||||
if (rc)
|
||||
pr_err("%s: %s sync failed (%d)\n",
|
||||
part->name, part->dev_type_name, rc);
|
||||
}
|
||||
|
||||
/* Unregister one MTD partition, retrying every second while the device
 * is busy (open by userspace), then drop it from the NIC's list.
 */
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
	int rc;

	for (;;) {
		rc = mtd_device_unregister(&part->mtd);
		if (rc != -EBUSY)
			break;
		ssleep(1);
	}
	WARN_ON(rc);
	list_del(&part->node);
}
|
||||
|
||||
/* Register @n_parts MTD partitions for @efx.  @parts is an array of
 * NIC-type-specific structures each beginning with an
 * efx_mtd_partition; @sizeof_part is the stride between elements.
 * On failure every partition registered so far is removed again.
 * Returns 0 or -ENOMEM.
 */
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part)
{
	struct efx_mtd_partition *part;
	size_t i;

	for (i = 0; i < n_parts; i++) {
		/* Step by sizeof_part, not sizeof(*parts): the caller's
		 * element type may be larger than efx_mtd_partition.
		 */
		part = (struct efx_mtd_partition *)((char *)parts +
						    i * sizeof_part);

		part->mtd.writesize = 1;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx;
		part->mtd.name = part->name;
		part->mtd._erase = efx_mtd_erase;
		part->mtd._read = efx->type->mtd_read;
		part->mtd._write = efx->type->mtd_write;
		part->mtd._sync = efx_mtd_sync;

		efx->type->mtd_rename(part);

		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;

		/* Add to list in order - efx_mtd_remove() depends on this */
		list_add_tail(&part->node, &efx->mtd_list);
	}

	return 0;

fail:
	/* Unwind the partitions registered before the failure */
	while (i--) {
		part = (struct efx_mtd_partition *)((char *)parts +
						    i * sizeof_part);
		efx_mtd_remove_partition(part);
	}
	/* Failure is unlikely here, but probably means we're out of memory */
	return -ENOMEM;
}
|
||||
|
||||
/* Remove and free all MTD partitions belonging to @efx.
 * Must not be called while the net device is still registered.
 */
void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd_partition *parts, *part, *next;

	WARN_ON(efx_dev_registered(efx));

	if (list_empty(&efx->mtd_list))
		return;

	/* efx_mtd_add() added the partitions to the list in allocation
	 * order, so the first list entry is also the start of the single
	 * block that backs all of them - that is the pointer to kfree()
	 * once every partition has been unregistered and unlinked.
	 */
	parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
				 node);

	list_for_each_entry_safe(part, next, &efx->mtd_list, node)
		efx_mtd_remove_partition(part);

	kfree(parts);
}
|
||||
|
||||
/* Re-derive the name of every MTD partition via the NIC-type hook,
 * e.g. after the interface itself has been renamed.
 * Caller must hold the RTNL lock.
 */
void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd_partition *part;

	ASSERT_RTNL();

	list_for_each_entry(part, &efx->mtd_list, node)
		efx->type->mtd_rename(part);
}
|
||||
1513
drivers/net/ethernet/sfc/net_driver.h
Normal file
1513
drivers/net/ethernet/sfc/net_driver.h
Normal file
File diff suppressed because it is too large
Load diff
534
drivers/net/ethernet/sfc/nic.c
Normal file
534
drivers/net/ethernet/sfc/nic.c
Normal file
|
|
@ -0,0 +1,534 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cpu_rmap.h>
|
||||
#include "net_driver.h"
|
||||
#include "bitfield.h"
|
||||
#include "efx.h"
|
||||
#include "nic.h"
|
||||
#include "ef10_regs.h"
|
||||
#include "farch_regs.h"
|
||||
#include "io.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Generic buffer handling
|
||||
* These buffers are used for interrupt status, MAC stats, etc.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/* Allocate a zeroed DMA-coherent buffer of @len bytes for @efx and
 * record its CPU address, bus address and length in @buffer.
 * Returns 0 on success or -ENOMEM if the allocation failed.
 */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
					   &buffer->dma_addr, gfp_flags);
	if (buffer->addr == NULL)
		return -ENOMEM;

	buffer->len = len;
	return 0;
}
|
||||
|
||||
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
|
||||
{
|
||||
if (buffer->addr) {
|
||||
dma_free_coherent(&efx->pci_dev->dev, buffer->len,
|
||||
buffer->addr, buffer->dma_addr);
|
||||
buffer->addr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 * Does not advance the read pointer or consume the event.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
|
||||
|
||||
/* Kick off an event-queue self-test on @channel.
 *
 * event_test_cpu is reset to -1 *before* the write barrier so that the
 * event handler (which records the CPU that serviced the test event)
 * cannot observe a stale value once the event has been generated.
 */
void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}
|
||||
|
||||
/* Kick off an interrupt self-test.
 *
 * last_irq_cpu is reset to -1 *before* the write barrier so that the
 * interrupt handler cannot record its CPU before the reset is visible.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx->type->irq_test_generate(efx);
}
|
||||
|
||||
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 *
 * In MSI/MSI-X mode one IRQ is requested per channel; in MSI-X mode an
 * RX CPU rmap is also allocated (CONFIG_RFS_ACCEL) and each RX
 * channel's IRQ is added to it.  On failure everything acquired so far
 * is released and the request_irq()/allocation error is returned.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	/* Legacy (INTx) mode: a single shared line for the whole NIC */
	if (!EFX_INT_MODE_USE_MSI(efx)) {
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		/* Count successful hooks so fail2 frees exactly these */
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	/* Free only the first n_irqs channel IRQs - those we hooked */
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}
|
||||
|
||||
void efx_nic_fini_interrupt(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_channel *channel;
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
|
||||
efx->net_dev->rx_cpu_rmap = NULL;
|
||||
#endif
|
||||
|
||||
if (EFX_INT_MODE_USE_MSI(efx)) {
|
||||
/* Disable MSI/MSI-X interrupts */
|
||||
efx_for_each_channel(channel, efx)
|
||||
free_irq(channel->irq,
|
||||
&efx->msi_context[channel->channel]);
|
||||
} else {
|
||||
/* Disable legacy interrupt */
|
||||
free_irq(efx->legacy_irq, efx);
|
||||
}
|
||||
}
|
||||
|
||||
/* Register dump */
|
||||
|
||||
#define REGISTER_REVISION_FA 1
|
||||
#define REGISTER_REVISION_FB 2
|
||||
#define REGISTER_REVISION_FC 3
|
||||
#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
|
||||
#define REGISTER_REVISION_ED 4
|
||||
#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
|
||||
|
||||
struct efx_nic_reg {
|
||||
u32 offset:24;
|
||||
u32 min_revision:3, max_revision:3;
|
||||
};
|
||||
|
||||
#define REGISTER(name, arch, min_rev, max_rev) { \
|
||||
arch ## R_ ## min_rev ## max_rev ## _ ## name, \
|
||||
REGISTER_REVISION_ ## arch ## min_rev, \
|
||||
REGISTER_REVISION_ ## arch ## max_rev \
|
||||
}
|
||||
#define REGISTER_AA(name) REGISTER(name, F, A, A)
|
||||
#define REGISTER_AB(name) REGISTER(name, F, A, B)
|
||||
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
|
||||
#define REGISTER_BB(name) REGISTER(name, F, B, B)
|
||||
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
|
||||
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
|
||||
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
|
||||
|
||||
static const struct efx_nic_reg efx_nic_regs[] = {
|
||||
REGISTER_AZ(ADR_REGION),
|
||||
REGISTER_AZ(INT_EN_KER),
|
||||
REGISTER_BZ(INT_EN_CHAR),
|
||||
REGISTER_AZ(INT_ADR_KER),
|
||||
REGISTER_BZ(INT_ADR_CHAR),
|
||||
/* INT_ACK_KER is WO */
|
||||
/* INT_ISR0 is RC */
|
||||
REGISTER_AZ(HW_INIT),
|
||||
REGISTER_CZ(USR_EV_CFG),
|
||||
REGISTER_AB(EE_SPI_HCMD),
|
||||
REGISTER_AB(EE_SPI_HADR),
|
||||
REGISTER_AB(EE_SPI_HDATA),
|
||||
REGISTER_AB(EE_BASE_PAGE),
|
||||
REGISTER_AB(EE_VPD_CFG0),
|
||||
/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
|
||||
/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
|
||||
/* PCIE_CORE_INDIRECT is indirect */
|
||||
REGISTER_AB(NIC_STAT),
|
||||
REGISTER_AB(GPIO_CTL),
|
||||
REGISTER_AB(GLB_CTL),
|
||||
/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
|
||||
REGISTER_BZ(DP_CTRL),
|
||||
REGISTER_AZ(MEM_STAT),
|
||||
REGISTER_AZ(CS_DEBUG),
|
||||
REGISTER_AZ(ALTERA_BUILD),
|
||||
REGISTER_AZ(CSR_SPARE),
|
||||
REGISTER_AB(PCIE_SD_CTL0123),
|
||||
REGISTER_AB(PCIE_SD_CTL45),
|
||||
REGISTER_AB(PCIE_PCS_CTL_STAT),
|
||||
/* DEBUG_DATA_OUT is not used */
|
||||
/* DRV_EV is WO */
|
||||
REGISTER_AZ(EVQ_CTL),
|
||||
REGISTER_AZ(EVQ_CNT1),
|
||||
REGISTER_AZ(EVQ_CNT2),
|
||||
REGISTER_AZ(BUF_TBL_CFG),
|
||||
REGISTER_AZ(SRM_RX_DC_CFG),
|
||||
REGISTER_AZ(SRM_TX_DC_CFG),
|
||||
REGISTER_AZ(SRM_CFG),
|
||||
/* BUF_TBL_UPD is WO */
|
||||
REGISTER_AZ(SRM_UPD_EVQ),
|
||||
REGISTER_AZ(SRAM_PARITY),
|
||||
REGISTER_AZ(RX_CFG),
|
||||
REGISTER_BZ(RX_FILTER_CTL),
|
||||
/* RX_FLUSH_DESCQ is WO */
|
||||
REGISTER_AZ(RX_DC_CFG),
|
||||
REGISTER_AZ(RX_DC_PF_WM),
|
||||
REGISTER_BZ(RX_RSS_TKEY),
|
||||
/* RX_NODESC_DROP is RC */
|
||||
REGISTER_AA(RX_SELF_RST),
|
||||
/* RX_DEBUG, RX_PUSH_DROP are not used */
|
||||
REGISTER_CZ(RX_RSS_IPV6_REG1),
|
||||
REGISTER_CZ(RX_RSS_IPV6_REG2),
|
||||
REGISTER_CZ(RX_RSS_IPV6_REG3),
|
||||
/* TX_FLUSH_DESCQ is WO */
|
||||
REGISTER_AZ(TX_DC_CFG),
|
||||
REGISTER_AA(TX_CHKSM_CFG),
|
||||
REGISTER_AZ(TX_CFG),
|
||||
/* TX_PUSH_DROP is not used */
|
||||
REGISTER_AZ(TX_RESERVED),
|
||||
REGISTER_BZ(TX_PACE),
|
||||
/* TX_PACE_DROP_QID is RC */
|
||||
REGISTER_BB(TX_VLAN),
|
||||
REGISTER_BZ(TX_IPFIL_PORTEN),
|
||||
REGISTER_AB(MD_TXD),
|
||||
REGISTER_AB(MD_RXD),
|
||||
REGISTER_AB(MD_CS),
|
||||
REGISTER_AB(MD_PHY_ADR),
|
||||
REGISTER_AB(MD_ID),
|
||||
/* MD_STAT is RC */
|
||||
REGISTER_AB(MAC_STAT_DMA),
|
||||
REGISTER_AB(MAC_CTRL),
|
||||
REGISTER_BB(GEN_MODE),
|
||||
REGISTER_AB(MAC_MC_HASH_REG0),
|
||||
REGISTER_AB(MAC_MC_HASH_REG1),
|
||||
REGISTER_AB(GM_CFG1),
|
||||
REGISTER_AB(GM_CFG2),
|
||||
/* GM_IPG and GM_HD are not used */
|
||||
REGISTER_AB(GM_MAX_FLEN),
|
||||
/* GM_TEST is not used */
|
||||
REGISTER_AB(GM_ADR1),
|
||||
REGISTER_AB(GM_ADR2),
|
||||
REGISTER_AB(GMF_CFG0),
|
||||
REGISTER_AB(GMF_CFG1),
|
||||
REGISTER_AB(GMF_CFG2),
|
||||
REGISTER_AB(GMF_CFG3),
|
||||
REGISTER_AB(GMF_CFG4),
|
||||
REGISTER_AB(GMF_CFG5),
|
||||
REGISTER_BB(TX_SRC_MAC_CTL),
|
||||
REGISTER_AB(XM_ADR_LO),
|
||||
REGISTER_AB(XM_ADR_HI),
|
||||
REGISTER_AB(XM_GLB_CFG),
|
||||
REGISTER_AB(XM_TX_CFG),
|
||||
REGISTER_AB(XM_RX_CFG),
|
||||
REGISTER_AB(XM_MGT_INT_MASK),
|
||||
REGISTER_AB(XM_FC),
|
||||
REGISTER_AB(XM_PAUSE_TIME),
|
||||
REGISTER_AB(XM_TX_PARAM),
|
||||
REGISTER_AB(XM_RX_PARAM),
|
||||
/* XM_MGT_INT_MSK (note no 'A') is RC */
|
||||
REGISTER_AB(XX_PWR_RST),
|
||||
REGISTER_AB(XX_SD_CTL),
|
||||
REGISTER_AB(XX_TXDRV_CTL),
|
||||
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
|
||||
/* XX_CORE_STAT is partly RC */
|
||||
REGISTER_DZ(BIU_HW_REV_ID),
|
||||
REGISTER_DZ(MC_DB_LWRD),
|
||||
REGISTER_DZ(MC_DB_HWRD),
|
||||
};
|
||||
|
||||
struct efx_nic_reg_table {
|
||||
u32 offset:24;
|
||||
u32 min_revision:3, max_revision:3;
|
||||
u32 step:6, rows:21;
|
||||
};
|
||||
|
||||
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
|
||||
offset, \
|
||||
REGISTER_REVISION_ ## arch ## min_rev, \
|
||||
REGISTER_REVISION_ ## arch ## max_rev, \
|
||||
step, rows \
|
||||
}
|
||||
#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
|
||||
REGISTER_TABLE_DIMENSIONS( \
|
||||
name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
|
||||
arch, min_rev, max_rev, \
|
||||
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
|
||||
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
|
||||
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
|
||||
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
|
||||
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
|
||||
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
|
||||
#define REGISTER_TABLE_BB_CZ(name) \
|
||||
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
|
||||
FR_BZ_ ## name ## _STEP, \
|
||||
FR_BB_ ## name ## _ROWS), \
|
||||
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
|
||||
FR_BZ_ ## name ## _STEP, \
|
||||
FR_CZ_ ## name ## _ROWS)
|
||||
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
|
||||
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
|
||||
|
||||
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
|
||||
/* DRIVER is not used */
|
||||
/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
|
||||
REGISTER_TABLE_BB(TX_IPFIL_TBL),
|
||||
REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
|
||||
REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
|
||||
REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
|
||||
REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
|
||||
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
|
||||
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
|
||||
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
|
||||
/* We can't reasonably read all of the buffer table (up to 8MB!).
|
||||
* However this driver will only use a few entries. Reading
|
||||
* 1K entries allows for some expansion of queue count and
|
||||
* size before we need to change the version. */
|
||||
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
|
||||
F, A, A, 8, 1024),
|
||||
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
|
||||
F, B, Z, 8, 1024),
|
||||
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
|
||||
REGISTER_TABLE_BB_CZ(TIMER_TBL),
|
||||
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
|
||||
REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
|
||||
/* TX_FILTER_TBL0 is huge and not used by this driver */
|
||||
REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
|
||||
REGISTER_TABLE_CZ(MC_TREG_SMEM),
|
||||
/* MSIX_PBA_TABLE is not mapped */
|
||||
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
|
||||
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
|
||||
REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
|
||||
};
|
||||
|
||||
size_t efx_nic_get_regs_len(struct efx_nic *efx)
|
||||
{
|
||||
const struct efx_nic_reg *reg;
|
||||
const struct efx_nic_reg_table *table;
|
||||
size_t len = 0;
|
||||
|
||||
for (reg = efx_nic_regs;
|
||||
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
|
||||
reg++)
|
||||
if (efx->type->revision >= reg->min_revision &&
|
||||
efx->type->revision <= reg->max_revision)
|
||||
len += sizeof(efx_oword_t);
|
||||
|
||||
for (table = efx_nic_reg_tables;
|
||||
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
|
||||
table++)
|
||||
if (efx->type->revision >= table->min_revision &&
|
||||
efx->type->revision <= table->max_revision)
|
||||
len += table->rows * min_t(size_t, table->step, 16);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
/* Dump all registers and register tables applicable to this hardware
 * revision into @buf, in the same order and sizes accounted for by
 * efx_nic_get_regs_len().  @buf must be at least that many bytes.
 */
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		/* At most 16 bytes are stored per row, regardless of the
		 * hardware step between rows.
		 */
		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			/* Access width/method depends on the row step */
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				/* Unknown step: abandon the dump rather
				 * than write past the sized buffer.
				 */
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
|
||||
|
||||
/**
|
||||
* efx_nic_describe_stats - Describe supported statistics for ethtool
|
||||
* @desc: Array of &struct efx_hw_stat_desc describing the statistics
|
||||
* @count: Length of the @desc array
|
||||
* @mask: Bitmask of which elements of @desc are enabled
|
||||
* @names: Buffer to copy names to, or %NULL. The names are copied
|
||||
* starting at intervals of %ETH_GSTRING_LEN bytes.
|
||||
*
|
||||
* Returns the number of visible statistics, i.e. the number of set
|
||||
* bits in the first @count bits of @mask for which a name is defined.
|
||||
*/
|
||||
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask, u8 *names)
|
||||
{
|
||||
size_t visible = 0;
|
||||
size_t index;
|
||||
|
||||
for_each_set_bit(index, mask, count) {
|
||||
if (desc[index].name) {
|
||||
if (names) {
|
||||
strlcpy(names, desc[index].name,
|
||||
ETH_GSTRING_LEN);
|
||||
names += ETH_GSTRING_LEN;
|
||||
}
|
||||
++visible;
|
||||
}
|
||||
}
|
||||
|
||||
return visible;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_nic_update_stats - Convert statistics DMA buffer to array of u64
|
||||
* @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
|
||||
* layout. DMA widths of 0, 16, 32 and 64 are supported; where
|
||||
* the width is specified as 0 the corresponding element of
|
||||
* @stats is not updated.
|
||||
* @count: Length of the @desc array
|
||||
* @mask: Bitmask of which elements of @desc are enabled
|
||||
* @stats: Buffer to update with the converted statistics. The length
|
||||
* of this array must be at least @count.
|
||||
* @dma_buf: DMA buffer containing hardware statistics
|
||||
* @accumulate: If set, the converted values will be added rather than
|
||||
* directly stored to the corresponding elements of @stats
|
||||
*/
|
||||
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask,
|
||||
u64 *stats, const void *dma_buf, bool accumulate)
|
||||
{
|
||||
size_t index;
|
||||
|
||||
for_each_set_bit(index, mask, count) {
|
||||
if (desc[index].dma_width) {
|
||||
const void *addr = dma_buf + desc[index].offset;
|
||||
u64 val;
|
||||
|
||||
switch (desc[index].dma_width) {
|
||||
case 16:
|
||||
val = le16_to_cpup((__le16 *)addr);
|
||||
break;
|
||||
case 32:
|
||||
val = le32_to_cpup((__le32 *)addr);
|
||||
break;
|
||||
case 64:
|
||||
val = le64_to_cpup((__le64 *)addr);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
val = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
if (accumulate)
|
||||
stats[index] += val;
|
||||
else
|
||||
stats[index] = val;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Adjust the RX no-descriptor drop statistic so that drops which
 * occurred while the interface was down are not reported.
 *
 * On entry *@rx_nodesc_drops is the raw hardware total; on exit it has
 * been reduced by the running "while down" total kept in @efx.
 */
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	/* Remember the raw total and up/down state for the next call */
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
|
||||
824
drivers/net/ethernet/sfc/nic.h
Normal file
824
drivers/net/ethernet/sfc/nic.h
Normal file
|
|
@ -0,0 +1,824 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_NIC_H
|
||||
#define EFX_NIC_H
|
||||
|
||||
#include <linux/net_tstamp.h>
|
||||
#include <linux/i2c-algo-bit.h>
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "mcdi.h"
|
||||
|
||||
/* Hardware revision numbers, ordered oldest to newest; compared
 * against efx->type->revision via efx_nic_rev().
 */
enum {
	EFX_REV_FALCON_A0 = 0,
	EFX_REV_FALCON_A1 = 1,
	EFX_REV_FALCON_B0 = 2,
	EFX_REV_SIENA_A0 = 3,
	EFX_REV_HUNT_A0 = 4,
};
|
||||
|
||||
/* Return the hardware revision (EFX_REV_*) of this NIC. */
static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}
|
||||
|
||||
u32 efx_farch_fpga_ver(struct efx_nic *efx);
|
||||
|
||||
/* NIC has two interlinked PCI functions for the same port.
 * True only for revisions before Falcon B0.
 */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
	return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
|
||||
|
||||
/* Read the current event from the event queue */
|
||||
static inline efx_qword_t *efx_event(struct efx_channel *channel,
|
||||
unsigned int index)
|
||||
{
|
||||
return ((efx_qword_t *) (channel->eventq.buf.addr)) +
|
||||
(index & channel->eventq_mask);
|
||||
}
|
||||
|
||||
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 *
 * Returns nonzero iff *@event holds a valid (not-cleared) event.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
|
||||
|
||||
/* Returns a pointer to the specified transmit descriptor in the TX
|
||||
* descriptor queue belonging to the specified channel.
|
||||
*/
|
||||
static inline efx_qword_t *
|
||||
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
|
||||
{
|
||||
return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
|
||||
}
|
||||
|
||||
/* Get partner of a TX queue, seen as part of the same net core queue */
|
||||
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
|
||||
return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
|
||||
else
|
||||
return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
|
||||
}
|
||||
|
||||
/* Report whether this TX queue would be empty for the given write_count.
 * May return false negative.
 */
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
					 unsigned int write_count)
{
	/* Single racy read; ACCESS_ONCE stops the compiler re-reading */
	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	/* 0 means "no valid empty snapshot yet"; report not empty */
	if (empty_read_count == 0)
		return false;

	/* Queue was empty iff the counts match once the validity flag
	 * bit (EFX_EMPTY_COUNT_VALID) is masked off.
	 */
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
|
||||
|
||||
/* Decide whether we can use TX PIO, ie. write packet data directly into
|
||||
* a buffer on the device. This can reduce latency at the expense of
|
||||
* throughput, so we only do this if both hardware and software TX rings
|
||||
* are empty. This also ensures that only one packet at a time can be
|
||||
* using the PIO buffer.
|
||||
*/
|
||||
static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
|
||||
return tx_queue->piobuf &&
|
||||
__efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
|
||||
__efx_nic_tx_is_empty(partner, partner->insert_count);
|
||||
}
|
||||
|
||||
/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell.  This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless.  Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the tx queue.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);

	/* Side effect: invalidate the empty snapshot so the next push
	 * decision waits for a fresh empty_read_count.
	 */
	tx_queue->empty_read_count = 0;
	/* Push only when the queue was empty and exactly one descriptor
	 * has been added since the last doorbell.
	 */
	return was_empty && tx_queue->write_count - write_count == 1;
}
|
||||
|
||||
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
|
||||
static inline efx_qword_t *
|
||||
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
|
||||
{
|
||||
return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
|
||||
}
|
||||
|
||||
/* PHY type identifiers as stored in board configuration; the numeric
 * values are fixed by that encoding (note the gaps at 5 and 7).
 */
enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
};
|
||||
|
||||
#define FALCON_XMAC_LOOPBACKS \
|
||||
((1 << LOOPBACK_XGMII) | \
|
||||
(1 << LOOPBACK_XGXS) | \
|
||||
(1 << LOOPBACK_XAUI))
|
||||
|
||||
/* Alignment of PCIe DMA boundaries (4KB) */
|
||||
#define EFX_PAGE_SIZE 4096
|
||||
/* Size and alignment of buffer table entries (same) */
|
||||
#define EFX_BUF_SIZE EFX_PAGE_SIZE
|
||||
|
||||
/* NIC-generic software stats */
|
||||
enum {
|
||||
GENERIC_STAT_rx_noskb_drops,
|
||||
GENERIC_STAT_rx_nodesc_trunc,
|
||||
GENERIC_STAT_COUNT
|
||||
};
|
||||
|
||||
/**
|
||||
* struct falcon_board_type - board operations and type information
|
||||
* @id: Board type id, as found in NVRAM
|
||||
* @init: Allocate resources and initialise peripheral hardware
|
||||
* @init_phy: Do board-specific PHY initialisation
|
||||
* @fini: Shut down hardware and free resources
|
||||
* @set_id_led: Set state of identifying LED or revert to automatic function
|
||||
* @monitor: Board-specific health check function
|
||||
*/
|
||||
struct falcon_board_type {
|
||||
u8 id;
|
||||
int (*init) (struct efx_nic *nic);
|
||||
void (*init_phy) (struct efx_nic *efx);
|
||||
void (*fini) (struct efx_nic *nic);
|
||||
void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
|
||||
int (*monitor) (struct efx_nic *nic);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct falcon_board - board information
|
||||
* @type: Type of board
|
||||
* @major: Major rev. ('A', 'B' ...)
|
||||
* @minor: Minor rev. (0, 1, ...)
|
||||
* @i2c_adap: I2C adapter for on-board peripherals
|
||||
* @i2c_data: Data for bit-banging algorithm
|
||||
* @hwmon_client: I2C client for hardware monitor
|
||||
* @ioexp_client: I2C client for power/port control
|
||||
*/
|
||||
struct falcon_board {
|
||||
const struct falcon_board_type *type;
|
||||
int major;
|
||||
int minor;
|
||||
struct i2c_adapter i2c_adap;
|
||||
struct i2c_algo_bit_data i2c_data;
|
||||
struct i2c_client *hwmon_client, *ioexp_client;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
|
||||
* @device_id: Controller's id for the device
|
||||
* @size: Size (in bytes)
|
||||
* @addr_len: Number of address bytes in read/write commands
|
||||
* @munge_address: Flag whether addresses should be munged.
|
||||
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
|
||||
* use bit 3 of the command byte as address bit A8, rather
|
||||
* than having a two-byte address. If this flag is set, then
|
||||
* commands should be munged in this way.
|
||||
* @erase_command: Erase command (or 0 if sector erase not needed).
|
||||
* @erase_size: Erase sector size (in bytes)
|
||||
* Erase commands affect sectors with this size and alignment.
|
||||
* This must be a power of two.
|
||||
* @block_size: Write block size (in bytes).
|
||||
* Write commands are limited to blocks with this size and alignment.
|
||||
*/
|
||||
struct falcon_spi_device {
|
||||
int device_id;
|
||||
unsigned int size;
|
||||
unsigned int addr_len;
|
||||
unsigned int munge_address:1;
|
||||
u8 erase_command;
|
||||
unsigned int erase_size;
|
||||
unsigned int block_size;
|
||||
};
|
||||
|
||||
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
|
||||
{
|
||||
return spi->size != 0;
|
||||
}
|
||||
|
||||
enum {
|
||||
FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
|
||||
FALCON_STAT_tx_packets,
|
||||
FALCON_STAT_tx_pause,
|
||||
FALCON_STAT_tx_control,
|
||||
FALCON_STAT_tx_unicast,
|
||||
FALCON_STAT_tx_multicast,
|
||||
FALCON_STAT_tx_broadcast,
|
||||
FALCON_STAT_tx_lt64,
|
||||
FALCON_STAT_tx_64,
|
||||
FALCON_STAT_tx_65_to_127,
|
||||
FALCON_STAT_tx_128_to_255,
|
||||
FALCON_STAT_tx_256_to_511,
|
||||
FALCON_STAT_tx_512_to_1023,
|
||||
FALCON_STAT_tx_1024_to_15xx,
|
||||
FALCON_STAT_tx_15xx_to_jumbo,
|
||||
FALCON_STAT_tx_gtjumbo,
|
||||
FALCON_STAT_tx_non_tcpudp,
|
||||
FALCON_STAT_tx_mac_src_error,
|
||||
FALCON_STAT_tx_ip_src_error,
|
||||
FALCON_STAT_rx_bytes,
|
||||
FALCON_STAT_rx_good_bytes,
|
||||
FALCON_STAT_rx_bad_bytes,
|
||||
FALCON_STAT_rx_packets,
|
||||
FALCON_STAT_rx_good,
|
||||
FALCON_STAT_rx_bad,
|
||||
FALCON_STAT_rx_pause,
|
||||
FALCON_STAT_rx_control,
|
||||
FALCON_STAT_rx_unicast,
|
||||
FALCON_STAT_rx_multicast,
|
||||
FALCON_STAT_rx_broadcast,
|
||||
FALCON_STAT_rx_lt64,
|
||||
FALCON_STAT_rx_64,
|
||||
FALCON_STAT_rx_65_to_127,
|
||||
FALCON_STAT_rx_128_to_255,
|
||||
FALCON_STAT_rx_256_to_511,
|
||||
FALCON_STAT_rx_512_to_1023,
|
||||
FALCON_STAT_rx_1024_to_15xx,
|
||||
FALCON_STAT_rx_15xx_to_jumbo,
|
||||
FALCON_STAT_rx_gtjumbo,
|
||||
FALCON_STAT_rx_bad_lt64,
|
||||
FALCON_STAT_rx_bad_gtjumbo,
|
||||
FALCON_STAT_rx_overflow,
|
||||
FALCON_STAT_rx_symbol_error,
|
||||
FALCON_STAT_rx_align_error,
|
||||
FALCON_STAT_rx_length_error,
|
||||
FALCON_STAT_rx_internal_error,
|
||||
FALCON_STAT_rx_nodesc_drop_cnt,
|
||||
FALCON_STAT_COUNT
|
||||
};
|
||||
|
||||
/**
|
||||
* struct falcon_nic_data - Falcon NIC state
|
||||
* @pci_dev2: Secondary function of Falcon A
|
||||
* @board: Board state and functions
|
||||
* @stats: Hardware statistics
|
||||
* @stats_disable_count: Nest count for disabling statistics fetches
|
||||
* @stats_pending: Is there a pending DMA of MAC statistics.
|
||||
* @stats_timer: A timer for regularly fetching MAC statistics.
|
||||
* @spi_flash: SPI flash device
|
||||
* @spi_eeprom: SPI EEPROM device
|
||||
* @spi_lock: SPI bus lock
|
||||
* @mdio_lock: MDIO bus lock
|
||||
* @xmac_poll_required: XMAC link state needs polling
|
||||
*/
|
||||
struct falcon_nic_data {
|
||||
struct pci_dev *pci_dev2;
|
||||
struct falcon_board board;
|
||||
u64 stats[FALCON_STAT_COUNT];
|
||||
unsigned int stats_disable_count;
|
||||
bool stats_pending;
|
||||
struct timer_list stats_timer;
|
||||
struct falcon_spi_device spi_flash;
|
||||
struct falcon_spi_device spi_eeprom;
|
||||
struct mutex spi_lock;
|
||||
struct mutex mdio_lock;
|
||||
bool xmac_poll_required;
|
||||
};
|
||||
|
||||
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
|
||||
{
|
||||
struct falcon_nic_data *data = efx->nic_data;
|
||||
return &data->board;
|
||||
}
|
||||
|
||||
/* Indices into siena_nic_data::stats[].  Siena-specific statistics follow
 * the generic ones, hence the first entry starts at GENERIC_STAT_COUNT.
 */
enum {
	SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
	SIENA_STAT_tx_good_bytes,
	SIENA_STAT_tx_bad_bytes,
	SIENA_STAT_tx_packets,
	SIENA_STAT_tx_bad,
	SIENA_STAT_tx_pause,
	SIENA_STAT_tx_control,
	SIENA_STAT_tx_unicast,
	SIENA_STAT_tx_multicast,
	SIENA_STAT_tx_broadcast,
	SIENA_STAT_tx_lt64,
	SIENA_STAT_tx_64,
	SIENA_STAT_tx_65_to_127,
	SIENA_STAT_tx_128_to_255,
	SIENA_STAT_tx_256_to_511,
	SIENA_STAT_tx_512_to_1023,
	SIENA_STAT_tx_1024_to_15xx,
	SIENA_STAT_tx_15xx_to_jumbo,
	SIENA_STAT_tx_gtjumbo,
	SIENA_STAT_tx_collision,
	SIENA_STAT_tx_single_collision,
	SIENA_STAT_tx_multiple_collision,
	SIENA_STAT_tx_excessive_collision,
	SIENA_STAT_tx_deferred,
	SIENA_STAT_tx_late_collision,
	SIENA_STAT_tx_excessive_deferred,
	SIENA_STAT_tx_non_tcpudp,
	SIENA_STAT_tx_mac_src_error,
	SIENA_STAT_tx_ip_src_error,
	SIENA_STAT_rx_bytes,
	SIENA_STAT_rx_good_bytes,
	SIENA_STAT_rx_bad_bytes,
	SIENA_STAT_rx_packets,
	SIENA_STAT_rx_good,
	SIENA_STAT_rx_bad,
	SIENA_STAT_rx_pause,
	SIENA_STAT_rx_control,
	SIENA_STAT_rx_unicast,
	SIENA_STAT_rx_multicast,
	SIENA_STAT_rx_broadcast,
	SIENA_STAT_rx_lt64,
	SIENA_STAT_rx_64,
	SIENA_STAT_rx_65_to_127,
	SIENA_STAT_rx_128_to_255,
	SIENA_STAT_rx_256_to_511,
	SIENA_STAT_rx_512_to_1023,
	SIENA_STAT_rx_1024_to_15xx,
	SIENA_STAT_rx_15xx_to_jumbo,
	SIENA_STAT_rx_gtjumbo,
	SIENA_STAT_rx_bad_gtjumbo,
	SIENA_STAT_rx_overflow,
	SIENA_STAT_rx_false_carrier,
	SIENA_STAT_rx_symbol_error,
	SIENA_STAT_rx_align_error,
	SIENA_STAT_rx_length_error,
	SIENA_STAT_rx_internal_error,
	SIENA_STAT_rx_nodesc_drop_cnt,
	SIENA_STAT_COUNT	/* total size of the stats array */
};
|
||||
|
||||
/**
 * struct siena_nic_data - Siena NIC state
 * @wol_filter_id: Wake-on-LAN packet filter id
 * @stats: Hardware statistics
 */
struct siena_nic_data {
	int wol_filter_id;
	u64 stats[SIENA_STAT_COUNT];	/* indexed by the SIENA_STAT_* enum above */
};
|
||||
|
||||
/* Indices into efx_ef10_nic_data::stats[].  EF10-specific statistics follow
 * the generic ones, hence the first entry starts at GENERIC_STAT_COUNT.
 */
enum {
	EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
	EF10_STAT_tx_packets,
	EF10_STAT_tx_pause,
	EF10_STAT_tx_control,
	EF10_STAT_tx_unicast,
	EF10_STAT_tx_multicast,
	EF10_STAT_tx_broadcast,
	EF10_STAT_tx_lt64,
	EF10_STAT_tx_64,
	EF10_STAT_tx_65_to_127,
	EF10_STAT_tx_128_to_255,
	EF10_STAT_tx_256_to_511,
	EF10_STAT_tx_512_to_1023,
	EF10_STAT_tx_1024_to_15xx,
	EF10_STAT_tx_15xx_to_jumbo,
	EF10_STAT_rx_bytes,
	EF10_STAT_rx_bytes_minus_good_bytes,
	EF10_STAT_rx_good_bytes,
	EF10_STAT_rx_bad_bytes,
	EF10_STAT_rx_packets,
	EF10_STAT_rx_good,
	EF10_STAT_rx_bad,
	EF10_STAT_rx_pause,
	EF10_STAT_rx_control,
	EF10_STAT_rx_unicast,
	EF10_STAT_rx_multicast,
	EF10_STAT_rx_broadcast,
	EF10_STAT_rx_lt64,
	EF10_STAT_rx_64,
	EF10_STAT_rx_65_to_127,
	EF10_STAT_rx_128_to_255,
	EF10_STAT_rx_256_to_511,
	EF10_STAT_rx_512_to_1023,
	EF10_STAT_rx_1024_to_15xx,
	EF10_STAT_rx_15xx_to_jumbo,
	EF10_STAT_rx_gtjumbo,
	EF10_STAT_rx_bad_gtjumbo,
	EF10_STAT_rx_overflow,
	EF10_STAT_rx_align_error,
	EF10_STAT_rx_length_error,
	EF10_STAT_rx_nodesc_drops,
	/* Packet-memory (PM) truncate/discard counters */
	EF10_STAT_rx_pm_trunc_bb_overflow,
	EF10_STAT_rx_pm_discard_bb_overflow,
	EF10_STAT_rx_pm_trunc_vfifo_full,
	EF10_STAT_rx_pm_discard_vfifo_full,
	EF10_STAT_rx_pm_trunc_qbb,
	EF10_STAT_rx_pm_discard_qbb,
	EF10_STAT_rx_pm_discard_mapping,
	/* Datapath (DP) counters */
	EF10_STAT_rx_dp_q_disabled_packets,
	EF10_STAT_rx_dp_di_dropped_packets,
	EF10_STAT_rx_dp_streaming_packets,
	EF10_STAT_rx_dp_hlb_fetch,
	EF10_STAT_rx_dp_hlb_wait,
	EF10_STAT_COUNT		/* total size of the stats array */
};
|
||||
|
||||
/* Maximum number of TX PIO buffers we may allocate to a function.
|
||||
* This matches the total number of buffers on each SFC9100-family
|
||||
* controller.
|
||||
*/
|
||||
#define EF10_TX_PIOBUF_COUNT 16
|
||||
|
||||
/**
 * struct efx_ef10_nic_data - EF10 architecture NIC state
 * @mcdi_buf: DMA buffer for MCDI
 * @warm_boot_count: Last seen MC warm boot count
 * @vi_base: Absolute index of first VI in this function
 * @n_allocated_vis: Number of VIs allocated to this function
 * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
 * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
 * @n_piobufs: Number of PIO buffers allocated to this function
 * @wc_membase: Base address of write-combining mapping of the memory BAR
 * @pio_write_base: Base address for writing PIO buffers
 * @pio_write_vi_base: Relative VI number for @pio_write_base
 * @piobuf_handle: Handle of each PIO buffer allocated
 * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
 *	reboot
 * @rx_rss_context: Firmware handle for our RSS context
 * @stats: Hardware statistics
 * @workaround_35388: Flag: firmware supports workaround for bug 35388
 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
 *	after MC reboot
 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
 *	%MC_CMD_GET_CAPABILITIES response)
 */
struct efx_ef10_nic_data {
	struct efx_buffer mcdi_buf;
	u16 warm_boot_count;
	unsigned int vi_base;
	unsigned int n_allocated_vis;
	bool must_realloc_vis;
	bool must_restore_filters;
	unsigned int n_piobufs;
	void __iomem *wc_membase, *pio_write_base;
	unsigned int pio_write_vi_base;
	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
	bool must_restore_piobufs;
	u32 rx_rss_context;
	u64 stats[EF10_STAT_COUNT];	/* indexed by the EF10_STAT_* enum above */
	bool workaround_35388;
	bool must_check_datapath_caps;
	u32 datapath_caps;
};
|
||||
|
||||
/*
|
||||
* On the SFC9000 family each port is associated with 1 PCI physical
|
||||
* function (PF) handled by sfc and a configurable number of virtual
|
||||
* functions (VFs) that may be handled by some other driver, often in
|
||||
* a VM guest. The queue pointer registers are mapped in both PF and
|
||||
* VF BARs such that an 8K region provides access to a single RX, TX
|
||||
* and event queue (collectively a Virtual Interface, VI or VNIC).
|
||||
*
|
||||
* The PF has access to all 1024 VIs while VFs are mapped to VIs
|
||||
* according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
|
||||
* in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
|
||||
* The number of VIs and the VI_SCALE value are configurable but must
|
||||
* be established at boot time by firmware.
|
||||
*/
|
||||
|
||||
/* Maximum VI_SCALE parameter supported by Siena */
|
||||
#define EFX_VI_SCALE_MAX 6
|
||||
/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
|
||||
* so this is the smallest allowed value. */
|
||||
#define EFX_VI_BASE 128U
|
||||
/* Maximum number of VFs allowed */
|
||||
#define EFX_VF_COUNT_MAX 127
|
||||
/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
|
||||
#define EFX_MAX_VF_EVQ_SIZE 8192UL
|
||||
/* The number of buffer table entries reserved for each VI on a VF */
|
||||
#define EFX_VF_BUFTBL_PER_VI \
|
||||
((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
|
||||
sizeof(efx_qword_t) / EFX_BUF_SIZE)
|
||||
|
||||
#ifdef CONFIG_SFC_SRIOV

/* Has the user asked for SR-IOV VFs? (vf_count != 0) */
static inline bool efx_sriov_wanted(struct efx_nic *efx)
{
	return efx->vf_count != 0;
}
/* Have any VFs actually been initialised? (vf_init_count != 0) */
static inline bool efx_sriov_enabled(struct efx_nic *efx)
{
	return efx->vf_init_count != 0;
}
/* Number of VIs mapped to each VF: 1 << VI_SCALE (see mapping comment above) */
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}

int efx_init_sriov(void);
void efx_sriov_probe(struct efx_nic *efx);
int efx_sriov_init(struct efx_nic *efx);
void efx_sriov_mac_address_changed(struct efx_nic *efx);
void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
void efx_sriov_reset(struct efx_nic *efx);
void efx_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);

#else

/* Stubs used when SR-IOV support is compiled out */
static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }

static inline int efx_init_sriov(void) { return 0; }
static inline void efx_sriov_probe(struct efx_nic *efx) {}
static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
					   efx_qword_t *event) {}
static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
					   efx_qword_t *event) {}
static inline void efx_sriov_event(struct efx_channel *channel,
				   efx_qword_t *event) {}
static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
static inline void efx_sriov_reset(struct efx_nic *efx) {}
static inline void efx_sriov_fini(struct efx_nic *efx) {}
static inline void efx_fini_sriov(void) {}

#endif
|
||||
|
||||
int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
|
||||
int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
|
||||
int efx_sriov_get_vf_config(struct net_device *dev, int vf,
|
||||
struct ifla_vf_info *ivf);
|
||||
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
|
||||
bool spoofchk);
|
||||
|
||||
struct ethtool_ts_info;
|
||||
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
|
||||
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
|
||||
void efx_ptp_remove(struct efx_nic *efx);
|
||||
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
|
||||
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
|
||||
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
|
||||
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
|
||||
int efx_ptp_get_mode(struct efx_nic *efx);
|
||||
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
|
||||
unsigned int new_mode);
|
||||
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
|
||||
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
|
||||
size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
|
||||
size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
|
||||
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
|
||||
void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
|
||||
struct sk_buff *skb);
|
||||
static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (channel->sync_events_state == SYNC_EVENTS_VALID)
|
||||
__efx_rx_skb_attach_timestamp(channel, skb);
|
||||
}
|
||||
void efx_ptp_start_datapath(struct efx_nic *efx);
|
||||
void efx_ptp_stop_datapath(struct efx_nic *efx);
|
||||
|
||||
extern const struct efx_nic_type falcon_a1_nic_type;
|
||||
extern const struct efx_nic_type falcon_b0_nic_type;
|
||||
extern const struct efx_nic_type siena_a0_nic_type;
|
||||
extern const struct efx_nic_type efx_hunt_a0_nic_type;
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Externs
|
||||
*
|
||||
**************************************************************************
|
||||
*/
|
||||
|
||||
int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
|
||||
|
||||
/* TX data path: thin wrappers dispatching to the NIC-type-specific hooks */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_remove(tx_queue);
}
/* Push queued TX descriptors to the hardware (tx_write hook) */
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}
|
||||
|
||||
/* RX data path: thin wrappers dispatching to the NIC-type-specific hooks */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
/* Tell the hardware about newly-added RX descriptors (rx_write hook) */
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
/* Ask for a refill event to be generated later (rx_defer_refill hook) */
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}
|
||||
|
||||
/* Event data path: thin wrappers dispatching to the NIC-type-specific hooks */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
/* Process up to @quota events; returns the number handled (ev_process hook) */
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
/* Acknowledge processed events back to the hardware (ev_read_ack hook) */
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}
|
||||
void efx_nic_event_test_start(struct efx_channel *channel);
|
||||
|
||||
/* Falcon/Siena queue operations */
|
||||
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
|
||||
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
|
||||
int efx_farch_ev_probe(struct efx_channel *channel);
|
||||
int efx_farch_ev_init(struct efx_channel *channel);
|
||||
void efx_farch_ev_fini(struct efx_channel *channel);
|
||||
void efx_farch_ev_remove(struct efx_channel *channel);
|
||||
int efx_farch_ev_process(struct efx_channel *channel, int quota);
|
||||
void efx_farch_ev_read_ack(struct efx_channel *channel);
|
||||
void efx_farch_ev_test_generate(struct efx_channel *channel);
|
||||
|
||||
/* Falcon/Siena filter operations */
|
||||
int efx_farch_filter_table_probe(struct efx_nic *efx);
|
||||
void efx_farch_filter_table_restore(struct efx_nic *efx);
|
||||
void efx_farch_filter_table_remove(struct efx_nic *efx);
|
||||
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
|
||||
s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
|
||||
bool replace);
|
||||
int efx_farch_filter_remove_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 filter_id);
|
||||
int efx_farch_filter_get_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority, u32 filter_id,
|
||||
struct efx_filter_spec *);
|
||||
int efx_farch_filter_clear_rx(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority);
|
||||
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority);
|
||||
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
|
||||
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority, u32 *buf,
|
||||
u32 size);
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
|
||||
struct efx_filter_spec *spec);
|
||||
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
|
||||
unsigned int index);
|
||||
#endif
|
||||
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
|
||||
|
||||
bool efx_nic_event_present(struct efx_channel *channel);
|
||||
|
||||
/* Some statistics are computed as A - B where A and B each increase
|
||||
* linearly with some hardware counter(s) and the counters are read
|
||||
* asynchronously. If the counters contributing to B are always read
|
||||
* after those contributing to A, the computed value may be lower than
|
||||
* the true value by some variable amount, and may decrease between
|
||||
* subsequent computations.
|
||||
*
|
||||
* We should never allow statistics to decrease or to exceed the true
|
||||
* value. Since the computed value will never be greater than the
|
||||
* true value, we can achieve this by only storing the computed value
|
||||
* when it increases.
|
||||
*/
|
||||
/* Store @diff into *@stat only when it moves the value forwards
 * (monotonicity guarantee - see the block comment above).
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	s64 delta = (s64)(diff - *stat);

	if (delta > 0)
		*stat = diff;
}
|
||||
|
||||
/* Interrupts */
|
||||
int efx_nic_init_interrupt(struct efx_nic *efx);
|
||||
void efx_nic_irq_test_start(struct efx_nic *efx);
|
||||
void efx_nic_fini_interrupt(struct efx_nic *efx);
|
||||
|
||||
/* Falcon/Siena interrupts */
|
||||
void efx_farch_irq_enable_master(struct efx_nic *efx);
|
||||
void efx_farch_irq_test_generate(struct efx_nic *efx);
|
||||
void efx_farch_irq_disable_master(struct efx_nic *efx);
|
||||
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
|
||||
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
|
||||
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
|
||||
|
||||
/* Single volatile read of the CPU that handled the channel's event-test
 * interrupt (written from interrupt context, hence ACCESS_ONCE).
 */
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
	return ACCESS_ONCE(channel->event_test_cpu);
}
|
||||
/* Single volatile read of the CPU that last took an interrupt for this NIC
 * (written from interrupt context, hence ACCESS_ONCE).
 */
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
	return ACCESS_ONCE(efx->last_irq_cpu);
}
|
||||
|
||||
/* Global Resources */
|
||||
int efx_nic_flush_queues(struct efx_nic *efx);
|
||||
void siena_prepare_flush(struct efx_nic *efx);
|
||||
int efx_farch_fini_dmaq(struct efx_nic *efx);
|
||||
void efx_farch_finish_flr(struct efx_nic *efx);
|
||||
void siena_finish_flush(struct efx_nic *efx);
|
||||
void falcon_start_nic_stats(struct efx_nic *efx);
|
||||
void falcon_stop_nic_stats(struct efx_nic *efx);
|
||||
int falcon_reset_xaui(struct efx_nic *efx);
|
||||
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
|
||||
void efx_farch_init_common(struct efx_nic *efx);
|
||||
void efx_ef10_handle_drain_event(struct efx_nic *efx);
|
||||
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
|
||||
|
||||
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
|
||||
unsigned int len, gfp_t gfp_flags);
|
||||
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
|
||||
|
||||
/* Tests */
|
||||
/* One entry of a register self-test table (see efx_farch_test_registers):
 * the register address and the mask of bits the test may toggle —
 * presumably the writable bits; confirm against efx_farch_test_registers().
 */
struct efx_farch_register_test {
	unsigned address;
	efx_oword_t mask;
};
|
||||
int efx_farch_test_registers(struct efx_nic *efx,
|
||||
const struct efx_farch_register_test *regs,
|
||||
size_t n_regs);
|
||||
|
||||
size_t efx_nic_get_regs_len(struct efx_nic *efx);
|
||||
void efx_nic_get_regs(struct efx_nic *efx, void *buf);
|
||||
|
||||
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask, u8 *names);
|
||||
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask, u64 *stats,
|
||||
const void *dma_buf, bool accumulate);
|
||||
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
|
||||
|
||||
#define EFX_MAX_FLUSH_TIME 5000
|
||||
|
||||
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
|
||||
efx_qword_t *event);
|
||||
|
||||
#endif /* EFX_NIC_H */
|
||||
50
drivers/net/ethernet/sfc/phy.h
Normal file
50
drivers/net/ethernet/sfc/phy.h
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2007-2010 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_PHY_H
|
||||
#define EFX_PHY_H
|
||||
|
||||
/****************************************************************************
|
||||
* 10Xpress (SFX7101) PHY
|
||||
*/
|
||||
extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
|
||||
|
||||
void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
|
||||
|
||||
/****************************************************************************
|
||||
* AMCC/Quake QT202x PHYs
|
||||
*/
|
||||
extern const struct efx_phy_operations falcon_qt202x_phy_ops;
|
||||
|
||||
/* These PHYs provide various H/W control states for LEDs */
|
||||
#define QUAKE_LED_LINK_INVAL (0)
|
||||
#define QUAKE_LED_LINK_STAT (1)
|
||||
#define QUAKE_LED_LINK_ACT (2)
|
||||
#define QUAKE_LED_LINK_ACTSTAT (3)
|
||||
#define QUAKE_LED_OFF (4)
|
||||
#define QUAKE_LED_ON (5)
|
||||
#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
|
||||
/* What link the LED tracks */
|
||||
#define QUAKE_LED_TXLINK (0)
|
||||
#define QUAKE_LED_RXLINK (8)
|
||||
|
||||
void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
|
||||
|
||||
/****************************************************************************
|
||||
* Transwitch CX4 retimer
|
||||
*/
|
||||
extern const struct efx_phy_operations falcon_txc_phy_ops;
|
||||
|
||||
#define TXC_GPIO_DIR_INPUT 0
|
||||
#define TXC_GPIO_DIR_OUTPUT 1
|
||||
|
||||
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
|
||||
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
|
||||
|
||||
#endif
|
||||
1939
drivers/net/ethernet/sfc/ptp.c
Normal file
1939
drivers/net/ethernet/sfc/ptp.c
Normal file
File diff suppressed because it is too large
Load diff
495
drivers/net/ethernet/sfc/qt202x_phy.c
Normal file
495
drivers/net/ethernet/sfc/qt202x_phy.c
Normal file
|
|
@ -0,0 +1,495 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2006-2012 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
/*
|
||||
* Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/delay.h>
|
||||
#include "efx.h"
|
||||
#include "mdio_10g.h"
|
||||
#include "phy.h"
|
||||
#include "nic.h"
|
||||
|
||||
#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
|
||||
MDIO_DEVS_PMAPMD | \
|
||||
MDIO_DEVS_PHYXS)
|
||||
|
||||
#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
|
||||
(1 << LOOPBACK_PMAPMD) | \
|
||||
(1 << LOOPBACK_PHYXS_WS))
|
||||
|
||||
/****************************************************************************/
|
||||
/* Quake-specific MDIO registers */
|
||||
#define MDIO_QUAKE_LED0_REG (0xD006)
|
||||
|
||||
/* QT2025C only */
|
||||
#define PCS_FW_HEARTBEAT_REG 0xd7ee
|
||||
#define PCS_FW_HEARTB_LBN 0
|
||||
#define PCS_FW_HEARTB_WIDTH 8
|
||||
#define PCS_FW_PRODUCT_CODE_1 0xd7f0
|
||||
#define PCS_FW_VERSION_1 0xd7f3
|
||||
#define PCS_FW_BUILD_1 0xd7f6
|
||||
#define PCS_UC8051_STATUS_REG 0xd7fd
|
||||
#define PCS_UC_STATUS_LBN 0
|
||||
#define PCS_UC_STATUS_WIDTH 8
|
||||
#define PCS_UC_STATUS_FW_SAVE 0x20
|
||||
#define PMA_PMD_MODE_REG 0xc301
|
||||
#define PMA_PMD_RXIN_SEL_LBN 6
|
||||
#define PMA_PMD_FTX_CTRL2_REG 0xc309
|
||||
#define PMA_PMD_FTX_STATIC_LBN 13
|
||||
#define PMA_PMD_VEND1_REG 0xc001
|
||||
#define PMA_PMD_VEND1_LBTXD_LBN 15
|
||||
#define PCS_VEND1_REG 0xc000
|
||||
#define PCS_VEND1_LBTXD_LBN 5
|
||||
|
||||
void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
|
||||
{
|
||||
int addr = MDIO_QUAKE_LED0_REG + led;
|
||||
efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
|
||||
}
|
||||
|
||||
/* Per-PHY private state for QT202x PHYs (hung off efx->phy_data) */
struct qt202x_phy_data {
	enum efx_phy_mode phy_mode;	/* NOTE(review): tracked PHY mode — usage outside this chunk; confirm */
	bool bug17190_in_bad_state;	/* PCS-stuck state currently observed (bug 17190) */
	unsigned long bug17190_timer;	/* jiffies deadline for the bug 17190 workaround */
	u32 firmware_ver;		/* QT2025C only: packed firmware version (see qt2025c_firmware_id) */
};
|
||||
|
||||
#define QT2022C2_MAX_RESET_TIME 500
|
||||
#define QT2022C2_RESET_WAIT 10
|
||||
|
||||
#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
|
||||
#define QT2025C_HEARTB_WAIT 100
|
||||
#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
|
||||
#define QT2025C_FWSTART_WAIT 100
|
||||
|
||||
#define BUG17190_INTERVAL (2 * HZ)
|
||||
|
||||
/* Wait for the QT2025C firmware heartbeat counter (PCS_FW_HEARTBEAT_REG) to
 * start advancing, i.e. for the on-chip microcontroller to be running.
 * Returns 0 on success, a negative MDIO error, or -ETIMEDOUT after
 * QT2025C_MAX_HEARTB_TIME.
 */
static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
	unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
	int reg, old_counter = 0;

	/* Wait for firmware heartbeat to start */
	for (;;) {
		int counter;
		reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
		if (reg < 0)
			return reg;	/* MDIO read failed */
		counter = ((reg >> PCS_FW_HEARTB_LBN) &
			    ((1 << PCS_FW_HEARTB_WIDTH) - 1));
		if (old_counter == 0)
			old_counter = counter;	/* first (non-zero) sample */
		else if (counter != old_counter)
			break;			/* counter moved: firmware alive */
		if (time_after(jiffies, timeout)) {
			/* Some cables have EEPROMs that conflict with the
			 * PHY's on-board EEPROM so it cannot load firmware */
			netif_err(efx, hw, efx->net_dev,
				  "If an SFP+ direct attach cable is"
				  " connected, please check that it complies"
				  " with the SFP+ specification\n");
			return -ETIMEDOUT;
		}
		msleep(QT2025C_HEARTB_WAIT);
	}

	return 0;
}
|
||||
|
||||
/* Wait for the QT2025C firmware status byte (PCS_UC8051_STATUS_REG) to reach
 * at least PCS_UC_STATUS_FW_SAVE, indicating boot has progressed far enough.
 * Returns 0 on success, a negative MDIO error, or -ETIMEDOUT after
 * QT2025C_MAX_FWSTART_TIME.
 */
static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
{
	unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
	int reg;

	/* Wait for firmware status to look good */
	for (;;) {
		reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
		if (reg < 0)
			return reg;	/* MDIO read failed */
		/* Extract the status field and compare against the
		 * "firmware saved" threshold */
		if ((reg &
		     ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >=
		    PCS_UC_STATUS_FW_SAVE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(QT2025C_FWSTART_WAIT);
	}

	return 0;
}
|
||||
|
||||
static void qt2025c_restart_firmware(struct efx_nic *efx)
|
||||
{
|
||||
/* Restart microcontroller execution of firmware from RAM */
|
||||
efx_mdio_write(efx, 3, 0xe854, 0x00c0);
|
||||
efx_mdio_write(efx, 3, 0xe854, 0x0040);
|
||||
msleep(50);
|
||||
}
|
||||
|
||||
/* Wait for the QT2025C to come out of reset: heartbeat running, then
 * firmware status good.  Works around bug 17689 (status stuck at 0x00
 * despite a live heartbeat) by restarting the firmware once and retrying.
 * Returns 0 on success or a negative error code.
 */
static int qt2025c_wait_reset(struct efx_nic *efx)
{
	int rc;

	rc = qt2025c_wait_heartbeat(efx);
	if (rc != 0)
		return rc;

	rc = qt2025c_wait_fw_status_good(efx);
	if (rc == -ETIMEDOUT) {
		/* Bug 17689: occasionally heartbeat starts but firmware status
		 * code never progresses beyond 0x00. Try again, once, after
		 * restarting execution of the firmware image. */
		netif_dbg(efx, hw, efx->net_dev,
			  "bashing QT2025C microcontroller\n");
		qt2025c_restart_firmware(efx);
		rc = qt2025c_wait_heartbeat(efx);
		if (rc != 0)
			return rc;
		rc = qt2025c_wait_fw_status_good(efx);
	}

	return rc;
}
|
||||
|
||||
/* Read the 9-byte firmware identification block (product code, version,
 * build date) starting at PCS_FW_PRODUCT_CODE_1, log it, and cache the
 * version packed into phy_data->firmware_ver as
 * (major << 24) | (minor << 16) | (point << 8) | patch,
 * so that numerically larger means newer (compared in
 * qt2025c_select_phy_mode()).
 */
static void qt2025c_firmware_id(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;
	u8 firmware_id[9];
	size_t i;

	for (i = 0; i < sizeof(firmware_id); i++)
		firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
					       PCS_FW_PRODUCT_CODE_1 + i);
	netif_info(efx, probe, efx->net_dev,
		   "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
		   (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
		   firmware_id[3] >> 4, firmware_id[3] & 0xf,
		   firmware_id[4], firmware_id[5],
		   firmware_id[6], firmware_id[7], firmware_id[8]);
	phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
				 ((firmware_id[3] & 0x0f) << 16) |
				 (firmware_id[4] << 8) | firmware_id[5];
}
|
||||
|
||||
/* Work around bug 17190: detect the PHY stuck with PHY_XS and PMA/PMD up
 * but PCS down, and if the state persists for BUG17190_INTERVAL, pulse
 * PMA/PMD loopback to kick the PCS back into lock.
 * NOTE(review): appears intended to be called periodically — caller not
 * visible in this chunk; confirm.
 */
static void qt2025c_bug17190_workaround(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;

	/* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
	 * layers up, but PCS down (no block_lock).  If we notice this state
	 * persisting for a couple of seconds, we switch PMA/PMD loopback
	 * briefly on and then off again, which is normally sufficient to
	 * recover it.
	 */
	if (efx->link_state.up ||
	    !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
		/* Either fully up or fully down: not the stuck state */
		phy_data->bug17190_in_bad_state = false;
		return;
	}

	if (!phy_data->bug17190_in_bad_state) {
		/* First sighting: start the persistence timer */
		phy_data->bug17190_in_bad_state = true;
		phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
		return;
	}

	if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
		netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
		efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				  MDIO_PMA_CTRL1_LOOPBACK, true);
		msleep(100);
		efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
				  MDIO_PMA_CTRL1_LOOPBACK, false);
		phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
	}
}
|
||||
|
||||
/* Select the QT2025C operating mode best suited to the current loopback
 * configuration, reprogramming the PHY's microcontroller if a change is
 * required.  Returns 0 on success or a negative error code.
 *
 * NOTE: the register addresses and values below replicate the vendor boot
 * EEPROM programming sequence and are deliberately left as raw numbers;
 * they must not be reordered.
 */
static int qt2025c_select_phy_mode(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;
	struct falcon_board *board = falcon_board(efx);
	int reg, rc, i;
	uint16_t phy_op_mode;

	/* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
	 * Self-Configure mode.  Don't attempt any switching if we encounter
	 * older firmware. */
	if (phy_data->firmware_ver < 0x02000100)
		return 0;

	/* In general we will get optimal behaviour in "SFP+ Self-Configure"
	 * mode; however, that powers down most of the PHY when no module is
	 * present, so we must use a different mode (any fixed mode will do)
	 * to be sure that loopbacks will work. */
	phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;

	/* Only change mode if really necessary */
	reg = efx_mdio_read(efx, 1, 0xc319);
	if ((reg & 0x0038) == phy_op_mode)
		return 0;
	netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
		  phy_op_mode);

	/* This sequence replicates the register writes configured in the boot
	 * EEPROM (including the differences between board revisions), except
	 * that the operating mode is changed, and the PHY is prevented from
	 * unnecessarily reloading the main firmware image again. */
	efx_mdio_write(efx, 1, 0xc300, 0x0000);
	/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
	 * STOPs onto the firmware/module I2C bus to reset it, varies across
	 * board revisions, as the bus is connected to different GPIO/LED
	 * outputs on the PHY.) */
	if (board->major == 0 && board->minor < 2) {
		efx_mdio_write(efx, 1, 0xc303, 0x4498);
		for (i = 0; i < 9; i++) {
			efx_mdio_write(efx, 1, 0xc303, 0x4488);
			efx_mdio_write(efx, 1, 0xc303, 0x4480);
			efx_mdio_write(efx, 1, 0xc303, 0x4490);
			efx_mdio_write(efx, 1, 0xc303, 0x4498);
		}
	} else {
		efx_mdio_write(efx, 1, 0xc303, 0x0920);
		efx_mdio_write(efx, 1, 0xd008, 0x0004);
		for (i = 0; i < 9; i++) {
			efx_mdio_write(efx, 1, 0xc303, 0x0900);
			efx_mdio_write(efx, 1, 0xd008, 0x0005);
			efx_mdio_write(efx, 1, 0xc303, 0x0920);
			efx_mdio_write(efx, 1, 0xd008, 0x0004);
		}
		efx_mdio_write(efx, 1, 0xc303, 0x4900);
	}
	/* The repeated 0xc303 write below is common to both board-revision
	 * paths and mirrors the EEPROM sequence; do not remove it. */
	efx_mdio_write(efx, 1, 0xc303, 0x4900);
	efx_mdio_write(efx, 1, 0xc302, 0x0004);
	efx_mdio_write(efx, 1, 0xc316, 0x0013);
	efx_mdio_write(efx, 1, 0xc318, 0x0054);
	/* Program the newly selected operating mode */
	efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
	efx_mdio_write(efx, 1, 0xc31a, 0x0098);
	efx_mdio_write(efx, 3, 0x0026, 0x0e00);
	efx_mdio_write(efx, 3, 0x0027, 0x0013);
	efx_mdio_write(efx, 3, 0x0028, 0xa528);
	efx_mdio_write(efx, 1, 0xd006, 0x000a);
	efx_mdio_write(efx, 1, 0xd007, 0x0009);
	efx_mdio_write(efx, 1, 0xd008, 0x0004);
	/* This additional write is not present in the boot EEPROM.  It
	 * prevents the PHY's internal boot ROM doing another pointless (and
	 * slow) reload of the firmware image (the microcontroller's code
	 * memory is not affected by the microcontroller reset). */
	efx_mdio_write(efx, 1, 0xc317, 0x00ff);
	/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
	 * restart doesn't reset it.  We need to do that ourselves. */
	efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
			  1 << PMA_PMD_RXIN_SEL_LBN, false);
	efx_mdio_write(efx, 1, 0xc300, 0x0002);
	msleep(20);

	/* Restart microcontroller execution of firmware from RAM */
	qt2025c_restart_firmware(efx);

	/* Wait for the microcontroller to be ready again */
	rc = qt2025c_wait_reset(efx);
	if (rc < 0) {
		netif_err(efx, hw, efx->net_dev,
			  "PHY microcontroller reset during mode switch "
			  "timed out\n");
		return rc;
	}

	return 0;
}
|
||||
|
||||
/* Bring the PHY through a full reset and wait for it to boot.  The reset
 * mechanism differs between the QT2025C (microcontroller-based, reset by
 * falcon_reset_hw()) and the QT2022C2 (PHYXS MMD soft reset).  Returns 0
 * on success or a negative error code on timeout.
 */
static int qt202x_reset_phy(struct efx_nic *efx)
{
	int rc;

	if (efx->phy_type == PHY_TYPE_QT2025C) {
		/* Wait for the reset triggered by falcon_reset_hw()
		 * to complete */
		rc = qt2025c_wait_reset(efx);
		if (rc < 0)
			goto fail;
	} else {
		/* Reset the PHYXS MMD.  This is documented as doing
		 * a complete soft reset. */
		rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
					QT2022C2_MAX_RESET_TIME /
					QT2022C2_RESET_WAIT,
					QT2022C2_RESET_WAIT);
		if (rc < 0)
			goto fail;
	}

	/* Wait 250ms for the PHY to complete bootup */
	msleep(250);

	/* Let the board apply any PHY-specific fixups now that it is up */
	falcon_board(efx)->type->init_phy(efx);

	return 0;

 fail:
	netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
	return rc;
}
|
||||
|
||||
static int qt202x_phy_probe(struct efx_nic *efx)
|
||||
{
|
||||
struct qt202x_phy_data *phy_data;
|
||||
|
||||
phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
|
||||
if (!phy_data)
|
||||
return -ENOMEM;
|
||||
efx->phy_data = phy_data;
|
||||
phy_data->phy_mode = efx->phy_mode;
|
||||
phy_data->bug17190_in_bad_state = false;
|
||||
phy_data->bug17190_timer = 0;
|
||||
|
||||
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
|
||||
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
|
||||
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Initialise the PHY: reset it, log its identity, and (for the QT2025C)
 * record the firmware version for later mode-switch decisions.  Returns 0
 * on success or the negative error code from the reset.
 */
static int qt202x_phy_init(struct efx_nic *efx)
{
	u32 devid;
	int rc;

	rc = qt202x_reset_phy(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
		return rc;
	}

	/* Identify the PHY via the PHYXS MMD ID registers */
	devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
	netif_info(efx, probe, efx->net_dev,
		   "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
		   devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
		   efx_mdio_id_rev(devid));

	/* Firmware version only exists (and only matters) on the QT2025C */
	if (efx->phy_type == PHY_TYPE_QT2025C)
		qt2025c_firmware_id(efx);

	return 0;
}
|
||||
|
||||
/* Return non-zero if all MMDs required by the QT202x report link up. */
static int qt202x_link_ok(struct efx_nic *efx)
{
	return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
}
|
||||
|
||||
/* Poll the PHY's link state.  Updates efx->link_state (speed/duplex are
 * fixed at 10G full-duplex for this PHY family) and runs the erratum-17190
 * workaround on the QT2025C.  Returns true if the up/down state changed
 * since the last poll.
 */
static bool qt202x_phy_poll(struct efx_nic *efx)
{
	bool was_up = efx->link_state.up;

	efx->link_state.up = qt202x_link_ok(efx);
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;

	/* Must run after link_state.up is refreshed above, since the
	 * workaround keys off the current link state. */
	if (efx->phy_type == PHY_TYPE_QT2025C)
		qt2025c_bug17190_workaround(efx);

	return efx->link_state.up != was_up;
}
|
||||
|
||||
/* Apply the current phy_mode/loopback configuration to the hardware.
 * Returns 0 on success or a negative error code from the QT2025C mode
 * switch.  On success, records the applied phy_mode in the private data
 * so the next call can detect transitions.
 */
static int qt202x_phy_reconfigure(struct efx_nic *efx)
{
	struct qt202x_phy_data *phy_data = efx->phy_data;

	if (efx->phy_type == PHY_TYPE_QT2025C) {
		int rc = qt2025c_select_phy_mode(efx);
		if (rc)
			return rc;

		/* There are several different register bits which can
		 * disable TX (and save power) on direct-attach cables
		 * or optical transceivers, varying somewhat between
		 * firmware versions.  Only 'static mode' appears to
		 * cover everything. */
		mdio_set_flag(
			&efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
			PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
			efx->phy_mode & PHY_MODE_TX_DISABLED ||
			efx->phy_mode & PHY_MODE_LOW_POWER ||
			efx->loopback_mode == LOOPBACK_PCS ||
			efx->loopback_mode == LOOPBACK_PMAPMD);
	} else {
		/* Reset the PHY when moving from tx off to tx on */
		if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
		    (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
			qt202x_reset_phy(efx);

		efx_mdio_transmit_disable(efx);
	}

	/* Apply the generic MDIO loopback/power configuration */
	efx_mdio_phy_reconfigure(efx);

	/* Remember what we applied, for the tx-off -> tx-on check above */
	phy_data->phy_mode = efx->phy_mode;

	return 0;
}
|
||||
|
||||
/* ethtool get_settings: delegate entirely to the generic MDIO45 helper. */
static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	mdio45_ethtool_gset(&efx->mdio, ecmd);
}
|
||||
|
||||
static void qt202x_phy_remove(struct efx_nic *efx)
|
||||
{
|
||||
/* Free the context block */
|
||||
kfree(efx->phy_data);
|
||||
efx->phy_data = NULL;
|
||||
}
|
||||
|
||||
/* ethtool get_module_info: the QT202x mirrors a standard SFF-8079 module
 * EEPROM, so report that format and length.  Always succeeds.
 */
static int qt202x_phy_get_module_info(struct efx_nic *efx,
				      struct ethtool_modinfo *modinfo)
{
	modinfo->type = ETH_MODULE_SFF_8079;
	modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	return 0;
}
|
||||
|
||||
/* ethtool get_module_eeprom: read ee->len bytes of the SFP+ module EEPROM,
 * which the PHY mirrors into MDIO registers, into @data.  Returns 0 on
 * success or a negative MDIO error code.
 */
static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
					struct ethtool_eeprom *ee, u8 *data)
{
	bool is_qt2025c = (efx->phy_type == PHY_TYPE_QT2025C);
	int mmd, reg_base, rc, i;

	/* The two PHY variants expose the mirror at different addresses
	 * in different MMDs. */
	mmd = is_qt2025c ? MDIO_MMD_PCS : MDIO_MMD_PMAPMD;
	reg_base = is_qt2025c ? 0xd000 : 0x8007;

	for (i = 0; i < ee->len; i++) {
		rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
		if (rc < 0)
			return rc;
		data[i] = rc;
	}

	return 0;
}
|
||||
|
||||
/* PHY operations table for the QT2022C2/QT2025C, referenced by the Falcon
 * board code.  fini is a no-op; set_settings and test_alive use the
 * generic MDIO implementations.
 */
const struct efx_phy_operations falcon_qt202x_phy_ops = {
	.probe		 = qt202x_phy_probe,
	.init		 = qt202x_phy_init,
	.reconfigure	 = qt202x_phy_reconfigure,
	.poll		 = qt202x_phy_poll,
	.fini		 = efx_port_dummy_op_void,
	.remove		 = qt202x_phy_remove,
	.get_settings	 = qt202x_phy_get_settings,
	.set_settings	 = efx_mdio_set_settings,
	.test_alive	 = efx_mdio_test_alive,
	.get_module_eeprom = qt202x_phy_get_module_eeprom,
	.get_module_info = qt202x_phy_get_module_info,
};
|
||||
989
drivers/net/ethernet/sfc/rx.c
Normal file
989
drivers/net/ethernet/sfc/rx.c
Normal file
|
|
@ -0,0 +1,989 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2005-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/socket.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/checksum.h>
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "filter.h"
|
||||
#include "nic.h"
|
||||
#include "selftest.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 * NOTE(review): the IOMMU variant is much larger, presumably because
 * map/unmap is more expensive with an IOMMU present — confirm against the
 * recycle-ring setup code (not in this file).
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
|
||||
|
||||
/* Return the CPU-visible address of the start of an RX buffer's data
 * within its backing page.
 */
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}
|
||||
|
||||
/* Extract the 32-bit RX hash that the NIC writes into the packet prefix.
 * @eh points at the start of the Ethernet header; the hash lives at a
 * NIC-dependent offset before/within it.  On platforms without efficient
 * unaligned access the value is assembled byte-by-byte (little-endian).
 */
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}
|
||||
|
||||
static inline struct efx_rx_buffer *
|
||||
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
|
||||
{
|
||||
if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
|
||||
return efx_rx_buffer(rx_queue, 0);
|
||||
else
|
||||
return rx_buf + 1;
|
||||
}
|
||||
|
||||
/* Make @len bytes of a DMA-mapped RX buffer visible to the CPU before the
 * driver reads the packet data.
 */
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}
|
||||
|
||||
/* Recompute how RX pages are split into buffers, based on the current DMA
 * length, IP alignment and buffer order.  Derives: the per-buffer step,
 * the number of buffers carved from each page (1 for high-order pages,
 * otherwise as many aligned steps as fit after the page-state header),
 * the truesize accounted to each buffer, and how many pages make up one
 * preferred refill batch.
 */
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
|
||||
|
||||
/* Check the RX page recycle ring for a page that can be reused.
 * Returns a still-DMA-mapped page if we hold the only reference to it,
 * otherwise unmaps and releases the page (the stack still holds it) and
 * returns NULL so the caller allocates a fresh page.
 */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		/* Someone else (e.g. the network stack) still references the
		 * page, so we cannot reuse it; drop our mapping and ref. */
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
|
||||
|
||||
/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 * @atomic:		if true, allocate with GFP_ATOMIC (softirq context);
 *			otherwise GFP_KERNEL may sleep
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		/* Prefer a recycled, already-DMA-mapped page; fall back to a
		 * fresh allocation plus a new mapping. */
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			/* The page-state header at the start of the page
			 * remembers the DMA address for later unmapping. */
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		/* Buffers start after the page-state header */
		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		/* Carve the page into rx_page_buf_step-sized buffers,
		 * taking one page reference per buffer. */
		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		/* Mark the final buffer so the page is only unmapped /
		 * recycled once all of its buffers are finished with. */
		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
|
||||
|
||||
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		/* The DMA address of the whole page was stashed in the
		 * page-state header when it was mapped. */
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}
|
||||
|
||||
static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
|
||||
{
|
||||
if (rx_buf->page) {
|
||||
put_page(rx_buf->page);
|
||||
rx_buf->page = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		/* Hand our page reference over to the recycle ring */
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	/* Recycle ring is full: unmap and release the page instead */
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}
|
||||
|
||||
/* Release an RX buffer during queue teardown: drop the per-buffer page
 * reference, and for the final buffer in a page also unmap the DMA mapping
 * and drop the remaining page reference.
 */
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}
|
||||
|
||||
/* Recycle the pages that are used by buffers that have just been received.
 * Walks @n_frags consecutive buffers starting at @rx_buf (n_frags must be
 * at least 1).
 */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
|
||||
|
||||
/* Discard a received (possibly multi-fragment) packet: recycle its pages
 * into the recycle ring where possible, then drop the per-buffer page
 * references.
 */
static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
|
||||
|
||||
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		if true, buffer allocation must not sleep
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practise,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);


	/* Fill one batch at a time until the queue is full or allocation
	 * fails; on failure fall back to the slow-fill timer so the queue
	 * is never left empty. */
	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	/* Tell the NIC about any descriptors we added */
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
|
||||
|
||||
/* Timer callback for the slow-fill path (scheduled when an atomic refill
 * failed).  @context is the rx_queue pointer cast to unsigned long, as
 * required by the timer API of this kernel version.
 */
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}
|
||||
|
||||
/* Validate a completed single-fragment packet length against the buffer
 * size.  Overlength packets are marked for discard; a "seriously"
 * overlength event (longer than the whole buffer) may indicate DMA past
 * the end of the buffer and triggers an RX-recovery reset on affected
 * hardware.
 */
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
|
||||
|
||||
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 * Consumes all @n_frags buffers (their page references pass to the skb,
 * or are dropped if no skb could be obtained).
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		/* No skb available: drop the packet, releasing each
		 * fragment's page reference. */
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	/* Attach each fragment's page to the skb; ownership of the page
	 * references moves to the skb. */
	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	skb_mark_napi_id(skb, &channel->napi_str);
	gro_result = napi_gro_frags(napi);
	/* Feed interrupt moderation scoring unless GRO dropped the packet */
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}
|
||||
|
||||
/* Allocate and construct an SKB around page fragments.  Copies up to
 * @hdr_len bytes of headers into the skb's linear area and attaches the
 * remaining data as page fragments.  Returns the skb, or NULL on
 * allocation failure (in which case the caller still owns the buffers).
 */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	/* Copy the NIC prefix plus the headers, then hide the prefix and
	 * IP-alignment padding again with skb_reserve(). */
	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		/* Everything fitted in the linear area: the page is no
		 * longer needed. */
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
|
||||
|
||||
/* Handle a received packet.  First half: validates the completion, syncs
 * the DMA mappings, recycles the pages, and queues the packet for
 * __efx_rx_packet() (the pipelined second half) to deliver.  Must be
 * called in completion order per RX queue.
 */
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	/* When the length is carried in the prefix it is read later, in
	 * __efx_rx_packet(), once the data has been synced. */
	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	/* Skip past the NIC-written prefix to the packet data proper */
	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
|
||||
|
||||
/* Deliver a packet via the non-GRO skb path: build an skb around the
 * fragments and hand it to the channel-specific receive hook or to the
 * normal network stack.  Frees the buffer on skb allocation failure.
 */
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	efx_rx_skb_attach_timestamp(channel, skb);

	/* Channel types (e.g. PTP) may consume the skb themselves */
	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}
|
||||
|
||||
/* Handle a received packet.  Second half: Touches packet payload.
 * Picks up the packet queued by efx_rx_packet() and routes it to the
 * loopback selftest, the GRO path, or the skb delivery path.
 */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	/* Honour a runtime disable of RX checksum offload */
	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	/* TCP packets go via GRO unless the channel consumes skbs itself
	 * or busy polling is active */
	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
	    !efx_channel_busy_polling(channel))
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}
|
||||
|
||||
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
struct efx_nic *efx = rx_queue->efx;
|
||||
unsigned int entries;
|
||||
int rc;
|
||||
|
||||
/* Create the smallest power-of-two aligned ring */
|
||||
entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
|
||||
EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
|
||||
rx_queue->ptr_mask = entries - 1;
|
||||
|
||||
netif_dbg(efx, probe, efx->net_dev,
|
||||
"creating RX queue %d size %#x mask %#x\n",
|
||||
efx_rx_queue_index(rx_queue), efx->rxq_entries,
|
||||
rx_queue->ptr_mask);
|
||||
|
||||
/* Allocate RX buffers */
|
||||
rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
|
||||
GFP_KERNEL);
|
||||
if (!rx_queue->buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = efx_nic_probe_rx(rx_queue);
|
||||
if (rc) {
|
||||
kfree(rx_queue->buffer);
|
||||
rx_queue->buffer = NULL;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Size and allocate the page recycle ring for an RX queue.
 *
 * The recycle ring caches recently-used DMA-mapped pages so they can be
 * reused without remapping.  A larger ring is used when an IOMMU is
 * present (mapping is expensive there); PPC64 always has an IOMMU so it
 * skips the runtime check.
 *
 * NOTE(review): the kcalloc result is not checked here — on failure
 * page_ring is left NULL while page_ptr_mask is still set.  Presumably
 * the recycle ring is treated as an optional optimisation; callers and
 * teardown paths must tolerate a NULL page_ring.  TODO confirm.
 */
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	/* Convert buffer count to page count, rounded to a power of two
	 * so a simple mask can index the ring.
	 */
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
|
||||
|
||||
/* Initialise an RX queue that has already been probed.
 *
 * Resets the software ring pointers, sets up the page recycle ring,
 * computes the refill watermarks (optionally scaled by the
 * rx_refill_threshold module parameter) and programs the hardware
 * descriptor ring.
 */
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	/* min_fill tracks the lowest fill level seen; start at max */
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	/* page_add starts one full ring ahead of page_remove, i.e. the
	 * recycle ring is initially empty.
	 */
	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	/* Leave room for at least one full refill batch below max_fill */
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}
|
||||
|
||||
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
int i;
|
||||
struct efx_nic *efx = rx_queue->efx;
|
||||
struct efx_rx_buffer *rx_buf;
|
||||
|
||||
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
|
||||
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
|
||||
|
||||
del_timer_sync(&rx_queue->slow_fill);
|
||||
|
||||
/* Release RX buffers from the current read ptr to the write ptr */
|
||||
if (rx_queue->buffer) {
|
||||
for (i = rx_queue->removed_count; i < rx_queue->added_count;
|
||||
i++) {
|
||||
unsigned index = i & rx_queue->ptr_mask;
|
||||
rx_buf = efx_rx_buffer(rx_queue, index);
|
||||
efx_fini_rx_buffer(rx_queue, rx_buf);
|
||||
}
|
||||
}
|
||||
|
||||
/* Unmap and release the pages in the recycle ring. Remove the ring. */
|
||||
for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
|
||||
struct page *page = rx_queue->page_ring[i];
|
||||
struct efx_rx_page_state *state;
|
||||
|
||||
if (page == NULL)
|
||||
continue;
|
||||
|
||||
state = page_address(page);
|
||||
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
|
||||
PAGE_SIZE << efx->rx_buffer_order,
|
||||
DMA_FROM_DEVICE);
|
||||
put_page(page);
|
||||
}
|
||||
kfree(rx_queue->page_ring);
|
||||
rx_queue->page_ring = NULL;
|
||||
}
|
||||
|
||||
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
|
||||
"destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
|
||||
|
||||
efx_nic_remove_rx(rx_queue);
|
||||
|
||||
kfree(rx_queue->buffer);
|
||||
rx_queue->buffer = NULL;
|
||||
}
|
||||
|
||||
|
||||
/* Read-only module parameter: percentage of the ring at which refill is
 * triggered; 0 selects the driver default (see efx_init_rx_queue).
 */
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL

/* ndo_rx_flow_steer implementation: insert a hardware 5-tuple filter
 * steering @skb's flow to @rxq_index.
 *
 * Returns the hardware filter index (>= 0) on success, or a negative
 * errno; -EPROTONOSUPPORT for flows the hardware cannot match (non-IP,
 * IP fragments).  The filter index is also used to record @flow_id for
 * later expiry checks in __efx_filter_rfs_expire().
 */
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const __be16 *ports;
	__be16 ether_type;
	int nhoff;		/* offset of the network header in skb->data */
	int rc;

	/* The core RPS/RFS code has already parsed and validated
	 * VLAN, IP and transport headers.  We assume they are in the
	 * header area.
	 */

	if (skb->protocol == htons(ETH_P_8021Q)) {
		const struct vlan_hdr *vh =
			(const struct vlan_hdr *)skb->data;

		/* We can't filter on the IP 5-tuple and the vlan
		 * together, so just strip the vlan header and filter
		 * on the IP part.
		 */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
		ether_type = vh->h_vlan_encapsulated_proto;
		nhoff = sizeof(struct vlan_hdr);
	} else {
		ether_type = skb->protocol;
		nhoff = 0;
	}

	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	/* Full 5-tuple match: ethertype, protocol, both hosts, both ports */
	spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = ether_type;

	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip =
			(const struct iphdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
		/* Fragments lack the transport header, so cannot be matched */
		if (ip_is_fragment(ip))
			return -EPROTONOSUPPORT;
		spec.ip_proto = ip->protocol;
		spec.rem_host[0] = ip->saddr;
		spec.loc_host[0] = ip->daddr;
		/* Need at least 4 bytes of transport header for the ports */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
	} else {
		const struct ipv6hdr *ip6 =
			(const struct ipv6hdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(*ip6) + 4);
		spec.ip_proto = ip6->nexthdr;
		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
		ports = (const __be16 *)(ip6 + 1);
	}

	/* ports[0]/ports[1] are the source/destination transport ports */
	spec.rem_port = ports[0];
	spec.loc_port = ports[1];

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	if (ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);

	return rc;
}
|
||||
|
||||
/* Scan up to @quota RFS filters for expiry, resuming from where the
 * previous scan stopped (efx->rps_expire_index).
 *
 * Uses spin_trylock_bh so it never blocks the caller; returns false if
 * the filter lock was contended (no work done), true otherwise.
 */
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		/* Wrap around the filter table */
		if (++index == size)
			index = 0;
	}
	/* Remember where to resume on the next call */
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */
|
||||
|
||||
/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	/* Only non-drop RX filters can be multicast recipients */
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	/* Matching on a multicast destination MAC (I/G bit set) */
	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	/* Matching on a multicast local IP address: requires both the
	 * ethertype and local-host fields to be part of the match.
	 */
	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		/* IPv6 multicast addresses all start with 0xff */
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}
|
||||
788
drivers/net/ethernet/sfc/selftest.c
Normal file
788
drivers/net/ethernet/sfc/selftest.c
Normal file
|
|
@ -0,0 +1,788 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2012 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "nic.h"
|
||||
#include "selftest.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;	/* Ethernet header */
	struct iphdr ip;	/* IPv4 header */
	struct udphdr udp;	/* UDP header */
	__be16 iteration;	/* Test iteration number, network order */
	const char msg[64];	/* Fixed test message + padding */
} __packed;
|
||||
|
||||
/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

/* Fixed message copied into every loopback test packet */
static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX] = "MSI-X",
	[EFX_INT_MODE_MSI] = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
/* Human-readable name of the NIC's current interrupt mode, for logging */
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
|
||||
|
||||
/**
 * efx_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in efx_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* MII, NVRAM and register tests
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (efx->phy_op->test_alive) {
|
||||
rc = efx->phy_op->test_alive(efx);
|
||||
tests->phy_alive = rc ? -1 : 1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (efx->type->test_nvram) {
|
||||
rc = efx->type->test_nvram(efx);
|
||||
tests->nvram = rc ? -1 : 1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Interrupt and event queue testing
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/* Test generation and receipt of interrupts
 *
 * Fires a test interrupt and polls (with exponential backoff, up to
 * IRQ_TIMEOUT) until the IRQ handler records the CPU it ran on.
 * Records 1/-1 in @tests->interrupt; returns 0 or -ETIMEDOUT.
 */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	efx_nic_irq_test_start(efx);
	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		/* Returns the servicing CPU, or negative if not yet seen */
		cpu = efx_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}
|
||||
|
||||
/* Test generation and receipt of interrupting events
 *
 * Starts an event test on every channel, then polls with exponential
 * backoff.  Per-channel progress is tracked in the bitmasks dma_pend
 * (test event not yet seen in the event queue) and int_pend (interrupt
 * not yet seen); napi_ran records channels whose NAPI handler consumed
 * the event itself.  Results go into @tests->eventq_dma/eventq_int.
 */
static int efx_test_eventq_irq(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	unsigned int read_ptr[EFX_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	/* One bit per channel must fit in the unsigned long bitmasks */
	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

	efx_for_each_channel(channel, efx) {
		/* Snapshot the read pointer so we can detect NAPI progress */
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		efx_nic_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		efx_for_each_channel(channel, efx) {
			/* Quiesce the event queue before inspecting it */
			efx_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				/* NAPI ran: both event and IRQ were seen */
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (efx_nic_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (efx_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			efx_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	efx_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
|
||||
|
||||
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
|
||||
unsigned flags)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (!efx->phy_op->run_tests)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&efx->mac_lock);
|
||||
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Loopback testing
|
||||
* NB Only one loopback test can be executing concurrently.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 *
 * Validates the packet against the expected payload for the current
 * iteration and bumps state->rx_good or state->rx_bad accordingly.
 * Fields that legitimately differ (saddr, and the IP checksum when
 * checksum offload is active) are copied from the expected payload
 * before comparison.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	/* Normalise the fields that vary per packet so memcmp can be used */
	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	/* Dump the first bad packet only, to avoid log flooding */
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}
|
||||
|
||||
/* Initialise an efx_selftest_state for a new iteration
 *
 * Rebuilds the template loopback packet (Ethernet + IP + UDP +
 * iteration number + message), increments the iteration counter and
 * zeroes the RX counters.  The final smp_wmb() makes the new state
 * visible before any packets from this iteration can be received.
 */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layerII header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	/* Dummy checksum; real value is irrelevant for the loopback test */
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}
|
||||
|
||||
/* Transmit the loopback test burst on @tx_queue.
 *
 * Sends state->packet_count copies of the template payload, varying
 * ip.saddr per packet to exercise RSS.  Each skb is held with an extra
 * reference so efx_end_loopback() can count TX completions by checking
 * skb_shared().  Returns 0, -ENOMEM, or -EPIPE if the queue refuses a
 * packet; on error, skbs already queued are left for the caller to
 * reap.
 */
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}
|
||||
|
||||
static int efx_poll_loopback(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_loopback_state *state = efx->loopback_selftest;
|
||||
|
||||
return atomic_read(&state->rx_good) == state->packet_count;
|
||||
}
|
||||
|
||||
/* Finish a loopback burst: count TX completions, drop our extra skb
 * references, compare TX/RX counts against expectations and accumulate
 * the results into @lb_tests.  Returns 0 on success or -ETIMEDOUT if
 * either TX completions or good RX packets fell short.
 */
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		/* An unshared skb means the TX path dropped its reference,
		 * i.e. the transmit completed.
		 */
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->queue, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->queue, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}
|
||||
|
||||
/* Run the loopback test on one TX queue in the current loopback mode.
 *
 * Performs three bursts of increasing size (1, 16, then up to a third
 * of the TX ring), transmitting, waiting for reception and checking
 * the counts each time.  Returns 0 on success or the first error from
 * the begin/end phases (-ENOMEM, -EPIPE, -ETIMEDOUT).
 */
static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send: 1 << (4 * i),
		 * capped at a third of the TX ring.
		 */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		/* Stop dropping received packets; the test is live now */
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d testing %s loopback with %d packets\n",
			  tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}
|
||||
|
||||
/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate.
 *
 * Polls for up to ~4 seconds (40 * HZ/10) and requires two consecutive
 * fault-free "link up" readings before declaring success.  Returns 0
 * on success or -ETIMEDOUT.
 */
static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		/* Kick the hardware monitor, if the NIC type has one */
		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		/* A MAC fault counts as link-down for test purposes */
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}
|
||||
|
||||
/* Run the loopback self-test in every mode listed in @loopback_modes,
 * recording per-mode results in @tests->loopback[].  Sets
 * efx->loopback_selftest for the duration so the RX path diverts
 * received packets to the test harness, and clears it before returning.
 * Returns 0 on success or a negative error code.
 */
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Wait for the link to settle in the new mode */
		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* Record whether this queue does checksum
			 * offload so RX verification can account for it */
			state->offload_csum = (tx_queue->queue &
					       EFX_TXQ_TYPE_OFFLOAD);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	/* Order the NULLing of loopback_selftest before freeing the state
	 * it pointed to.  NOTE(review): wmb() alone does not synchronise
	 * with the RX-path reader; presumably relies on NAPI quiescence
	 * elsewhere -- confirm. */
	wmb();
	kfree(state);

	return rc;
}
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Entry point
|
||||
*
|
||||
*************************************************************************/
|
||||
|
||||
/* Main ethtool self-test entry point.
 * Always runs the online (non-disruptive) tests; if ETH_TEST_FL_OFFLINE
 * is set in @flags it additionally runs the disruptive chip, PHY and
 * loopback tests with the device detached, restoring PHY/loopback
 * state afterwards.  Per-test results go in @tests.
 * Returns 0 if everything passed, otherwise the first failure code.
 */
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	/* Saved so the offline path can restore them at the end */
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	/* Don't let the async IRQ test race with this one */
	efx_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	/* Don't proceed to disruptive tests if the basics failed */
	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	efx_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			/* Chip is in an unknown state; disable it */
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			efx_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	netif_device_attach(efx->net_dev);

	return rc_test;
}
|
||||
|
||||
void efx_selftest_async_start(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_channel *channel;
|
||||
|
||||
efx_for_each_channel(channel, efx)
|
||||
efx_nic_event_test_start(channel);
|
||||
schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
|
||||
}
|
||||
|
||||
/* Cancel a pending asynchronous self-test and wait for any running
 * instance of the work item to finish before returning. */
void efx_selftest_async_cancel(struct efx_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}
|
||||
|
||||
void efx_selftest_async_work(struct work_struct *data)
|
||||
{
|
||||
struct efx_nic *efx = container_of(data, struct efx_nic,
|
||||
selftest_work.work);
|
||||
struct efx_channel *channel;
|
||||
int cpu;
|
||||
|
||||
efx_for_each_channel(channel, efx) {
|
||||
cpu = efx_nic_event_test_irq_cpu(channel);
|
||||
if (cpu < 0)
|
||||
netif_err(efx, ifup, efx->net_dev,
|
||||
"channel %d failed to trigger an interrupt\n",
|
||||
channel->channel);
|
||||
else
|
||||
netif_dbg(efx, ifup, efx->net_dev,
|
||||
"channel %d triggered interrupt on CPU %d\n",
|
||||
channel->channel, cpu);
|
||||
}
|
||||
}
|
||||
55
drivers/net/ethernet/sfc/selftest.h
Normal file
55
drivers/net/ethernet/sfc/selftest.h
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2012 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_SELFTEST_H
#define EFX_SELFTEST_H

#include "net_driver.h"

/*
 * Self tests
 */

/* Per-loopback-mode results: packets sent and completed for each TX
 * queue type, plus counts of correctly and incorrectly received
 * loopback packets. */
struct efx_loopback_self_tests {
	int tx_sent[EFX_TXQ_TYPES];
	int tx_done[EFX_TXQ_TYPES];
	int rx_good;
	int rx_bad;
};

/* Upper bound on per-PHY external self-tests recorded in phy_ext[] */
#define EFX_MAX_PHY_TESTS 20

/* Efx self test results
 * For fields which are not counters, 1 indicates success and -1
 * indicates failure.
 */
struct efx_self_tests {
	/* online tests */
	int phy_alive;
	int nvram;
	int interrupt;
	int eventq_dma[EFX_MAX_CHANNELS];
	int eventq_int[EFX_MAX_CHANNELS];
	/* offline tests */
	int memory;
	int registers;
	int phy_ext[EFX_MAX_PHY_TESTS];
	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};

/* Hand a packet received during a loopback test to the test harness */
void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
			    int pkt_len);
/* Run the self-test suite (offline tests only with ETH_TEST_FL_OFFLINE) */
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags);
/* Asynchronous interrupt self-test, scheduled as delayed work */
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);
void efx_selftest_async_work(struct work_struct *data);

#endif /* EFX_SELFTEST_H */
|
||||
1024
drivers/net/ethernet/sfc/siena.c
Normal file
1024
drivers/net/ethernet/sfc/siena.c
Normal file
File diff suppressed because it is too large
Load diff
1645
drivers/net/ethernet/sfc/siena_sriov.c
Normal file
1645
drivers/net/ethernet/sfc/siena_sriov.c
Normal file
File diff suppressed because it is too large
Load diff
494
drivers/net/ethernet/sfc/tenxpress.c
Normal file
494
drivers/net/ethernet/sfc/tenxpress.c
Normal file
|
|
@ -0,0 +1,494 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2007-2011 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include "efx.h"
|
||||
#include "mdio_10g.h"
|
||||
#include "nic.h"
|
||||
#include "phy.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* We expect these MMDs to be in the package. */
|
||||
#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
|
||||
MDIO_DEVS_PCS | \
|
||||
MDIO_DEVS_PHYXS | \
|
||||
MDIO_DEVS_AN)
|
||||
|
||||
#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
|
||||
(1 << LOOPBACK_PCS) | \
|
||||
(1 << LOOPBACK_PMAPMD) | \
|
||||
(1 << LOOPBACK_PHYXS_WS))
|
||||
|
||||
/* We complain if we fail to see the link partner as 10G capable this many
|
||||
* times in a row (must be > 1 as sampling the autoneg. registers is racy)
|
||||
*/
|
||||
#define MAX_BAD_LP_TRIES (5)
|
||||
|
||||
/* Extended control register */
|
||||
#define PMA_PMD_XCONTROL_REG 49152
|
||||
#define PMA_PMD_EXT_GMII_EN_LBN 1
|
||||
#define PMA_PMD_EXT_GMII_EN_WIDTH 1
|
||||
#define PMA_PMD_EXT_CLK_OUT_LBN 2
|
||||
#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
|
||||
#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
|
||||
#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
|
||||
#define PMA_PMD_EXT_CLK312_WIDTH 1
|
||||
#define PMA_PMD_EXT_LPOWER_LBN 12
|
||||
#define PMA_PMD_EXT_LPOWER_WIDTH 1
|
||||
#define PMA_PMD_EXT_ROBUST_LBN 14
|
||||
#define PMA_PMD_EXT_ROBUST_WIDTH 1
|
||||
#define PMA_PMD_EXT_SSR_LBN 15
|
||||
#define PMA_PMD_EXT_SSR_WIDTH 1
|
||||
|
||||
/* extended status register */
|
||||
#define PMA_PMD_XSTATUS_REG 49153
|
||||
#define PMA_PMD_XSTAT_MDIX_LBN 14
|
||||
#define PMA_PMD_XSTAT_FLP_LBN (12)
|
||||
|
||||
/* LED control register */
|
||||
#define PMA_PMD_LED_CTRL_REG 49159
|
||||
#define PMA_PMA_LED_ACTIVITY_LBN (3)
|
||||
|
||||
/* LED function override register */
|
||||
#define PMA_PMD_LED_OVERR_REG 49161
|
||||
/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
|
||||
#define PMA_PMD_LED_LINK_LBN (0)
|
||||
#define PMA_PMD_LED_SPEED_LBN (2)
|
||||
#define PMA_PMD_LED_TX_LBN (4)
|
||||
#define PMA_PMD_LED_RX_LBN (6)
|
||||
/* Override settings */
|
||||
#define PMA_PMD_LED_AUTO (0) /* H/W control */
|
||||
#define PMA_PMD_LED_ON (1)
|
||||
#define PMA_PMD_LED_OFF (2)
|
||||
#define PMA_PMD_LED_FLASH (3)
|
||||
#define PMA_PMD_LED_MASK 3
|
||||
/* All LEDs under hardware control */
|
||||
/* Green and Amber under hardware control, Red off */
|
||||
#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
|
||||
|
||||
#define PMA_PMD_SPEED_ENABLE_REG 49192
|
||||
#define PMA_PMD_100TX_ADV_LBN 1
|
||||
#define PMA_PMD_100TX_ADV_WIDTH 1
|
||||
#define PMA_PMD_1000T_ADV_LBN 2
|
||||
#define PMA_PMD_1000T_ADV_WIDTH 1
|
||||
#define PMA_PMD_10000T_ADV_LBN 3
|
||||
#define PMA_PMD_10000T_ADV_WIDTH 1
|
||||
#define PMA_PMD_SPEED_LBN 4
|
||||
#define PMA_PMD_SPEED_WIDTH 4
|
||||
|
||||
/* Misc register defines */
|
||||
#define PCS_CLOCK_CTRL_REG 55297
|
||||
#define PLL312_RST_N_LBN 2
|
||||
|
||||
#define PCS_SOFT_RST2_REG 55302
|
||||
#define SERDES_RST_N_LBN 13
|
||||
#define XGXS_RST_N_LBN 12
|
||||
|
||||
#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */
|
||||
#define CLK312_EN_LBN 3
|
||||
|
||||
/* PHYXS registers */
|
||||
#define PHYXS_XCONTROL_REG 49152
|
||||
#define PHYXS_RESET_LBN 15
|
||||
#define PHYXS_RESET_WIDTH 1
|
||||
|
||||
#define PHYXS_TEST1 (49162)
|
||||
#define LOOPBACK_NEAR_LBN (8)
|
||||
#define LOOPBACK_NEAR_WIDTH (1)
|
||||
|
||||
/* Boot status register */
|
||||
#define PCS_BOOT_STATUS_REG 53248
|
||||
#define PCS_BOOT_FATAL_ERROR_LBN 0
|
||||
#define PCS_BOOT_PROGRESS_LBN 1
|
||||
#define PCS_BOOT_PROGRESS_WIDTH 2
|
||||
#define PCS_BOOT_PROGRESS_INIT 0
|
||||
#define PCS_BOOT_PROGRESS_WAIT_MDIO 1
|
||||
#define PCS_BOOT_PROGRESS_CHECKSUM 2
|
||||
#define PCS_BOOT_PROGRESS_JUMP 3
|
||||
#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3
|
||||
#define PCS_BOOT_CODE_STARTED_LBN 4
|
||||
|
||||
/* 100M/1G PHY registers */
|
||||
#define GPHY_XCONTROL_REG 49152
|
||||
#define GPHY_ISOLATE_LBN 10
|
||||
#define GPHY_ISOLATE_WIDTH 1
|
||||
#define GPHY_DUPLEX_LBN 8
|
||||
#define GPHY_DUPLEX_WIDTH 1
|
||||
#define GPHY_LOOPBACK_NEAR_LBN 14
|
||||
#define GPHY_LOOPBACK_NEAR_WIDTH 1
|
||||
|
||||
#define C22EXT_STATUS_REG 49153
|
||||
#define C22EXT_STATUS_LINK_LBN 2
|
||||
#define C22EXT_STATUS_LINK_WIDTH 1
|
||||
|
||||
#define C22EXT_MSTSLV_CTRL 49161
|
||||
#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
|
||||
#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
|
||||
|
||||
#define C22EXT_MSTSLV_STATUS 49162
|
||||
#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
|
||||
#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
|
||||
|
||||
/* Time to wait between powering down the LNPGA and turning off the power
|
||||
* rails */
|
||||
#define LNPGA_PDOWN_WAIT (HZ / 5)
|
||||
|
||||
/* Private state for the SFX7101 (10Xpress) PHY driver */
struct tenxpress_phy_data {
	enum efx_loopback_mode loopback_mode;	/* mode applied at last reconfigure */
	enum efx_phy_mode phy_mode;		/* PHY mode applied at last reconfigure */
	int bad_lp_tries;	/* consecutive samples where the link partner
				 * failed to complete autonegotiation */
};
|
||||
|
||||
/* Basic SFX7101 register initialisation: enable the 312.5 MHz clock
 * output and set the default LED behaviour.  Always returns 0. */
static int tenxpress_init(struct efx_nic *efx)
{
	/* Enable 312.5 MHz clock */
	efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
		       1 << CLK312_EN_LBN);

	/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
	efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
			  1 << PMA_PMA_LED_ACTIVITY_LBN, true);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
		       SFX7101_PMA_PMD_LED_DEFAULT);

	return 0;
}
|
||||
|
||||
/* Allocate per-PHY state and advertise the SFX7101's capabilities
 * (required MMDs, Clause 45 MDIO, supported loopbacks, 10GBASE-T).
 * Returns 0 or -ENOMEM. */
static int tenxpress_phy_probe(struct efx_nic *efx)
{
	struct tenxpress_phy_data *phy_data;

	/* Allocate phy private storage */
	phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
	if (!phy_data)
		return -ENOMEM;
	efx->phy_data = phy_data;
	phy_data->phy_mode = efx->phy_mode;

	efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
	efx->mdio.mode_support = MDIO_SUPPORTS_C45;

	efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;

	/* The SFX7101 supports 10GBASE-T only, via autonegotiation */
	efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
				 ADVERTISED_10000baseT_Full);

	return 0;
}
|
||||
|
||||
/* Bring the PHY up for use: board-level init, wait for the required
 * MMDs out of reset (skipped in PHY_MODE_SPECIAL), basic register
 * setup, flow control / autoneg restart, then release XGXS and SerDes.
 * Returns 0 or a negative error code. */
static int tenxpress_phy_init(struct efx_nic *efx)
{
	int rc;

	falcon_board(efx)->type->init_phy(efx);

	if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
		/* Wait for the expected MMDs to leave reset and verify
		 * they are all present and responding */
		rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
		if (rc < 0)
			return rc;

		rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
		if (rc < 0)
			return rc;
	}

	rc = tenxpress_init(efx);
	if (rc < 0)
		return rc;

	/* Reinitialise flow control settings */
	efx_link_set_wanted_fc(efx, efx->wanted_fc);
	efx_mdio_an_reconfigure(efx);

	/* Give the PHY time to settle before touching XAUI */
	schedule_timeout_uninterruptible(HZ / 5); /* 200ms */

	/* Let XGXS and SerDes out of reset */
	falcon_reset_xaui(efx);

	return 0;
}
|
||||
|
||||
/* Perform a "special software reset" on the PHY. The caller is
 * responsible for saving and restoring the PHY hardware registers
 * properly, and masking/unmasking LASI */
static int tenxpress_special_reset(struct efx_nic *efx)
{
	int rc, reg;

	/* The XGMAC clock is driven from the SFX7101 312MHz clock, so
	 * a special software reset can glitch the XGMAC sufficiently for stats
	 * requests to fail. */
	falcon_stop_nic_stats(efx);

	/* Initiate reset */
	reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
	reg |= (1 << PMA_PMD_EXT_SSR_LBN);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);

	/* Give the reset time to take effect before polling */
	mdelay(200);

	/* Wait for the blocks to come out of reset */
	rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
	if (rc < 0)
		goto out;

	/* Try and reconfigure the device */
	rc = tenxpress_init(efx);
	if (rc < 0)
		goto out;

	/* Wait for the XGXS state machine to churn */
	mdelay(10);
out:
	/* Stats were stopped above; always restart them, even on error */
	falcon_start_nic_stats(efx);
	return rc;
}
|
||||
|
||||
/* Detect a link partner that is not 10GBASE-T capable.
 * Called on each poll with the current link state; counts consecutive
 * autonegotiation failures and, after MAX_BAD_LP_TRIES, flashes the
 * RX (red) LED and logs an explanatory error.  Clears the indication
 * once the link recovers. */
static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
{
	struct tenxpress_phy_data *pd = efx->phy_data;
	bool bad_lp;
	int reg;

	if (link_ok) {
		bad_lp = false;
	} else {
		/* Check that AN has started but not completed. */
		reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
		if (!(reg & MDIO_AN_STAT1_LPABLE))
			return; /* LP status is unknown */
		bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
		if (bad_lp)
			pd->bad_lp_tries++;
	}

	/* Nothing to do if all is well and was previously so. */
	if (!pd->bad_lp_tries)
		return;

	/* Use the RX (red) LED as an error indicator once we've seen AN
	 * failure several times in a row, and also log a message. */
	if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
		reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
				    PMA_PMD_LED_OVERR_REG);
		reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
		if (!bad_lp) {
			reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
		} else {
			reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
			netif_err(efx, link, efx->net_dev,
				  "appears to be plugged into a port"
				  " that is not 10GBASE-T capable. The PHY"
				  " supports 10GBASE-T ONLY, so no link can"
				  " be established\n");
		}
		efx_mdio_write(efx, MDIO_MMD_PMAPMD,
			       PMA_PMD_LED_OVERR_REG, reg);
		/* Reset the counter: 0 if recovered, 1 (= bad_lp) so a
		 * persistent failure doesn't re-trigger the message */
		pd->bad_lp_tries = bad_lp;
	}
}
|
||||
|
||||
static bool sfx7101_link_ok(struct efx_nic *efx)
|
||||
{
|
||||
return efx_mdio_links_ok(efx,
|
||||
MDIO_DEVS_PMAPMD |
|
||||
MDIO_DEVS_PCS |
|
||||
MDIO_DEVS_PHYXS);
|
||||
}
|
||||
|
||||
/* Enable or disable the PHY XS near-end loopback bit according to
 * whether PHYXS loopback is the currently requested mode */
static void tenxpress_ext_loopback(struct efx_nic *efx)
{
	efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
			  1 << LOOPBACK_NEAR_LBN,
			  efx->loopback_mode == LOOPBACK_PHYXS);
}
|
||||
|
||||
/* Propagate the PHY_MODE_LOW_POWER flag to all required MMDs */
static void tenxpress_low_power(struct efx_nic *efx)
{
	efx_mdio_set_mmds_lpower(
		efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
		TENXPRESS_REQUIRED_DEVS);
}
|
||||
|
||||
/* Apply the currently requested phy_mode / loopback_mode to the
 * hardware, issuing a special software reset when leaving external
 * loopback or returning to normal mode.  Always returns 0. */
static int tenxpress_phy_reconfigure(struct efx_nic *efx)
{
	struct tenxpress_phy_data *phy_data = efx->phy_data;
	bool phy_mode_change, loop_reset;

	/* In OFF or SPECIAL mode just record the request; the hardware
	 * is not reconfigured until that mode is left */
	if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
		phy_data->phy_mode = efx->phy_mode;
		return 0;
	}

	/* Returning to normal operation from a non-normal mode? */
	phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
			   phy_data->phy_mode != PHY_MODE_NORMAL);
	/* Leaving an external loopback, or GPHY loopback toggled? */
	loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
		      LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));

	if (loop_reset || phy_mode_change) {
		tenxpress_special_reset(efx);
		falcon_reset_xaui(efx);
	}

	tenxpress_low_power(efx);
	efx_mdio_transmit_disable(efx);
	efx_mdio_phy_reconfigure(efx);
	tenxpress_ext_loopback(efx);
	efx_mdio_an_reconfigure(efx);

	/* Remember what we applied, for the next reconfigure */
	phy_data->loopback_mode = efx->loopback_mode;
	phy_data->phy_mode = efx->phy_mode;

	return 0;
}
|
||||
|
||||
static void
|
||||
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
|
||||
|
||||
/* Poll for link state changes.
 * Updates efx->link_state (the SFX7101 link is always 10G full duplex)
 * and returns true if anything changed since the previous poll. */
static bool tenxpress_phy_poll(struct efx_nic *efx)
{
	struct efx_link_state old_state = efx->link_state;

	efx->link_state.up = sfx7101_link_ok(efx);
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;
	efx->link_state.fc = efx_mdio_get_pause(efx);

	/* Track repeated autoneg failures against a non-10G partner */
	sfx7101_check_bad_lp(efx, efx->link_state.up);

	return !efx_link_state_equal(&efx->link_state, &old_state);
}
|
||||
|
||||
/* Shut the PHY down: power off the LNPGA and wait for the powerdown
 * to complete before the board fini can cut power to the PHY */
static void sfx7101_phy_fini(struct efx_nic *efx)
{
	int reg;

	/* Power down the LNPGA */
	reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);

	/* Waiting here ensures that the board fini, which can turn
	 * off the power to the PHY, won't get run until the LNPGA
	 * powerdown has been given long enough to complete. */
	schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
|
||||
|
||||
static void tenxpress_phy_remove(struct efx_nic *efx)
|
||||
{
|
||||
kfree(efx->phy_data);
|
||||
efx->phy_data = NULL;
|
||||
}
|
||||
|
||||
|
||||
/* Override the RX, TX and link LEDs */
|
||||
void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
|
||||
{
|
||||
int reg;
|
||||
|
||||
switch (mode) {
|
||||
case EFX_LED_OFF:
|
||||
reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
|
||||
(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
|
||||
(PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
|
||||
break;
|
||||
case EFX_LED_ON:
|
||||
reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
|
||||
(PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
|
||||
(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
|
||||
break;
|
||||
default:
|
||||
reg = SFX7101_PMA_PMD_LED_DEFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
|
||||
}
|
||||
|
||||
/* Names of the SFX7101 PHY self-tests, indexed as in the results array
 * filled by sfx7101_run_tests() */
static const char *const sfx7101_test_names[] = {
	"bist"
};
|
||||
|
||||
static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
|
||||
{
|
||||
if (index < ARRAY_SIZE(sfx7101_test_names))
|
||||
return sfx7101_test_names[index];
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Run the PHY self-tests.  Only acts when ETH_TEST_FL_OFFLINE is set;
 * results[0] records the BIST outcome (1 = pass, -1 = fail).
 * Returns 0 on success or the reset's error code. */
static int
sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
	int rc;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return 0;

	/* BIST is automatically run after a special software reset */
	rc = tenxpress_special_reset(efx);
	results[0] = rc ? -1 : 1;

	/* The reset dropped the link; restart autonegotiation */
	efx_mdio_an_reconfigure(efx);

	return rc;
}
|
||||
|
||||
/* Fill in ethtool settings: read the 10GBASE-T advertisement and link
 * partner ability from the AN MMD and hand off to the generic Clause 45
 * helper. */
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	u32 adv = 0, lpa = 0;
	int reg;

	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
	if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
		adv |= ADVERTISED_10000baseT_Full;
	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
	if (reg & MDIO_AN_10GBT_STAT_LP10G)
		lpa |= ADVERTISED_10000baseT_Full;

	mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);

	/* In loopback, the PHY automatically brings up the correct interface,
	 * but doesn't advertise the correct speed. So override it */
	if (LOOPBACK_EXTERNAL(efx))
		ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
|
||||
|
||||
static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
|
||||
{
|
||||
if (!ecmd->autoneg)
|
||||
return -EINVAL;
|
||||
|
||||
return efx_mdio_set_settings(efx, ecmd);
|
||||
}
|
||||
|
||||
/* Set or clear the 10GBASE-T advertisement bit from an ethtool
 * advertising mask */
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
	efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
			  MDIO_AN_10GBT_CTRL_ADV10G,
			  advertising & ADVERTISED_10000baseT_Full);
}
|
||||
|
||||
/* PHY operations table for the SFX7101 10GBASE-T PHY on Falcon boards */
const struct efx_phy_operations falcon_sfx7101_phy_ops = {
	.probe		  = tenxpress_phy_probe,
	.init		  = tenxpress_phy_init,
	.reconfigure	  = tenxpress_phy_reconfigure,
	.poll		  = tenxpress_phy_poll,
	.fini		  = sfx7101_phy_fini,
	.remove		  = tenxpress_phy_remove,
	.get_settings	  = tenxpress_get_settings,
	.set_settings	  = tenxpress_set_settings,
	.set_npage_adv    = sfx7101_set_npage_adv,
	.test_alive	  = efx_mdio_test_alive,
	.test_name	  = sfx7101_test_name,
	.run_tests	  = sfx7101_run_tests,
};
|
||||
1332
drivers/net/ethernet/sfc/tx.c
Normal file
1332
drivers/net/ethernet/sfc/tx.c
Normal file
File diff suppressed because it is too large
Load diff
560
drivers/net/ethernet/sfc/txc43128_phy.c
Normal file
560
drivers/net/ethernet/sfc/txc43128_phy.c
Normal file
|
|
@ -0,0 +1,560 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2006-2011 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Driver for Transwitch/Mysticom CX4 retimer
|
||||
* see www.transwitch.com, part is TXC-43128
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include "efx.h"
|
||||
#include "mdio_10g.h"
|
||||
#include "phy.h"
|
||||
#include "nic.h"
|
||||
|
||||
/* We expect these MMDs to be in the package */
|
||||
#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
|
||||
MDIO_DEVS_PMAPMD | \
|
||||
MDIO_DEVS_PHYXS)
|
||||
|
||||
#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
|
||||
(1 << LOOPBACK_PMAPMD) | \
|
||||
(1 << LOOPBACK_PHYXS_WS))
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Compile-time config
|
||||
*
|
||||
**************************************************************************
|
||||
*/
|
||||
#define TXCNAME "TXC43128"
|
||||
/* Total length of time we'll wait for the PHY to come out of reset (ms) */
|
||||
#define TXC_MAX_RESET_TIME 500
|
||||
/* Interval between checks (ms) */
|
||||
#define TXC_RESET_WAIT 10
|
||||
/* How long to run BIST (us) */
|
||||
#define TXC_BIST_DURATION 50
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Register definitions
|
||||
*
|
||||
**************************************************************************
|
||||
*/
|
||||
|
||||
/* Command register */
|
||||
#define TXC_GLRGS_GLCMD 0xc004
|
||||
/* Useful bits in command register */
|
||||
/* Lane power-down */
|
||||
#define TXC_GLCMD_L01PD_LBN 5
|
||||
#define TXC_GLCMD_L23PD_LBN 6
|
||||
/* Limited SW reset: preserves configuration but
|
||||
* initiates a logic reset. Self-clearing */
|
||||
#define TXC_GLCMD_LMTSWRST_LBN 14
|
||||
|
||||
/* Signal Quality Control */
|
||||
#define TXC_GLRGS_GSGQLCTL 0xc01a
|
||||
/* Enable bit */
|
||||
#define TXC_GSGQLCT_SGQLEN_LBN 15
|
||||
/* Lane selection */
|
||||
#define TXC_GSGQLCT_LNSL_LBN 13
|
||||
#define TXC_GSGQLCT_LNSL_WIDTH 2
|
||||
|
||||
/* Analog TX control */
|
||||
#define TXC_ALRGS_ATXCTL 0xc040
|
||||
/* Lane power-down */
|
||||
#define TXC_ATXCTL_TXPD3_LBN 15
|
||||
#define TXC_ATXCTL_TXPD2_LBN 14
|
||||
#define TXC_ATXCTL_TXPD1_LBN 13
|
||||
#define TXC_ATXCTL_TXPD0_LBN 12
|
||||
|
||||
/* Amplitude on lanes 0, 1 */
|
||||
#define TXC_ALRGS_ATXAMP0 0xc041
|
||||
/* Amplitude on lanes 2, 3 */
|
||||
#define TXC_ALRGS_ATXAMP1 0xc042
|
||||
/* Bit position of value for lane 0 (or 2) */
|
||||
#define TXC_ATXAMP_LANE02_LBN 3
|
||||
/* Bit position of value for lane 1 (or 3) */
|
||||
#define TXC_ATXAMP_LANE13_LBN 11
|
||||
|
||||
#define TXC_ATXAMP_1280_mV 0
|
||||
#define TXC_ATXAMP_1200_mV 8
|
||||
#define TXC_ATXAMP_1120_mV 12
|
||||
#define TXC_ATXAMP_1060_mV 14
|
||||
#define TXC_ATXAMP_0820_mV 25
|
||||
#define TXC_ATXAMP_0720_mV 26
|
||||
#define TXC_ATXAMP_0580_mV 27
|
||||
#define TXC_ATXAMP_0440_mV 28
|
||||
|
||||
#define TXC_ATXAMP_0820_BOTH \
|
||||
((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
|
||||
| (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
|
||||
|
||||
#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
|
||||
|
||||
/* Preemphasis on lanes 0, 1 */
|
||||
#define TXC_ALRGS_ATXPRE0 0xc043
|
||||
/* Preemphasis on lanes 2, 3 */
|
||||
#define TXC_ALRGS_ATXPRE1 0xc044
|
||||
|
||||
#define TXC_ATXPRE_NONE 0
|
||||
#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
|
||||
|
||||
#define TXC_ALRGS_ARXCTL 0xc045
|
||||
/* Lane power-down */
|
||||
#define TXC_ARXCTL_RXPD3_LBN 15
|
||||
#define TXC_ARXCTL_RXPD2_LBN 14
|
||||
#define TXC_ARXCTL_RXPD1_LBN 13
|
||||
#define TXC_ARXCTL_RXPD0_LBN 12
|
||||
|
||||
/* Main control */
|
||||
#define TXC_MRGS_CTL 0xc340
|
||||
/* Bits in main control */
|
||||
#define TXC_MCTL_RESET_LBN 15 /* Self clear */
|
||||
#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
|
||||
#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
|
||||
|
||||
/* GPIO output */
|
||||
#define TXC_GPIO_OUTPUT 0xc346
|
||||
#define TXC_GPIO_DIR 0xc348
|
||||
|
||||
/* Vendor-specific BIST registers */
|
||||
#define TXC_BIST_CTL 0xc280
|
||||
#define TXC_BIST_TXFRMCNT 0xc281
|
||||
#define TXC_BIST_RX0FRMCNT 0xc282
|
||||
#define TXC_BIST_RX1FRMCNT 0xc283
|
||||
#define TXC_BIST_RX2FRMCNT 0xc284
|
||||
#define TXC_BIST_RX3FRMCNT 0xc285
|
||||
#define TXC_BIST_RX0ERRCNT 0xc286
|
||||
#define TXC_BIST_RX1ERRCNT 0xc287
|
||||
#define TXC_BIST_RX2ERRCNT 0xc288
|
||||
#define TXC_BIST_RX3ERRCNT 0xc289
|
||||
|
||||
/* BIST type (controls bit patter in test) */
|
||||
#define TXC_BIST_CTRL_TYPE_LBN 10
|
||||
#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
|
||||
#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
|
||||
#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
|
||||
#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
|
||||
/* Set this to 1 for 10 bit and 0 for 8 bit */
|
||||
#define TXC_BIST_CTRL_B10EN_LBN 12
|
||||
/* Enable BIST (write 0 to disable) */
|
||||
#define TXC_BIST_CTRL_ENAB_LBN 13
|
||||
/* Stop BIST (self-clears when stop complete) */
|
||||
#define TXC_BIST_CTRL_STOP_LBN 14
|
||||
/* Start BIST (cleared by writing 1 to STOP) */
|
||||
#define TXC_BIST_CTRL_STRT_LBN 15
|
||||
|
||||
/* Mt. Diablo test configuration */
|
||||
#define TXC_MTDIABLO_CTRL 0xc34f
|
||||
#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
|
||||
|
||||
/* Private state for the TXC43128 CX4 retimer driver */
struct txc43128_data {
	unsigned long bug10934_timer;	/* jiffies of last bug-10934 reset */
	enum efx_phy_mode phy_mode;	/* mode applied at last reconfigure */
	enum efx_loopback_mode loopback_mode;	/* loopback at last reconfigure */
};
|
||||
|
||||
/* The PHY sometimes needs a reset to bring the link back up. So long as
|
||||
* it reports link down, we reset it every 5 seconds.
|
||||
*/
|
||||
#define BUG10934_RESET_INTERVAL (5 * HZ)
|
||||
|
||||
/* Perform a reset that doesn't clear configuration changes */
|
||||
static void txc_reset_logic(struct efx_nic *efx);
|
||||
|
||||
/* Set the output value of a gpio */
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
{
	efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
|
||||
|
||||
/* Set up the GPIO direction register */
|
||||
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
|
||||
{
|
||||
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
|
||||
}
|
||||
|
||||
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
|
||||
* global reset (it's less clear what reset of other MMDs does).*/
|
||||
static int txc_reset_phy(struct efx_nic *efx)
|
||||
{
|
||||
int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
|
||||
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
|
||||
TXC_RESET_WAIT);
|
||||
if (rc < 0)
|
||||
goto fail;
|
||||
|
||||
/* Check that all the MMDs we expect are present and responding. */
|
||||
rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
|
||||
if (rc < 0)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Run a single BIST on one MMD.
 *
 * The PMA is first looped back via the Mt. Diablo test register so the
 * test pattern traverses the datapath, then the BIST is enabled,
 * started, allowed to run for TXC_BIST_DURATION microseconds and
 * stopped.  The test passes only if every lane received frames and
 * counted no errors.  Returns 0 on pass, -EIO on any lane failure.
 */
static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
{
	int ctrl, bctl;
	int lane;
	int rc = 0;

	/* Set PMA to test into loopback using Mt Diablo reg as per app note */
	ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
	ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
	efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);

	/* The BIST app. note lists these as 3 distinct steps. */
	/* Set the BIST type */
	bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);

	/* Set the BSTEN bit in the BIST Control register to enable */
	bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);

	/* Set the BSTRT bit in the BIST Control register */
	efx_mdio_write(efx, mmd, TXC_BIST_CTL,
		       bctl | (1 << TXC_BIST_CTRL_STRT_LBN));

	/* Wait for the BIST to run */
	udelay(TXC_BIST_DURATION);

	/* Set the BSTOP bit in the BIST Control register */
	bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);

	/* The STOP bit should go off when things have stopped.
	 * NOTE(review): this poll has no timeout; it relies on the PHY
	 * always clearing STOP eventually — confirm against the databook.
	 */
	while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
		bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);

	/* Check all the error counts are 0 and all the frame counts are
	   non-zero */
	for (lane = 0; lane < 4; lane++) {
		/* RX0ERRCNT..RX3ERRCNT and RX0FRMCNT..RX3FRMCNT are
		 * consecutive registers, so lane indexes both banks */
		int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
		if (count != 0) {
			netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
				  "Lane %d had %d errs\n", lane, count);
			rc = -EIO;
		}
		count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
		if (count == 0) {
			netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
				  "Lane %d got 0 frames\n", lane);
			rc = -EIO;
		}
	}

	if (rc == 0)
		netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");

	/* Disable BIST */
	efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);

	/* Turn off loopback */
	ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
	efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);

	return rc;
}
|
||||
|
||||
/* Run the BIST used for self-test: the TranSwitch-deterministic
 * pattern on the PCS MMD.  Returns 0 on pass, -EIO on failure. */
static int txc_bist(struct efx_nic *efx)
{
	return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
|
||||
|
||||
/* Push the non-configurable defaults into the PHY. This must be
 * done after every full reset */
static void txc_apply_defaults(struct efx_nic *efx)
{
	int mctrl;

	/* Turn amplitude down and preemphasis off on the host side
	 * (PHY<->MAC) as this is believed less likely to upset Falcon
	 * and no adverse effects have been noted. It probably also
	 * saves a picowatt or two */

	/* Turn off preemphasis */
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);

	/* Turn down the amplitude */
	efx_mdio_write(efx, MDIO_MMD_PHYXS,
		       TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
	efx_mdio_write(efx, MDIO_MMD_PHYXS,
		       TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);

	/* Set the line side amplitude and preemphasis to the databook
	 * defaults as an erratum causes them to be 0 on at least some
	 * PHY rev.s */
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);

	/* Set up the LEDs */
	mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);

	/* Set the Green and Red LEDs to their default modes */
	mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);

	/* Databook recommends doing this after configuration changes */
	txc_reset_logic(efx);

	/* Finally give the board code a chance to do board-specific
	 * PHY initialisation */
	falcon_board(efx)->type->init_phy(efx);
}
|
||||
|
||||
static int txc43128_phy_probe(struct efx_nic *efx)
|
||||
{
|
||||
struct txc43128_data *phy_data;
|
||||
|
||||
/* Allocate phy private storage */
|
||||
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
|
||||
if (!phy_data)
|
||||
return -ENOMEM;
|
||||
efx->phy_data = phy_data;
|
||||
phy_data->phy_mode = efx->phy_mode;
|
||||
|
||||
efx->mdio.mmds = TXC_REQUIRED_DEVS;
|
||||
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
|
||||
|
||||
efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Initialisation entry point for this PHY driver: reset the PHY, run
 * the built-in self-test and then apply the non-configurable defaults.
 * Returns 0 on success or a negative error code. */
static int txc43128_phy_init(struct efx_nic *efx)
{
	int rc = txc_reset_phy(efx);

	if (rc >= 0) {
		rc = txc_bist(efx);
		if (rc >= 0) {
			txc_apply_defaults(efx);
			rc = 0;
		}
	}

	return rc;
}
|
||||
|
||||
/* Set the lane power down state in the global registers */
|
||||
static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
|
||||
{
|
||||
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
|
||||
int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
|
||||
|
||||
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
|
||||
ctl &= ~pd;
|
||||
else
|
||||
ctl |= pd;
|
||||
|
||||
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
|
||||
}
|
||||
|
||||
/* Set the lane power down state in the analog control registers */
|
||||
static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
|
||||
{
|
||||
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
|
||||
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
|
||||
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
|
||||
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
|
||||
int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
|
||||
int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
|
||||
|
||||
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
|
||||
txctl &= ~txpd;
|
||||
rxctl &= ~rxpd;
|
||||
} else {
|
||||
txctl |= txpd;
|
||||
rxctl |= rxpd;
|
||||
}
|
||||
|
||||
efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
|
||||
efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
|
||||
}
|
||||
|
||||
/* Apply the power state in efx->phy_mode to the whole PHY: standard
 * MDIO low-power bits on every required MMD plus the chip's own lane
 * power-down bits in the global and analog register banks */
static void txc_set_power(struct efx_nic *efx)
{
	/* According to the data book, all the MMDs can do low power */
	efx_mdio_set_mmds_lpower(efx,
				 !!(efx->phy_mode & PHY_MODE_LOW_POWER),
				 TXC_REQUIRED_DEVS);

	/* Global register bank is in PCS, PHY XS. These control the host
	 * side and line side settings respectively. */
	txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
	txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);

	/* Analog register bank in PMA/PMD, PHY XS */
	txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
	txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
|
||||
|
||||
static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
|
||||
{
|
||||
int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
|
||||
int tries = 50;
|
||||
|
||||
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
|
||||
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
|
||||
while (tries--) {
|
||||
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
|
||||
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
if (!tries)
|
||||
netif_info(efx, hw, efx->net_dev,
|
||||
TXCNAME " Logic reset timed out!\n");
|
||||
}
|
||||
|
||||
/* Perform a logic reset. This preserves the configuration registers
 * and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
{
	/* The data sheet claims we can do the logic reset on either the
	 * PCS or the PHYXS and the result is a reset of both host- and
	 * line-side logic. */
	txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
|
||||
|
||||
/* Return true iff every required MMD reports link up over MDIO */
static bool txc43128_phy_read_link(struct efx_nic *efx)
{
	return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
|
||||
|
||||
/* Reconfigure the PHY to match efx->phy_mode and efx->loopback_mode.
 * When TX has just been disabled, the PHY is fully reset, defaults are
 * reapplied and the XAUI link is reset; otherwise only the changed
 * settings are applied.  Always returns 0. */
static int txc43128_phy_reconfigure(struct efx_nic *efx)
{
	struct txc43128_data *phy_data = efx->phy_data;
	/* Bits that differ between the requested and last-applied mode */
	enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
	bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);

	/* TX_DISABLED is newly set: full reset and reapply defaults */
	if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
		txc_reset_phy(efx);
		txc_apply_defaults(efx);
		falcon_reset_xaui(efx);
		mode_change &= ~PHY_MODE_TX_DISABLED;
	}

	efx_mdio_transmit_disable(efx);
	efx_mdio_phy_reconfigure(efx);
	if (mode_change & PHY_MODE_LOW_POWER)
		txc_set_power(efx);

	/* The data sheet claims this is required after every reconfiguration
	 * (note at end of 7.1), but we mustn't do it when nothing changes as
	 * it glitches the link, and reconfigure gets called on link change,
	 * so we get an IRQ storm on link up. */
	if (loop_change || mode_change)
		txc_reset_logic(efx);

	/* Remember what was applied so the next call can detect changes */
	phy_data->phy_mode = efx->phy_mode;
	phy_data->loopback_mode = efx->loopback_mode;

	return 0;
}
|
||||
|
||||
/* Shutdown entry point: stop LASI link-event generation */
static void txc43128_phy_fini(struct efx_nic *efx)
{
	/* Disable link events */
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
|
||||
|
||||
static void txc43128_phy_remove(struct efx_nic *efx)
|
||||
{
|
||||
kfree(efx->phy_data);
|
||||
efx->phy_data = NULL;
|
||||
}
|
||||
|
||||
/* Periodic callback: this exists mainly to poll link status as we
|
||||
* don't use LASI interrupts */
|
||||
static bool txc43128_phy_poll(struct efx_nic *efx)
|
||||
{
|
||||
struct txc43128_data *data = efx->phy_data;
|
||||
bool was_up = efx->link_state.up;
|
||||
|
||||
efx->link_state.up = txc43128_phy_read_link(efx);
|
||||
efx->link_state.speed = 10000;
|
||||
efx->link_state.fd = true;
|
||||
efx->link_state.fc = efx->wanted_fc;
|
||||
|
||||
if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
|
||||
data->bug10934_timer = jiffies;
|
||||
} else {
|
||||
if (time_after_eq(jiffies, (data->bug10934_timer +
|
||||
BUG10934_RESET_INTERVAL))) {
|
||||
data->bug10934_timer = jiffies;
|
||||
txc_reset_logic(efx);
|
||||
}
|
||||
}
|
||||
|
||||
return efx->link_state.up != was_up;
|
||||
}
|
||||
|
||||
/* Names of the self-tests reported through ethtool, in results order */
static const char *const txc43128_test_names[] = {
	"bist"
};
|
||||
|
||||
static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
|
||||
{
|
||||
if (index < ARRAY_SIZE(txc43128_test_names))
|
||||
return txc43128_test_names[index];
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (!(flags & ETH_TEST_FL_OFFLINE))
|
||||
return 0;
|
||||
|
||||
rc = txc_reset_phy(efx);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
rc = txc_bist(efx);
|
||||
txc_apply_defaults(efx);
|
||||
results[0] = rc ? -1 : 1;
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* ethtool get_settings hook: fill *ecmd from the MDIO registers */
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	mdio45_ethtool_gset(&efx->mdio, ecmd);
}
|
||||
|
||||
/* PHY operations table for the TXC43128, exported to the Falcon code */
const struct efx_phy_operations falcon_txc_phy_ops = {
	.probe = txc43128_phy_probe,
	.init = txc43128_phy_init,
	.reconfigure = txc43128_phy_reconfigure,
	.poll = txc43128_phy_poll,
	.fini = txc43128_phy_fini,
	.remove = txc43128_phy_remove,
	.get_settings = txc43128_get_settings,
	.set_settings = efx_mdio_set_settings,
	.test_alive = efx_mdio_test_alive,
	.run_tests = txc43128_run_tests,
	.test_name = txc43128_test_name,
};
|
||||
255
drivers/net/ethernet/sfc/vfdi.h
Normal file
255
drivers/net/ethernet/sfc/vfdi.h
Normal file
|
|
@ -0,0 +1,255 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2010-2012 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
#ifndef _VFDI_H
|
||||
#define _VFDI_H
|
||||
|
||||
/**
|
||||
* DOC: Virtual Function Driver Interface
|
||||
*
|
||||
* This file contains software structures used to form a two way
|
||||
* communication channel between the VF driver and the PF driver,
|
||||
* named Virtual Function Driver Interface (VFDI).
|
||||
*
|
||||
* For the purposes of VFDI, a page is a memory region with size and
|
||||
* alignment of 4K. All addresses are DMA addresses to be used within
|
||||
* the domain of the relevant VF.
|
||||
*
|
||||
* The only hardware-defined channels for a VF driver to communicate
|
||||
* with the PF driver are the event mailboxes (%FR_CZ_USR_EV
|
||||
* registers). Writing to these registers generates an event with
|
||||
* EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
|
||||
* and USER_EV_REG_VALUE set to the value written. The PF driver may
|
||||
* direct or disable delivery of these events by setting
|
||||
* %FR_CZ_USR_EV_CFG.
|
||||
*
|
||||
* The PF driver can send arbitrary events to arbitrary event queues.
|
||||
* However, for consistency, VFDI events from the PF are defined to
|
||||
* follow the same form and be sent to the first event queue assigned
|
||||
* to the VF while that queue is enabled by the VF driver.
|
||||
*
|
||||
* The general form of the variable bits of VFDI events is:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | DATA | TYPE | SEQ |
|
||||
*
|
||||
* SEQ is a sequence number which should be incremented by 1 (modulo
|
||||
* 256) for each event. The sequence numbers used in each direction
|
||||
* are independent.
|
||||
*
|
||||
* The VF submits requests of type &struct vfdi_req by sending the
|
||||
* address of the request (ADDR) in a series of 4 events:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
|
||||
* | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
|
||||
* | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
|
||||
* | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
|
||||
*
|
||||
* The address must be page-aligned. After receiving such a valid
|
||||
* series of events, the PF driver will attempt to read the request
|
||||
* and write a response to the same address. In case of an invalid
|
||||
* sequence of events or a DMA error, there will be no response.
|
||||
*
|
||||
* The VF driver may request that the PF driver writes status
|
||||
* information into its domain asynchronously. After writing the
|
||||
* status, the PF driver will send an event of the form:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | reserved | VFDI_EV_TYPE_STATUS | SEQ |
|
||||
*
|
||||
* In case the VF must be reset for any reason, the PF driver will
|
||||
* send an event of the form:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | reserved | VFDI_EV_TYPE_RESET | SEQ |
|
||||
*
|
||||
* It is then the responsibility of the VF driver to request
|
||||
* reinitialisation of its queues.
|
||||
*/
|
||||
#define VFDI_EV_SEQ_LBN 24
|
||||
#define VFDI_EV_SEQ_WIDTH 8
|
||||
#define VFDI_EV_TYPE_LBN 16
|
||||
#define VFDI_EV_TYPE_WIDTH 8
|
||||
#define VFDI_EV_TYPE_REQ_WORD0 0
|
||||
#define VFDI_EV_TYPE_REQ_WORD1 1
|
||||
#define VFDI_EV_TYPE_REQ_WORD2 2
|
||||
#define VFDI_EV_TYPE_REQ_WORD3 3
|
||||
#define VFDI_EV_TYPE_STATUS 4
|
||||
#define VFDI_EV_TYPE_RESET 5
|
||||
#define VFDI_EV_DATA_LBN 0
|
||||
#define VFDI_EV_DATA_WIDTH 16
|
||||
|
||||
/* Addressing information for one VFDI peer: MAC address plus VLAN tag */
struct vfdi_endpoint {
	u8 mac_addr[ETH_ALEN];	/* Ethernet MAC address */
	__be16 tci;		/* VLAN tag control information */
};
|
||||
|
||||
/**
|
||||
* enum vfdi_op - VFDI operation enumeration
|
||||
* @VFDI_OP_RESPONSE: Indicates a response to the request.
|
||||
* @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
|
||||
* @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
|
||||
* @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
|
||||
* @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
|
||||
* finalize the SRAM entries.
|
||||
 * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
|
||||
* @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
|
||||
* @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
|
||||
* from PF and write the initial status.
|
||||
* @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
|
||||
* updates from PF.
|
||||
*/
|
||||
/* VFDI operation codes; see the kernel-doc comment above for the
 * semantics of each operation */
enum vfdi_op {
	VFDI_OP_RESPONSE = 0,		/* marks a response to a request */
	VFDI_OP_INIT_EVQ = 1,
	VFDI_OP_INIT_RXQ = 2,
	VFDI_OP_INIT_TXQ = 3,
	VFDI_OP_FINI_ALL_QUEUES = 4,
	VFDI_OP_INSERT_FILTER = 5,
	VFDI_OP_REMOVE_ALL_FILTERS = 6,
	VFDI_OP_SET_STATUS_PAGE = 7,
	VFDI_OP_CLEAR_STATUS_PAGE = 8,
	VFDI_OP_LIMIT,			/* one past the highest valid op */
};
|
||||
|
||||
/* Response codes for VFDI operations. Other values may be used in future. */
|
||||
#define VFDI_RC_SUCCESS 0
|
||||
#define VFDI_RC_ENOMEM (-12)
|
||||
#define VFDI_RC_EINVAL (-22)
|
||||
#define VFDI_RC_EOPNOTSUPP (-95)
|
||||
#define VFDI_RC_ETIMEDOUT (-110)
|
||||
|
||||
/**
|
||||
* struct vfdi_req - Request from VF driver to PF driver
|
||||
* @op: Operation code or response indicator, taken from &enum vfdi_op.
|
||||
* @rc: Response code. Set to 0 on success or a negative error code on failure.
|
||||
* @u.init_evq.index: Index of event queue to create.
|
||||
* @u.init_evq.buf_count: Number of 4k buffers backing event queue.
|
||||
* @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
|
||||
* address of each page backing the event queue.
|
||||
* @u.init_rxq.index: Index of receive queue to create.
|
||||
* @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
|
||||
* @u.init_rxq.evq: Instance of event queue to target receive events at.
|
||||
* @u.init_rxq.label: Label used in receive events.
|
||||
* @u.init_rxq.flags: Unused.
|
||||
* @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
|
||||
* address of each page backing the receive queue.
|
||||
* @u.init_txq.index: Index of transmit queue to create.
|
||||
* @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
|
||||
* @u.init_txq.evq: Instance of event queue to target transmit completion
|
||||
* events at.
|
||||
* @u.init_txq.label: Label used in transmit completion events.
|
||||
* @u.init_txq.flags: Checksum offload flags.
|
||||
* @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
|
||||
* address of each page backing the transmit queue.
|
||||
 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
|
||||
* all traffic at this receive queue.
|
||||
* @u.mac_filter.flags: MAC filter flags.
|
||||
* @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
|
||||
* This address must be page-aligned and the PF may write up to a
|
||||
* whole page (allowing for extension of the structure).
|
||||
* @u.set_status_page.peer_page_count: Number of additional pages the VF
|
||||
* has provided into which peer addresses may be DMAd.
|
||||
* @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
|
||||
* If the number of peers exceeds 256, then the VF must provide
|
||||
* additional pages in this array. The PF will then DMA up to
|
||||
* 512 vfdi_endpoint structures into each page. These addresses
|
||||
* must be page-aligned.
|
||||
*/
|
||||
/* Request written by the VF driver and read back with a response by
 * the PF driver; see the kernel-doc comment above for full field
 * semantics.  The union arm in use is selected by @op. */
struct vfdi_req {
	u32 op;		/* operation code, from &enum vfdi_op */
	u32 reserved1;
	s32 rc;		/* response code: 0 or negative error */
	u32 reserved2;
	union {
		struct {		/* VFDI_OP_INIT_EVQ */
			u32 index;
			u32 buf_count;
			u64 addr[];
		} init_evq;
		struct {		/* VFDI_OP_INIT_RXQ */
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_RXQ_FLAG_SCATTER_EN 1
			u32 reserved;
			u64 addr[];
		} init_rxq;
		struct {		/* VFDI_OP_INIT_TXQ */
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
			u32 reserved;
			u64 addr[];
		} init_txq;
		struct {		/* VFDI_OP_INSERT_FILTER */
			u32 rxq;
			u32 flags;
#define VFDI_MAC_FILTER_FLAG_RSS 1
#define VFDI_MAC_FILTER_FLAG_SCATTER 2
		} mac_filter;
		struct {		/* VFDI_OP_SET_STATUS_PAGE */
			u64 dma_addr;
			u64 peer_page_count;
			u64 peer_page_addr[];
		} set_status_page;
	} u;
};
|
||||
|
||||
/**
|
||||
* struct vfdi_status - Status provided by PF driver to VF driver
|
||||
* @generation_start: A generation count DMA'd to VF *before* the
|
||||
* rest of the structure.
|
||||
* @generation_end: A generation count DMA'd to VF *after* the
|
||||
* rest of the structure.
|
||||
* @version: Version of this structure; currently set to 1. Later
|
||||
* versions must either be layout-compatible or only be sent to VFs
|
||||
* that specifically request them.
|
||||
* @length: Total length of this structure including embedded tables
|
||||
* @vi_scale: log2 the number of VIs available on this VF. This quantity
|
||||
* is used by the hardware for register decoding.
|
||||
* @max_tx_channels: The maximum number of transmit queues the VF can use.
|
||||
* @rss_rxq_count: The number of receive queues present in the shared RSS
|
||||
* indirection table.
|
||||
* @peer_count: Total number of peers in the complete peer list. If larger
|
||||
* than ARRAY_SIZE(%peers), then the VF must provide sufficient
|
||||
* additional pages each of which is filled with vfdi_endpoint structures.
|
||||
* @local: The MAC address and outer VLAN tag of *this* VF
|
||||
* @peers: Table of peer addresses. The @tci fields in these structures
|
||||
* are currently unused and must be ignored. Additional peers are
|
||||
* written into any additional pages provided by the VF.
|
||||
* @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
|
||||
* for interrupt moderation timers, in nanoseconds. This member is only
|
||||
* present if @length is sufficiently large.
|
||||
*/
|
||||
/* Status block DMAd into the VF's domain by the PF driver; see the
 * kernel-doc comment above for full field semantics.  The two
 * generation counts bracket the rest of the structure so the VF can
 * detect a torn update. */
struct vfdi_status {
	u32 generation_start;	/* written before the rest of the struct */
	u32 generation_end;	/* written after the rest of the struct */
	u32 version;		/* currently 1 */
	u32 length;		/* total length including embedded tables */
	u8 vi_scale;		/* log2 of the number of VIs on this VF */
	u8 max_tx_channels;
	u8 rss_rxq_count;
	u8 reserved1;
	u16 peer_count;		/* total peers, may exceed ARRAY_SIZE(peers) */
	u16 reserved2;
	struct vfdi_endpoint local;	/* this VF's own address */
	struct vfdi_endpoint peers[256];

	/* Members below here extend version 1 of this structure */
	u32 timer_quantum_ns;	/* only present if @length is large enough */
};
|
||||
|
||||
#endif
|
||||
53
drivers/net/ethernet/sfc/workarounds.h
Normal file
53
drivers/net/ethernet/sfc/workarounds.h
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_WORKAROUNDS_H
|
||||
#define EFX_WORKAROUNDS_H
|
||||
|
||||
/*
|
||||
* Hardware workarounds.
|
||||
* Bug numbers are from Solarflare's Bugzilla.
|
||||
*/
|
||||
|
||||
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
|
||||
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
|
||||
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
|
||||
#define EFX_WORKAROUND_10G(efx) 1
|
||||
|
||||
/* Bit-bashed I2C reads cause performance drop */
|
||||
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
|
||||
/* Truncated IPv4 packets can confuse the TX packet parser */
|
||||
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
|
||||
/* Legacy interrupt storm when interrupt fifo fills */
|
||||
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
|
||||
|
||||
/* Spurious parity errors in TSORT buffers */
|
||||
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
|
||||
/* Unaligned read request >512 bytes after aligning may break TSORT */
|
||||
#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
|
||||
/* iSCSI parsing errors */
|
||||
#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
|
||||
/* RX events go missing */
|
||||
#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
|
||||
/* RX_RESET on A1 */
|
||||
#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
|
||||
/* Increase filter depth to avoid RX_RESET */
|
||||
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
|
||||
/* Flushes may never complete */
|
||||
#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
|
||||
/* Leak overlength packets rather than free */
|
||||
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
|
||||
|
||||
/* Lockup when writing event block registers at gen2/gen3 */
|
||||
#define EFX_EF10_WORKAROUND_35388(efx) \
|
||||
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
|
||||
#define EFX_WORKAROUND_35388(efx) \
|
||||
(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
|
||||
|
||||
#endif /* EFX_WORKAROUNDS_H */
|
||||
Loading…
Add table
Add a link
Reference in a new issue