Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,345 @@
#
# Intel network device configuration
#
config NET_VENDOR_INTEL
bool "Intel devices"
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Intel cards. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_INTEL
config E100
tristate "Intel(R) PRO/100+ support"
depends on PCI
select MII
---help---
This driver supports Intel(R) PRO/100 family of adapters.
To verify that your adapter is supported, find the board ID number
on the adapter. Look for a label that has a barcode and a number
in the format 123456-001 (six digits hyphen three digits).
Use the above information and the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
to identify the adapter.
For the latest Intel PRO/100 network driver for Linux, see:
<http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
To compile this driver as a module, choose M here. The module
will be called e100.
config E1000
tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called e1000.
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
select PTP_1588_CLOCK
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
use the regular e1000 driver. For more information on how to
identify your adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called e1000e.
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
select PTP_1588_CLOCK
select I2C
select I2C_ALGOBIT
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called igb.
config IGB_HWMON
bool "Intel(R) PCI-Express Gigabit adapters HWMON support"
default y
depends on IGB && HWMON && !(IGB=y && HWMON=m)
---help---
Say Y if you want to expose thermal sensor data on Intel devices.
Some of our devices contain thermal sensors, both external and internal.
This data is available via the hwmon sysfs interface, which exposes
the onboard sensors.
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
default y
depends on IGB && DCA && !(IGB=y && DCA=m)
---help---
Say Y here if you want to use Direct Cache Access (DCA) in the
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
config IGBVF
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
---help---
This driver supports Intel(R) PRO/10GbE family of adapters for
PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
instead. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/ixgb.txt>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called ixgbe.
config IXGBE_HWMON
bool "Intel(R) 10GbE PCI Express adapters HWMON support"
default y
depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m)
---help---
Say Y if you want to expose the thermal sensor data on some of
our cards, via a hwmon sysfs interface.
config IXGBE_DCA
bool "Direct Cache Access (DCA) Support"
default y
depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
---help---
Say Y here if you want to use Direct Cache Access (DCA) in the
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
config IXGBE_DCB
bool "Data Center Bridging (DCB) Support"
default n
depends on IXGBE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the
driver.
If unsure, say N.
config IXGBEVF
tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
---help---
This driver supports Intel(R) PCI Express virtual functions for the
Intel(R) ixgbe driver. For more information on how to identify your
adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/ixgbevf.txt>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
select PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called i40e.
config I40E_VXLAN
bool "Virtual eXtensible Local Area Network Support"
default n
depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
---help---
This allows one to create VXLAN virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
to tunnel virtual network infrastructure in virtualized environments.
Say Y here if you want to use Virtual eXtensible Local Area Network
(VXLAN) in the driver.
config I40E_DCB
bool "Data Center Bridging (DCB) Support"
default n
depends on I40E && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the
driver.
If unsure, say N.
config I40E_FCOE
bool "Fibre Channel over Ethernet (FCoE)"
default n
depends on I40E && DCB && FCOE
---help---
Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
in the driver. This will create new netdev for exclusive FCoE
use with XL710 FCoE offloads enabled.
If unsure, say N.
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
---help---
This driver supports Intel(R) XL710 and X710 virtual functions.
For more information on how to identify your adapter, go to the
Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called i40evf. MSI-X interrupt support is required
for this driver to work correctly.
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
depends on PCI_MSI
select PTP_1588_CLOCK
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
for this driver to work correctly.
config FM10K_VXLAN
bool "Virtual eXtensible Local Area Network Support"
default n
depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
---help---
This allows one to create VXLAN virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
to tunnel virtual network infrastructure in virtualized environments.
Say Y here if you want to use Virtual eXtensible Local Area Network
(VXLAN) in the driver.
endif # NET_VENDOR_INTEL


@@ -0,0 +1,15 @@
#
# Makefile for the Intel network device drivers.
#
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_I40EVF) += i40evf/
obj-$(CONFIG_FM10K) += fm10k/

File diff suppressed because it is too large


@@ -0,0 +1,35 @@
################################################################################
#
# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2006 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
#
obj-$(CONFIG_E1000) += e1000.o
e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o


@@ -0,0 +1,374 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _E1000_H_
#define _E1000_H_
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/pkt_sched.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <net/checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#define BAR_0 0
#define BAR_1 1
#define BAR_5 5
#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
struct e1000_adapter;
#include "e1000_hw.h"
#define E1000_MAX_INTR 10
/*
* Count for polling __E1000_RESET condition every 10-20msec.
*/
#define E1000_CHECK_RESET_COUNT 50
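/* Illustrative sketch (not from the original file): a teardown or suspend
 * path would typically consume this count by polling a reset bit in
 * adapter->flags, sleeping 10-20 ms per iteration, e.g.:
 *
 *	int count = E1000_CHECK_RESET_COUNT;
 *
 *	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
 *		usleep_range(10000, 20000);
 */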
/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256
#define E1000_MIN_TXD 48
#define E1000_MAX_82544_TXD 4096
#define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 256
#define E1000_MIN_RXD 48
#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
#define E1000_RXBUFFER_512 512
#define E1000_RXBUFFER_1024 1024
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
#define E1000_SMARTSPEED_DOWNSHIFT 3
#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
#define E1000_PBA_BYTES_SHIFT 0xA
#define E1000_TX_HEAD_ADDR_SHIFT 7
#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
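/* Illustrative sketch (assumption, not upstream code): the Rx clean loop
 * batches refills so the tail register is written at most once per
 * E1000_RX_BUFFER_WRITE cleaned descriptors, roughly:
 *
 *	if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
 *		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 *		cleaned_count = 0;
 *	}
 */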
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
bool mapped_as_page;
unsigned short segs;
unsigned int bytecount;
};
struct e1000_rx_buffer {
union {
struct page *page; /* jumbo: alloc_page */
u8 *data; /* else, netdev_alloc_frag */
} rxbuf;
dma_addr_t dma;
};
struct e1000_tx_ring {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
/* number of descriptors in the ring */
unsigned int count;
/* next descriptor to associate a buffer with */
unsigned int next_to_use;
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_tx_buffer *buffer_info;
u16 tdh;
u16 tdt;
bool last_tx_tso;
};
struct e1000_rx_ring {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
/* number of descriptors in the ring */
unsigned int count;
/* next descriptor to associate a buffer with */
unsigned int next_to_use;
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_rx_buffer *buffer_info;
struct sk_buff *rx_skb_top;
/* cpu for rx queue */
int cpu;
u16 rdh;
u16 rdt;
};
#define E1000_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) \
? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
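/* Worked example (illustrative): with count = 256, next_to_clean = 10 and
 * next_to_use = 200, 190 descriptors are in flight, so
 * E1000_DESC_UNUSED(R) = 256 + 10 - 200 - 1 = 65 free slots. One slot is
 * always left unused so a full ring can be told apart from an empty one.
 */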
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
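/* Illustrative usage (assumption): hot paths index descriptors straight
 * out of the ring memory with the macros above, e.g.:
 *
 *	struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 *	union e1000_rx_desc_extended *rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 */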
/* board specific private data structure */
struct e1000_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 mng_vlan_id;
u32 bd_number;
u32 rx_buffer_len;
u32 wol;
u32 smartspeed;
u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* Interrupt Throttle Rate */
u32 itr;
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
u8 fc_autoneg;
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
unsigned int restart_queue;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
u32 gotcl;
u64 gotcl_old;
u64 tpt_old;
u64 colc_old;
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u8 tx_timeout_factor;
atomic_t tx_fifo_stall;
bool pcix_82544;
bool detect_tx_hung;
bool dump_buffers;
/* RX */
bool (*clean_rx)(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
void (*alloc_rx_buf)(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
struct e1000_rx_ring *rx_ring; /* One per active queue */
struct napi_struct napi;
int num_tx_queues;
int num_rx_queues;
u64 hw_csum_err;
u64 hw_csum_good;
u32 alloc_rx_buff_failed;
u32 rx_int_delay;
u32 rx_abs_int_delay;
bool rx_csum;
u32 gorcl;
u64 gorcl_old;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
u32 test_icr;
struct e1000_tx_ring test_tx_ring;
struct e1000_rx_ring test_rx_ring;
int msg_enable;
/* to not mess up cache alignment, always add to the bottom */
bool tso_force;
bool smart_power_down; /* phy smart power down */
bool quad_port_a;
unsigned long flags;
u32 eeprom_wol;
/* for ioport free */
int bars;
int need_ioport;
bool discarding;
struct work_struct reset_task;
struct delayed_work watchdog_task;
struct delayed_work fifo_stall_task;
struct delayed_work phy_info_task;
};
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
__E1000_DOWN
};
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
#define e_dbg(format, arg...) \
netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_warn(msglvl, format, arg...) \
netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_notice(msglvl, format, arg...) \
netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
dev_warn(&adapter->pdev->dev, format, ## arg)
#define e_dev_err(format, arg...) \
dev_err(&adapter->pdev->dev, format, ## arg)
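/* Illustrative usage (assumption, not upstream code): the netif_*-based
 * wrappers take a message class that is matched against
 * adapter->msg_enable, while the dev_*-based ones log against the PCI
 * device unconditionally, e.g.:
 *
 *	e_err(drv, "Tx timeout, resetting adapter\n");
 *	e_dev_info("board #%i initialized\n", adapter->bd_number);
 */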
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_update_stats(struct e1000_adapter *adapter);
bool e1000_has_link(struct e1000_adapter *adapter);
void e1000_power_up_phy(struct e1000_adapter *);
void e1000_set_ethtool_ops(struct net_device *netdev);
void e1000_check_options(struct e1000_adapter *adapter);
char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,109 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* glue for the OS independent part of e1000
* includes register access macros
*/
#ifndef _E1000_OSDEP_H_
#define _E1000_OSDEP_H_
#include <asm/io.h>
#define CONFIG_RAM_BASE 0x60000
#define GBE_CONFIG_OFFSET 0x0
#define GBE_CONFIG_RAM_BASE \
((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
#define GBE_CONFIG_BASE_VIRT \
((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
(iowrite16_rep(base + offset, data, count))
#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
(ioread16_rep(base + (offset << 1), data, count))
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
#define ew32(reg, value) \
(writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg))))
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 2))))
#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
readl((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 2)))
#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
writew((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1))))
#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
readw((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1)))
#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
writeb((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset))))
#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
readb((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset)))
#define E1000_WRITE_FLUSH() er32(STATUS)
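/* Illustrative read-modify-write sketch (assumption): with a local
 * "struct e1000_hw *hw" in scope, the macros above become MMIO accesses,
 * e.g. enabling the receiver:
 *
 *	u32 rctl = er32(RCTL);
 *
 *	rctl |= E1000_RCTL_EN;
 *	ew32(RCTL, rctl);
 *	E1000_WRITE_FLUSH();
 */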
#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
writel((value), ((a)->flash_address + reg)))
#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
readl((a)->flash_address + reg))
#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
writew((value), ((a)->flash_address + reg)))
#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
readw((a)->flash_address + reg))
#endif /* _E1000_OSDEP_H_ */


@@ -0,0 +1,754 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
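/* Illustrative expansion (for reference): E1000_PARAM(TxDescriptors, ...)
 * below becomes, roughly:
 *
 *	static int TxDescriptors[E1000_MAX_NIC + 1] =
 *		{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };
 *	static unsigned int num_TxDescriptors;
 *	module_param_array_named(TxDescriptors, TxDescriptors, int,
 *				 &num_TxDescriptors, 0);
 *	MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
 *
 * so each board can be given its own value, e.g. TxDescriptors=256,4096.
 */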
/* Transmit Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Speed Override
*
* Valid Range: 0, 10, 100, 1000
* - 0 - auto-negotiate at all supported speeds
* - 10 - only link at 10 Mbps
* - 100 - only link at 100 Mbps
* - 1000 - only link at 1000 Mbps
*
* Default Value: 0
*/
E1000_PARAM(Speed, "Speed setting");
/* User Specified Duplex Override
*
* Valid Range: 0-2
* - 0 - auto-negotiate for duplex
* - 1 - only link at half duplex
* - 2 - only link at full duplex
*
* Default Value: 0
*/
E1000_PARAM(Duplex, "Duplex setting");
/* Auto-negotiation Advertisement Override
*
* Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
*
* The AutoNeg value is a bit mask describing which speed and duplex
* combinations should be advertised during auto-negotiation.
* The supported speed and duplex modes are listed below
*
* Bit           7     6     5     4     3     2     1     0
* Speed (Mbps)  N/A   N/A   1000  N/A   100   100   10    10
* Duplex                    Full        Full  Half  Full  Half
*
* Default Value: 0x2F (copper); 0x20 (fiber)
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
* Valid Range: 0-3
* - 0 - No Flow Control
* - 1 - Rx only, respond to PAUSE frames but do not generate them
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
* Valid Range: 0, 1
* - 0 - disables all checksum offload
* - 1 - enables receive IP/TCP/UDP checksum offload
on 82543 and newer NICs
*
* Default Value: 1
*/
E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay typically needs to be set to something non-zero
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define DEFAULT_RDTR 0
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define DEFAULT_RADV 8
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
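/* Illustrative sketch (assumption): the resulting interrupts/sec value is
 * converted elsewhere in the driver into the ITR register's 256 ns units,
 * roughly:
 *
 *	ew32(ITR, 1000000000 / (adapter->itr * 256));
 */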
/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
* Default Value: 0 (disabled)
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
const struct e1000_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
const struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
e_dev_info("%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
static void e1000_check_fiber_options(struct e1000_adapter *adapter);
static void e1000_check_copper_options(struct e1000_adapter *adapter);
/**
* e1000_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
e_dev_warn("Warning: no configuration for board #%i "
"using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
struct e1000_tx_ring *tx_ring = adapter->tx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_TXD),
.def = E1000_DEFAULT_TXD,
.arg = { .r = {
.min = E1000_MIN_TXD,
.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
}}
};
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
e1000_validate_option(&tx_ring->count, &opt, adapter);
tx_ring->count = ALIGN(tx_ring->count,
REQ_TX_DESCRIPTOR_MULTIPLE);
} else {
tx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
struct e1000_rx_ring *rx_ring = adapter->rx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_RXD),
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD
}}
};
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
e1000_validate_option(&rx_ring->count, &opt, adapter);
rx_ring->count = ALIGN(rx_ring->count,
REQ_RX_DESCRIPTOR_MULTIPLE);
} else {
rx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
opt = (struct e1000_option) {
.type = enable_option,
.name = "Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
e1000_validate_option(&rx_csum, &opt, adapter);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
static const struct e1000_opt_list fc_list[] = {
{ E1000_FC_NONE, "Flow Control Disabled" },
{ E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
{ E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
{ E1000_FC_FULL, "Flow Control Enabled" },
{ E1000_FC_DEFAULT, "Flow Control Hardware Default" }
};
opt = (struct e1000_option) {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = E1000_FC_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(fc_list),
.p = fc_list }}
};
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
}
{ /* Transmit Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TXDELAY,
.max = MAX_TXDELAY }}
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TADV),
.def = DEFAULT_TADV,
.arg = { .r = { .min = MIN_TXABSDELAY,
.max = MAX_TXABSDELAY }}
};
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
}
{ /* Receive Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RXDELAY,
.max = MAX_RXDELAY }}
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Receive Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RADV),
.def = DEFAULT_RADV,
.arg = { .r = { .min = MIN_RXABSDELAY,
.max = MAX_RXABSDELAY }}
};
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
}
{ /* Interrupt Throttling Rate */
opt = (struct e1000_option) {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of " __MODULE_STRING(DEFAULT_ITR),
.def = DEFAULT_ITR,
.arg = { .r = { .min = MIN_ITR,
.max = MAX_ITR }}
};
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
e_dev_info("%s turned off\n", opt.name);
break;
case 1:
e_dev_info("%s set to dynamic mode\n",
opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
e_dev_info("%s set to dynamic conservative "
"mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 4:
e_dev_info("%s set to simplified "
"(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
* used as control
*/
adapter->itr_setting = adapter->itr & ~3;
break;
}
} else {
adapter->itr_setting = opt.def;
adapter->itr = 20000;
}
}
{ /* Smart Power Down */
opt = (struct e1000_option) {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
.def = OPTION_DISABLED
};
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
adapter->smart_power_down = spd;
} else {
adapter->smart_power_down = opt.def;
}
}
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
case e1000_media_type_internal_serdes:
e1000_check_fiber_options(adapter);
break;
case e1000_media_type_copper:
e1000_check_copper_options(adapter);
break;
default:
BUG();
}
}
/**
* e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
* @adapter: board private structure
*
* Handles speed and duplex options on fiber adapters
**/
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
e_dev_info("Speed not valid for fiber adapters, parameter "
"ignored\n");
}
if (num_Duplex > bd) {
e_dev_info("Duplex not valid for fiber adapters, parameter "
"ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
"adapters, parameter ignored\n");
}
}
/**
* e1000_check_copper_options - Range Checking for Link Options, Copper Version
* @adapter: board private structure
*
* Handles speed and duplex options on copper adapters
**/
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
unsigned int speed, dplx, an;
int bd = adapter->bd_number;
{ /* Speed */
static const struct e1000_opt_list speed_list[] = {
{ 0, "" },
{ SPEED_10, "" },
{ SPEED_100, "" },
{ SPEED_1000, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Speed",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(speed_list),
.p = speed_list }}
};
if (num_Speed > bd) {
speed = Speed[bd];
e1000_validate_option(&speed, &opt, adapter);
} else {
speed = opt.def;
}
}
{ /* Duplex */
static const struct e1000_opt_list dplx_list[] = {
{ 0, "" },
{ HALF_DUPLEX, "" },
{ FULL_DUPLEX, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Duplex",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
.p = dplx_list }}
};
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
} else {
dplx = opt.def;
}
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
e_dev_info("AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
#define AA "AutoNeg advertising "
{{ 0x01, AA "10/HD" },
{ 0x02, AA "10/FD" },
{ 0x03, AA "10/FD, 10/HD" },
{ 0x04, AA "100/HD" },
{ 0x05, AA "100/HD, 10/HD" },
{ 0x06, AA "100/HD, 10/FD" },
{ 0x07, AA "100/HD, 10/FD, 10/HD" },
{ 0x08, AA "100/FD" },
{ 0x09, AA "100/FD, 10/HD" },
{ 0x0a, AA "100/FD, 10/FD" },
{ 0x0b, AA "100/FD, 10/FD, 10/HD" },
{ 0x0c, AA "100/FD, 100/HD" },
{ 0x0d, AA "100/FD, 100/HD, 10/HD" },
{ 0x0e, AA "100/FD, 100/HD, 10/FD" },
{ 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
{ 0x20, AA "1000/FD" },
{ 0x21, AA "1000/FD, 10/HD" },
{ 0x22, AA "1000/FD, 10/FD" },
{ 0x23, AA "1000/FD, 10/FD, 10/HD" },
{ 0x24, AA "1000/FD, 100/HD" },
{ 0x25, AA "1000/FD, 100/HD, 10/HD" },
{ 0x26, AA "1000/FD, 100/HD, 10/FD" },
{ 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
{ 0x28, AA "1000/FD, 100/FD" },
{ 0x29, AA "1000/FD, 100/FD, 10/HD" },
{ 0x2a, AA "1000/FD, 100/FD, 10/FD" },
{ 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
{ 0x2c, AA "1000/FD, 100/FD, 100/HD" },
{ 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
{ 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
{ 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "AutoNeg",
.err = "parameter ignored",
.def = AUTONEG_ADV_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(an_list),
.p = an_list }}
};
if (num_AutoNeg > bd) {
an = AutoNeg[bd];
e1000_validate_option(&an, &opt, adapter);
} else {
an = opt.def;
}
adapter->hw.autoneg_advertised = an;
}
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
e_dev_info("Speed and duplex autonegotiation "
"enabled\n");
break;
case HALF_DUPLEX:
e_dev_info("Half Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
e_dev_info("100 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
"only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
default:
BUG();
}
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
"Setting MDI-X to a compatible value.\n");
}
}

File diff suppressed because it is too large


@@ -0,0 +1,88 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_80003ES2LAN_H_
#define _E1000E_80003ES2LAN_H_
#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
/* PHY Specific Control Register 2 (Page 0, Register 26) */
#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
/* MAC Specific Control Register (Page 2, Register 21) */
/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
#define GG82563_MSCR_TX_CLK_MASK 0x0007
#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
/* DSP Distance Register (Page 5, Register 26)
* 0 = <50M
* 1 = 50-80M
* 2 = 80-100M
* 3 = 110-140M
* 4 = >140M
*/
#define GG82563_DSPD_CABLE_LENGTH 0x0007
/* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
/* Max number of times Kumeran read/write should be validated */
#define GG82563_MAX_KMRN_RETRY 0x5
/* Power Management Control Register (Page 193, Register 20) */
/* 1=Enable SERDES Electrical Idle */
#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
/* In-Band Control Register (Page 194, Register 18) */
#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
#endif

File diff suppressed because it is too large


@@ -0,0 +1,53 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_82571_H_
#define _E1000E_82571_H_
#define ID_LED_RESERVED_F746 0xF746
#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_ON2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
/* Intr Throttling - RW */
#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAC_MASK_82574 0x01F00000
#define E1000_IVAR_INT_ALLOC_VALID 0x8
/* Manageability Operation Mode mask */
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
#define E1000_RECEIVE_ERROR_COUNTER 21
#define E1000_RECEIVE_ERROR_MAX 0xFFFF
bool e1000_check_phy_82574(struct e1000_hw *hw);
bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
#endif


@@ -0,0 +1,37 @@
################################################################################
#
# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
#
obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
mac.o manage.o nvm.o phy.o \
param.o ethtool.o netdev.o ptp.o


@@ -0,0 +1,799 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
#define E1000_WUC_APME 0x00000001 /* APM Enable */
#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
/* Wake Up Status */
#define E1000_WUS_LNKC E1000_WUFC_LNKC
#define E1000_WUS_MAG E1000_WUFC_MAG
#define E1000_WUS_EX E1000_WUFC_EX
#define E1000_WUS_MC E1000_WUFC_MC
#define E1000_WUS_BC E1000_WUFC_BC
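/* Illustrative Wake-on-LAN sketch (assumption, not upstream code): a
 * shutdown path arming magic-packet wake would program these registers
 * along the lines of:
 *
 *	ew32(WUC, E1000_WUC_PME_EN);
 *	ew32(WUFC, E1000_WUFC_MAG);
 */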
/* Extended Device Control */
#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
#define E1000_RXDEXT_STATERR_CXE 0x10000000
#define E1000_RXDEXT_STATERR_RXE 0x80000000
/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
E1000_RXD_ERR_CE | \
E1000_RXD_ERR_SE | \
E1000_RXD_ERR_SEQ | \
E1000_RXD_ERR_CXE | \
E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
E1000_RXDEXT_STATERR_SE | \
E1000_RXDEXT_STATERR_SEQ | \
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
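/* Illustrative sketch, not part of the original header: a receive clean-up
 * path typically tests the descriptor error bits against one of the two
 * masks above and drops the buffer on any frame error. The helper names are
 * hypothetical; 'staterr' is the CPU-order status_error field of an
 * extended descriptor.
 */
static inline bool e1000_rx_frame_error(u8 errors)
{
	return (errors & E1000_RXD_ERR_FRAME_ERR_MASK) != 0;
}
static inline bool e1000_rx_ext_frame_error(u32 staterr)
{
	return (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0;
}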
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
/* Management Control */
#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Enable MAC address filtering */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
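/* Illustrative sketch, not part of the original header: the Rx buffer size
 * is programmed by clearing the two size bits and, for the extended sizes,
 * also setting E1000_RCTL_BSEX (2048-byte buffers use BSEX = 0). The helper
 * name is hypothetical.
 */
static inline u32 e1000_rctl_use_4096_byte_buffers(u32 rctl)
{
	rctl &= ~E1000_RCTL_SZ_4096;	/* clears both size bits */
	rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
	return rctl;
}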
/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
* ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
* E1000_PSRCTL_BSIZE1_MASK) |
* ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
* E1000_PSRCTL_BSIZE2_MASK) |
* ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
* E1000_PSRCTL_BSIZE3_MASK))
* where value0 = [128..16256], default=256
* value1 = [1024..64512], default=4096
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
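/* Illustrative sketch, not part of the original header: composing PSRCTL for
 * the default buffer sizes named in the usage note above (256/4096/4096/0
 * bytes, already multiples of 128 and 1024 so no rounding is needed). The
 * helper name is hypothetical.
 */
static inline u32 e1000_psrctl_default_sizes(void)
{
	return ((256 >> E1000_PSRCTL_BSIZE0_SHIFT) & E1000_PSRCTL_BSIZE0_MASK) |
	       ((4096 >> E1000_PSRCTL_BSIZE1_SHIFT) & E1000_PSRCTL_BSIZE1_MASK) |
	       ((4096 << E1000_PSRCTL_BSIZE2_SHIFT) & E1000_PSRCTL_BSIZE2_MASK) |
	       ((0 << E1000_PSRCTL_BSIZE3_SHIFT) & E1000_PSRCTL_BSIZE3_MASK);
}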
/* SWFW_SYNC Definitions */
#define E1000_SWFW_EEP_SM 0x1
#define E1000_SWFW_PHY0_SM 0x2
#define E1000_SWFW_PHY1_SM 0x4
#define E1000_SWFW_CSR_SM 0x8
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
#define E1000_STATUS_SPEED_MASK 0x000000C0
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
#define E1000_ALL_SPEED_DUPLEX ( \
ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
#define E1000_ALL_NOT_GIG ( \
ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
ADVERTISE_100_FULL)
#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
#define E1000_PHY_LED0_MODE_MASK 0x00000007
#define E1000_PHY_LED0_IVRT 0x00000008
#define E1000_PHY_LED0_MASK 0x0000001F
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_MODE_LINK_UP 0x2
#define E1000_LEDCTL_MODE_LED_ON 0xE
#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Transmit Descriptor bit definitions */
#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
/* Receive Checksum Control */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
#define E1000_RFCTL_NFSW_DIS 0x00000040
#define E1000_RFCTL_NFSR_DIS 0x00000080
#define E1000_RFCTL_ACK_DIS 0x00001000
#define E1000_RFCTL_EXTEN 0x00008000
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD 15
#define E1000_CT_SHIFT 4
#define E1000_COLLISION_DISTANCE 63
#define E1000_COLD_SHIFT 12
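/* Illustrative sketch, not part of the original header: the collision
 * threshold and distance are plain counts that get shifted into the CT and
 * COLD fields of the TCTL register defined above. The helper name is
 * hypothetical.
 */
static inline u32 e1000_tctl_set_collision_params(u32 tctl)
{
	tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
	tctl |= E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	return tctl;
}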
/* Default values for the transmit IPG register */
#define DEFAULT_82543_TIPG_IPGT_COPPER 8
#define E1000_TIPG_IPGT_MASK 0x000003FF
#define DEFAULT_82543_TIPG_IPGR1 8
#define E1000_TIPG_IPGR1_SHIFT 10
#define DEFAULT_82543_TIPG_IPGR2 6
#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
#define E1000_TIPG_IPGR2_SHIFT 20
#define MAX_JUMBO_FRAME_SIZE 0x3F00
#define E1000_TX_PTR_GAP 0x1F
/* Extended Configuration Control and Size */
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
/* Low Power IDLE Control */
#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
/* PBA constants */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
#define E1000_PBA_RXA_MASK 0xFFFF
#define E1000_PBS_16K E1000_PBA_16K
/* Uncorrectable/correctable ECC Error counts and enable bits */
#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
#define IFS_STEP 10
#define MIN_NUM_XMITS 1000
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
* o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
* o RXSEQ = Receive Sequence Error
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
E1000_IMS_RXT0 | \
E1000_IMS_TXDW | \
E1000_IMS_RXDMT0 | \
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
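/* Illustrative usage, not part of the original header: a driver enables the
 * default interrupt causes by writing the mask to the Interrupt Mask Set
 * register, e.g. with the ew32() accessor declared in e1000.h:
 *
 *	ew32(IMS, IMS_ENABLE_MASK);
 */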
/* Interrupt Cause Set */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
/* Enable the counting of desc. still to be processed. */
#define E1000_TXDCTL_COUNT_DESC 0x00400000
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* 802.1q VLAN Packet Size */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, leaving room for 15 additional address filters.
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive address valid */
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
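/* Illustrative sketch, not part of the original header: a receive address is
 * split across a RAL/RAH pair -- the first four bytes of the MAC address go
 * into RAL, the last two into RAH, and E1000_RAH_AV marks the entry as
 * valid. The helper names are hypothetical.
 */
static inline u32 e1000_rar_low(const u8 *addr)
{
	return (u32)addr[0] | ((u32)addr[1] << 8) |
	       ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
}
static inline u32 e1000_rar_high(const u8 *addr)
{
	return (u32)addr[4] | ((u32)addr[5] << 8) | E1000_RAH_AV;
}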
/* Error Codes */
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
#define E1000_ERR_PARAM 4
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_PHY_TYPE 6
#define E1000_ERR_RESET 9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_ERR_HOST_INTERFACE_COMMAND 11
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_INVALID_ARGUMENT 16
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
#define PHY_FORCE_LIMIT 20
/* Number of 100-microsecond intervals we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 100
/* Number of 2-millisecond intervals we wait for acquiring MDIO ownership. */
#define MDIO_OWNERSHIP_TIMEOUT 10
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT 10
/* Flow Control */
#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* Transmit Configuration Word */
#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
#define E1000_TIMINCA_INCPERIOD_SHIFT 24
#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
#define E1000_GCR_TXD_NO_SNOOP 0x00000008
#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
E1000_GCR_RXDSCW_NO_SNOOP | \
E1000_GCR_RXDSCR_NO_SNOOP | \
E1000_GCR_TXD_NO_SNOOP | \
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
#define E1000_EECD_DI 0x00000004 /* NVM Data In */
#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
#define E1000_EECD_PRES 0x00000100 /* NVM Present */
#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
/* NVM Addressing bits based on type (0-small, 1-large) */
#define E1000_EECD_ADDR_BITS 0x00000400
#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define E1000_NVM_RW_REG_START 1 /* Start operation */
#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_FUTURE_INIT_WORD1 0x0019
#define NVM_COMPAT_VALID_CSUM 0x0001
#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_3GIO_3 0x001A
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_CFG 0x0012
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
/* Mask bits for fields in Word 0x0f of the NVM */
#define NVM_WORD0F_PAUSE_MASK 0x3000
#define NVM_WORD0F_PAUSE 0x1000
#define NVM_WORD0F_ASM_DIR 0x2000
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
/* Mask bits for fields in Word 0x03 of the EEPROM */
#define NVM_COMPAT_LOM 0x0800
/* length of string needed to store PBA number */
#define E1000_PBANUM_LENGTH 11
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
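/* Illustrative sketch, not part of the original header: validating the NVM
 * checksum means summing words 0x0000 through NVM_CHECKSUM_REG inclusive and
 * comparing the 16-bit total against NVM_SUM. A real implementation reads
 * each word through the NVM ops; 'read_word' stands in for that here and is
 * hypothetical.
 */
static inline bool e1000_nvm_checksum_ok(u16 (*read_word)(u16 offset))
{
	u16 checksum = 0;
	u16 i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		checksum += read_word(i);

	return checksum == (u16)NVM_SUM;
}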
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
/* SPI NVM Status Register */
#define NVM_STATUS_RDY_SPI 0x01
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2 0x1
#define ID_LED_DEF1_ON2 0x2
#define ID_LED_DEF1_OFF2 0x3
#define ID_LED_ON1_DEF2 0x4
#define ID_LED_ON1_ON2 0x5
#define ID_LED_ON1_OFF2 0x6
#define ID_LED_OFF1_DEF2 0x7
#define ID_LED_OFF1_ON2 0x8
#define ID_LED_OFF1_OFF2 0x9
#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
#define IGP_ACTIVITY_LED_ENABLE 0x0300
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
#define PCIE_LINK_STATUS 0x12
#define PCI_HEADER_TYPE_MULTIFUNC 0x80
#define PCIE_LINK_WIDTH_MASK 0x3F0
#define PCIE_LINK_WIDTH_SHIFT 4
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
/* Bit definitions for valid PHY IDs.
* I = Integrated
* E = External
*/
#define M88E1000_E_PHY_ID 0x01410C50
#define M88E1000_I_PHY_ID 0x01410C30
#define M88E1011_I_PHY_ID 0x01410C20
#define IGP01E1000_I_PHY_ID 0x02A80380
#define M88E1111_I_PHY_ID 0x01410CC0
#define GG82563_E_PHY_ID 0x01410CA0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define IFE_E_PHY_ID 0x02A80330
#define IFE_PLUS_E_PHY_ID 0x02A80320
#define IFE_C_E_PHY_ID 0x02A80310
#define BME1000_E_PHY_ID 0x01410CB0
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
#define I82579_E_PHY_ID 0x01540090
#define I217_E_PHY_ID 0x015400A0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
/* M88E1000 PHY Specific Control Register */
#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
/* Manual MDI configuration */
#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
#define M88E1000_PSSR_CABLE_LENGTH 0x0380
#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
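/* Illustrative sketch, not part of the original header: the cable length
 * group is a small field in the PHY specific status register; shifting it
 * down yields the index 0..4 for the ranges listed above. The helper name is
 * hypothetical.
 */
static inline u16 m88e1000_cable_length_index(u16 phy_status)
{
	return (phy_status & M88E1000_PSSR_CABLE_LENGTH) >>
	       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
}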
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
/* Bits...
* 15-5: page
* 4-0: register offset
*/
#define GG82563_PAGE_SHIFT 5
#define GG82563_REG(page, reg) \
(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
#define GG82563_MIN_ALT_REG 30
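/* Illustrative sketch, not part of the original header: GG82563_REG() packs
 * the page number above the 5-bit register offset, so GG82563_REG(193, 16)
 * evaluates to (193 << 5) | 16 == 0x1830. The helpers below, whose names are
 * hypothetical, split such a value back apart.
 */
static inline u16 gg82563_page(u32 reg_addr)
{
	return (u16)(reg_addr >> GG82563_PAGE_SHIFT);
}
static inline u16 gg82563_offset(u32 reg_addr)
{
	return (u16)(reg_addr & MAX_PHY_REG_ADDRESS);
}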
/* GG82563 Specific Registers */
#define GG82563_PHY_SPEC_CTRL \
GG82563_REG(0, 16) /* PHY Specific Control */
#define GG82563_PHY_PAGE_SELECT \
GG82563_REG(0, 22) /* Page Select */
#define GG82563_PHY_SPEC_CTRL_2 \
GG82563_REG(0, 26) /* PHY Specific Control 2 */
#define GG82563_PHY_PAGE_SELECT_ALT \
GG82563_REG(0, 29) /* Alternate Page Select */
#define GG82563_PHY_MAC_SPEC_CTRL \
GG82563_REG(2, 21) /* MAC Specific Control Register */
#define GG82563_PHY_DSP_DISTANCE \
GG82563_REG(5, 26) /* DSP Distance */
/* Page 193 - Port Control Registers */
#define GG82563_PHY_KMRN_MODE_CTRL \
GG82563_REG(193, 16) /* Kumeran Mode Control */
#define GG82563_PHY_PWR_MGMT_CTRL \
GG82563_REG(193, 20) /* Power Management Control */
/* Page 194 - KMRN Registers */
#define GG82563_PHY_INBAND_CTRL \
GG82563_REG(194, 18) /* Inband Control */
/* MDI Control */
#define E1000_MDIC_REG_MASK 0x001F0000
#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_WRITE 0x04000000
#define E1000_MDIC_OP_READ 0x08000000
#define E1000_MDIC_READY 0x10000000
#define E1000_MDIC_ERROR 0x40000000
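/* Illustrative sketch, not part of the original header: an MDIC read is
 * started by writing the register offset, PHY address and read opcode into
 * MDIC, then polling for E1000_MDIC_READY (and checking E1000_MDIC_ERROR)
 * before taking the low 16 data bits. 'offset' and 'phy_addr' are
 * caller-supplied values; the helper name is hypothetical.
 */
static inline u32 e1000_mdic_read_cmd(u32 offset, u32 phy_addr)
{
	return (offset << E1000_MDIC_REG_SHIFT) |
	       (phy_addr << E1000_MDIC_PHY_SHIFT) |
	       E1000_MDIC_OP_READ;
}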
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
#endif /* _E1000_DEFINES_H_ */

View file

@@ -0,0 +1,591 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _E1000_H_
#define _E1000_H_
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include "hw.h"
struct e1000_info;
#define e_dbg(format, arg...) \
netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY 0
#define E1000E_INT_MODE_MSI 1
#define E1000E_INT_MODE_MSIX 2
/* Tx/Rx descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 4096
#define E1000_MIN_TXD 64
#define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 4096
#define E1000_MIN_RXD 64
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue? */
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 0x0400
#define E1000_MNG_VLAN_NONE (-1)
#define DEFAULT_JUMBO 9234
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
/* Count for polling the __E1000_RESETTING condition every 10-20 msec.
* Experimentation has shown the reset can take approximately 210msec.
*/
#define E1000_CHECK_RESET_COUNT 25
#define DEFAULT_RDTR 0
#define DEFAULT_RADV 8
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
/* With WTHRESH, at least the 82571/2 hardware appears to write back
 * 4 descriptors when WTHRESH=5 and 3 descriptors when WTHRESH=4, so a
 * setting of 5 gives the most efficient bus utilization; to avoid
 * possible Tx stalls, however, it is set to 1 here.
 */
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
(1 << 16) | /* wthresh must be +1 more than desired */\
(1 << 8) | /* hthresh */ \
0x1f) /* pthresh */
#define E1000_RXDCTL_DMA_BURST_ENABLE \
(0x01000000 | /* set descriptor granularity */ \
(4 << 16) | /* set writeback threshold */ \
(4 << 8) | /* set prefetch threshold */ \
0x20) /* set hthresh */
#define E1000_TIDV_FPD (1 << 31)
#define E1000_RDTR_FPD (1 << 31)
enum e1000_boards {
board_82571,
board_82572,
board_82573,
board_82574,
board_82583,
board_80003es2lan,
board_ich8lan,
board_ich9lan,
board_ich10lan,
board_pchlan,
board_pch2lan,
board_pch_lpt,
};
struct e1000_ps_page {
struct page *page;
u64 dma; /* must be u64 - written to hw */
};
/* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer {
dma_addr_t dma;
struct sk_buff *skb;
union {
/* Tx */
struct {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
unsigned int segs;
unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
struct {
/* arrays of page information for packet split */
struct e1000_ps_page *ps_pages;
struct page *page;
};
};
};
struct e1000_ring {
struct e1000_adapter *adapter; /* back pointer to adapter */
void *desc; /* pointer to ring memory */
dma_addr_t dma; /* phys address of ring */
unsigned int size; /* length of ring in bytes */
unsigned int count; /* number of desc. in ring */
u16 next_to_use;
u16 next_to_clean;
void __iomem *head;
void __iomem *tail;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
char name[IFNAMSIZ + 5];
u32 ims_val;
u32 itr_val;
void __iomem *itr_register;
int set_itr;
struct sk_buff *rx_skb_top;
};
/* PHY register snapshot values */
struct e1000_phy_regs {
u16 bmcr; /* basic mode control register */
u16 bmsr; /* basic mode status register */
u16 advertise; /* auto-negotiation advertisement */
u16 lpa; /* link partner ability register */
u16 expansion; /* auto-negotiation expansion reg */
u16 ctrl1000; /* 1000BASE-T control register */
u16 stat1000; /* 1000BASE-T status register */
u16 estatus; /* extended status register */
};
/* board specific private data structure */
struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
const struct e1000_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
u16 eeprom_vers;
/* track device up/down/testing state */
unsigned long state;
/* Interrupt Throttle Rate */
u32 itr;
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
/* Tx - one ring per active queue */
struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi;
unsigned int uncorr_errors; /* uncorrectable ECC errors */
unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;
bool detect_tx_hung;
bool tx_hang_recheck;
u8 tx_timeout_factor;
u32 tx_int_delay;
u32 tx_abs_int_delay;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* Tx stats */
u64 tpt_old;
u64 colc_old;
u32 gotc;
u64 gotc_old;
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
u32 tx_hwtstamp_timeouts;
/* Rx */
bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
int work_to_do) ____cacheline_aligned_in_smp;
void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
u32 rx_abs_int_delay;
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 gorc;
u64 gorc_old;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 rx_hwtstamp_cleared;
unsigned int rx_ps_pages;
u16 rx_ps_bsize0;
u32 max_frame_size;
u32 min_frame_size;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
spinlock_t stats64_lock; /* protects statistics counters */
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
/* Snapshot of PHY registers */
struct e1000_phy_regs phy_regs;
struct e1000_ring test_tx_ring;
struct e1000_ring test_rx_ring;
u32 test_icr;
u32 msg_enable;
unsigned int num_vectors;
struct msix_entry *msix_entries;
int int_mode;
u32 eiac_mask;
u32 eeprom_wol;
u32 wol;
u32 pba;
u32 max_hw_frame_size;
bool fc_autoneg;
unsigned int flags;
unsigned int flags2;
struct work_struct downshift_task;
struct work_struct update_phy_task;
struct work_struct print_hang_task;
int phy_hang_count;
u16 tx_ring_count;
u16 rx_ring_count;
struct hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
unsigned long tx_hwtstamp_start;
struct work_struct tx_hwtstamp_work;
spinlock_t systim_lock; /* protects SYSTIML/H registers */
struct cyclecounter cc;
struct timecounter tc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
u16 eee_advert;
};
struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
unsigned int flags2;
u32 pba;
u32 max_hw_frame_size;
s32 (*get_variants)(struct e1000_adapter *);
const struct e1000_mac_operations *mac_ops;
const struct e1000_phy_operations *phy_ops;
const struct e1000_nvm_operations *nvm_ops;
};
s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
/* The system time is maintained by a 64-bit counter comprised of the 32-bit
* SYSTIMH and SYSTIML registers. How the counter increments (and therefore
* its resolution) is based on the contents of the TIMINCA register - it
* increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0).
* For the best accuracy, the incperiod should be as small as possible. The
* incvalue is scaled by a factor as large as possible (while still fitting
* in bits 23:0) so that relatively small clock corrections can be made.
*
* As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
* bits to count nanoseconds leaving the rest for fractional nanoseconds.
*/
#define INCVALUE_96MHz 125
#define INCVALUE_SHIFT_96MHz 17
#define INCPERIOD_SHIFT_96MHz 2
#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
#define INCVALUE_25MHz 40
#define INCVALUE_SHIFT_25MHz 18
#define INCPERIOD_25MHz 1
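/* Illustrative sketch, not part of the original header: for the stable
 * 96 MHz clock the TIMINCA value is composed from the constants above, with
 * the increment period in bits 31:24 and the scaled increment value masked
 * into bits 23:0, mirroring the description in the comment above. The helper
 * name is hypothetical.
 */
static inline u32 e1000e_timinca_96mhz(void)
{
	return (INCPERIOD_96MHz << E1000_TIMINCA_INCPERIOD_SHIFT) |
	       ((INCVALUE_96MHz << INCVALUE_SHIFT_96MHz) &
		E1000_TIMINCA_INCVALUE_MASK);
}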
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
* by simply reading the clock before it overflows.
*
* Clock ns bits Overflows after
* ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~
* 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs
* 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
*/
#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
#define E1000_MAX_82574_SYSTIM_REREADS 50
#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
#define FLAG_HAS_FLASH (1 << 1)
#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
#define FLAG_HAS_WOL (1 << 3)
/* reserved bit4 */
#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
#define FLAG_READ_ONLY_NVM (1 << 8)
#define FLAG_IS_ICH (1 << 9)
#define FLAG_HAS_MSIX (1 << 10)
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
#define FLAG_APME_IN_WUC (1 << 15)
#define FLAG_APME_IN_CTRL3 (1 << 16)
#define FLAG_APME_CHECK_PORT_B (1 << 17)
#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
#define FLAG_NO_WAKE_UCAST (1 << 19)
#define FLAG_MNG_PT_ENABLED (1 << 20)
#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
#define FLAG_RX_NEEDS_RESTART (1 << 24)
#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
#define FLAG_SMART_POWER_DOWN (1 << 26)
#define FLAG_MSI_ENABLED (1 << 27)
/* reserved (1 << 28) */
#define FLAG_TSO_FORCE (1 << 29)
#define FLAG_RESTART_NOW (1 << 30)
#define FLAG_MSI_TEST_FAILED (1 << 31)
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
#define FLAG2_HAS_PHY_STATS (1 << 4)
#define FLAG2_HAS_EEE (1 << 5)
#define FLAG2_DMA_BURST (1 << 6)
#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
#define FLAG2_DISABLE_AIM (1 << 8)
#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
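/* Illustrative usage, not part of the original header: these accessors index
 * straight into a ring's descriptor block, e.g. fetching the next Tx
 * descriptor to fill (local ring pointer name assumed):
 *
 *	struct e1000_tx_desc *tx_desc =
 *		E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
 */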
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
__E1000_ACCESS_SHARED_RESOURCE,
__E1000_DOWN
};
enum latency_range {
lowest_latency = 0,
low_latency = 1,
bulk_latency = 2,
latency_invalid = 255
};
extern char e1000e_driver_name[];
extern const char e1000e_driver_version[];
void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
int e1000e_up(struct e1000_adapter *adapter);
void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
void e1000e_reset(struct e1000_adapter *adapter);
void e1000e_power_up_phy(struct e1000_adapter *adapter);
int e1000e_setup_rx_resources(struct e1000_ring *ring);
int e1000e_setup_tx_resources(struct e1000_ring *ring);
void e1000e_free_rx_resources(struct e1000_ring *ring);
void e1000e_free_tx_resources(struct e1000_ring *ring);
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
void e1000e_get_hw_control(struct e1000_adapter *adapter);
void e1000e_release_hw_control(struct e1000_adapter *adapter);
void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
extern const struct e1000_info e1000_82571_info;
extern const struct e1000_info e1000_82572_info;
extern const struct e1000_info e1000_82573_info;
extern const struct e1000_info e1000_82574_info;
extern const struct e1000_info e1000_82583_info;
extern const struct e1000_info e1000_ich8_info;
extern const struct e1000_info e1000_ich9_info;
extern const struct e1000_info e1000_ich10_info;
extern const struct e1000_info e1000_pch_info;
extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
return hw->phy.ops.reset(hw);
}
static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
{
return hw->phy.ops.read_reg(hw, offset, data);
}
static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
return hw->phy.ops.read_reg_locked(hw, offset, data);
}
static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
{
return hw->phy.ops.write_reg(hw, offset, data);
}
static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
void e1000e_reload_nvm_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
if (hw->mac.ops.read_mac_addr)
return hw->mac.ops.read_mac_addr(hw);
return e1000_read_mac_addr_generic(hw);
}
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
return hw->nvm.ops.validate(hw);
}
static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
{
return hw->nvm.ops.update(hw);
}
static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
return hw->nvm.ops.read(hw, offset, words, data);
}
static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
return hw->nvm.ops.write(hw, offset, words, data);
}
static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
{
return hw->phy.ops.get_info(hw);
}
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
return readl(hw->hw_addr + reg);
}
#define er32(reg) __er32(hw, E1000_##reg)
s32 __ew32_prepare(struct e1000_hw *hw);
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
#define e1e_flush() er32(STATUS)
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
(__ew32((a), (reg + ((offset) << 2)), (value)))
#define E1000_READ_REG_ARRAY(a, reg, offset) \
(readl((a)->hw_addr + reg + ((offset) << 2)))
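/* Illustrative usage, not part of the original header: with a local
 * 'struct e1000_hw *hw' in scope, registers are accessed by their symbolic
 * offsets from regs.h, e.g.:
 *
 *	u32 ctrl = er32(CTRL);
 *	ew32(CTRL, ctrl | E1000_CTRL_SLU);
 *	e1e_flush();
 */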
#endif /* _E1000_H_ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,692 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
#include "regs.h"
#include "defines.h"
struct e1000_hw;
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
#define E1000_DEV_ID_82572EI 0x10B9
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
#define E1000_DEV_ID_ICH8_IGP_C 0x104B
#define E1000_DEV_ID_ICH8_IFE 0x104C
#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
#define E1000_DEV_ID_ICH8_IGP_M 0x104D
#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
#define E1000_DEV_ID_ICH9_BM 0x10E5
#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
#define E1000_DEV_ID_ICH9_IGP_C 0x294C
#define E1000_DEV_ID_ICH9_IFE 0x10C0
#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
#define E1000_DEV_ID_PCH2_LV_LM 0x1502
#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
#define E1000_DEV_ID_PCH_I218_V2 0x15A1
#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
#define E1000_REVISION_4 4
#define E1000_FUNC_1 1
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
enum e1000_mac_type {
e1000_82571,
e1000_82572,
e1000_82573,
e1000_82574,
e1000_82583,
e1000_80003es2lan,
e1000_ich8lan,
e1000_ich9lan,
e1000_ich10lan,
e1000_pchlan,
e1000_pch2lan,
e1000_pch_lpt,
};
enum e1000_media_type {
e1000_media_type_unknown = 0,
e1000_media_type_copper = 1,
e1000_media_type_fiber = 2,
e1000_media_type_internal_serdes = 3,
e1000_num_media_types
};
enum e1000_nvm_type {
e1000_nvm_unknown = 0,
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
e1000_nvm_flash_sw
};
enum e1000_nvm_override {
e1000_nvm_override_none = 0,
e1000_nvm_override_spi_small,
e1000_nvm_override_spi_large
};
enum e1000_phy_type {
e1000_phy_unknown = 0,
e1000_phy_none,
e1000_phy_m88,
e1000_phy_igp,
e1000_phy_igp_2,
e1000_phy_gg82563,
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_bm,
e1000_phy_82578,
e1000_phy_82577,
e1000_phy_82579,
e1000_phy_i217,
};
enum e1000_bus_width {
e1000_bus_width_unknown = 0,
e1000_bus_width_pcie_x1,
e1000_bus_width_pcie_x2,
e1000_bus_width_pcie_x4 = 4,
e1000_bus_width_32,
e1000_bus_width_64,
e1000_bus_width_reserved
};
enum e1000_1000t_rx_status {
e1000_1000t_rx_status_not_ok = 0,
e1000_1000t_rx_status_ok,
e1000_1000t_rx_status_undefined = 0xFF
};
enum e1000_rev_polarity {
e1000_rev_polarity_normal = 0,
e1000_rev_polarity_reversed,
e1000_rev_polarity_undefined = 0xFF
};
enum e1000_fc_mode {
e1000_fc_none = 0,
e1000_fc_rx_pause,
e1000_fc_tx_pause,
e1000_fc_full,
e1000_fc_default = 0xFF
};
enum e1000_ms_type {
e1000_ms_hw_default = 0,
e1000_ms_force_master,
e1000_ms_force_slave,
e1000_ms_auto
};
enum e1000_smart_speed {
e1000_smart_speed_default = 0,
e1000_smart_speed_on,
e1000_smart_speed_off
};
enum e1000_serdes_link_state {
e1000_serdes_link_down = 0,
e1000_serdes_link_autoneg_progress,
e1000_serdes_link_autoneg_complete,
e1000_serdes_link_forced_up
};
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
struct {
__le64 buffer_addr;
__le64 reserved;
} read;
struct {
struct {
__le32 mrq; /* Multiple Rx Queues */
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length;
__le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
#define MAX_PS_BUFFERS 4
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
/* one buffer for protocol header(s), three data buffers */
__le64 buffer_addr[MAX_PS_BUFFERS];
} read;
struct {
struct {
__le32 mrq; /* Multiple Rx Queues */
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length0; /* length of buffer 0 */
__le16 vlan; /* VLAN tag */
} middle;
struct {
__le16 header_status;
/* length of buffers 1-3 */
__le16 length[PS_PAGE_BUFFERS];
} upper;
__le64 reserved;
} wb; /* writeback */
};
/* Transmit Descriptor */
struct e1000_tx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
union {
__le32 data;
struct {
__le16 length; /* Data buffer length */
u8 cso; /* Checksum offset */
u8 cmd; /* Descriptor control */
} flags;
} lower;
union {
__le32 data;
struct {
u8 status; /* Descriptor status */
u8 css; /* Checksum start */
__le16 special;
} fields;
} upper;
};
/* Offload Context Descriptor */
struct e1000_context_desc {
union {
__le32 ip_config;
struct {
u8 ipcss; /* IP checksum start */
u8 ipcso; /* IP checksum offset */
__le16 ipcse; /* IP checksum end */
} ip_fields;
} lower_setup;
union {
__le32 tcp_config;
struct {
u8 tucss; /* TCP checksum start */
u8 tucso; /* TCP checksum offset */
__le16 tucse; /* TCP checksum end */
} tcp_fields;
} upper_setup;
__le32 cmd_and_length;
union {
__le32 data;
struct {
u8 status; /* Descriptor status */
u8 hdr_len; /* Header length */
__le16 mss; /* Maximum segment size */
} fields;
} tcp_seg_setup;
};
/* Offload data descriptor */
struct e1000_data_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
union {
__le32 data;
struct {
__le16 length; /* Data buffer length */
u8 typ_len_ext;
u8 cmd;
} flags;
} lower;
union {
__le32 data;
struct {
u8 status; /* Descriptor status */
u8 popts; /* Packet Options */
__le16 special;
} fields;
} upper;
};
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
u64 crcerrs;
u64 algnerrc;
u64 symerrs;
u64 rxerrc;
u64 mpc;
u64 scc;
u64 ecol;
u64 mcc;
u64 latecol;
u64 colc;
u64 dc;
u64 tncrs;
u64 sec;
u64 cexterr;
u64 rlec;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 fcruc;
u64 prc64;
u64 prc127;
u64 prc255;
u64 prc511;
u64 prc1023;
u64 prc1522;
u64 gprc;
u64 bprc;
u64 mprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 rnbc;
u64 ruc;
u64 rfc;
u64 roc;
u64 rjc;
u64 mgprc;
u64 mgpdc;
u64 mgptc;
u64 tor;
u64 tot;
u64 tpr;
u64 tpt;
u64 ptc64;
u64 ptc127;
u64 ptc255;
u64 ptc511;
u64 ptc1023;
u64 ptc1522;
u64 mptc;
u64 bptc;
u64 tsctc;
u64 tsctfc;
u64 iac;
u64 icrxptc;
u64 icrxatc;
u64 ictxptc;
u64 ictxatc;
u64 ictxqec;
u64 ictxqmtc;
u64 icrxdmtc;
u64 icrxoc;
};
struct e1000_phy_stats {
u32 idle_errors;
u32 receive_errors;
};
struct e1000_host_mng_dhcp_cookie {
u32 signature;
u8 status;
u8 reserved0;
u16 vlan_id;
u32 reserved1;
u16 reserved2;
u8 reserved3;
u8 checksum;
};
/* Host Interface "Rev 1" */
struct e1000_host_command_header {
u8 command_id;
u8 command_length;
u8 command_options;
u8 checksum;
};
#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
};
/* Host Interface "Rev 2" */
struct e1000_host_mng_command_header {
u8 command_id;
u8 checksum;
u16 reserved1;
u16 reserved2;
u16 command_length;
};
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
#include "mac.h"
#include "phy.h"
#include "nvm.h"
#include "manage.h"
/* Function pointers for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
s32 (*cleanup_led)(struct e1000_hw *);
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
void (*set_lan_id)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*config_collision_dist)(struct e1000_hw *);
int (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
u32 (*rar_get_count)(struct e1000_hw *);
};
/* When to use various PHY register access functions:
*
* Func Caller
* Function Does Does When to use
* ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* X_reg L,P,A n/a for simple PHY reg accesses
* X_reg_locked P,A L for multiple accesses of different regs
* on different pages
* X_reg_page A L,P for multiple accesses of different regs
* on the same page
*
* Where X=[read|write], L=locking, P=sets page, A=register access
*
*/
struct e1000_phy_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*cfg_on_link_up)(struct e1000_hw *);
s32 (*check_polarity)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*commit)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_info)(struct e1000_hw *);
s32 (*set_page)(struct e1000_hw *, u16);
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
void (*power_up)(struct e1000_hw *);
void (*power_down)(struct e1000_hw *);
};
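/* Illustrative sketch (not part of the original header): following the table
 * above, a caller that reads several registers on the same PHY page takes the
 * lock (L) and sets the page (P) once itself, then uses the _page accessor
 * (A) for each register:
 *
 *	u16 val;
 *	s32 ret = hw->phy.ops.acquire(hw);
 *	if (!ret) {
 *		ret = hw->phy.ops.set_page(hw, page);
 *		if (!ret)
 *			ret = hw->phy.ops.read_reg_page(hw, reg, &val);
 *		hw->phy.ops.release(hw);
 *	}
 *
 * where hw, page and reg are assumed to be supplied by the caller.
 */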
/* Function pointers for the NVM. */
struct e1000_nvm_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
void (*reload)(struct e1000_hw *);
s32 (*update)(struct e1000_hw *);
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
s32 (*validate)(struct e1000_hw *);
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
};
struct e1000_mac_info {
struct e1000_mac_operations ops;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
enum e1000_mac_type type;
u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
u32 tx_packet_delta;
u32 txcw;
u16 current_ifs_val;
u16 ifs_max_val;
u16 ifs_min_val;
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
bool adaptive_ifs;
bool has_fwsm;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
bool get_link_status;
bool in_ifs_mode;
bool serdes_has_link;
bool tx_pkt_filtering;
enum e1000_serdes_link_state serdes_link_state;
};
struct e1000_phy_info {
struct e1000_phy_operations ops;
enum e1000_phy_type type;
enum e1000_1000t_rx_status local_rx;
enum e1000_1000t_rx_status remote_rx;
enum e1000_ms_type ms_type;
enum e1000_ms_type original_ms_type;
enum e1000_rev_polarity cable_polarity;
enum e1000_smart_speed smart_speed;
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
enum e1000_media_type media_type;
u16 autoneg_advertised;
u16 autoneg_mask;
u16 cable_length;
u16 max_cable_length;
u16 min_cable_length;
u8 mdix;
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
struct e1000_nvm_info {
struct e1000_nvm_operations ops;
enum e1000_nvm_type type;
enum e1000_nvm_override override;
u32 flash_bank_size;
u32 flash_base_addr;
u16 word_size;
u16 delay_usec;
u16 address_bits;
u16 opcode_bits;
u16 page_size;
};
struct e1000_bus_info {
enum e1000_bus_width width;
u16 func;
};
struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
u16 refresh_time; /* Flow control refresh timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* FC mode in effect */
enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
};
struct e1000_dev_spec_82571 {
bool laa_is_present;
u32 smb_counter;
};
struct e1000_dev_spec_80003es2lan {
bool mdic_wa_enable;
};
struct e1000_shadow_ram {
u16 value;
bool modified;
};
#define E1000_ICH8_SHADOW_RAM_WORDS 2048
/* I218 PHY Ultra Low Power (ULP) states */
enum e1000_ulp_state {
e1000_ulp_state_unknown,
e1000_ulp_state_off,
e1000_ulp_state_on,
};
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
bool eee_disable;
u16 eee_lp_ability;
enum e1000_ulp_state ulp_state;
};
struct e1000_hw {
struct e1000_adapter *adapter;
void __iomem *hw_addr;
void __iomem *flash_address;
struct e1000_mac_info mac;
struct e1000_fc_info fc;
struct e1000_phy_info phy;
struct e1000_nvm_info nvm;
struct e1000_bus_info bus;
struct e1000_host_mng_dhcp_cookie mng_cookie;
union {
struct e1000_dev_spec_82571 e82571;
struct e1000_dev_spec_80003es2lan e80003es2lan;
struct e1000_dev_spec_ich8lan ich8lan;
} dev_spec;
};
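/* Illustrative note (not part of the original header): MAC-specific code uses
 * the dev_spec member that matches hw->mac.type, e.g. ICH/PCH code might test
 *
 *	if (hw->dev_spec.ich8lan.eee_disable)
 *		return;
 *
 * while 82571 code uses hw->dev_spec.e82571 and 80003ES2LAN code uses
 * hw->dev_spec.e80003es2lan; only one union member is meaningful per device.
 */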
#include "82571.h"
#include "80003es2lan.h"
#include "ich8lan.h"
#endif

File diff suppressed because it is too large


@@ -0,0 +1,294 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_ICH8LAN_H_
#define _E1000E_ICH8LAN_H_
#define ICH_FLASH_GFPREG 0x0000
#define ICH_FLASH_HSFSTS 0x0004
#define ICH_FLASH_HSFCTL 0x0006
#define ICH_FLASH_FADDR 0x0008
#define ICH_FLASH_FDATA0 0x0010
#define ICH_FLASH_PR0 0x0074
/* Requires up to 10 seconds when MNG might be accessing part. */
#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
#define ICH_CYCLE_READ 0
#define ICH_CYCLE_WRITE 2
#define ICH_CYCLE_ERASE 3
#define FLASH_GFPREG_BASE_MASK 0x1FFF
#define FLASH_SECTOR_ADDR_SHIFT 12
#define ICH_FLASH_SEG_SIZE_256 256
#define ICH_FLASH_SEG_SIZE_4K 4096
#define ICH_FLASH_SEG_SIZE_8K 8192
#define ICH_FLASH_SEG_SIZE_64K 65536
#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
/* FW established a valid mode */
#define E1000_ICH_FWSM_FW_VALID 0x00008000
#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
#define E1000_ICH_MNG_IAMT_MODE 0x2
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
#define E1000_H2ME 0x05B50 /* Host to ME */
#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
(ID_LED_DEF1_DEF2))
#define E1000_ICH_NVM_SIG_WORD 0x13
#define E1000_ICH_NVM_SIG_MASK 0xC000
#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
#define E1000_ICH_NVM_SIG_VALUE 0x80
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
/* FEXT register bit definition */
#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
((reg) & MAX_PHY_REG_ADDRESS))
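/* Worked example (illustrative, assuming the usual MAX_PHY_REG_ADDRESS mask
 * of 0x1F): PHY_REG(770, 17) evaluates to (770 << 5) | 17 = 0x6051, i.e.
 * register 17 on PHY page 770.
 */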
#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
/* PHY Wakeup Registers and defines */
#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
#define HV_STATS_PAGE 778
/* Half-duplex collision counts */
#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
/* SMBus Control Phy Register */
#define CV_SMB_CTRL PHY_REG(769, 23)
#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
/* I218 Ultra Low Power Configuration 1 Register */
#define I218_ULP_CONFIG1 PHY_REG(779, 16)
#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK 0x007F
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
#define HV_SMB_ADDR_FREQ_MASK 0x1100
#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
#define E1000_STRAP_SMT_FREQ_SHIFT 12
/* OEM Bits Phy Register */
#define HV_OEM_BITS PHY_REG(768, 25)
#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
/* KMRN Mode Control */
#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW 0x0400
/* KMRN FIFO Control and Status */
#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
#define I217_INBAND_CTRL PHY_REG(770, 18)
#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
/* Low Power Idle GPIO Control */
#define I217_LPI_GPIO_CTRL PHY_REG(772, 18)
#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800
/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_100_ENABLE 0x2000
#define I82579_LPI_CTRL_1000_ENABLE 0x4000
#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
/* Extended Management Interface (EMI) Registers */
#define I82579_EMI_ADDR 0x10
#define I82579_EMI_DATA 0x11
#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
#define I217_RX_CONFIG 0xB20C /* Receive configuration */
#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
/* Intel Rapid Start Technology Support */
#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
#define I217_CGFREG PHY_REG(772, 29)
#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
/* Latency Tolerance Reporting */
#define E1000_LTRV 0x000F8
#define E1000_LTRV_SCALE_MAX 5
#define E1000_LTRV_SCALE_FACTOR 5
#define E1000_LTRV_REQ_SHIFT 15
#define E1000_LTRV_NOSNOOP_SHIFT 16
#define E1000_LTRV_SEND (1 << 30)
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
#endif /* _E1000E_ICH8LAN_H_ */

File diff suppressed because it is too large


@@ -0,0 +1,68 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_MAC_H_
#define _E1000E_MAC_H_
s32 e1000e_blink_led_generic(struct e1000_hw *hw);
s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
s32 e1000e_force_mac_fc(struct e1000_hw *hw);
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
void e1000_set_lan_id_single_port(struct e1000_hw *hw);
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
u16 *speed, u16 *duplex);
s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
s32 e1000e_led_on_generic(struct e1000_hw *hw);
s32 e1000e_led_off_generic(struct e1000_hw *hw);
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count);
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
s32 e1000e_setup_led_generic(struct e1000_hw *hw);
s32 e1000e_setup_link_generic(struct e1000_hw *hw);
s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
void e1000e_put_hw_semaphore(struct e1000_hw *hw);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
void e1000e_reset_adaptive(struct e1000_hw *hw);
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
void e1000e_update_adaptive(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
#endif


@@ -0,0 +1,347 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "e1000.h"
/**
* e1000_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to the buffer to checksum
 * @length: number of bytes over which to calculate the checksum
 *
 * Calculates the checksum over the given buffer for the specified length.
 * The calculated checksum is returned.
**/
static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
{
u32 i;
u8 sum = 0;
if (!buffer)
return 0;
for (i = 0; i < length; i++)
sum += buffer[i];
return (u8)(0 - sum);
}
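/* Illustrative example (not part of the original source): the returned value
 * is the two's complement of the byte sum, so the buffer plus its checksum
 * sums to zero modulo 256.  For a buffer {0x10, 0x20, 0x30} the byte sum is
 * 0x60 and the checksum is (u8)(0 - 0x60) = 0xA0; 0x60 + 0xA0 truncates to
 * 0x00 in a u8.
 */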
/**
* e1000_mng_enable_host_if - Checks host interface is enabled
* @hw: pointer to the HW structure
*
* Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
*
* This function checks whether the HOST IF is enabled for command operation
 * and also checks whether the previous command has completed. It busy-waits
 * if the previous command has not yet completed.
**/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
u32 hicr;
u8 i;
if (!hw->mac.arc_subsystem_valid) {
e_dbg("ARC subsystem not valid.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
/* Check that the host interface is enabled. */
hicr = er32(HICR);
if (!(hicr & E1000_HICR_EN)) {
e_dbg("E1000_HOST_EN bit disabled.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
/* check the previous command is completed */
for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
hicr = er32(HICR);
if (!(hicr & E1000_HICR_C))
break;
mdelay(1);
}
if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
e_dbg("Previous command timeout failed.\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
return 0;
}
/**
* e1000e_check_mng_mode_generic - Generic check management mode
* @hw: pointer to the HW structure
*
* Reads the firmware semaphore register and returns true (>0) if
* manageability is enabled, else false (0).
**/
bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
{
u32 fwsm = er32(FWSM);
return (fwsm & E1000_FWSM_MODE_MASK) ==
(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
}
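/* Worked example (illustrative): with E1000_FWSM_MODE_MASK = 0xE,
 * E1000_FWSM_MODE_SHIFT = 1 and E1000_MNG_IAMT_MODE = 0x3 (see manage.h), an
 * FWSM value of 0x00008046 gives (0x8046 & 0xE) = 0x6 = (0x3 << 1), so this
 * function reports manageability as enabled.
 */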
/**
* e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
* @hw: pointer to the HW structure
*
* Enables packet filtering on transmit packets if manageability is enabled
* and host interface is enabled.
**/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
u32 *buffer = (u32 *)&hw->mng_cookie;
u32 offset;
s32 ret_val, hdr_csum, csum;
u8 i, len;
hw->mac.tx_pkt_filtering = true;
/* No manageability, no filtering */
if (!hw->mac.ops.check_mng_mode(hw)) {
hw->mac.tx_pkt_filtering = false;
return hw->mac.tx_pkt_filtering;
}
/* If we can't read from the host interface for whatever
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
if (ret_val) {
hw->mac.tx_pkt_filtering = false;
return hw->mac.tx_pkt_filtering;
}
/* Read in the header. Length and offset are in dwords. */
len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
for (i = 0; i < len; i++)
*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF,
offset + i);
hdr_csum = hdr->checksum;
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
E1000_MNG_DHCP_COOKIE_LENGTH);
/* If either the checksums or signature don't match, then
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = true;
return hw->mac.tx_pkt_filtering;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
hw->mac.tx_pkt_filtering = false;
return hw->mac.tx_pkt_filtering;
}
/**
* e1000_mng_write_cmd_header - Writes manageability command header
* @hw: pointer to the HW structure
* @hdr: pointer to the host interface command header
*
 * Writes the command header after performing the checksum calculation.
**/
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
struct e1000_host_mng_command_header *hdr)
{
u16 i, length = sizeof(struct e1000_host_mng_command_header);
/* Write the whole command header structure with new checksum. */
hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
length >>= 2;
/* Write the relevant command block into the ram area. */
for (i = 0; i < length; i++) {
E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i));
e1e_flush();
}
return 0;
}
/**
* e1000_mng_host_if_write - Write to the manageability host interface
* @hw: pointer to the HW structure
* @buffer: pointer to the host interface buffer
* @length: size of the buffer
* @offset: location in the buffer to write to
* @sum: sum of the data (not checksum)
*
 * This function writes the buffer content at the given offset on the host
 * interface. It also handles alignment so that the writes are done in the
 * most efficient way, and it accumulates the sum of the written data in the
 * *sum parameter.
**/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
u16 length, u16 offset, u8 *sum)
{
u8 *tmp;
u8 *bufptr = buffer;
u32 data = 0;
u16 remaining, i, j, prev_bytes;
/* sum = only sum of the data and it is not checksum */
if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
return -E1000_ERR_PARAM;
tmp = (u8 *)&data;
prev_bytes = offset & 0x3;
offset >>= 2;
if (prev_bytes) {
data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
for (j = prev_bytes; j < sizeof(u32); j++) {
*(tmp + j) = *bufptr++;
*sum += *(tmp + j);
}
E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
length -= j - prev_bytes;
offset++;
}
remaining = length & 0x3;
length -= remaining;
/* Calculate length in DWORDs */
length >>= 2;
/* The device driver writes the relevant command block into the
* ram area.
*/
for (i = 0; i < length; i++) {
for (j = 0; j < sizeof(u32); j++) {
*(tmp + j) = *bufptr++;
*sum += *(tmp + j);
}
E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
}
if (remaining) {
for (j = 0; j < sizeof(u32); j++) {
if (j < remaining)
*(tmp + j) = *bufptr++;
else
*(tmp + j) = 0;
*sum += *(tmp + j);
}
E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
}
return 0;
}
/**
* e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
* @hw: pointer to the HW structure
* @buffer: pointer to the host interface
* @length: size of the buffer
*
* Writes the DHCP information to the host interface.
**/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
struct e1000_host_mng_command_header hdr;
s32 ret_val;
u32 hicr;
hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
hdr.command_length = length;
hdr.reserved1 = 0;
hdr.reserved2 = 0;
hdr.checksum = 0;
/* Enable the host interface */
ret_val = e1000_mng_enable_host_if(hw);
if (ret_val)
return ret_val;
/* Populate the host interface with the contents of "buffer". */
ret_val = e1000_mng_host_if_write(hw, buffer, length,
sizeof(hdr), &(hdr.checksum));
if (ret_val)
return ret_val;
/* Write the manageability command header */
ret_val = e1000_mng_write_cmd_header(hw, &hdr);
if (ret_val)
return ret_val;
/* Tell the ARC a new command is pending. */
hicr = er32(HICR);
ew32(HICR, hicr | E1000_HICR_C);
return 0;
}
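/* Usage sketch (illustrative, not part of this file): a caller that wants the
 * firmware to see a DHCP frame would hand the frame contents to this helper,
 * e.g.
 *
 *	if (e1000e_mng_write_dhcp_info(hw, skb->data, skb->len))
 *		e_dbg("Writing DHCP info to the host interface failed\n");
 *
 * where hw and skb are assumed to be provided by the caller.
 */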
/**
* e1000e_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
 * Verifies whether the hardware needs to leave the interface enabled so that
 * frames can be directed to and from the management interface.
**/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
u32 manc;
u32 fwsm, factps;
manc = er32(MANC);
if (!(manc & E1000_MANC_RCV_TCO_EN))
return false;
if (hw->mac.has_fwsm) {
fwsm = er32(FWSM);
factps = er32(FACTPS);
if (!(factps & E1000_FACTPS_MNGCG) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
(e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
return true;
} else if ((hw->mac.type == e1000_82574) ||
(hw->mac.type == e1000_82583)) {
u16 data;
s32 ret_val;
factps = er32(FACTPS);
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
if (ret_val)
return false;
if (!(factps & E1000_FACTPS_MNGCG) &&
((data & E1000_NVM_INIT_CTRL2_MNGM) ==
(e1000_mng_mode_pt << 13)))
return true;
} else if ((manc & E1000_MANC_SMBUS_EN) &&
!(manc & E1000_MANC_ASF_EN)) {
return true;
}
return false;
}


@@ -0,0 +1,65 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_MANAGE_H_
#define _E1000E_MANAGE_H_
bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
enum e1000_mng_mode {
e1000_mng_mode_none = 0,
e1000_mng_mode_asf,
e1000_mng_mode_pt,
e1000_mng_mode_ipmi,
e1000_mng_mode_host_if_only
};
#define E1000_FACTPS_MNGCG 0x20000000
#define E1000_FWSM_MODE_MASK 0xE
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_MNG_IAMT_MODE 0x3
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
#define E1000_VFTA_ENTRY_SHIFT 5
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
#define E1000_HICR_EN 0x01 /* Enable bit - RO */
/* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_C 0x02
#define E1000_HICR_SV 0x04 /* Status Validity */
#define E1000_HICR_FW_RESET_ENABLE 0x40
#define E1000_HICR_FW_RESET 0x80
/* Intel(R) Active Management Technology signature */
#define E1000_IAMT_SIGNATURE 0x544D4149
#endif

File diff suppressed because it is too large


@@ -0,0 +1,633 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "e1000.h"
/**
* e1000_raise_eec_clk - Raise EEPROM clock
* @hw: pointer to the HW structure
* @eecd: pointer to the EEPROM
*
* Enable/Raise the EEPROM clock bit.
**/
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
*eecd = *eecd | E1000_EECD_SK;
ew32(EECD, *eecd);
e1e_flush();
udelay(hw->nvm.delay_usec);
}
/**
* e1000_lower_eec_clk - Lower EEPROM clock
* @hw: pointer to the HW structure
* @eecd: pointer to the EEPROM
*
* Clear/Lower the EEPROM clock bit.
**/
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
*eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd);
e1e_flush();
udelay(hw->nvm.delay_usec);
}
/**
 * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
* @hw: pointer to the HW structure
* @data: data to send to the EEPROM
* @count: number of bits to shift out
*
* We need to shift 'count' bits out to the EEPROM. So, the value in the
* "data" parameter will be shifted out to the EEPROM one bit at a time.
* In order to do this, "data" must be broken down into bits.
**/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
u32 mask;
mask = 0x01 << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
do {
eecd &= ~E1000_EECD_DI;
if (data & mask)
eecd |= E1000_EECD_DI;
ew32(EECD, eecd);
e1e_flush();
udelay(nvm->delay_usec);
e1000_raise_eec_clk(hw, &eecd);
e1000_lower_eec_clk(hw, &eecd);
mask >>= 1;
} while (mask);
eecd &= ~E1000_EECD_DI;
ew32(EECD, eecd);
}
/**
* e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
* @hw: pointer to the HW structure
* @count: number of bits to shift in
*
* In order to read a register from the EEPROM, we need to shift 'count' bits
* in from the EEPROM. Bits are "shifted in" by raising the clock input to
* the EEPROM (setting the SK bit), and then reading the value of the data out
* "DO" bit. During this "shifting in" process the data in "DI" bit should
* always be clear.
**/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
u32 eecd;
u32 i;
u16 data;
eecd = er32(EECD);
eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
data = 0;
for (i = 0; i < count; i++) {
data <<= 1;
e1000_raise_eec_clk(hw, &eecd);
eecd = er32(EECD);
eecd &= ~E1000_EECD_DI;
if (eecd & E1000_EECD_DO)
data |= 1;
e1000_lower_eec_clk(hw, &eecd);
}
return data;
}
/**
* e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
* @hw: pointer to the HW structure
* @ee_reg: EEPROM flag for polling
*
* Polls the EEPROM status bit for either read or write completion based
* upon the value of 'ee_reg'.
**/
s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
u32 attempts = 100000;
u32 i, reg = 0;
for (i = 0; i < attempts; i++) {
if (ee_reg == E1000_NVM_POLL_READ)
reg = er32(EERD);
else
reg = er32(EEWR);
if (reg & E1000_NVM_RW_REG_DONE)
return 0;
udelay(5);
}
return -E1000_ERR_NVM;
}
/**
* e1000e_acquire_nvm - Generic request for access to EEPROM
* @hw: pointer to the HW structure
*
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
s32 e1000e_acquire_nvm(struct e1000_hw *hw)
{
u32 eecd = er32(EECD);
s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
ew32(EECD, eecd | E1000_EECD_REQ);
eecd = er32(EECD);
while (timeout) {
if (eecd & E1000_EECD_GNT)
break;
udelay(5);
eecd = er32(EECD);
timeout--;
}
if (!timeout) {
eecd &= ~E1000_EECD_REQ;
ew32(EECD, eecd);
e_dbg("Could not acquire NVM grant\n");
return -E1000_ERR_NVM;
}
return 0;
}
/**
* e1000_standby_nvm - Return EEPROM to standby state
* @hw: pointer to the HW structure
*
* Return the EEPROM to a standby state.
**/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
if (nvm->type == e1000_nvm_eeprom_spi) {
/* Toggle CS to flush commands */
eecd |= E1000_EECD_CS;
ew32(EECD, eecd);
e1e_flush();
udelay(nvm->delay_usec);
eecd &= ~E1000_EECD_CS;
ew32(EECD, eecd);
e1e_flush();
udelay(nvm->delay_usec);
}
}
/**
* e1000_stop_nvm - Terminate EEPROM command
* @hw: pointer to the HW structure
*
* Terminates the current command by inverting the EEPROM's chip select pin.
**/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
u32 eecd;
eecd = er32(EECD);
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
/* Pull CS high */
eecd |= E1000_EECD_CS;
e1000_lower_eec_clk(hw, &eecd);
}
}
/**
* e1000e_release_nvm - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
**/
void e1000e_release_nvm(struct e1000_hw *hw)
{
u32 eecd;
e1000_stop_nvm(hw);
eecd = er32(EECD);
eecd &= ~E1000_EECD_REQ;
ew32(EECD, eecd);
}
/**
* e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
* @hw: pointer to the HW structure
*
 * Sets up the EEPROM for reading and writing.
**/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
u16 timeout = NVM_MAX_RETRY_SPI;
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
e1e_flush();
udelay(1);
/* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
*/
while (timeout) {
e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
hw->nvm.opcode_bits);
spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
break;
udelay(5);
e1000_standby_nvm(hw);
timeout--;
}
if (!timeout) {
e_dbg("SPI NVM Status error\n");
return -E1000_ERR_NVM;
}
}
return 0;
}
/**
* e1000e_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, eerd = 0;
s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
e_dbg("nvm parameter(s) out of bounds\n");
return -E1000_ERR_NVM;
}
for (i = 0; i < words; i++) {
eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
E1000_NVM_RW_REG_START;
ew32(EERD, eerd);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
if (ret_val) {
e_dbg("NVM read error: %d\n", ret_val);
break;
}
data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
}
return ret_val;
}
/**
* e1000e_write_nvm_spi - Write to EEPROM using SPI
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the EEPROM
*
* Writes data to EEPROM at offset using SPI interface.
*
 * If e1000e_update_nvm_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
e_dbg("nvm parameter(s) out of bounds\n");
return -E1000_ERR_NVM;
}
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
ret_val = nvm->ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = e1000_ready_nvm_eeprom(hw);
if (ret_val) {
nvm->ops.release(hw);
return ret_val;
}
e1000_standby_nvm(hw);
/* Send the WRITE ENABLE command (8 bit opcode) */
e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
nvm->opcode_bits);
e1000_standby_nvm(hw);
/* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
write_opcode |= NVM_A8_OPCODE_SPI;
/* Send the Write command (8-bit opcode + addr) */
e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
nvm->address_bits);
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
if ((((offset + widx) * 2) % nvm->page_size) == 0) {
e1000_standby_nvm(hw);
break;
}
}
usleep_range(10000, 20000);
nvm->ops.release(hw);
}
return ret_val;
}
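/* Usage sketch (illustrative): a single-word write followed by the checksum
 * update that the comment above calls for; hw and offset are assumed to come
 * from the caller.
 *
 *	u16 word = 0x1234;
 *	s32 ret = e1000e_write_nvm_spi(hw, offset, 1, &word);
 *	if (!ret)
 *		ret = e1000e_update_nvm_checksum_generic(hw);
 */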
/**
* e1000_read_pba_string_generic - Read device part number
* @hw: pointer to the HW structure
* @pba_num: pointer to device part number
* @pba_num_size: size of part number buffer
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
* the value in pba_num.
**/
s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
s32 ret_val;
u16 nvm_data;
u16 pba_ptr;
u16 offset;
u16 length;
if (pba_num == NULL) {
e_dbg("PBA string buffer was null\n");
return -E1000_ERR_INVALID_ARGUMENT;
}
ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
	/* if nvm_data is not the pointer guard, the PBA must be in the legacy
	 * format, which means pba_ptr is actually our second data word for the
	 * PBA number and we can decode it into an ASCII string
	 */
if (nvm_data != NVM_PBA_PTR_GUARD) {
e_dbg("NVM PBA number is not stored as string\n");
		/* make sure the caller's buffer is big enough to store the PBA */
if (pba_num_size < E1000_PBANUM_LENGTH) {
e_dbg("PBA string buffer too small\n");
			return -E1000_ERR_NO_SPACE;
}
/* extract hex string from data and pba_ptr */
pba_num[0] = (nvm_data >> 12) & 0xF;
pba_num[1] = (nvm_data >> 8) & 0xF;
pba_num[2] = (nvm_data >> 4) & 0xF;
pba_num[3] = nvm_data & 0xF;
pba_num[4] = (pba_ptr >> 12) & 0xF;
pba_num[5] = (pba_ptr >> 8) & 0xF;
pba_num[6] = '-';
pba_num[7] = 0;
pba_num[8] = (pba_ptr >> 4) & 0xF;
pba_num[9] = pba_ptr & 0xF;
/* put a null character on the end of our string */
pba_num[10] = '\0';
/* switch all the data but the '-' to hex char */
for (offset = 0; offset < 10; offset++) {
if (pba_num[offset] < 0xA)
pba_num[offset] += '0';
else if (pba_num[offset] < 0x10)
pba_num[offset] += 'A' - 0xA;
}
return 0;
}
ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
if (length == 0xFFFF || length == 0) {
e_dbg("NVM PBA number section invalid length\n");
return -E1000_ERR_NVM_PBA_SECTION;
}
/* check if pba_num buffer is big enough */
if (pba_num_size < (((u32)length * 2) - 1)) {
e_dbg("PBA string buffer too small\n");
return -E1000_ERR_NO_SPACE;
}
/* trim pba length from start of string */
pba_ptr++;
length--;
for (offset = 0; offset < length; offset++) {
ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
pba_num[offset * 2] = (u8)(nvm_data >> 8);
pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
}
pba_num[offset * 2] = '\0';
return 0;
}
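/* Worked example (illustrative): in the legacy format, NVM words 0x1234 and
 * 0x5678 decode nibble by nibble to the string "123456-078" (six hex digits,
 * a dash, a '0' and the last two digits), which is why the buffer must hold
 * at least E1000_PBANUM_LENGTH bytes.
 */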
/**
* e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from the EEPROM and stores the value.
* Since devices with two ports use the same EEPROM, we increment the
* last bit in the MAC address for the second port.
**/
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
u32 rar_high;
u32 rar_low;
u16 i;
rar_high = er32(RAH(0));
rar_low = er32(RAL(0));
for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
return 0;
}
/**
* e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
{
s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
checksum += nvm_data;
}
if (checksum != (u16)NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
}
return 0;
}
/**
* e1000e_update_nvm_checksum_generic - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
{
s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
if (ret_val) {
e_dbg("NVM Read Error while updating checksum.\n");
return ret_val;
}
checksum += nvm_data;
}
checksum = (u16)NVM_SUM - checksum;
ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
e_dbg("NVM Write Error while updating checksum.\n");
return ret_val;
}
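/* Worked example (illustrative, assuming the usual NVM_CHECKSUM_REG = 0x3F
 * and NVM_SUM = 0xBABA): if words 0x00-0x3E sum to 0x1234, the update routine
 * writes 0xBABA - 0x1234 = 0xA886 into word 0x3F, after which the validate
 * routine above sums all 0x40 words to exactly 0xBABA.
 */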
/**
* e1000e_reload_nvm_generic - Reloads EEPROM
* @hw: pointer to the HW structure
*
* Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
* extended control register.
**/
void e1000e_reload_nvm_generic(struct e1000_hw *hw)
{
u32 ctrl_ext;
usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
}


@@ -0,0 +1,40 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_NVM_H_
#define _E1000E_NVM_H_
s32 e1000e_acquire_nvm(struct e1000_hw *hw);
s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
void e1000e_release_nvm(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#endif


@@ -0,0 +1,533 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "e1000.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
#define COPYBREAK_DEFAULT 256
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
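/* For example, the TxIntDelay declaration below expands (roughly) to
 *
 *	static int TxIntDelay[E1000_MAX_NIC + 1] = E1000_PARAM_INIT;
 *	static unsigned int num_TxIntDelay;
 *	module_param_array_named(TxIntDelay, TxIntDelay, int, &num_TxIntDelay, 0);
 *
 * so each option takes a comma-separated per-adapter list on the module
 * command line, e.g. "modprobe e1000e TxIntDelay=8,16" sets 8 for the first
 * port and 16 for the second.
 */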
/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
*
 * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative,
 * 4=simplified (2000-8000 ints)
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
/* IntMode (Interrupt Mode)
*
* Valid Range: varies depending on kernel configuration & hardware support
*
* legacy=0, MSI=1, MSI-X=2
*
* When MSI/MSI-X support is enabled in kernel-
* Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise
* When MSI/MSI-X support is not enabled in kernel-
* Default Value: 0 (legacy)
*
* When a mode is specified that is not allowed/supported, it will be
* demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
#define MAX_INTMODE 2
#define MIN_INTMODE 0
/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
* Default Value: 0 (disabled)
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
/* Enable Kumeran Lock Loss workaround
*
* Valid Range: 0, 1
*
* Default Value: 1 (enabled)
*/
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
/* Write Protect NVM
*
* Valid Range: 0, 1
*
* Default Value: 1 (enabled)
*/
E1000_PARAM(WriteProtectNVM,
"Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
/* Enable CRC Stripping
*
* Valid Range: 0, 1
*
* Default Value: 1 (enabled)
*/
E1000_PARAM(CrcStripping,
"Enable CRC Stripping, disable if your BMC needs the CRC");
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
/* range_option info */
struct {
int min;
int max;
} r;
/* list_option info */
struct {
int nr;
struct e1000_opt_list {
int i;
char *str;
} *p;
} l;
} arg;
};
static int e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
dev_info(&adapter->pdev->dev, "%s Enabled\n",
opt->name);
return 0;
case OPTION_DISABLED:
dev_info(&adapter->pdev->dev, "%s Disabled\n",
opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
dev_info(&adapter->pdev->dev, "%s set to %i\n",
opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
dev_info(&adapter->pdev->dev, "%s\n",
ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
/**
* e1000e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void e1000e_check_options(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
dev_notice(&adapter->pdev->dev,
"Warning: no configuration for board #%i\n", bd);
dev_notice(&adapter->pdev->dev,
"Using defaults for all values\n");
}
/* Transmit Interrupt Delay */
{
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TXDELAY,
.max = MAX_TXDELAY } }
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
}
/* Transmit Absolute Interrupt Delay */
{
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_TADV),
.def = DEFAULT_TADV,
.arg = { .r = { .min = MIN_TXABSDELAY,
.max = MAX_TXABSDELAY } }
};
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
}
/* Receive Interrupt Delay */
{
static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RXDELAY,
.max = MAX_RXDELAY } }
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
}
/* Receive Absolute Interrupt Delay */
{
static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_RADV),
.def = DEFAULT_RADV,
.arg = { .r = { .min = MIN_RXABSDELAY,
.max = MAX_RXABSDELAY } }
};
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
}
/* Interrupt Throttling Rate */
{
static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
__MODULE_STRING(DEFAULT_ITR),
.def = DEFAULT_ITR,
.arg = { .r = { .min = MIN_ITR,
.max = MAX_ITR } }
};
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
/* Make sure a message is printed for non-special
* values. And in case of an invalid option, display
* warning, use default and go through itr/itr_setting
* adjustment logic below
*/
if ((adapter->itr > 4) &&
e1000_validate_option(&adapter->itr, &opt, adapter))
adapter->itr = opt.def;
} else {
/* If no option specified, use default value and go
* through the logic below to adjust itr/itr_setting
*/
adapter->itr = opt.def;
/* Make sure a message is printed for non-special
* default values
*/
if (adapter->itr > 4)
dev_info(&adapter->pdev->dev,
"%s set to default %d\n", opt.name,
adapter->itr);
}
adapter->itr_setting = adapter->itr;
switch (adapter->itr) {
case 0:
dev_info(&adapter->pdev->dev, "%s turned off\n",
opt.name);
break;
case 1:
dev_info(&adapter->pdev->dev,
"%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
case 2:
dev_info(&adapter->pdev->dev,
"%s Invalid mode - setting default\n",
opt.name);
adapter->itr_setting = opt.def;
/* fall-through */
case 3:
dev_info(&adapter->pdev->dev,
"%s set to dynamic conservative mode\n",
opt.name);
adapter->itr = 20000;
break;
case 4:
dev_info(&adapter->pdev->dev,
"%s set to simplified (2000-8000 ints) mode\n",
opt.name);
break;
default:
/* Save the setting, because the dynamic bits
* change itr.
*
* Clear the lower two bits because
* they are used as control.
*/
adapter->itr_setting &= ~3;
break;
}
}
/* Interrupt Mode */
{
static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
#ifndef CONFIG_PCI_MSI
.err = "defaulting to 0 (legacy)",
.def = E1000E_INT_MODE_LEGACY,
.arg = { .r = { .min = 0,
.max = 0 } }
#endif
};
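/* opt is static, so the MSI/MSI-X dependent default and error message are
 * filled in at runtime; the kstrdup()'d message is freed again once the
 * option has been validated below.
 */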
#ifdef CONFIG_PCI_MSI
if (adapter->flags & FLAG_HAS_MSIX) {
opt.err = kstrdup("defaulting to 2 (MSI-X)",
GFP_KERNEL);
opt.def = E1000E_INT_MODE_MSIX;
opt.arg.r.max = E1000E_INT_MODE_MSIX;
} else {
opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL);
opt.def = E1000E_INT_MODE_MSI;
opt.arg.r.max = E1000E_INT_MODE_MSI;
}
if (!opt.err) {
dev_err(&adapter->pdev->dev,
"Failed to allocate memory\n");
return;
}
#endif
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
adapter->int_mode = opt.def;
}
#ifdef CONFIG_PCI_MSI
kfree(opt.err);
#endif
}
/* Smart Power Down */
{
static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
.def = OPTION_DISABLED
};
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
/* CRC Stripping */
{
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
}
} else {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
}
}
/* Kumeran Lock Loss Workaround */
{
static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
bool enabled = opt.def;
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
if (hw->mac.type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
enabled);
}
/* Write-protect NVM */
{
static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (adapter->flags & FLAG_IS_ICH) {
if (num_WriteProtectNVM > bd) {
unsigned int write_protect_nvm =
WriteProtectNVM[bd];
e1000_validate_option(&write_protect_nvm, &opt,
adapter);
if (write_protect_nvm)
adapter->flags |= FLAG_READ_ONLY_NVM;
} else {
if (opt.def)
adapter->flags |= FLAG_READ_ONLY_NVM;
}
}
}
}

File diff suppressed because it is too large


@ -0,0 +1,236 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_PHY_H_
#define _E1000E_PHY_H_
s32 e1000e_check_downshift(struct e1000_hw *hw);
s32 e1000_check_polarity_m88(struct e1000_hw *hw);
s32 e1000_check_polarity_igp(struct e1000_hw *hw);
s32 e1000_check_polarity_ife(struct e1000_hw *hw);
s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
s32 e1000e_get_phy_id(struct e1000_hw *hw);
s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
s32 e1000e_setup_copper_link(struct e1000_hw *hw);
s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
s32 e1000e_determine_phy_address(struct e1000_hw *hw);
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
s32 e1000_check_polarity_82577(struct e1000_hw *hw);
s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define E1000_MAX_PHY_ADDR 8
/* IGP01E1000 Specific Registers */
#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
#define IGP_PAGE_SHIFT 5
#define PHY_REG_MASK 0x1F
/* BM/HV Specific Registers */
#define BM_PORT_CTRL_PAGE 769
#define BM_WUC_PAGE 800
#define BM_WUC_ADDRESS_OPCODE 0x11
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
#define BM_WUC_ENABLE_REG 17
#define BM_WUC_ENABLE_BIT (1 << 2)
#define BM_WUC_HOST_WU_BIT (1 << 4)
#define BM_WUC_ME_WU_BIT (1 << 5)
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
(((reg) & MAX_PHY_REG_ADDRESS) |\
(((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
(((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
(((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
~MAX_PHY_REG_ADDRESS)))
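/* Example usage (illustrative):
 * BM_PHY_REG(BM_WUC_PAGE, BM_WUC_ENABLE_REG) packs a page number and a
 * register number into a single offset; BM_PHY_REG_PAGE() and
 * BM_PHY_REG_NUM() recover the two fields from such an offset.
 */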
#define HV_INTC_FC_PAGE_START 768
#define I82578_ADDR_REG 29
#define I82577_ADDR_REG 16
#define I82577_CFG_REG 22
#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
#define I82577_CTRL_REG 23
/* 82577 specific PHY registers */
#define I82577_PHY_CTRL_2 18
#define I82577_PHY_LBK_CTRL 19
#define I82577_PHY_STATUS_2 26
#define I82577_PHY_DIAG_STATUS 31
/* I82577 PHY Status 2 */
#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
#define I82577_PHY_STATUS2_MDIX 0x0800
#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
/* I82577 PHY Control 2 */
#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
/* BM PHY Copper Specific Control 1 */
#define BM_CS_CTRL1 16
/* BM PHY Copper Specific Status */
#define BM_CS_STATUS 17
#define BM_CS_STATUS_LINK_UP 0x0400
#define BM_CS_STATUS_RESOLVED 0x0800
#define BM_CS_STATUS_SPEED_MASK 0xC000
#define BM_CS_STATUS_SPEED_1000 0x8000
/* 82577 Mobile Phy Status Register */
#define HV_M_STATUS 26
#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
#define HV_M_STATUS_SPEED_MASK 0x0300
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_SPEED_100 0x0100
#define HV_M_STATUS_LINK_UP 0x0040
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
#define IGP01E1000_PSSR_MDIX 0x0800
#define IGP01E1000_PSSR_SPEED_MASK 0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
#define IGP02E1000_PHY_CHANNEL_NUM 4
#define IGP02E1000_PHY_AGC_A 0x11B1
#define IGP02E1000_PHY_AGC_B 0x12B1
#define IGP02E1000_PHY_AGC_C 0x14B1
#define IGP02E1000_PHY_AGC_D 0x18B1
#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
#define IGP02E1000_AGC_LENGTH_MASK 0x7F
#define IGP02E1000_AGC_RANGE 15
#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
/* IFE PHY Extended Status Control */
#define IFE_PESC_POLARITY_REVERSED 0x0100
/* IFE PHY Special Control */
#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
#define IFE_PSC_FORCE_POLARITY 0x0020
/* IFE PHY Special Control and LED Control */
#define IFE_PSCL_PROBE_MODE 0x0020
#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
/* IFE PHY MDIX Control */
#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
#endif


@ -0,0 +1,275 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
/* PTP 1588 Hardware Clock (PHC)
* Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
*/
#include "e1000.h"
/**
* e1000e_phc_adjfreq - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
* @delta: Desired frequency change in parts per billion
*
* Adjust the frequency of the PHC cycle counter by the indicated delta from
* the base frequency.
**/
static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
struct e1000_hw *hw = &adapter->hw;
bool neg_adj = false;
unsigned long flags;
u64 adjustment;
u32 timinca, incvalue;
s32 ret_val;
if ((delta > ptp->max_adj) || (delta <= -1000000000))
return -EINVAL;
if (delta < 0) {
neg_adj = true;
delta = -delta;
}
/* Get the System Time Register SYSTIM base frequency */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
if (ret_val)
return ret_val;
spin_lock_irqsave(&adapter->systim_lock, flags);
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
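/* Scale the base increment by the requested parts-per-billion delta:
 * adjustment = incvalue * |delta| / 1e9, then add or subtract it from
 * the base increment depending on the sign of the request.
 */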
adjustment = incvalue;
adjustment *= delta;
adjustment = div_u64(adjustment, 1000000000);
incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
timinca |= incvalue;
ew32(TIMINCA, timinca);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
}
/**
* e1000e_phc_adjtime - Shift the time of the hardware clock
* @ptp: ptp clock structure
* @delta: Desired change in nanoseconds
*
* Adjust the timer by resetting the timecounter structure.
**/
static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
s64 now;
spin_lock_irqsave(&adapter->systim_lock, flags);
now = timecounter_read(&adapter->tc);
now += delta;
timecounter_init(&adapter->tc, &adapter->cc, now);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
}
/**
* e1000e_phc_gettime - Reads the current time from the hardware clock
* @ptp: ptp clock structure
* @ts: timespec structure to hold the current time value
*
* Read the timecounter, convert the cycle count to nanoseconds and store
* the result in the timespec structure pointed to by @ts.
**/
static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
u32 remainder;
u64 ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
ts->tv_nsec = remainder;
return 0;
}
/**
* e1000e_phc_settime - Set the current time on the hardware clock
* @ptp: ptp clock structure
* @ts: timespec containing the new time for the cycle counter
*
* Reset the timecounter to use a new base value instead of the kernel
* wall timer value.
**/
static int e1000e_phc_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
u64 ns;
ns = timespec_to_ns(ts);
/* reset the timecounter */
spin_lock_irqsave(&adapter->systim_lock, flags);
timecounter_init(&adapter->tc, &adapter->cc, ns);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
}
/**
* e1000e_phc_enable - enable or disable an ancillary feature
* @ptp: ptp clock structure
* @request: Desired resource to enable or disable
* @on: Caller passes one to enable or zero to disable
*
* Enable (or disable) ancillary features of the PHC subsystem.
* Currently, no ancillary features are supported.
**/
static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp,
struct ptp_clock_request __always_unused *request,
int __always_unused on)
{
return -EOPNOTSUPP;
}
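/**
 * e1000e_systim_overflow_work - delayed work to detect SYSTIM overflow
 * @work: pointer to the embedded work_struct
 *
 * Periodically read the hardware clock through the registered gettime
 * callback so the timecounter stays ahead of any SYSTIM counter wrap,
 * then reschedule the check.
 **/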
static void e1000e_systim_overflow_work(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec ts;
adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
schedule_delayed_work(&adapter->systim_overflow_work,
E1000_SYSTIM_OVERFLOW_PERIOD);
}
static const struct ptp_clock_info e1000e_ptp_clock_info = {
.owner = THIS_MODULE,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
.gettime = e1000e_phc_gettime,
.settime = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
/**
* e1000e_ptp_init - initialize PTP for devices which support it
* @adapter: board private structure
*
* This function performs the required steps for enabling PTP support.
* If PTP support has already been loaded it simply calls the cyclecounter
* init routine and exits.
**/
void e1000e_ptp_init(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
adapter->ptp_clock = NULL;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return;
adapter->ptp_clock_info = e1000e_ptp_clock_info;
snprintf(adapter->ptp_clock_info.name,
sizeof(adapter->ptp_clock_info.name), "%pm",
adapter->netdev->perm_addr);
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
if ((hw->mac.type != e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
break;
}
/* fall-through */
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = 600000000 - 1;
break;
default:
break;
}
INIT_DELAYED_WORK(&adapter->systim_overflow_work,
e1000e_systim_overflow_work);
schedule_delayed_work(&adapter->systim_overflow_work,
E1000_SYSTIM_OVERFLOW_PERIOD);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_err("ptp_clock_register failed\n");
} else {
e_info("registered PHC clock\n");
}
}
/**
* e1000e_ptp_remove - disable PTP device and stop the overflow check
* @adapter: board private structure
*
* Stop the PTP support, and cancel the delayed work.
**/
void e1000e_ptp_remove(struct e1000_adapter *adapter)
{
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return;
cancel_delayed_work_sync(&adapter->systim_overflow_work);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
e_info("removed PHC\n");
}
}


@ -0,0 +1,247 @@
/* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _E1000E_REGS_H_
#define _E1000E_REGS_H_
#define E1000_CTRL 0x00000 /* Device Control - RW */
#define E1000_STATUS 0x00008 /* Device Status - RO */
#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define E1000_EERD 0x00014 /* EEPROM Read - RW */
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High - RW */
#define E1000_FEXT 0x0002C /* Future Extended - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
#define E1000_SVCR 0x000F0
#define E1000_SVT 0x000F4
#define E1000_LPIC 0x000FC /* Low Power IDLE control */
#define E1000_RCTL 0x00100 /* Rx Control - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
#define E1000_TCTL 0x00400 /* Tx Control - RW */
#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
#define E1000_TIPG 0x00410 /* Tx Inter-packet gap - RW */
#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
/* Split and Replication Rx Control - RW */
#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
* Example usage:
* E1000_RDBAL(current_rx_queue)
*/
#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
(0x0C000 + ((_n) * 0x40)))
#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
(0x0C004 + ((_n) * 0x40)))
#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
(0x0C008 + ((_n) * 0x40)))
#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
(0x0C010 + ((_n) * 0x40)))
#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
(0x0C018 + ((_n) * 0x40)))
#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
(0x0C028 + ((_n) * 0x40)))
#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
(0x0E000 + ((_n) * 0x40)))
#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
(0x0E004 + ((_n) * 0x40)))
#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
(0x0E008 + ((_n) * 0x40)))
#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
(0x0E010 + ((_n) * 0x40)))
#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
(0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
(0x0E028 + ((_n) * 0x40)))
#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
(0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define E1000_COLC 0x04028 /* Collision Count - R/clr */
#define E1000_DC 0x04030 /* Defer Count - R/clr */
#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count - R/clr */
#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 bytes) - R/clr */
#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
#define E1000_RFCTL 0x05008 /* Receive Filter Control */
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define E1000_WUS 0x05810 /* Wakeup Status - RO */
#define E1000_MANC 0x05820 /* Management Control - RW */
#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
#define E1000_HOST_IF 0x08800 /* Host Interface */
#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
/* Management Decision Filters */
#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
#define E1000_GCR 0x05B00 /* PCI-Ex Control */
#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
/* Driver-only SW semaphore (not used by BOOT agents) */
#define E1000_SWSM2 0x05B58
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
#define E1000_HICR 0x08F00 /* Host Interface Control */
/* RSS registers */
#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
#endif


@ -0,0 +1,33 @@
################################################################################
#
# Intel Ethernet Switch Host Interface Driver
# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
#
obj-$(CONFIG_FM10K) += fm10k.o
fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \
fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \
fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \
fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o


@ -0,0 +1,530 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_H_
#define _FM10K_H_
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/clocksource.h>
#include <linux/ptp_clock_kernel.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
#define MAX_QUEUES FM10K_MAX_QUEUES_PF
#define FM10K_MIN_RXD 128
#define FM10K_MAX_RXD 4096
#define FM10K_DEFAULT_RXD 256
#define FM10K_MIN_TXD 128
#define FM10K_MAX_TXD 4096
#define FM10K_DEFAULT_TXD 256
#define FM10K_DEFAULT_TX_WORK 256
#define FM10K_RXBUFFER_256 256
#define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256
#define FM10K_RXBUFFER_2048 2048
#define FM10K_RX_BUFSZ FM10K_RXBUFFER_2048
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define FM10K_MAX_STATIONS 63
struct fm10k_l2_accel {
int size;
u16 count;
u16 dglort;
struct rcu_head rcu;
struct net_device *macvlan[0];
};
enum fm10k_ring_state_t {
__FM10K_TX_DETECT_HANG,
__FM10K_HANG_CHECK_ARMED,
};
#define check_for_tx_hang(ring) \
test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
struct fm10k_tx_buffer {
struct fm10k_tx_desc *next_to_watch;
struct sk_buff *skb;
unsigned int bytecount;
u16 gso_segs;
u16 tx_flags;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
};
struct fm10k_rx_buffer {
dma_addr_t dma;
struct page *page;
u32 page_offset;
};
struct fm10k_queue_stats {
u64 packets;
u64 bytes;
};
struct fm10k_tx_queue_stats {
u64 restart_queue;
u64 csum_err;
u64 tx_busy;
u64 tx_done_old;
};
struct fm10k_rx_queue_stats {
u64 alloc_failed;
u64 csum_err;
u64 errors;
};
struct fm10k_ring {
struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
struct device *dev; /* device for DMA mapping */
struct fm10k_l2_accel __rcu *l2_accel; /* L2 acceleration list */
void *desc; /* descriptor ring memory */
union {
struct fm10k_tx_buffer *tx_buffer;
struct fm10k_rx_buffer *rx_buffer;
};
u32 __iomem *tail;
unsigned long state;
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
u8 queue_index; /* needed for queue management */
u8 reg_idx; /* holds the special value that gets
* the hardware register offset
* associated with this ring, which is
* different for DCB and RSS modes
*/
u8 qos_pc; /* priority class of queue */
u16 vid; /* default vlan ID of queue */
u16 count; /* amount of descriptors */
u16 next_to_alloc;
u16 next_to_use;
u16 next_to_clean;
struct fm10k_queue_stats stats;
struct u64_stats_sync syncp;
union {
/* Tx */
struct fm10k_tx_queue_stats tx_stats;
/* Rx */
struct {
struct fm10k_rx_queue_stats rx_stats;
struct sk_buff *skb;
};
};
} ____cacheline_internodealigned_in_smp;
struct fm10k_ring_container {
struct fm10k_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u16 itr; /* interrupt throttle rate value */
u8 count; /* total number of rings in vector */
};
#define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */
#define FM10K_ITR_10K 100 /* 100us */
#define FM10K_ITR_20K 50 /* 50us */
#define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */
#define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR)
static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
{
return &ring->netdev->_tx[ring->queue_index];
}
/* iterator for handling rings in ring container */
#define fm10k_for_each_ring(pos, head) \
for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;)
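/* Example (illustrative, clean_ring() is a placeholder):
 *	fm10k_for_each_ring(ring, q_vector->tx)
 *		clean_ring(ring);
 * visits every ring in the container, from the last entry to the first.
 */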
#define MAX_Q_VECTORS 256
#define MIN_Q_VECTORS 1
enum fm10k_non_q_vectors {
FM10K_MBX_VECTOR,
#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
NON_Q_VECTORS_PF
};
#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? \
NON_Q_VECTORS_PF : \
NON_Q_VECTORS_VF)
#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw))
struct fm10k_q_vector {
struct fm10k_intfc *interface;
u32 __iomem *itr; /* pointer to ITR register for this vector */
u16 v_idx; /* index of q_vector within interface array */
struct fm10k_ring_container rx, tx;
struct napi_struct napi;
char name[IFNAMSIZ + 9];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum fm10k_ring_f_enum {
RING_F_RSS,
RING_F_QOS,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
struct fm10k_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
u16 mask; /* Mask used for feature to ring mapping */
u16 offset; /* offset to start of feature */
};
struct fm10k_iov_data {
unsigned int num_vfs;
unsigned int next_vf_mbx;
struct rcu_head rcu;
struct fm10k_vf_info vf_info[0];
};
#define fm10k_vxlan_port_for_each(vp, intfc) \
list_for_each_entry(vp, &(intfc)->vxlan_port, list)
struct fm10k_vxlan_port {
struct list_head list;
sa_family_t sa_family;
__be16 port;
};
struct fm10k_intfc {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */
struct pci_dev *pdev;
unsigned long state;
u32 flags;
#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0)
#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1)
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
int xcast_mode;
/* Tx fast path data */
int num_tx_queues;
u16 tx_itr;
/* Rx fast path data */
int num_rx_queues;
u16 rx_itr;
/* TX */
struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp;
u64 restart_queue;
u64 tx_busy;
u64 tx_csum_errors;
u64 alloc_failed;
u64 rx_csum_errors;
u64 rx_errors;
u64 tx_bytes_nic;
u64 tx_packets_nic;
u64 rx_bytes_nic;
u64 rx_packets_nic;
u64 rx_drops_nic;
u64 rx_overrun_pf;
u64 rx_overrun_vf;
u32 tx_timeout_count;
/* RX */
struct fm10k_ring *rx_ring[MAX_QUEUES];
/* Queueing vectors */
struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];
struct msix_entry *msix_entries;
int num_q_vectors; /* current number of q_vectors for device */
struct fm10k_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* SR-IOV information management structure */
struct fm10k_iov_data *iov_data;
struct fm10k_hw_stats stats;
struct fm10k_hw hw;
u32 __iomem *uc_addr;
u32 __iomem *sw_addr;
u16 msg_enable;
u16 tx_ring_count;
u16 rx_ring_count;
struct timer_list service_timer;
struct work_struct service_task;
unsigned long next_stats_update;
unsigned long next_tx_hang_check;
unsigned long last_reset;
unsigned long link_down_event;
bool host_ready;
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
/* VXLAN port tracking information */
struct list_head vxlan_port;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
#endif /* CONFIG_DEBUG_FS */
struct ptp_clock_info ptp_caps;
struct ptp_clock *ptp_clock;
struct sk_buff_head ts_tx_skb_queue;
u32 tx_hwtstamp_timeouts;
struct hwtstamp_config ts_config;
/* We are unable to actually adjust the clock beyond the frequency
* value. Once the clock is started there is no resetting it. As
* such we maintain a separate offset from the actual hardware clock
* to allow for offset adjustment.
*/
s64 ptp_adjust;
rwlock_t systime_lock;
#ifdef CONFIG_DCB
u8 pfc_en;
#endif
u8 rx_pause;
/* GLORT resources in use by PF */
u16 glort;
u16 glort_count;
/* VLAN ID for updating multicast/unicast lists */
u16 vid;
};
enum fm10k_state_t {
__FM10K_RESETTING,
__FM10K_DOWN,
__FM10K_SERVICE_SCHED,
__FM10K_SERVICE_DISABLE,
__FM10K_MBX_LOCK,
__FM10K_LINK_DOWN,
};
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
{
/* busy loop if we cannot obtain the lock as some calls
* such as ndo_set_rx_mode may be made in atomic context
*/
while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state))
udelay(20);
}
static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
{
/* flush memory to make sure state is correct */
smp_mb__before_atomic();
clear_bit(__FM10K_MBX_LOCK, &interface->state);
}
static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
{
return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
}
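/* Typical locking pattern (illustrative):
 *	fm10k_mbx_lock(interface);
 *	... queue or process mailbox messages ...
 *	fm10k_mbx_unlock(interface);
 * fm10k_mbx_trylock() is the non-blocking variant for paths that can
 * simply retry later instead of spinning on the lock.
 */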
/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc,
const u32 stat_err_bits)
{
return rx_desc->d.staterr & cpu_to_le32(stat_err_bits);
}
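/* Example (illustrative): fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD)
 * is non-zero when the descriptor-done status bit is set.
 */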
/* fm10k_desc_unused - calculate if we have unused descriptors */
static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
{
s16 unused = ring->next_to_clean - ring->next_to_use - 1;
return likely(unused < 0) ? unused + ring->count : unused;
}
#define FM10K_TX_DESC(R, i) \
(&(((struct fm10k_tx_desc *)((R)->desc))[i]))
#define FM10K_RX_DESC(R, i) \
(&(((union fm10k_rx_desc *)((R)->desc))[i]))
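/* Example (illustrative):
 *	union fm10k_rx_desc *rx_desc =
 *		FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
 * yields a pointer to the next descriptor the cleanup path will inspect.
 */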
#define FM10K_MAX_TXD_PWR 14
#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
enum fm10k_tx_flags {
/* Tx offload flags */
FM10K_TX_FLAGS_CSUM = 0x01,
};
/* This structure is stored as little endian values as that is the native
* format of the Rx descriptor. The ordering of these fields is reversed
* from the actual ftag header to allow for a single bswap to take care
* of placing all of the values in network order
*/
union fm10k_ftag_info {
__le64 ftag;
struct {
/* dglort and sglort combined into a single 32bit desc read */
__le32 glort;
/* upper 16 bits of vlan are reserved 0 for swpri_type_user */
__le32 vlan;
} d;
struct {
__le16 dglort;
__le16 sglort;
__le16 vlan;
__le16 swpri_type_user;
} w;
};
struct fm10k_cb {
union {
__le64 tstamp;
unsigned long ts_tx_timeout;
};
union fm10k_ftag_info fi;
};
#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb)
/* main */
extern char fm10k_driver_name[];
extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
/* PCI */
void fm10k_mbx_free_irq(struct fm10k_intfc *);
int fm10k_mbx_request_irq(struct fm10k_intfc *);
void fm10k_qv_free_irq(struct fm10k_intfc *interface);
int fm10k_qv_request_irq(struct fm10k_intfc *interface);
int fm10k_register_pci_driver(void);
void fm10k_unregister_pci_driver(void);
void fm10k_up(struct fm10k_intfc *interface);
void fm10k_down(struct fm10k_intfc *interface);
void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
/* Netdev */
struct net_device *fm10k_alloc_netdev(void);
int fm10k_setup_rx_resources(struct fm10k_ring *);
int fm10k_setup_tx_resources(struct fm10k_ring *);
void fm10k_free_rx_resources(struct fm10k_ring *);
void fm10k_free_tx_resources(struct fm10k_ring *);
void fm10k_clean_all_rx_rings(struct fm10k_intfc *);
void fm10k_clean_all_tx_rings(struct fm10k_intfc *);
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *,
struct fm10k_tx_buffer *);
void fm10k_restore_rx_state(struct fm10k_intfc *);
void fm10k_reset_rx_state(struct fm10k_intfc *);
int fm10k_setup_tc(struct net_device *dev, u8 tc);
int fm10k_open(struct net_device *netdev);
int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
s32 fm10k_iov_mbx(struct fm10k_intfc *interface);
void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
int vf_idx, u16 vid, u8 qos);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi);
/* DebugFS */
#ifdef CONFIG_DEBUG_FS
void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
void fm10k_dbg_intfc_init(struct fm10k_intfc *interface);
void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface);
void fm10k_dbg_init(void);
void fm10k_dbg_exit(void);
#else
static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {}
static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {}
static inline void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) {}
static inline void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) {}
static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
/* Time Stamping */
void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
struct skb_shared_hwtstamps *hwtstamp,
u64 systime);
void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
u64 systime);
void fm10k_ts_reset(struct fm10k_intfc *interface);
void fm10k_ts_init(struct fm10k_intfc *interface);
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
void fm10k_ptp_register(struct fm10k_intfc *interface);
void fm10k_ptp_unregister(struct fm10k_intfc *interface);
int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
/* DCB */
void fm10k_dcbnl_set_ops(struct net_device *dev);
#endif /* _FM10K_H_ */


@ -0,0 +1,534 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k_common.h"
/**
* fm10k_get_bus_info_generic - Generic set PCI bus info
* @hw: pointer to hardware structure
*
* Gets the PCI bus info (speed, width, type) then calls helper function to
* store this data within the fm10k_hw structure.
**/
s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
{
u16 link_cap, link_status, device_cap, device_control;
/* Get the maximum link width and speed from PCIe config space */
link_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_CAP);
switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
case FM10K_PCIE_LINK_WIDTH_1:
hw->bus_caps.width = fm10k_bus_width_pcie_x1;
break;
case FM10K_PCIE_LINK_WIDTH_2:
hw->bus_caps.width = fm10k_bus_width_pcie_x2;
break;
case FM10K_PCIE_LINK_WIDTH_4:
hw->bus_caps.width = fm10k_bus_width_pcie_x4;
break;
case FM10K_PCIE_LINK_WIDTH_8:
hw->bus_caps.width = fm10k_bus_width_pcie_x8;
break;
default:
hw->bus_caps.width = fm10k_bus_width_unknown;
break;
}
switch (link_cap & FM10K_PCIE_LINK_SPEED) {
case FM10K_PCIE_LINK_SPEED_2500:
hw->bus_caps.speed = fm10k_bus_speed_2500;
break;
case FM10K_PCIE_LINK_SPEED_5000:
hw->bus_caps.speed = fm10k_bus_speed_5000;
break;
case FM10K_PCIE_LINK_SPEED_8000:
hw->bus_caps.speed = fm10k_bus_speed_8000;
break;
default:
hw->bus_caps.speed = fm10k_bus_speed_unknown;
break;
}
/* Get the PCIe maximum payload size for the PCIe function */
device_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CAP);
switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
hw->bus_caps.payload = fm10k_bus_payload_128;
break;
case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
hw->bus_caps.payload = fm10k_bus_payload_256;
break;
case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
hw->bus_caps.payload = fm10k_bus_payload_512;
break;
default:
hw->bus_caps.payload = fm10k_bus_payload_unknown;
break;
}
/* Get the negotiated link width and speed from PCIe config space */
link_status = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_STATUS);
switch (link_status & FM10K_PCIE_LINK_WIDTH) {
case FM10K_PCIE_LINK_WIDTH_1:
hw->bus.width = fm10k_bus_width_pcie_x1;
break;
case FM10K_PCIE_LINK_WIDTH_2:
hw->bus.width = fm10k_bus_width_pcie_x2;
break;
case FM10K_PCIE_LINK_WIDTH_4:
hw->bus.width = fm10k_bus_width_pcie_x4;
break;
case FM10K_PCIE_LINK_WIDTH_8:
hw->bus.width = fm10k_bus_width_pcie_x8;
break;
default:
hw->bus.width = fm10k_bus_width_unknown;
break;
}
switch (link_status & FM10K_PCIE_LINK_SPEED) {
case FM10K_PCIE_LINK_SPEED_2500:
hw->bus.speed = fm10k_bus_speed_2500;
break;
case FM10K_PCIE_LINK_SPEED_5000:
hw->bus.speed = fm10k_bus_speed_5000;
break;
case FM10K_PCIE_LINK_SPEED_8000:
hw->bus.speed = fm10k_bus_speed_8000;
break;
default:
hw->bus.speed = fm10k_bus_speed_unknown;
break;
}
/* Get the negotiated PCIe maximum payload size for the PCIe function */
device_control = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CTRL);
switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
hw->bus.payload = fm10k_bus_payload_128;
break;
case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
hw->bus.payload = fm10k_bus_payload_256;
break;
case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
hw->bus.payload = fm10k_bus_payload_512;
break;
default:
hw->bus.payload = fm10k_bus_payload_unknown;
break;
}
return 0;
}
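/**
 * fm10k_get_pcie_msix_count_generic - Gets MSI-X vector count
 * @hw: pointer to the hardware structure
 *
 * Read the MSI-X message control word from PCI config space and return
 * the number of MSI-X vectors the function supports, capped at
 * FM10K_MAX_MSIX_VECTORS.
 **/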
static u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
{
u16 msix_count;
/* read in value from MSI-X capability register */
msix_count = fm10k_read_pci_cfg_word(hw, FM10K_PCI_MSIX_MSG_CTRL);
msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
msix_count++;
if (msix_count > FM10K_MAX_MSIX_VECTORS)
msix_count = FM10K_MAX_MSIX_VECTORS;
return msix_count;
}
/**
* fm10k_get_invariants_generic - Inits constant values
* @hw: pointer to the hardware structure
*
* Initialize the common invariants for the device.
**/
s32 fm10k_get_invariants_generic(struct fm10k_hw *hw)
{
struct fm10k_mac_info *mac = &hw->mac;
/* initialize GLORT state to avoid any false hits */
mac->dglort_map = FM10K_DGLORTMAP_NONE;
/* record maximum number of MSI-X vectors */
mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
return 0;
}
/**
* fm10k_start_hw_generic - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* This function sets the Tx ready flag to indicate that the Tx path has
* been initialized.
**/
s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
{
/* set flag indicating we are beginning Tx */
hw->mac.tx_ready = true;
return 0;
}
/**
* fm10k_disable_queues_generic - Stop Tx/Rx queues
* @hw: pointer to hardware structure
* @q_cnt: number of queues to be disabled
*
* Clear the enable bit for the first q_cnt Tx/Rx queue pairs, then poll
* until the hardware reports them disabled. Returns 0 on success or
* FM10K_ERR_REQUESTS_PENDING if the queues do not quiesce in time.
**/
s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
{
u32 reg;
u16 i, time;
/* clear tx_ready to prevent any false hits for reset */
hw->mac.tx_ready = false;
/* clear the enable bit for all rings */
for (i = 0; i < q_cnt; i++) {
reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
fm10k_write_reg(hw, FM10K_TXDCTL(i),
reg & ~FM10K_TXDCTL_ENABLE);
reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
fm10k_write_reg(hw, FM10K_RXQCTL(i),
reg & ~FM10K_RXQCTL_ENABLE);
}
fm10k_write_flush(hw);
udelay(1);
/* loop through all queues to verify that they are all disabled */
for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
/* if we are at end of rings all rings are disabled */
if (i == q_cnt)
return 0;
/* if queue enables cleared, then move to next ring pair */
reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
i++;
continue;
}
}
/* decrement time and wait 1 usec */
time--;
if (time)
udelay(1);
}
return FM10K_ERR_REQUESTS_PENDING;
}
/**
* fm10k_stop_hw_generic - Stop Tx/Rx units
* @hw: pointer to hardware structure
*
* Disable all Tx/Rx queues on the device by calling
* fm10k_disable_queues_generic for every queue the MAC supports.
**/
s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
{
return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
}
/**
* fm10k_read_hw_stats_32b - Reads value of 32-bit registers
* @hw: pointer to the hardware structure
* @addr: address of register containing a 32-bit value
* @stat: pointer to the hardware statistic structure
*
* Function reads the content of the register and returns the delta
* between the base and the current value.
**/
u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
struct fm10k_hw_stat *stat)
{
u32 delta = fm10k_read_reg(hw, addr) - stat->base_l;
if (FM10K_REMOVED(hw->hw_addr))
stat->base_h = 0;
return delta;
}
/**
* fm10k_read_hw_stats_48b - Reads value of 48-bit registers
* @hw: pointer to the hardware structure
* @addr: address of register containing the lower 32-bit value
* @stat: pointer to the hardware statistic structure
*
* Function reads the content of 2 registers, combined to represent a 48-bit
* statistical value. Extra processing is required to handle overflowing.
* Finally, a delta value is returned representing the difference between the
* values stored in registers and values stored in the statistic counters.
**/
static u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
struct fm10k_hw_stat *stat)
{
u32 count_l;
u32 count_h;
u32 count_tmp;
u64 delta;
count_h = fm10k_read_reg(hw, addr + 1);
/* Check for overflow */
do {
count_tmp = count_h;
count_l = fm10k_read_reg(hw, addr);
count_h = fm10k_read_reg(hw, addr + 1);
} while (count_h != count_tmp);
delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
delta -= stat->base_l;
return delta & FM10K_48_BIT_MASK;
}
/**
* fm10k_update_hw_base_48b - Updates 48-bit statistic base value
* @stat: pointer to the hardware statistic structure
* @delta: value to be updated into the hardware statistic structure
*
* Function receives a value and determines if an update is required based on
* a delta calculation. Only the base value will be updated.
**/
static void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
{
if (!delta)
return;
/* update lower 32 bits */
delta += stat->base_l;
stat->base_l = (u32)delta;
/* update upper 32 bits */
stat->base_h += (u32)(delta >> 32);
}
/**
* fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
* @hw: pointer to the hardware structure
* @q: pointer to the ring of hardware statistics queue
* @idx: index pointing to the start of the ring iteration
*
* Function updates the TX queue statistics counters that are related to the
* hardware.
**/
static void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
struct fm10k_hw_stats_q *q,
u32 idx)
{
u32 id_tx, id_tx_prev, tx_packets;
u64 tx_bytes = 0;
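/* The queue may be reassigned to another function while its counters are
* being read, so the TXQCTL owner ID is sampled before and after the reads
* below and the loop retries whenever it changes, discarding torn snapshots.
*/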
/* Retrieve TX Owner Data */
id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));
/* Process TX Ring */
do {
tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
&q->tx_packets);
if (tx_packets)
tx_bytes = fm10k_read_hw_stats_48b(hw,
FM10K_QBTC_L(idx),
&q->tx_bytes);
/* Re-Check Owner Data */
id_tx_prev = id_tx;
id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));
} while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);
/* drop non-ID bits and set VALID ID bit */
id_tx &= FM10K_TXQCTL_ID_MASK;
id_tx |= FM10K_STAT_VALID;
/* update packet counts */
if (q->tx_stats_idx == id_tx) {
q->tx_packets.count += tx_packets;
q->tx_bytes.count += tx_bytes;
}
/* update bases and record ID */
fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);
q->tx_stats_idx = id_tx;
}
/**
* fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
* @hw: pointer to the hardware structure
* @q: pointer to the ring of hardware statistics queue
* @idx: index pointing to the start of the ring iteration
*
* Function updates the RX queue statistics counters that are related to the
* hardware.
**/
static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
struct fm10k_hw_stats_q *q,
u32 idx)
{
u32 id_rx, id_rx_prev, rx_packets, rx_drops;
u64 rx_bytes = 0;
/* Retrieve RX Owner Data */
id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
/* Process RX Ring*/
do {
rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
&q->rx_drops);
rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
&q->rx_packets);
if (rx_packets)
rx_bytes = fm10k_read_hw_stats_48b(hw,
FM10K_QBRC_L(idx),
&q->rx_bytes);
/* Re-Check Owner Data */
id_rx_prev = id_rx;
id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
} while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);
/* drop non-ID bits and set VALID ID bit */
id_rx &= FM10K_RXQCTL_ID_MASK;
id_rx |= FM10K_STAT_VALID;
/* update packet counts */
if (q->rx_stats_idx == id_rx) {
q->rx_drops.count += rx_drops;
q->rx_packets.count += rx_packets;
q->rx_bytes.count += rx_bytes;
}
/* update bases and record ID */
fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);
q->rx_stats_idx = id_rx;
}
/**
* fm10k_update_hw_stats_q - Updates queue statistics counters
* @hw: pointer to the hardware structure
* @q: pointer to the ring of hardware statistics queue
* @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
*
* Function updates the queue statistics counters that are related to the
* hardware.
**/
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
u32 idx, u32 count)
{
u32 i;
for (i = 0; i < count; i++, idx++, q++) {
fm10k_update_hw_stats_tx_q(hw, q, idx);
fm10k_update_hw_stats_rx_q(hw, q, idx);
}
}
/**
* fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
* @q: pointer to the ring of hardware statistics queue
* @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
*
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
{
u32 i;
for (i = 0; i < count; i++, idx++, q++) {
q->rx_stats_idx = 0;
q->tx_stats_idx = 0;
}
}
/**
* fm10k_get_host_state_generic - Returns the state of the host
* @hw: pointer to hardware structure
* @host_ready: pointer to boolean value that will record host state
*
* This function will check the health of the mailbox and Tx queue 0
* in order to determine if we should report that the link is up or not.
**/
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
struct fm10k_mac_info *mac = &hw->mac;
s32 ret_val = 0;
u32 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(0));
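/* Note: a TXDCTL value of all 1s (~txdctl == 0) means the register read
* failed, typically due to surprise removal, so the checks below report the
* host as not ready without requesting a reset.
*/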
/* process upstream mailbox in case interrupts were disabled */
mbx->ops.process(hw, mbx);
/* If Tx is no longer enabled link should come down */
if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
mac->get_host_state = true;
/* exit if not checking for link, or link cannot be changed */
if (!mac->get_host_state || !(~txdctl))
goto out;
/* if we somehow dropped the Tx enable we should reset */
if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
ret_val = FM10K_ERR_RESET_REQUESTED;
goto out;
}
/* if Mailbox timed out we should request reset */
if (!mbx->timeout) {
ret_val = FM10K_ERR_RESET_REQUESTED;
goto out;
}
/* verify Mailbox is still valid */
if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
goto out;
/* interface cannot receive traffic without logical ports */
if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
goto out;
/* if we passed all the tests above then the switch is ready and we no
* longer need to check for link
*/
mac->get_host_state = false;
out:
*host_ready = !mac->get_host_state;
return ret_val;
}


@ -0,0 +1,65 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_COMMON_H_
#define _FM10K_COMMON_H_
#include "fm10k_type.h"
#define FM10K_REMOVED(hw_addr) unlikely(!(hw_addr))
/* PCI configuration read */
u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg);
/* read operations, indexed using DWORDS */
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg);
/* write operations, indexed using DWORDS */
#define fm10k_write_reg(hw, reg, val) \
do { \
u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
if (!FM10K_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
/* Switch register write operations, indexed using DWORDS */
#define fm10k_write_sw_reg(hw, reg, val) \
do { \
u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \
if (!FM10K_REMOVED(sw_addr)) \
writel((val), &sw_addr[(reg)]); \
} while (0)
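/* Note that both write macros above silently drop the write once the
* corresponding BAR mapping has been released (FM10K_REMOVED() is true),
* for example after a surprise removal of the device.
*/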
/* read the CTRL register, which has no clear-on-read fields, to act as a PCIe write flush */
#define fm10k_write_flush(hw) fm10k_read_reg((hw), FM10K_CTRL)
s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw);
s32 fm10k_get_invariants_generic(struct fm10k_hw *hw);
s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt);
s32 fm10k_start_hw_generic(struct fm10k_hw *hw);
s32 fm10k_stop_hw_generic(struct fm10k_hw *hw);
u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
struct fm10k_hw_stat *stat);
#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta))
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
u32 idx, u32 count);
#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
#endif /* _FM10K_COMMON_H_ */


@ -0,0 +1,174 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k.h"
#ifdef CONFIG_DCB
/**
* fm10k_dcbnl_ieee_getets - get the ETS configuration for the device
* @dev: netdev interface for the device
* @ets: ETS structure to push configuration to
**/
static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
int i;
/* we support 8 TCs in all modes */
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
ets->cbs = 0;
/* we only support strict priority and cannot do traffic shaping */
memset(ets->tc_tx_bw, 0, sizeof(ets->tc_tx_bw));
memset(ets->tc_rx_bw, 0, sizeof(ets->tc_rx_bw));
memset(ets->tc_tsa, IEEE_8021QAZ_TSA_STRICT, sizeof(ets->tc_tsa));
/* populate the prio map based on the netdev */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
ets->prio_tc[i] = netdev_get_prio_tc_map(dev, i);
return 0;
}
/**
* fm10k_dcbnl_ieee_setets - set the ETS configuration for the device
* @dev: netdev interface for the device
* @ets: ETS structure to pull configuration from
**/
static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
u8 num_tc = 0;
int i, err;
/* verify type and determine num_tcs needed */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tx_bw[i] || ets->tc_rx_bw[i])
return -EINVAL;
if (ets->tc_tsa[i] != IEEE_8021QAZ_TSA_STRICT)
return -EINVAL;
if (ets->prio_tc[i] > num_tc)
num_tc = ets->prio_tc[i];
}
/* if requested TC is greater than 0 then num_tcs is max + 1 */
if (num_tc)
num_tc++;
if (num_tc > IEEE_8021QAZ_MAX_TCS)
return -EINVAL;
/* update TC hardware mapping if necessary */
if (num_tc != netdev_get_num_tc(dev)) {
err = fm10k_setup_tc(dev, num_tc);
if (err)
return err;
}
/* update priority mapping */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
return 0;
}
/**
* fm10k_dcbnl_ieee_getpfc - get the PFC configuration for the device
* @dev: netdev interface for the device
* @pfc: PFC structure to push configuration to
**/
static int fm10k_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct fm10k_intfc *interface = netdev_priv(dev);
/* record flow control max count and state of TCs */
pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
pfc->pfc_en = interface->pfc_en;
return 0;
}
/**
* fm10k_dcbnl_ieee_setpfc - set the PFC configuration for the device
* @dev: netdev interface for the device
* @pfc: PFC structure to pull configuration from
**/
static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct fm10k_intfc *interface = netdev_priv(dev);
/* record PFC configuration to interface */
interface->pfc_en = pfc->pfc_en;
/* if we are running update the drop_en state for all queues */
if (netif_running(dev))
fm10k_update_rx_drop_en(interface);
return 0;
}
/**
* fm10k_dcbnl_getdcbx - get the DCBX configuration for the device
* @dev: netdev interface for the device
*
* Returns that we support only IEEE DCB for this interface
**/
static u8 fm10k_dcbnl_getdcbx(struct net_device *dev)
{
return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
/**
* fm10k_dcbnl_setdcbx - set the DCBX configuration for the device
* @dev: netdev interface for the device
* @mode: new mode for this device
*
* Returns error on attempt to enable anything but IEEE DCB for this interface
**/
static u8 fm10k_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}
static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = {
.ieee_getets = fm10k_dcbnl_ieee_getets,
.ieee_setets = fm10k_dcbnl_ieee_setets,
.ieee_getpfc = fm10k_dcbnl_ieee_getpfc,
.ieee_setpfc = fm10k_dcbnl_ieee_setpfc,
.getdcbx = fm10k_dcbnl_getdcbx,
.setdcbx = fm10k_dcbnl_setdcbx,
};
#endif /* CONFIG_DCB */
/**
* fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev
* @dev: netdev interface for the device
*
* Enables PF for DCB by assigning DCBNL ops pointer.
**/
void fm10k_dcbnl_set_ops(struct net_device *dev)
{
#ifdef CONFIG_DCB
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_hw *hw = &interface->hw;
if (hw->mac.type == fm10k_mac_pf)
dev->dcbnl_ops = &fm10k_dcbnl_ops;
#endif /* CONFIG_DCB */
}


@ -0,0 +1,259 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifdef CONFIG_DEBUG_FS
#include "fm10k.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
static struct dentry *dbg_root;
/* Descriptor Seq Functions */
static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
{
struct fm10k_ring *ring = s->private;
return (*pos < ring->count) ? pos : NULL;
}
static void *fm10k_dbg_desc_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct fm10k_ring *ring = s->private;
return (++(*pos) < ring->count) ? pos : NULL;
}
static void fm10k_dbg_desc_seq_stop(struct seq_file *s, void *v)
{
/* Do nothing. */
}
static void fm10k_dbg_desc_break(struct seq_file *s, int i)
{
while (i--)
seq_puts(s, "-");
seq_puts(s, "\n");
}
static int fm10k_dbg_tx_desc_seq_show(struct seq_file *s, void *v)
{
struct fm10k_ring *ring = s->private;
int i = *(loff_t *)v;
static const char tx_desc_hdr[] =
"DES BUFFER_ADDRESS LENGTH VLAN MSS HDRLEN FLAGS\n";
/* Generate header */
if (!i) {
seq_printf(s, tx_desc_hdr);
fm10k_dbg_desc_break(s, sizeof(tx_desc_hdr) - 1);
}
/* Validate descriptor allocation */
if (!ring->desc) {
seq_printf(s, "%03X Descriptor ring not allocated.\n", i);
} else {
struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i);
seq_printf(s, "%03X %#018llx %#06x %#06x %#06x %#06x %#04x\n",
i, txd->buffer_addr, txd->buflen, txd->vlan,
txd->mss, txd->hdrlen, txd->flags);
}
return 0;
}
static int fm10k_dbg_rx_desc_seq_show(struct seq_file *s, void *v)
{
struct fm10k_ring *ring = s->private;
int i = *(loff_t *)v;
static const char rx_desc_hdr[] =
"DES DATA RSS STATERR LENGTH VLAN DGLORT SGLORT TIMESTAMP\n";
/* Generate header */
if (!i) {
seq_printf(s, rx_desc_hdr);
fm10k_dbg_desc_break(s, sizeof(rx_desc_hdr) - 1);
}
/* Validate descriptor allocation */
if (!ring->desc) {
seq_printf(s, "%03X Descriptor ring not allocated.\n", i);
} else {
union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i);
seq_printf(s,
"%03X %#010x %#010x %#010x %#06x %#06x %#06x %#06x %#018llx\n",
i, rxd->d.data, rxd->d.rss, rxd->d.staterr,
rxd->w.length, rxd->w.vlan, rxd->w.dglort,
rxd->w.sglort, rxd->q.timestamp);
}
return 0;
}
static const struct seq_operations fm10k_dbg_tx_desc_seq_ops = {
.start = fm10k_dbg_desc_seq_start,
.next = fm10k_dbg_desc_seq_next,
.stop = fm10k_dbg_desc_seq_stop,
.show = fm10k_dbg_tx_desc_seq_show,
};
static const struct seq_operations fm10k_dbg_rx_desc_seq_ops = {
.start = fm10k_dbg_desc_seq_start,
.next = fm10k_dbg_desc_seq_next,
.stop = fm10k_dbg_desc_seq_stop,
.show = fm10k_dbg_rx_desc_seq_show,
};
static int fm10k_dbg_desc_open(struct inode *inode, struct file *filep)
{
struct fm10k_ring *ring = inode->i_private;
struct fm10k_q_vector *q_vector = ring->q_vector;
const struct seq_operations *desc_seq_ops;
int err;
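/* Tx rings are laid out immediately before the Rx rings within the
* q_vector, so a ring pointer below rx.ring must refer to a Tx ring.
*/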
if (ring < q_vector->rx.ring)
desc_seq_ops = &fm10k_dbg_tx_desc_seq_ops;
else
desc_seq_ops = &fm10k_dbg_rx_desc_seq_ops;
err = seq_open(filep, desc_seq_ops);
if (err)
return err;
((struct seq_file *)filep->private_data)->private = ring;
return 0;
}
static const struct file_operations fm10k_dbg_desc_fops = {
.owner = THIS_MODULE,
.open = fm10k_dbg_desc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/**
* fm10k_dbg_q_vector_init - setup debugfs for the q_vectors
* @q_vector: q_vector to allocate directories for
*
* A folder is created for each q_vector found. In each q_vector
* folder, a debugfs file is created for each tx and rx ring
* allocated to the q_vector.
**/
void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
{
struct fm10k_intfc *interface = q_vector->interface;
char name[16];
int i;
if (!interface->dbg_intfc)
return;
/* Generate a folder for each q_vector */
sprintf(name, "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
if (!q_vector->dbg_q_vector)
return;
/* Generate a file for each tx ring in the q_vector */
for (i = 0; i < q_vector->tx.count; i++) {
struct fm10k_ring *ring = &q_vector->tx.ring[i];
sprintf(name, "tx_ring.%03d", ring->queue_index);
debugfs_create_file(name, 0600,
q_vector->dbg_q_vector, ring,
&fm10k_dbg_desc_fops);
}
/* Generate a file for each rx ring in the q_vector */
for (i = 0; i < q_vector->rx.count; i++) {
struct fm10k_ring *ring = &q_vector->rx.ring[i];
sprintf(name, "rx_ring.%03d", ring->queue_index);
debugfs_create_file(name, 0600,
q_vector->dbg_q_vector, ring,
&fm10k_dbg_desc_fops);
}
}
/**
* fm10k_dbg_q_vector_exit - free debugfs entries for the q_vector
* @q_vector: q_vector whose debugfs directory is to be removed
**/
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector)
{
struct fm10k_intfc *interface = q_vector->interface;
if (interface->dbg_intfc)
debugfs_remove_recursive(q_vector->dbg_q_vector);
q_vector->dbg_q_vector = NULL;
}
/**
* fm10k_dbg_intfc_init - setup the debugfs directory for the interface
* @interface: the interface that is starting up
**/
void fm10k_dbg_intfc_init(struct fm10k_intfc *interface)
{
const char *name = pci_name(interface->pdev);
if (dbg_root)
interface->dbg_intfc = debugfs_create_dir(name, dbg_root);
}
/**
* fm10k_dbg_intfc_exit - clean out the interface's debugfs entries
* @interface: the interface that is stopping
**/
void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface)
{
if (dbg_root)
debugfs_remove_recursive(interface->dbg_intfc);
interface->dbg_intfc = NULL;
}
/**
* fm10k_dbg_init - start up debugfs for the driver
**/
void fm10k_dbg_init(void)
{
dbg_root = debugfs_create_dir(fm10k_driver_name, NULL);
}
/**
* fm10k_dbg_exit - clean out the driver's debugfs entries
**/
void fm10k_dbg_exit(void)
{
debugfs_remove_recursive(dbg_root);
dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */

File diff suppressed because it is too large


@ -0,0 +1,536 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
struct fm10k_intfc *interface = hw->back;
struct pci_dev *pdev = interface->pdev;
dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);
return fm10k_tlv_msg_error(hw, results, mbx);
}
static const struct fm10k_msg_data iov_mbx_data[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
struct fm10k_iov_data *iov_data;
s64 mbicr, vflre;
int i;
/* if there is no iov_data then there are no mailboxes to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
rcu_read_lock();
iov_data = interface->iov_data;
/* check again now that we are in the RCU block */
if (!iov_data)
goto read_unlock;
if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
goto process_mbx;
/* read VFLRE to determine if any VFs have been reset */
do {
vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
vflre <<= 32;
vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
vflre = (vflre << 32) | (vflre >> 32);
vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));
i = iov_data->num_vfs;
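/* Shift VFLRE so the bit for VF (i - 1) occupies the sign bit; each pass
* doubles vflre (a left shift by one), so a negative value indicates the
* VF currently being examined was reset.
*/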
for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
if (vflre >= 0)
continue;
hw->iov.ops.reset_resources(hw, vf_info);
vf_info->mbx.ops.connect(hw, &vf_info->mbx);
}
} while (i != iov_data->num_vfs);
process_mbx:
/* read MBICR to determine which VFs require attention */
mbicr = fm10k_read_reg(hw, FM10K_MBICR(1));
mbicr <<= 32;
mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0));
i = iov_data->next_vf_mbx ? : iov_data->num_vfs;
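/* Walk the MBICR bits the same way: the VF being examined sits in the
* sign bit, and a negative value means its mailbox needs servicing.
*/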
for (mbicr <<= 64 - i; i--; mbicr += mbicr) {
struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx;
if (mbicr >= 0)
continue;
if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
break;
mbx->ops.process(hw, mbx);
}
if (i >= 0) {
iov_data->next_vf_mbx = i + 1;
} else if (iov_data->next_vf_mbx) {
iov_data->next_vf_mbx = 0;
goto process_mbx;
}
read_unlock:
rcu_read_unlock();
return 0;
}
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
struct fm10k_iov_data *iov_data;
int i;
/* if there is no iov_data then there are no mailboxes to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
rcu_read_lock();
iov_data = interface->iov_data;
/* check again now that we are in the RCU block */
if (!iov_data)
goto read_unlock;
/* lock the mailbox for transmit and receive */
fm10k_mbx_lock(interface);
process_mbx:
for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
struct fm10k_mbx_info *mbx = &vf_info->mbx;
u16 glort = vf_info->glort;
/* verify port mapping is valid, if not reset port */
if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
hw->iov.ops.reset_lport(hw, vf_info);
/* reset VFs that have mailbox timed out */
if (!mbx->timeout) {
hw->iov.ops.reset_resources(hw, vf_info);
mbx->ops.connect(hw, mbx);
}
/* if there is no work pending then just continue */
if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx))
continue;
/* guarantee we have free space in the SM mailbox */
if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
break;
/* cleanup mailbox and process received messages */
mbx->ops.process(hw, mbx);
}
if (i >= 0) {
iov_data->next_vf_mbx = i + 1;
} else if (iov_data->next_vf_mbx) {
iov_data->next_vf_mbx = 0;
goto process_mbx;
}
/* free the lock */
fm10k_mbx_unlock(interface);
read_unlock:
rcu_read_unlock();
return 0;
}
void fm10k_iov_suspend(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int num_vfs, i;
/* pull out num_vfs from iov_data */
num_vfs = iov_data ? iov_data->num_vfs : 0;
/* shut down queue mapping for VFs */
fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
FM10K_DGLORTMAP_NONE);
/* Stop any active VFs and reset their resources */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
hw->iov.ops.reset_resources(hw, vf_info);
hw->iov.ops.reset_lport(hw, vf_info);
}
}
int fm10k_iov_resume(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
int num_vfs, i;
/* pull out num_vfs from iov_data */
num_vfs = iov_data ? iov_data->num_vfs : 0;
/* return error if iov_data is not already populated */
if (!iov_data)
return -ENOMEM;
/* allocate hardware resources for the VFs */
hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
/* configure DGLORT mapping for RSS */
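/* rss_l and vsi_l below are bit widths: fls(n - 1) is the number of bits
* needed to index n queues per pool or n VFs respectively.
*/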
dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
dglort.idx = fm10k_dglort_vf_rss;
dglort.inner_rss = 1;
dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
dglort.queue_b = fm10k_vf_queue_index(hw, 0);
dglort.vsi_l = fls(hw->iov.total_vfs - 1);
dglort.vsi_b = 1;
hw->mac.ops.configure_dglort_map(hw, &dglort);
/* assign resources to the device */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
/* allocate all but the last GLORT to the VFs */
if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
break;
/* assign GLORT to VF, and restrict it to multicast */
hw->iov.ops.set_lport(hw, vf_info, i,
FM10K_VF_FLAG_MULTI_CAPABLE);
/* assign our default vid to the VF following reset */
vf_info->sw_vid = hw->mac.default_vid;
/* mailbox is disconnected so we don't send a message */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
/* now we are ready so we can connect */
vf_info->mbx.ops.connect(hw, &vf_info->mbx);
}
return 0;
}
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
struct fm10k_vf_info *vf_info;
u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;
/* no IOV support, not our message to process */
if (!iov_data)
return FM10K_ERR_PARAM;
/* glort outside our range, not our message to process */
if (vf_idx >= iov_data->num_vfs)
return FM10K_ERR_PARAM;
/* determine if an update has occurred and if so notify the VF */
vf_info = &iov_data->vf_info[vf_idx];
if (vf_info->sw_vid != pvid) {
vf_info->sw_vid = pvid;
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
}
return 0;
}
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
if (!interface->iov_data)
return;
/* reclaim hardware resources */
fm10k_iov_suspend(pdev);
/* drop iov_data from interface */
kfree_rcu(interface->iov_data, rcu);
interface->iov_data = NULL;
}
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
size_t size;
int i, err;
/* return error if iov_data is already populated */
if (iov_data)
return -EBUSY;
/* The PF should always be able to assign resources */
if (!hw->iov.ops.assign_resources)
return -ENODEV;
/* nothing to do if no VFs are requested */
if (!num_vfs)
return 0;
/* allocate memory for VF storage */
size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
iov_data = kzalloc(size, GFP_KERNEL);
if (!iov_data)
return -ENOMEM;
/* record number of VFs */
iov_data->num_vfs = num_vfs;
/* loop through vf_info structures initializing each entry */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
/* Record VF VSI value */
vf_info->vsi = i + 1;
vf_info->vf_idx = i;
/* initialize mailbox memory */
err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
if (err) {
dev_err(&pdev->dev,
"Unable to initialize SR-IOV mailbox\n");
kfree(iov_data);
return err;
}
}
/* assign iov_data to interface */
interface->iov_data = iov_data;
/* allocate hardware resources for the VFs */
fm10k_iov_resume(pdev);
return 0;
}
void fm10k_iov_disable(struct pci_dev *pdev)
{
if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
dev_err(&pdev->dev,
"Cannot disable SR-IOV while VFs are assigned\n");
else
pci_disable_sriov(pdev);
fm10k_iov_free_data(pdev);
}
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
u32 err_sev;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return;
pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
int current_vfs = pci_num_vf(pdev);
int err = 0;
if (current_vfs && pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Cannot modify SR-IOV while VFs are assigned\n");
num_vfs = current_vfs;
} else {
pci_disable_sriov(pdev);
fm10k_iov_free_data(pdev);
}
/* allocate resources for the VFs */
err = fm10k_iov_alloc_data(pdev, num_vfs);
if (err)
return err;
/* allocate VFs if not already allocated */
if (num_vfs && (num_vfs != current_vfs)) {
/* Disable completer abort error reporting as
* the VFs can trigger this any time they read a queue
* that they don't own.
*/
fm10k_disable_aer_comp_abort(pdev);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_err(&pdev->dev,
"Enable PCI SR-IOV failed: %d\n", err);
return err;
}
}
return num_vfs;
}
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
struct fm10k_vf_info *vf_info;
/* verify SR-IOV is active and that vf idx is valid */
if (!iov_data || vf_idx >= iov_data->num_vfs)
return -EINVAL;
/* verify MAC addr is valid */
if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
return -EINVAL;
/* record new MAC address */
vf_info = &iov_data->vf_info[vf_idx];
ether_addr_copy(vf_info->mac, mac);
/* assigning the MAC will send a mailbox message so lock is needed */
fm10k_mbx_lock(interface);
/* assign MAC address to VF */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
fm10k_mbx_unlock(interface);
return 0;
}
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
u8 qos)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
struct fm10k_vf_info *vf_info;
/* verify SR-IOV is active and that vf idx is valid */
if (!iov_data || vf_idx >= iov_data->num_vfs)
return -EINVAL;
/* QoS is not supported and VLAN IDs are only accepted in the range 0-4094 */
if (qos || (vid > (VLAN_VID_MASK - 1)))
return -EINVAL;
vf_info = &iov_data->vf_info[vf_idx];
/* exit if there is nothing to do */
if (vf_info->pf_vid == vid)
return 0;
/* record default VLAN ID for VF */
vf_info->pf_vid = vid;
/* assigning the VLAN will send a mailbox message so lock is needed */
fm10k_mbx_lock(interface);
/* Clear the VLAN table for the VF */
hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
/* Update VF assignment and trigger reset */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
fm10k_mbx_unlock(interface);
return 0;
}
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused,
int rate)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
/* verify SR-IOV is active and that vf idx is valid */
if (!iov_data || vf_idx >= iov_data->num_vfs)
return -EINVAL;
/* rate limit cannot be less than 10 Mb/s or greater than link speed */
if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
return -EINVAL;
/* store values */
iov_data->vf_info[vf_idx].rate = rate;
/* update hardware configuration */
hw->iov.ops.configure_tc(hw, vf_idx, rate);
return 0;
}
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_vf_info *vf_info;
/* verify SR-IOV is active and that vf idx is valid */
if (!iov_data || vf_idx >= iov_data->num_vfs)
return -EINVAL;
vf_info = &iov_data->vf_info[vf_idx];
ivi->vf = vf_idx;
ivi->max_tx_rate = vf_info->rate;
ivi->min_tx_rate = 0;
ether_addr_copy(ivi->mac, vf_info->mac);
ivi->vlan = vf_info->pf_vid;
ivi->qos = 0;
return 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,307 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_MBX_H_
#define _FM10K_MBX_H_
/* forward declaration */
struct fm10k_mbx_info;
#include "fm10k_type.h"
#include "fm10k_tlv.h"
/* PF Mailbox Registers */
#define FM10K_MBMEM(_n) ((_n) + 0x18000)
#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000)
#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400)
#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600)
/* XOR provides means of switching from Tx to Rx FIFO */
#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0))
#define FM10K_MBX(_n) ((_n) + 0x18800)
#define FM10K_MBX_REQ 0x00000002
#define FM10K_MBX_ACK 0x00000004
#define FM10K_MBX_REQ_INTERRUPT 0x00000008
#define FM10K_MBX_ACK_INTERRUPT 0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
#define FM10K_MBICR(_n) ((_n) + 0x18840)
#define FM10K_GMBX 0x18842
/* VF Mailbox Registers */
#define FM10K_VFMBX 0x00010
#define FM10K_VFMBMEM(_n) ((_n) + 0x00020)
#define FM10K_VFMBMEM_LEN 16
#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2)
/* Delays/timeouts */
#define FM10K_MBX_DISCONNECT_TIMEOUT 500
#define FM10K_MBX_POLL_DELAY 19
#define FM10K_MBX_INT_DELAY 20
/* PF/VF Mailbox state machine
*
* +----------+ connect() +----------+
* | CLOSED | --------------> | CONNECT |
* +----------+ +----------+
* ^ ^ |
* | rcv: rcv: | | rcv:
* | Connect Disconnect | | Connect
* | Disconnect Error | | Data
* | | |
* | | V
* +----------+ disconnect() +----------+
* |DISCONNECT| <-------------- | OPEN |
* +----------+ +----------+
*
* The diagram above describes the PF/VF mailbox state machine. There
* are four main states to this machine.
* Closed: This state represents a mailbox that is in a standby state
* with interrupts disabled. In this state the mailbox should not
* read the mailbox or write any data. The only means of exiting
* this state is for the system to make the connect() call for the
* mailbox; it will then transition to the connect state.
* Connect: In this state the mailbox is seeking a connection. It will
* post a connect message with no specified destination and will
* wait for a reply from the other side of the mailbox. This state
* is exited when either a connect with the local mailbox as the
* destination is received or when a data message is received with
* a valid sequence number.
* Open: In this state the mailbox is able to transfer data between the local
* entity and the remote. It will fall back to connect in the event of
* receiving either an error message, or a disconnect message. It will
* transition to disconnect on a call to disconnect();
* Disconnect: In this state the mailbox is attempting to gracefully terminate
* the connection. It will do so at the first point where it knows
* that the remote endpoint is either done sending, or when the
* remote endpoint has fallen back into connect.
*/
enum fm10k_mbx_state {
FM10K_STATE_CLOSED,
FM10K_STATE_CONNECT,
FM10K_STATE_OPEN,
FM10K_STATE_DISCONNECT,
};
/* PF/VF Mailbox header format
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* The layout above describes the format for the header used in the PF/VF
* mailbox. The header is broken out into the following fields:
* Type: There are 4 supported message types
* 0x8: Data header - used to transport message data
* 0xC: Connect header - used to establish connection
* 0xD: Disconnect header - used to tear down a connection
* 0xE: Error header - used to address message exceptions
* Tail: Tail index for local FIFO
* Tail index actually consists of two parts. The MSB of
* the head is a loop tracker, it is 0 on an even numbered
* loop through the FIFO, and 1 on the odd numbered loops.
* To get the actual mailbox offset based on the tail it
* is necessary to add bit 3 to bit 0 and clear bit 3. This
* gives us a valid range of 0x1 - 0xE.
* Head: Head index for remote FIFO
* Head index follows the same format as the tail index.
* Rsvd0: Reserved 0 portion of the mailbox header
* CRC: Running CRC for all data since connect plus current message header
* Size: Maximum message size - Applies only to connect headers
* The maximum message size is provided during connect to avoid
* jamming the mailbox with messages that do not fit.
* Err_no: Error number - Applies only to error headers
* The error number provides an indication of the type of error
* experienced.
*/
/* macros for retrieving and setting header values */
#define FM10K_MSG_HDR_MASK(name) \
((0x1u << FM10K_MSG_##name##_SIZE) - 1)
#define FM10K_MSG_HDR_FIELD_SET(value, name) \
(((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT)
#define FM10K_MSG_HDR_FIELD_GET(value, name) \
((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name))
/* offsets shared between all headers */
#define FM10K_MSG_TYPE_SHIFT 0
#define FM10K_MSG_TYPE_SIZE 4
#define FM10K_MSG_TAIL_SHIFT 4
#define FM10K_MSG_TAIL_SIZE 4
#define FM10K_MSG_HEAD_SHIFT 8
#define FM10K_MSG_HEAD_SIZE 4
#define FM10K_MSG_RSVD0_SHIFT 12
#define FM10K_MSG_RSVD0_SIZE 4
/* offsets for data/disconnect headers */
#define FM10K_MSG_CRC_SHIFT 16
#define FM10K_MSG_CRC_SIZE 16
/* offsets for connect headers */
#define FM10K_MSG_CONNECT_SIZE_SHIFT 16
#define FM10K_MSG_CONNECT_SIZE_SIZE 16
/* offsets for error headers */
#define FM10K_MSG_ERR_NO_SHIFT 16
#define FM10K_MSG_ERR_NO_SIZE 16
enum fm10k_msg_type {
FM10K_MSG_DATA = 0x8,
FM10K_MSG_CONNECT = 0xC,
FM10K_MSG_DISCONNECT = 0xD,
FM10K_MSG_ERROR = 0xE,
};
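/* Illustrative example (not part of the driver itself): a data header with
* tail 0x3 and head 0x5 could be built and decoded with the macros above as
*
* u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
* FM10K_MSG_HDR_FIELD_SET(0x3, TAIL) |
* FM10K_MSG_HDR_FIELD_SET(0x5, HEAD);
* u16 type = FM10K_MSG_HDR_FIELD_GET(hdr, TYPE); yields FM10K_MSG_DATA (0x8)
*/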
/* HNI/SM Mailbox FIFO format
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-------+-----------------------+-------+-----------------------+
* | Error | Remote Head |Version| Local Tail |
* +-------+-----------------------+-------+-----------------------+
* | |
* . Local FIFO Data .
* . .
* +-------+-----------------------+-------+-----------------------+
*
* The layout above describes the format for the FIFOs used by the host
* network interface and the switch manager to communicate messages back
* and forth. Both the HNI and the switch maintain one such FIFO. The
* layout in memory has the switch manager FIFO followed immediately by
* the HNI FIFO. For this reason I am using just the pointer to the
* HNI FIFO in the mailbox ops as the offset between the two is fixed.
*
* The header for the FIFO is broken out into the following fields:
* Local Tail: Offset into FIFO region for next DWORD to write.
* Version: Version info for mailbox, only values of 0/1 are supported.
* Remote Head: Offset into remote FIFO to indicate how much we have read.
* Error: Error indication, values TBD.
*/
/* version number for switch manager mailboxes */
#define FM10K_SM_MBX_VERSION 1
#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1)
/* offsets shared between all SM FIFO headers */
#define FM10K_MSG_SM_TAIL_SHIFT 0
#define FM10K_MSG_SM_TAIL_SIZE 12
#define FM10K_MSG_SM_VER_SHIFT 12
#define FM10K_MSG_SM_VER_SIZE 4
#define FM10K_MSG_SM_HEAD_SHIFT 16
#define FM10K_MSG_SM_HEAD_SIZE 12
#define FM10K_MSG_SM_ERR_SHIFT 28
#define FM10K_MSG_SM_ERR_SIZE 4
/* All error messages returned by mailbox functions
* The value -511 is 0xFE01 in hex. The idea is to order the errors
* from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox
* messages. This also helps to avoid error number collisions as Linux
* doesn't appear to use error numbers 256 - 511.
*/
#define FM10K_MBX_ERR(_n) ((_n) - 512)
#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01)
#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03)
#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05)
#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06)
#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08)
#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09)
#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B)
#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C)
#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E)
#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F)
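/* As an example, FM10K_MBX_ERR_NO_MBX above works out to 0x01 - 512 = -511,
* which reads back as 0xFE01 when interpreted as an unsigned 16-bit value.
*/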
#define FM10K_MBX_CRC_SEED 0xFFFF
struct fm10k_mbx_ops {
s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *);
void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *);
bool (*rx_ready)(struct fm10k_mbx_info *);
bool (*tx_ready)(struct fm10k_mbx_info *, u16);
bool (*tx_complete)(struct fm10k_mbx_info *);
s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *,
const u32 *);
s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *);
s32 (*register_handlers)(struct fm10k_mbx_info *,
const struct fm10k_msg_data *);
};
struct fm10k_mbx_fifo {
u32 *buffer;
u16 head;
u16 tail;
u16 size;
};
/* size of buffer to be stored in mailbox for FIFOs */
#define FM10K_MBX_TX_BUFFER_SIZE 512
#define FM10K_MBX_RX_BUFFER_SIZE 128
#define FM10K_MBX_BUFFER_SIZE \
(FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE)
/* minimum and maximum message size in dwords */
#define FM10K_MBX_MSG_MAX_SIZE \
((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1))
#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1)
#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */
struct fm10k_mbx_info {
/* function pointers for mailbox operations */
struct fm10k_mbx_ops ops;
const struct fm10k_msg_data *msg_data;
/* message FIFOs */
struct fm10k_mbx_fifo rx;
struct fm10k_mbx_fifo tx;
/* delay for handling timeouts */
u32 timeout;
u32 udelay;
/* mailbox state info */
u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr;
u16 max_size, mbmem_len;
u16 tail, tail_len, pulled;
u16 head, head_len, pushed;
u16 local, remote;
enum fm10k_mbx_state state;
/* result of last mailbox test */
s32 test_result;
/* statistics */
u64 tx_busy;
u64 tx_dropped;
u64 tx_messages;
u64 tx_dwords;
u64 rx_messages;
u64 rx_dwords;
u64 rx_parse_err;
/* Buffer to store messages */
u32 buffer[FM10K_MBX_BUFFER_SIZE];
};
s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
const struct fm10k_msg_data *, u8);
s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
const struct fm10k_msg_data *);
#endif /* _FM10K_MBX_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,135 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_PF_H_
#define _FM10K_PF_H_
#include "fm10k_type.h"
#include "fm10k_common.h"
bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort);
u16 fm10k_queues_per_pool(struct fm10k_hw *hw);
u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx);
enum fm10k_pf_tlv_msg_id_v1 {
FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */
FM10K_PF_MSG_ID_XCAST_MODES = 0x001,
FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002,
FM10K_PF_MSG_ID_LPORT_MAP = 0x100,
FM10K_PF_MSG_ID_LPORT_CREATE = 0x200,
FM10K_PF_MSG_ID_LPORT_DELETE = 0x201,
FM10K_PF_MSG_ID_CONFIG = 0x300,
FM10K_PF_MSG_ID_UPDATE_PVID = 0x400,
FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501,
FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502,
FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
};
enum fm10k_pf_tlv_attr_id_v1 {
FM10K_PF_ATTR_ID_ERR = 0x00,
FM10K_PF_ATTR_ID_LPORT_MAP = 0x01,
FM10K_PF_ATTR_ID_XCAST_MODE = 0x02,
FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03,
FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04,
FM10K_PF_ATTR_ID_CONFIG = 0x05,
FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06,
FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07,
FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08,
FM10K_PF_ATTR_ID_FLOW_STATE = 0x09,
FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A,
FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
FM10K_PF_ATTR_ID_PORT = 0x0C,
FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
};
#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16
#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16
#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16
#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0
#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
struct fm10k_mac_update {
__le32 mac_lower;
__le16 mac_upper;
__le16 vlan;
__le16 glort;
u8 flags;
u8 action;
};
struct fm10k_global_table_data {
__le32 used;
__le32 avail;
};
struct fm10k_swapi_error {
__le32 status;
struct fm10k_global_table_data mac;
struct fm10k_global_table_data nexthop;
struct fm10k_global_table_data ffu;
};
struct fm10k_swapi_1588_timestamp {
__le64 egress;
__le64 ingress;
__le16 dglort;
__le16 sglort;
};
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \
fm10k_lport_map_msg_attr, func)
s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[];
#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \
fm10k_update_pvid_msg_attr, func)
s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
fm10k_1588_timestamp_msg_attr, func)
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[];
extern struct fm10k_info fm10k_pf_info;
#endif /* _FM10K_PF_H_ */


@ -0,0 +1,463 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include "fm10k.h"
#define FM10K_TS_TX_TIMEOUT (HZ * 15)
void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
struct skb_shared_hwtstamps *hwtstamp,
u64 systime)
{
unsigned long flags;
read_lock_irqsave(&interface->systime_lock, flags);
systime += interface->ptp_adjust;
read_unlock_irqrestore(&interface->systime_lock, flags);
hwtstamp->hwtstamp = ns_to_ktime(systime);
}
static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
__le16 dglort)
{
struct sk_buff_head *list = &interface->ts_tx_skb_queue;
struct sk_buff *skb;
skb_queue_walk(list, skb) {
if (FM10K_CB(skb)->fi.w.dglort == dglort)
return skb;
}
return NULL;
}
void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
{
struct sk_buff_head *list = &interface->ts_tx_skb_queue;
struct sk_buff *clone;
unsigned long flags;
__le16 dglort;
/* create clone for us to return on the Tx path */
clone = skb_clone_sk(skb);
if (!clone)
return;
FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
dglort = FM10K_CB(clone)->fi.w.dglort;
spin_lock_irqsave(&list->lock, flags);
/* attempt to locate any buffers with the same dglort,
* if none are present then insert the clone at the tail of the list
*/
skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
if (!skb)
__skb_queue_tail(list, clone);
spin_unlock_irqrestore(&list->lock, flags);
/* if the list already has one then just free the clone */
if (skb)
kfree_skb(skb);
else
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
}
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
u64 systime)
{
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff_head *list = &interface->ts_tx_skb_queue;
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&list->lock, flags);
/* attempt to locate and pull the sk_buff out of the list */
skb = fm10k_ts_tx_skb(interface, dglort);
if (skb)
__skb_unlink(skb, list);
spin_unlock_irqrestore(&list->lock, flags);
/* if not found do nothing */
if (!skb)
return;
/* timestamp the sk_buff and return it to the socket */
fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
skb_complete_tx_timestamp(skb, &shhwtstamps);
}
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
{
struct sk_buff_head *list = &interface->ts_tx_skb_queue;
struct sk_buff *skb, *tmp;
unsigned long flags;
/* If we're down or resetting, just bail */
if (test_bit(__FM10K_DOWN, &interface->state) ||
test_bit(__FM10K_RESETTING, &interface->state))
return;
spin_lock_irqsave(&list->lock, flags);
/* walk through the list and flush any expired timestamp packets */
skb_queue_walk_safe(list, skb, tmp) {
if (time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
continue;
__skb_unlink(skb, list);
kfree_skb(skb);
interface->tx_hwtstamp_timeouts++;
}
spin_unlock_irqrestore(&list->lock, flags);
}
static u64 fm10k_systime_read(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
return hw->mac.ops.read_systime(hw);
}
void fm10k_ts_reset(struct fm10k_intfc *interface)
{
s64 ns = ktime_to_ns(ktime_get_real());
unsigned long flags;
/* reinitialize the clock */
write_lock_irqsave(&interface->systime_lock, flags);
interface->ptp_adjust = fm10k_systime_read(interface) - ns;
write_unlock_irqrestore(&interface->systime_lock, flags);
}
void fm10k_ts_init(struct fm10k_intfc *interface)
{
/* Initialize lock protecting systime access */
rwlock_init(&interface->systime_lock);
/* Initialize skb queue for pending timestamp requests */
skb_queue_head_init(&interface->ts_tx_skb_queue);
/* reset the clock to current kernel time */
fm10k_ts_reset(interface);
}
/**
* fm10k_get_ts_config - get current hardware timestamping configuration
* @netdev: network interface device structure
* @ifr: ioctl data
*
* This function returns the current timestamping settings. Rather than
* attempt to deconstruct registers to fill in the values, simply keep a copy
* of the old settings around, and return a copy when requested.
*/
int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct hwtstamp_config *config = &interface->ts_config;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
/**
* fm10k_set_ts_config - control hardware time stamping
* @netdev: network interface device structure
* @ifr: ioctl data
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
*
* Incoming time stamping has to be configured via the hardware
* filters. Not all combinations are supported, in particular event
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
*
* Since hardware always timestamps Path delay packets when timestamping V2
* packets, regardless of the type specified in the register, only use V2
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
*/
int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct hwtstamp_config ts_config;
if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
return -EFAULT;
/* reserved for future extensions */
if (ts_config.flags)
return -EINVAL;
switch (ts_config.tx_type) {
case HWTSTAMP_TX_OFF:
break;
case HWTSTAMP_TX_ON:
/* we likely need some check here to see if this is supported */
break;
default:
return -ERANGE;
}
switch (ts_config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_ALL:
interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
/* save these settings for future reference */
interface->ts_config = ts_config;
return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
-EFAULT : 0;
}
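/* Illustrative sketch only (not part of the driver, guarded out of the
 * build): a userspace application would exercise the two ioctl paths above
 * through the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP requests. The function
 * and interface names below are placeholders.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* handled by the tx_type switch above */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* PTP filters are upgraded to ALL above */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* on success the driver writes back the configuration it applied */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif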
static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct fm10k_intfc *interface;
struct fm10k_hw *hw;
int err;
interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
hw = &interface->hw;
err = hw->mac.ops.adjust_systime(hw, ppb);
/* the only error we should see is if the value is out of range */
return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
}
static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct fm10k_intfc *interface;
unsigned long flags;
interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
write_lock_irqsave(&interface->systime_lock, flags);
interface->ptp_adjust += delta;
write_unlock_irqrestore(&interface->systime_lock, flags);
return 0;
}
static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct fm10k_intfc *interface;
unsigned long flags;
u64 now;
interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
read_lock_irqsave(&interface->systime_lock, flags);
now = fm10k_systime_read(interface) + interface->ptp_adjust;
read_unlock_irqrestore(&interface->systime_lock, flags);
*ts = ns_to_timespec(now);
return 0;
}
static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct fm10k_intfc *interface;
unsigned long flags;
u64 ns = timespec_to_ns(ts);
interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
write_lock_irqsave(&interface->systime_lock, flags);
interface->ptp_adjust = fm10k_systime_read(interface) - ns;
write_unlock_irqrestore(&interface->systime_lock, flags);
return 0;
}
static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct ptp_clock_time *t = &rq->perout.period;
struct fm10k_intfc *interface;
struct fm10k_hw *hw;
u64 period;
u32 step;
/* we can only support periodic output */
if (rq->type != PTP_CLK_REQ_PEROUT)
return -EINVAL;
/* verify the requested channel is there */
if (rq->perout.index >= ptp->n_per_out)
return -EINVAL;
/* we cannot enforce start time as there is no
* mechanism for that in the hardware, we can only control
* the period.
*/
	/* we cannot support periods greater than 4 seconds due to the 32-bit
	 * limit of the period register (2^32 ns is roughly 4.29 seconds)
	 */
if (t->sec > 4 || t->sec < 0)
return -ERANGE;
interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
hw = &interface->hw;
/* we simply cannot support the operation if we don't have BAR4 */
if (!hw->sw_addr)
return -ENOTSUPP;
/* convert to unsigned 64b ns, verify we can put it in a 32b register */
period = t->sec * 1000000000LL + t->nsec;
/* determine the minimum size for period */
step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
FM10K_SYSTIME_CFG_STEP_MASK);
/* verify the value is in range supported by hardware */
if ((period && (period < step)) || (period > U32_MAX))
return -ERANGE;
	/* notify hardware of request to begin sending pulses */
fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
(u32)period);
return 0;
}
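/* Example of the range check above (the step value is hypothetical): if
 * FM10K_SYSTIME_CFG reported a step of 5 ns, the smallest non-zero period
 * accepted would be 2 * 5 = 10 ns, while the largest is U32_MAX ns,
 * roughly 4.29 seconds.
 */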
static struct ptp_pin_desc fm10k_ptp_pd[2] = {
{
.name = "IEEE1588_PULSE0",
.index = 0,
.func = PTP_PF_PEROUT,
.chan = 0
},
{
.name = "IEEE1588_PULSE1",
.index = 1,
.func = PTP_PF_PEROUT,
.chan = 1
}
};
static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
/* verify the requested pin is there */
if (pin >= ptp->n_pins || !ptp->pin_config)
return -EINVAL;
/* enforce locked channels, no changing them */
if (chan != ptp->pin_config[pin].chan)
return -EINVAL;
/* we want to keep the functions locked as well */
if (func != ptp->pin_config[pin].func)
return -EINVAL;
return 0;
}
void fm10k_ptp_register(struct fm10k_intfc *interface)
{
struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
struct device *dev = &interface->pdev->dev;
struct ptp_clock *ptp_clock;
snprintf(ptp_caps->name, sizeof(ptp_caps->name),
"%s", interface->netdev->name);
ptp_caps->owner = THIS_MODULE;
/* This math is simply the inverse of the math in
* fm10k_adjust_systime_pf applied to an adjustment value
* of 2^30 - 1 which is the maximum value of the register:
* max_ppb == ((2^30 - 1) * 5^9) / 2^31
*/
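	/* evaluated: ((2^30 - 1) * 5^9) / 2^31 = 2097151946846875 / 2147483648,
	 * which truncates to 976562 (just under 0.1%)
	 */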
ptp_caps->max_adj = 976562;
ptp_caps->adjfreq = fm10k_ptp_adjfreq;
ptp_caps->adjtime = fm10k_ptp_adjtime;
ptp_caps->gettime = fm10k_ptp_gettime;
ptp_caps->settime = fm10k_ptp_settime;
/* provide pins if BAR4 is accessible */
if (interface->sw_addr) {
/* enable periodic outputs */
ptp_caps->n_per_out = 2;
ptp_caps->enable = fm10k_ptp_enable;
/* enable clock pins */
ptp_caps->verify = fm10k_ptp_verify;
ptp_caps->n_pins = 2;
ptp_caps->pin_config = fm10k_ptp_pd;
}
ptp_clock = ptp_clock_register(ptp_caps, dev);
if (IS_ERR(ptp_clock)) {
ptp_clock = NULL;
dev_err(dev, "ptp_clock_register failed\n");
} else {
dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
}
interface->ptp_clock = ptp_clock;
}
void fm10k_ptp_unregister(struct fm10k_intfc *interface)
{
struct ptp_clock *ptp_clock = interface->ptp_clock;
struct device *dev = &interface->pdev->dev;
if (!ptp_clock)
return;
interface->ptp_clock = NULL;
ptp_clock_unregister(ptp_clock);
dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
}

@@ -0,0 +1,863 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k_tlv.h"
/**
* fm10k_tlv_msg_init - Initialize message block for TLV data storage
* @msg: Pointer to message block
* @msg_id: Message ID indicating message type
*
 * This function returns success if provided with a valid message pointer
**/
s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id)
{
/* verify pointer is not NULL */
if (!msg)
return FM10K_ERR_PARAM;
*msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id;
return 0;
}
/**
* fm10k_tlv_attr_put_null_string - Place null terminated string on message
* @msg: Pointer to message block
* @attr_id: Attribute ID
* @string: Pointer to string to be stored in attribute
*
* This function will reorder a string to be CPU endian and store it in
 * the attribute buffer. It will return success if provided with valid
* pointers.
**/
s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
const unsigned char *string)
{
u32 attr_data = 0, len = 0;
u32 *attr;
/* verify pointers are not NULL */
if (!string || !msg)
return FM10K_ERR_PARAM;
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
/* copy string into local variable and then write to msg */
do {
/* write data to message */
if (len && !(len % 4)) {
attr[len / 4] = attr_data;
attr_data = 0;
}
/* record character to offset location */
attr_data |= (u32)(*string) << (8 * (len % 4));
len++;
/* test for NULL and then increment */
} while (*(string++));
/* write last piece of data to message */
attr[(len + 3) / 4] = attr_data;
/* record attribute header, update message length */
len <<= FM10K_TLV_LEN_SHIFT;
attr[0] = len | attr_id;
/* add header length to length */
len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
*msg += FM10K_TLV_LEN_ALIGN(len);
return 0;
}
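/* Worked example of the layout produced above (illustrative only): for the
 * string "fm10k" the recorded length is 6 bytes including the terminator,
 * so attr[0] = (6 << FM10K_TLV_LEN_SHIFT) | attr_id, attr[1] packs
 * 'f' 'm' '1' '0' low byte first, and attr[2] holds 'k' plus the NUL.
 */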
/**
* fm10k_tlv_attr_get_null_string - Get null terminated string from attribute
* @attr: Pointer to attribute
* @string: Pointer to location of destination string
*
* This function pulls the string back out of the attribute and will place
 * it in the array pointed to by string. It will return success if provided
 * with valid pointers.
**/
s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
{
u32 len;
/* verify pointers are not NULL */
if (!string || !attr)
return FM10K_ERR_PARAM;
len = *attr >> FM10K_TLV_LEN_SHIFT;
attr++;
while (len--)
string[len] = (u8)(attr[len / 4] >> (8 * (len % 4)));
return 0;
}
/**
* fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message
* @msg: Pointer to message block
* @attr_id: Attribute ID
* @mac_addr: MAC address to be stored
*
* This function will reorder a MAC address to be CPU endian and store it
 * in the attribute buffer. It will return success if provided with
 * valid pointers.
**/
s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
const u8 *mac_addr, u16 vlan)
{
u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT;
u32 *attr;
/* verify pointers are not NULL */
if (!msg || !mac_addr)
return FM10K_ERR_PARAM;
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
/* record attribute header, update message length */
attr[0] = len | attr_id;
/* copy value into local variable and then write to msg */
attr[1] = le32_to_cpu(*(const __le32 *)&mac_addr[0]);
attr[2] = le16_to_cpu(*(const __le16 *)&mac_addr[4]);
attr[2] |= (u32)vlan << 16;
/* add header length to length */
len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
*msg += FM10K_TLV_LEN_ALIGN(len);
return 0;
}
/**
* fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute
* @attr: Pointer to attribute
* @attr_id: Attribute ID
* @mac_addr: location of buffer to store MAC address
*
* This function pulls the MAC address back out of the attribute and will
 * place it in the array pointed to by mac_addr. It will return success
 * if provided with valid pointers.
**/
s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
{
/* verify pointers are not NULL */
if (!mac_addr || !attr)
return FM10K_ERR_PARAM;
*(__le32 *)&mac_addr[0] = cpu_to_le32(attr[1]);
*(__le16 *)&mac_addr[4] = cpu_to_le16((u16)(attr[2]));
*vlan = (u16)(attr[2] >> 16);
return 0;
}
/**
* fm10k_tlv_attr_put_bool - Add header indicating value "true"
* @msg: Pointer to message block
* @attr_id: Attribute ID
*
 * This function will simply add an attribute header; the fact
 * that the header is here means the attribute value is true, else
 * it is false. The function will return success if provided with a
 * valid pointer.
**/
s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id)
{
/* verify pointers are not NULL */
if (!msg)
return FM10K_ERR_PARAM;
/* record attribute header */
msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id;
/* add header length to length */
*msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
return 0;
}
/**
* fm10k_tlv_attr_put_value - Store integer value attribute in message
* @msg: Pointer to message block
* @attr_id: Attribute ID
* @value: Value to be written
* @len: Size of value
*
* This function will place an integer value of up to 8 bytes in size
* in a message attribute. The function will return success provided
* that msg is a valid pointer, and len is 1, 2, 4, or 8.
**/
s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
{
u32 *attr;
/* verify non-null msg and len is 1, 2, 4, or 8 */
if (!msg || !len || len > 8 || (len & (len - 1)))
return FM10K_ERR_PARAM;
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
if (len < 4) {
attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
} else {
attr[1] = (u32)value;
if (len > 4)
attr[2] = (u32)(value >> 32);
}
/* record attribute header, update message length */
len <<= FM10K_TLV_LEN_SHIFT;
attr[0] = len | attr_id;
/* add header length to length */
len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
*msg += FM10K_TLV_LEN_ALIGN(len);
return 0;
}
/**
* fm10k_tlv_attr_get_value - Get integer value stored in attribute
* @attr: Pointer to attribute
* @value: Pointer to destination buffer
* @len: Size of value
*
* This function will place an integer value of up to 8 bytes in size
* in the offset pointed to by value. The function will return success
* provided that pointers are valid and the len value matches the
* attribute length.
**/
s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len)
{
/* verify pointers are not NULL */
if (!attr || !value)
return FM10K_ERR_PARAM;
if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
return FM10K_ERR_PARAM;
if (len == 8)
*(u64 *)value = ((u64)attr[2] << 32) | attr[1];
else if (len == 4)
*(u32 *)value = attr[1];
else if (len == 2)
*(u16 *)value = (u16)attr[1];
else
*(u8 *)value = (u8)attr[1];
return 0;
}
/**
* fm10k_tlv_attr_put_le_struct - Store little endian structure in message
* @msg: Pointer to message block
* @attr_id: Attribute ID
* @le_struct: Pointer to structure to be written
* @len: Size of le_struct
*
* This function will place a little endian structure value in a message
* attribute. The function will return success provided that all pointers
* are valid and length is a non-zero multiple of 4.
**/
s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id,
const void *le_struct, u32 len)
{
const __le32 *le32_ptr = (const __le32 *)le_struct;
u32 *attr;
u32 i;
/* verify non-null msg and len is in 32 bit words */
if (!msg || !len || (len % 4))
return FM10K_ERR_PARAM;
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
/* copy le32 structure into host byte order at 32b boundaries */
for (i = 0; i < (len / 4); i++)
attr[i + 1] = le32_to_cpu(le32_ptr[i]);
/* record attribute header, update message length */
len <<= FM10K_TLV_LEN_SHIFT;
attr[0] = len | attr_id;
/* add header length to length */
len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
*msg += FM10K_TLV_LEN_ALIGN(len);
return 0;
}
/**
 * fm10k_tlv_attr_get_le_struct - Get little endian struct from attribute
* @attr: Pointer to attribute
* @le_struct: Pointer to structure to be written
* @len: Size of structure
*
* This function will place a little endian structure in the buffer
* pointed to by le_struct. The function will return success
* provided that pointers are valid and the len value matches the
* attribute length.
**/
s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len)
{
__le32 *le32_ptr = (__le32 *)le_struct;
u32 i;
/* verify pointers are not NULL */
if (!le_struct || !attr)
return FM10K_ERR_PARAM;
if ((*attr >> FM10K_TLV_LEN_SHIFT) != len)
return FM10K_ERR_PARAM;
attr++;
for (i = 0; len; i++, len -= 4)
le32_ptr[i] = cpu_to_le32(attr[i]);
return 0;
}
/**
* fm10k_tlv_attr_nest_start - Start a set of nested attributes
* @msg: Pointer to message block
* @attr_id: Attribute ID
*
* This function will mark off a new nested region for encapsulating
* a given set of attributes. The idea is if you wish to place a secondary
* structure within the message this mechanism allows for that. The
* function will return NULL on failure, and a pointer to the start
* of the nested attributes on success.
**/
u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
{
u32 *attr;
/* verify pointer is not NULL */
if (!msg)
return NULL;
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
attr[0] = attr_id;
/* return pointer to nest header */
return attr;
}
/**
 * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes
* @msg: Pointer to message block
*
* This function closes off an existing set of nested attributes. The
* message pointer should be pointing to the parent of the nest. So in
* the case of a nest within the nest this would be the outer nest pointer.
* This function will return success provided all pointers are valid.
**/
s32 fm10k_tlv_attr_nest_stop(u32 *msg)
{
u32 *attr;
u32 len;
/* verify pointer is not NULL */
if (!msg)
return FM10K_ERR_PARAM;
/* locate the nested header and retrieve its length */
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT;
/* only include nest if data was added to it */
if (len) {
len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT;
*msg += len;
}
return 0;
}
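/* Typical use of the nesting helpers above (fm10k_tlv_msg_test_create below
 * is a concrete caller): call fm10k_tlv_attr_nest_start() on the message,
 * add attributes through the returned nest pointer, then call
 * fm10k_tlv_attr_nest_stop() on the original message pointer so the nest
 * length is folded back into the parent.
 */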
/**
* fm10k_tlv_attr_validate - Validate attribute metadata
* @attr: Pointer to attribute
* @tlv_attr: Type and length info for attribute
*
* This function does some basic validation of the input TLV. It
* verifies the length, and in the case of null terminated strings
* it verifies that the last byte is null. The function will
* return FM10K_ERR_PARAM if any attribute is malformed, otherwise
* it returns 0.
**/
static s32 fm10k_tlv_attr_validate(u32 *attr,
const struct fm10k_tlv_attr *tlv_attr)
{
u32 attr_id = *attr & FM10K_TLV_ID_MASK;
u16 len = *attr >> FM10K_TLV_LEN_SHIFT;
/* verify this is an attribute and not a message */
if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))
return FM10K_ERR_PARAM;
/* search through the list of attributes to find a matching ID */
while (tlv_attr->id < attr_id)
tlv_attr++;
	/* if we didn't find a match then we should exit */
if (tlv_attr->id != attr_id)
return FM10K_NOT_IMPLEMENTED;
/* move to start of attribute data */
attr++;
switch (tlv_attr->type) {
case FM10K_TLV_NULL_STRING:
if (!len ||
(attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4)))))
return FM10K_ERR_PARAM;
if (len > tlv_attr->len)
return FM10K_ERR_PARAM;
break;
case FM10K_TLV_MAC_ADDR:
if (len != ETH_ALEN)
return FM10K_ERR_PARAM;
break;
case FM10K_TLV_BOOL:
if (len)
return FM10K_ERR_PARAM;
break;
case FM10K_TLV_UNSIGNED:
case FM10K_TLV_SIGNED:
if (len != tlv_attr->len)
return FM10K_ERR_PARAM;
break;
case FM10K_TLV_LE_STRUCT:
/* struct must be 4 byte aligned */
if ((len % 4) || len != tlv_attr->len)
return FM10K_ERR_PARAM;
break;
case FM10K_TLV_NESTED:
/* nested attributes must be 4 byte aligned */
if (len % 4)
return FM10K_ERR_PARAM;
break;
default:
/* attribute id is mapped to bad value */
return FM10K_ERR_PARAM;
}
return 0;
}
/**
* fm10k_tlv_attr_parse - Parses stream of attribute data
* @attr: Pointer to attribute list
* @results: Pointer array to store pointers to attributes
* @tlv_attr: Type and length info for attributes
*
* This function validates a stream of attributes and parses them
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
* and 0 on success.
**/
s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
{
u32 i, attr_id, offset = 0;
s32 err = 0;
u16 len;
/* verify pointers are not NULL */
if (!attr || !results)
return FM10K_ERR_PARAM;
/* initialize results to NULL */
for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++)
results[i] = NULL;
/* pull length from the message header */
len = *attr >> FM10K_TLV_LEN_SHIFT;
/* no attributes to parse if there is no length */
if (!len)
return 0;
/* no attributes to parse, just raw data, message becomes attribute */
if (!tlv_attr) {
results[0] = attr;
return 0;
}
/* move to start of attribute data */
attr++;
/* run through list parsing all attributes */
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
if (attr_id < FM10K_TLV_RESULTS_MAX)
err = fm10k_tlv_attr_validate(attr, tlv_attr);
else
err = FM10K_NOT_IMPLEMENTED;
if (err < 0)
return err;
if (!err)
results[attr_id] = attr;
/* update offset */
offset += FM10K_TLV_DWORD_LEN(*attr) * 4;
/* move to next attribute */
attr = &attr[FM10K_TLV_DWORD_LEN(*attr)];
}
/* we should find ourselves at the end of the list */
if (offset != len)
return FM10K_ERR_PARAM;
return 0;
}
/**
* fm10k_tlv_msg_parse - Parses message header and calls function handler
* @hw: Pointer to hardware structure
* @msg: Pointer to message
* @mbx: Pointer to mailbox information structure
 * @data: Function array containing list of message handling functions
*
* This function should be the first function called upon receiving a
* message. The handler will identify the message type and call the correct
* handler for the given message. It will return the value from the function
* call on a recognized message type, otherwise it will return
* FM10K_NOT_IMPLEMENTED on an unrecognized type.
**/
s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
struct fm10k_mbx_info *mbx,
const struct fm10k_msg_data *data)
{
u32 *results[FM10K_TLV_RESULTS_MAX];
u32 msg_id;
s32 err;
/* verify pointer is not NULL */
if (!msg || !data)
return FM10K_ERR_PARAM;
/* verify this is a message and not an attribute */
if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)))
return FM10K_ERR_PARAM;
/* grab message ID */
msg_id = *msg & FM10K_TLV_ID_MASK;
while (data->id < msg_id)
data++;
/* if we didn't find it then pass it up as an error */
if (data->id != msg_id) {
while (data->id != FM10K_TLV_ERROR)
data++;
}
/* parse the attributes into the results list */
err = fm10k_tlv_attr_parse(msg, results, data->attr);
if (err < 0)
return err;
return data->func(hw, results, mbx);
}
/**
* fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs
* @hw: Pointer to hardware structure
* @results: Pointer array to message, results[0] is pointer to message
* @mbx: Unused mailbox pointer
*
 * This function is a default handler for unrecognized messages. At a
 * minimum it just indicates that the message requested was
* unimplemented.
**/
s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
return FM10K_NOT_IMPLEMENTED;
}
static const unsigned char test_str[] = "fm10k";
static const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56,
0x78, 0x9a, 0xbc };
static const u16 test_vlan = 0x0FED;
static const u64 test_u64 = 0xfedcba9876543210ull;
static const u32 test_u32 = 0x87654321;
static const u16 test_u16 = 0x8765;
static const u8 test_u8 = 0x87;
static const s64 test_s64 = -0x123456789abcdef0ll;
static const s32 test_s32 = -0x1235678;
static const s16 test_s16 = -0x1234;
static const s8 test_s8 = -0x12;
static const __le32 test_le[2] = { cpu_to_le32(0x12345678),
cpu_to_le32(0x9abcdef0)};
/* The message below is meant to be used as a test message to demonstrate
 * how to use the TLV interface and to test the types. Normally this code
 * would be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG
*/
const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80),
FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR),
FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8),
FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16),
FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32),
FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64),
FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8),
FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16),
FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32),
FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64),
FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8),
FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED),
FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT),
FM10K_TLV_ATTR_LAST
};
/**
* fm10k_tlv_msg_test_generate_data - Stuff message with data
* @msg: Pointer to message
* @attr_flags: List of flags indicating what attributes to add
*
* This function is meant to load a message buffer with attribute data
**/
static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
{
if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
test_str);
if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
test_mac, test_vlan);
if (attr_flags & (1 << FM10K_TEST_MSG_U8))
fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
if (attr_flags & (1 << FM10K_TEST_MSG_U16))
fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
if (attr_flags & (1 << FM10K_TEST_MSG_U32))
fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
if (attr_flags & (1 << FM10K_TEST_MSG_U64))
fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
if (attr_flags & (1 << FM10K_TEST_MSG_S8))
fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
if (attr_flags & (1 << FM10K_TEST_MSG_S16))
fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
if (attr_flags & (1 << FM10K_TEST_MSG_S32))
fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
if (attr_flags & (1 << FM10K_TEST_MSG_S64))
fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
test_le, 8);
}
/**
* fm10k_tlv_msg_test_create - Create a test message testing all attributes
* @msg: Pointer to message
* @attr_flags: List of flags indicating what attributes to add
*
* This function is meant to load a message buffer with all attribute types
* including a nested attribute.
**/
void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags)
{
u32 *nest = NULL;
fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST);
fm10k_tlv_msg_test_generate_data(msg, attr_flags);
/* check for nested attributes */
attr_flags >>= FM10K_TEST_MSG_NESTED;
if (attr_flags) {
nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED);
fm10k_tlv_msg_test_generate_data(nest, attr_flags);
fm10k_tlv_attr_nest_stop(msg);
}
}
/**
* fm10k_tlv_msg_test - Validate all results on test message receive
* @hw: Pointer to hardware structure
 * @results: Pointer array to attributes in the message
* @mbx: Pointer to mailbox information structure
*
* This function does a check to verify all attributes match what the test
* message placed in the message buffer. It is the default handler
* for TLV test messages.
**/
s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
u32 *nest_results[FM10K_TLV_RESULTS_MAX];
unsigned char result_str[80];
unsigned char result_mac[ETH_ALEN];
s32 err = 0;
__le32 result_le[2];
u16 result_vlan;
u64 result_u64;
u32 result_u32;
u16 result_u16;
u8 result_u8;
s64 result_s64;
s32 result_s32;
s16 result_s16;
s8 result_s8;
u32 reply[3];
/* retrieve results of a previous test */
if (!!results[FM10K_TEST_MSG_RESULT])
return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT],
&mbx->test_result);
parse_nested:
if (!!results[FM10K_TEST_MSG_STRING]) {
err = fm10k_tlv_attr_get_null_string(
results[FM10K_TEST_MSG_STRING],
result_str);
if (!err && memcmp(test_str, result_str, sizeof(test_str)))
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_MAC_ADDR]) {
err = fm10k_tlv_attr_get_mac_vlan(
results[FM10K_TEST_MSG_MAC_ADDR],
result_mac, &result_vlan);
if (!err && memcmp(test_mac, result_mac, ETH_ALEN))
err = FM10K_ERR_INVALID_VALUE;
if (!err && test_vlan != result_vlan)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_U8]) {
err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8],
&result_u8);
if (!err && test_u8 != result_u8)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_U16]) {
err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16],
&result_u16);
if (!err && test_u16 != result_u16)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_U32]) {
err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32],
&result_u32);
if (!err && test_u32 != result_u32)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_U64]) {
err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64],
&result_u64);
if (!err && test_u64 != result_u64)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_S8]) {
err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8],
&result_s8);
if (!err && test_s8 != result_s8)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_S16]) {
err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16],
&result_s16);
if (!err && test_s16 != result_s16)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_S32]) {
err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32],
&result_s32);
if (!err && test_s32 != result_s32)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_S64]) {
err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64],
&result_s64);
if (!err && test_s64 != result_s64)
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_LE_STRUCT]) {
err = fm10k_tlv_attr_get_le_struct(
results[FM10K_TEST_MSG_LE_STRUCT],
result_le,
sizeof(result_le));
if (!err && memcmp(test_le, result_le, sizeof(test_le)))
err = FM10K_ERR_INVALID_VALUE;
if (err)
goto report_result;
}
if (!!results[FM10K_TEST_MSG_NESTED]) {
/* clear any pointers */
memset(nest_results, 0, sizeof(nest_results));
/* parse the nested attributes into the nest results list */
err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED],
nest_results,
fm10k_tlv_msg_test_attr);
if (err)
goto report_result;
/* loop back through to the start */
results = nest_results;
goto parse_nested;
}
report_result:
/* generate reply with test result */
fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST);
fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err);
/* load onto outgoing mailbox */
return mbx->ops.enqueue_tx(hw, mbx, reply);
}

@@ -0,0 +1,186 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
/* forward declaration */
struct fm10k_msg_data;
#include "fm10k_type.h"
/* Message / Argument header format
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Length | Flags | Type / ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* The message header format described here is used for messages that are
 * passed between the PF and the VF. To allow for messages larger than
* mailbox size we will provide a message with the above header and it
* will be segmented and transported to the mailbox to the other side where
* it is reassembled. It contains the following fields:
* Len: Length of the message in bytes excluding the message header
* Flags: TBD
* Rule: These will be the message/argument types we pass
*/
/* message data header */
#define FM10K_TLV_ID_SHIFT 0
#define FM10K_TLV_ID_SIZE 16
#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1)
#define FM10K_TLV_FLAGS_SHIFT 16
#define FM10K_TLV_FLAGS_MSG 0x1
#define FM10K_TLV_FLAGS_SIZE 4
#define FM10K_TLV_LEN_SHIFT 20
#define FM10K_TLV_LEN_SIZE 12
#define FM10K_TLV_HDR_LEN 4ul
#define FM10K_TLV_LEN_ALIGN_MASK \
((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT)
#define FM10K_TLV_LEN_ALIGN(tlv) \
(((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK)
#define FM10K_TLV_DWORD_LEN(tlv) \
((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1)
#define FM10K_TLV_RESULTS_MAX 32
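/* Worked example of the macros above (illustrative): a MAC/VLAN attribute
 * carries ETH_ALEN = 6 bytes of data, so its header is
 * (6 << FM10K_TLV_LEN_SHIFT) | attr_id. FM10K_TLV_LEN_ALIGN() rounds the
 * length up to 8 bytes and FM10K_TLV_DWORD_LEN() then yields 3, i.e. one
 * DWORD of header plus two DWORDs of (padded) data.
 */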
enum fm10k_tlv_type {
FM10K_TLV_NULL_STRING,
FM10K_TLV_MAC_ADDR,
FM10K_TLV_BOOL,
FM10K_TLV_UNSIGNED,
FM10K_TLV_SIGNED,
FM10K_TLV_LE_STRUCT,
FM10K_TLV_NESTED,
FM10K_TLV_MAX_TYPE
};
#define FM10K_TLV_ERROR (~0u)
struct fm10k_tlv_attr {
unsigned int id;
enum fm10k_tlv_type type;
u16 len;
};
#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len }
#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 }
#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 }
#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 }
#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 }
#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 }
#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 }
#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 }
#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 }
#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
struct fm10k_msg_data {
unsigned int id;
const struct fm10k_tlv_attr *attr;
s32 (*func)(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
};
#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func }
s32 fm10k_tlv_msg_init(u32 *, u16);
s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *);
s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *);
s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16);
s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *);
s32 fm10k_tlv_attr_put_bool(u32 *, u16);
s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32);
#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 1)
#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 2)
#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 4)
#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \
fm10k_tlv_attr_put_value(msg, attr_id, val, 8)
s32 fm10k_tlv_attr_get_value(u32 *, void *, u32);
#define fm10k_tlv_attr_get_u8(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8))
#define fm10k_tlv_attr_get_u16(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16))
#define fm10k_tlv_attr_get_u32(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32))
#define fm10k_tlv_attr_get_u64(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64))
#define fm10k_tlv_attr_get_s8(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8))
#define fm10k_tlv_attr_get_s16(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16))
#define fm10k_tlv_attr_get_s32(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32))
#define fm10k_tlv_attr_get_s64(attr, ptr) \
fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64))
s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32);
s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32);
u32 *fm10k_tlv_attr_nest_start(u32 *, u16);
s32 fm10k_tlv_attr_nest_stop(u32 *);
s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *);
s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *,
const struct fm10k_msg_data *);
s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *);
#define FM10K_TLV_MSG_ID_TEST 0
enum fm10k_tlv_test_attr_id {
FM10K_TEST_MSG_UNSET,
FM10K_TEST_MSG_STRING,
FM10K_TEST_MSG_MAC_ADDR,
FM10K_TEST_MSG_U8,
FM10K_TEST_MSG_U16,
FM10K_TEST_MSG_U32,
FM10K_TEST_MSG_U64,
FM10K_TEST_MSG_S8,
FM10K_TEST_MSG_S16,
FM10K_TEST_MSG_S32,
FM10K_TEST_MSG_S64,
FM10K_TEST_MSG_LE_STRUCT,
FM10K_TEST_MSG_NESTED,
FM10K_TEST_MSG_RESULT,
FM10K_TEST_MSG_MAX
};
extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[];
void fm10k_tlv_msg_test_create(u32 *, u32);
s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
#define FM10K_TLV_MSG_TEST_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func)
#define FM10K_TLV_MSG_ERROR_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func)
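/* Illustrative sketch of how the handler macros above are typically used;
 * the table name here is a placeholder, not part of this header.
 * fm10k_tlv_msg_parse() walks such a table in ascending message ID order
 * and falls back to the FM10K_TLV_ERROR entry, so the table must be sorted
 * by ID and terminated with an error handler:
 *
 *	static const struct fm10k_msg_data example_msg_data[] = {
 *		FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
 *		FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
 *	};
 */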
#endif /* _FM10K_TLV_H_ */

@@ -0,0 +1,770 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
/* forward declaration */
struct fm10k_hw;
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/etherdevice.h>
#include "fm10k_mbx.h"
#define FM10K_DEV_ID_PF 0x15A4
#define FM10K_DEV_ID_VF 0x15A5
#define FM10K_MAX_QUEUES 256
#define FM10K_MAX_QUEUES_PF 128
#define FM10K_MAX_QUEUES_POOL 16
#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull
#define FM10K_STAT_VALID 0x80000000
/* PCI Bus Info */
#define FM10K_PCIE_LINK_CAP 0x7C
#define FM10K_PCIE_LINK_STATUS 0x82
#define FM10K_PCIE_LINK_WIDTH 0x3F0
#define FM10K_PCIE_LINK_WIDTH_1 0x10
#define FM10K_PCIE_LINK_WIDTH_2 0x20
#define FM10K_PCIE_LINK_WIDTH_4 0x40
#define FM10K_PCIE_LINK_WIDTH_8 0x80
#define FM10K_PCIE_LINK_SPEED 0xF
#define FM10K_PCIE_LINK_SPEED_2500 0x1
#define FM10K_PCIE_LINK_SPEED_5000 0x2
#define FM10K_PCIE_LINK_SPEED_8000 0x3
/* PCIe payload size */
#define FM10K_PCIE_DEV_CAP 0x74
#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07
#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00
#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01
#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02
#define FM10K_PCIE_DEV_CTRL 0x78
#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0
#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00
#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20
#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40
/* PCIe MSI-X Capability info */
#define FM10K_PCI_MSIX_MSG_CTRL 0xB2
#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF
#define FM10K_MAX_MSIX_VECTORS 256
#define FM10K_MAX_VECTORS_PF 256
#define FM10K_MAX_VECTORS_POOL 32
/* PCIe SR-IOV Info */
#define FM10K_PCIE_SRIOV_CTRL 0x190
#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
#define FM10K_ERR_PARAM -2
#define FM10K_ERR_REQUESTS_PENDING -4
#define FM10K_ERR_RESET_REQUESTED -5
#define FM10K_ERR_DMA_PENDING -6
#define FM10K_ERR_RESET_FAILED -7
#define FM10K_ERR_INVALID_MAC_ADDR -8
#define FM10K_ERR_INVALID_VALUE -9
#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF
/* Start of PF registers */
#define FM10K_CTRL 0x0000
#define FM10K_CTRL_BAR4_ALLOWED 0x00000004
#define FM10K_CTRL_EXT 0x0001
#define FM10K_GCR 0x0003
#define FM10K_GCR_EXT 0x0005
/* Interrupt control registers */
#define FM10K_EICR 0x0006
#define FM10K_EICR_FAULT_MASK 0x0000003F
#define FM10K_EICR_MAILBOX 0x00000040
#define FM10K_EICR_SWITCHREADY 0x00000080
#define FM10K_EICR_SWITCHNOTREADY 0x00000100
#define FM10K_EICR_SWITCHINTERRUPT 0x00000200
#define FM10K_EICR_VFLR 0x00000800
#define FM10K_EICR_MAXHOLDTIME 0x00001000
#define FM10K_EIMR 0x0007
#define FM10K_EIMR_PCA_FAULT 0x00000001
#define FM10K_EIMR_THI_FAULT 0x00000010
#define FM10K_EIMR_FUM_FAULT 0x00000400
#define FM10K_EIMR_MAILBOX 0x00001000
#define FM10K_EIMR_SWITCHREADY 0x00004000
#define FM10K_EIMR_SWITCHNOTREADY 0x00010000
#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000
#define FM10K_EIMR_SRAMERROR 0x00100000
#define FM10K_EIMR_VFLR 0x00400000
#define FM10K_EIMR_MAXHOLDTIME 0x01000000
#define FM10K_EIMR_ALL 0x55555555
#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0)
#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1)
#define FM10K_FAULT_ADDR_LO 0x0
#define FM10K_FAULT_ADDR_HI 0x1
#define FM10K_FAULT_SPECINFO 0x2
#define FM10K_FAULT_FUNC 0x3
#define FM10K_FAULT_SIZE 0x4
#define FM10K_FAULT_FUNC_VALID 0x00008000
#define FM10K_FAULT_FUNC_PF 0x00004000
#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00
#define FM10K_FAULT_FUNC_VF_SHIFT 8
#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF
#define FM10K_PCA_FAULT 0x0008
#define FM10K_THI_FAULT 0x0010
#define FM10K_FUM_FAULT 0x001C
/* Rx queue timeout indicator */
#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020)
/* Switch Manager info */
#define FM10K_SM_AREA(_n) ((_n) + 0x0028)
/* GLORT mapping registers */
#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030)
#define FM10K_DGLORT_COUNT 8
#define FM10K_DGLORTMAP_MASK_SHIFT 16
#define FM10K_DGLORTMAP_ANY 0x00000000
#define FM10K_DGLORTMAP_NONE 0x0000FFFF
#define FM10K_DGLORTMAP_ZERO 0xFFFF0000
#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038)
#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4
#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7
#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14
#define FM10K_DGLORTDEC_QBASE_SHIFT 16
#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24
#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
#define FM10K_TUNNEL_CFG 0x0040
#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
#define FM10K_SWPRI_MAX 16
#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
#define FM10K_RSSRK_SIZE 10
#define FM10K_RSSRK_ENTRIES_PER_REG 4
#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000)
#define FM10K_RETA_SIZE 32
#define FM10K_RETA_ENTRIES_PER_REG 4
#define FM10K_MAX_RSS_INDICES 128
/* Rate limiting registers */
#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000)
#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF
#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040)
#define FM10K_TC_MAXCREDIT_64K 0x00010000
#define FM10K_TC_RATE(_n) ((_n) + 0x2080)
#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF
#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000
#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000
#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000
/* DMA control registers */
#define FM10K_DMA_CTRL 0x20C3
#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001
#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008
#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010
#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080
#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100
#define FM10K_DMA_CTRL_MINMSS_64 0x00008000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000
#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000
#define FM10K_DMA_CTRL_32_DESC 0x00000000
#define FM10K_DMA_CTRL2 0x20C4
#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000
/* TSO flags configuration
* First packet contains all flags except for fin and psh
* Middle packet contains only urg and ack
* Last packet contains urg, ack, fin, and psh
*/
#define FM10K_TSO_FLAGS_LOW 0x00300FF6
#define FM10K_TSO_FLAGS_HI 0x00000039
#define FM10K_DTXTCPFLGL 0x20C5
#define FM10K_DTXTCPFLGH 0x20C6
#define FM10K_TPH_CTRL 0x20C7
#define FM10K_MRQC(_n) ((_n) + 0x2100)
#define FM10K_MRQC_TCP_IPV4 0x00000001
#define FM10K_MRQC_IPV4 0x00000002
#define FM10K_MRQC_IPV6 0x00000010
#define FM10K_MRQC_TCP_IPV6 0x00000020
#define FM10K_MRQC_UDP_IPV4 0x00000040
#define FM10K_MRQC_UDP_IPV6 0x00000080
#define FM10K_TQMAP(_n) ((_n) + 0x2800)
#define FM10K_TQMAP_TABLE_SIZE 2048
#define FM10K_RQMAP(_n) ((_n) + 0x3000)
/* Hardware Statistics */
#define FM10K_STATS_TIMEOUT 0x3800
#define FM10K_STATS_UR 0x3801
#define FM10K_STATS_CA 0x3802
#define FM10K_STATS_UM 0x3803
#define FM10K_STATS_XEC 0x3804
#define FM10K_STATS_VLAN_DROP 0x3805
#define FM10K_STATS_LOOPBACK_DROP 0x3806
#define FM10K_STATS_NODESC_DROP 0x3807
/* Timesync registers */
#define FM10K_SYSTIME 0x3814
#define FM10K_SYSTIME_CFG 0x3818
#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
/* PCIe state registers */
#define FM10K_PHYADDR 0x381C
/* Rx ring registers */
#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000)
#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001)
#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002)
#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003)
#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020
#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200
#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000
#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000
#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004)
#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005)
#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006)
#define FM10K_RXQCTL_ENABLE 0x00000001
#define FM10K_RXQCTL_PF 0x000000FC
#define FM10K_RXQCTL_VF_SHIFT 2
#define FM10K_RXQCTL_VF 0x00000100
#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF)
#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007)
#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001
#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200
#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008)
#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009)
#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */
#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000
#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000
/* Rx Statistics */
#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A)
#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B)
#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C)
#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D)
/* Rx GLORT register */
#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E)
/* Tx ring registers */
#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000)
#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001)
#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002)
#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003)
#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020
#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200
#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800
#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000
#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004)
#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005)
#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006)
#define FM10K_TXDCTL_ENABLE 0x00004000
#define FM10K_TXDCTL_MAX_TIME_SHIFT 16
#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007)
#define FM10K_TXQCTL_PF 0x0000003F
#define FM10K_TXQCTL_VF 0x00000040
#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF)
#define FM10K_TXQCTL_PC_SHIFT 7
#define FM10K_TXQCTL_PC_MASK 0x00000380
#define FM10K_TXQCTL_TC_SHIFT 10
#define FM10K_TXQCTL_VID_SHIFT 16
#define FM10K_TXQCTL_VID_MASK 0x0FFF0000
#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000
#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008)
/* Tx Statistics */
#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009)
#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A)
#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B)
/* Tx Push registers */
#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C)
#define FM10K_TQDLOC_BASE_32_DESC 0x08
#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000
/* Tx GLORT registers */
#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D)
#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E)
#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001
/* Interrupt moderation and control registers */
#define FM10K_INT_MAP(_n) ((_n) + 0x10080)
#define FM10K_INT_MAP_TIMER0 0x00000000
#define FM10K_INT_MAP_TIMER1 0x00000100
#define FM10K_INT_MAP_IMMEDIATE 0x00000200
#define FM10K_INT_MAP_DISABLE 0x00000300
#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003)
#define FM10K_INT_CTRL 0x12000
#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400
#define FM10K_ITR(_n) ((_n) + 0x12400)
#define FM10K_ITR_INTERVAL1_SHIFT 12
#define FM10K_ITR_PENDING2 0x10000000
#define FM10K_ITR_AUTOMASK 0x20000000
#define FM10K_ITR_MASK_SET 0x40000000
#define FM10K_ITR_MASK_CLEAR 0x80000000
#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800)
#define FM10K_ITR_REG_COUNT 768
#define FM10K_ITR_REG_COUNT_PF 256
/* Switch manager interrupt registers */
#define FM10K_IP 0x13000
#define FM10K_IP_NOTINRESET 0x00000100
/* VLAN registers */
#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000)
#define FM10K_VLAN_TABLE_SIZE 128
/* VLAN specific message offsets */
#define FM10K_VLAN_TABLE_VID_MAX 4096
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR (1 << 15)
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
/* VF FLR event notification registers */
#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844)
#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846)
/* Defines for size of uncacheable memories */
#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */
#define FM10K_UC_ADDR_END 0x100000 /* end of standard regs */
#define FM10K_UC_ADDR_SIZE (FM10K_UC_ADDR_END - FM10K_UC_ADDR_START)
/* Define timeouts for resets and disables */
#define FM10K_QUEUE_DISABLE_TIMEOUT 100
#define FM10K_RESET_TIMEOUT 100
/* VF registers */
#define FM10K_VFCTRL 0x00000
#define FM10K_VFCTRL_RST 0x00000008
#define FM10K_VFINT_MAP 0x00030
#define FM10K_VFSYSTIME 0x00040
#define FM10K_VFITR(_n) ((_n) + 0x00060)
/* Registers contained in BAR 4 for Switch management */
#define FM10K_SW_SYSTIME_ADJUST 0x0224D
#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000
#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
enum fm10k_int_source {
fm10k_int_Mailbox = 0,
fm10k_int_PCIeFault = 1,
fm10k_int_SwitchUpDown = 2,
fm10k_int_SwitchEvent = 3,
fm10k_int_SRAM = 4,
fm10k_int_VFLR = 5,
fm10k_int_MaxHoldTime = 6,
fm10k_int_sources_max_pf
};
/* PCIe bus speeds */
enum fm10k_bus_speed {
fm10k_bus_speed_unknown = 0,
fm10k_bus_speed_2500 = 2500,
fm10k_bus_speed_5000 = 5000,
fm10k_bus_speed_8000 = 8000,
fm10k_bus_speed_reserved
};
/* PCIe bus widths */
enum fm10k_bus_width {
fm10k_bus_width_unknown = 0,
fm10k_bus_width_pcie_x1 = 1,
fm10k_bus_width_pcie_x2 = 2,
fm10k_bus_width_pcie_x4 = 4,
fm10k_bus_width_pcie_x8 = 8,
fm10k_bus_width_reserved
};
/* PCIe payload sizes */
enum fm10k_bus_payload {
fm10k_bus_payload_unknown = 0,
fm10k_bus_payload_128 = 1,
fm10k_bus_payload_256 = 2,
fm10k_bus_payload_512 = 3,
fm10k_bus_payload_reserved
};
/* Bus parameters */
struct fm10k_bus_info {
enum fm10k_bus_speed speed;
enum fm10k_bus_width width;
enum fm10k_bus_payload payload;
};
/* Statistics related declarations */
struct fm10k_hw_stat {
u64 count;
u32 base_l;
u32 base_h;
};
struct fm10k_hw_stats_q {
struct fm10k_hw_stat tx_bytes;
struct fm10k_hw_stat tx_packets;
#define tx_stats_idx tx_packets.base_h
struct fm10k_hw_stat rx_bytes;
struct fm10k_hw_stat rx_packets;
#define rx_stats_idx rx_packets.base_h
struct fm10k_hw_stat rx_drops;
};
struct fm10k_hw_stats {
struct fm10k_hw_stat timeout;
#define stats_idx timeout.base_h
struct fm10k_hw_stat ur;
struct fm10k_hw_stat ca;
struct fm10k_hw_stat um;
struct fm10k_hw_stat xec;
struct fm10k_hw_stat vlan_drop;
struct fm10k_hw_stat loopback_drop;
struct fm10k_hw_stat nodesc_drop;
struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF];
};
/* Establish DGLORT feature priority */
enum fm10k_dglortdec_idx {
fm10k_dglort_default = 0,
fm10k_dglort_vf_rsvd0 = 1,
fm10k_dglort_vf_rss = 2,
fm10k_dglort_pf_rsvd0 = 3,
fm10k_dglort_pf_queue = 4,
fm10k_dglort_pf_vsi = 5,
fm10k_dglort_pf_rsvd1 = 6,
fm10k_dglort_pf_rss = 7
};
struct fm10k_dglort_cfg {
u16 glort; /* GLORT base */
u16 queue_b; /* Base value for queue */
u8 vsi_b; /* Base value for VSI */
u8 idx; /* index of DGLORTDEC entry */
u8 rss_l; /* RSS indices */
u8 pc_l; /* Priority Class indices */
u8 vsi_l; /* Number of bits from GLORT used to determine VSI */
u8 queue_l; /* Number of bits from GLORT used to determine queue */
u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */
u8 inner_rss; /* Boolean value if inner header is used for RSS */
};
enum fm10k_pca_fault {
PCA_NO_FAULT,
PCA_UNMAPPED_ADDR,
PCA_BAD_QACCESS_PF,
PCA_BAD_QACCESS_VF,
PCA_MALICIOUS_REQ,
PCA_POISONED_TLP,
PCA_TLP_ABORT,
__PCA_MAX
};
enum fm10k_thi_fault {
THI_NO_FAULT,
THI_MAL_DIS_Q_FAULT,
__THI_MAX
};
enum fm10k_fum_fault {
FUM_NO_FAULT,
FUM_UNMAPPED_ADDR,
FUM_POISONED_TLP,
FUM_BAD_VF_QACCESS,
FUM_ADD_DECODE_ERR,
FUM_RO_ERROR,
FUM_QPRC_CRC_ERROR,
FUM_CSR_TIMEOUT,
FUM_INVALID_TYPE,
FUM_INVALID_LENGTH,
FUM_INVALID_BE,
FUM_INVALID_ALIGN,
__FUM_MAX
};
struct fm10k_fault {
u64 address; /* Address at the time fault was detected */
u32 specinfo; /* Extra info on this fault (fault dependent) */
u8 type; /* Fault value dependent on subunit */
u8 func; /* Function number of the fault */
};
struct fm10k_mac_ops {
/* basic bring-up and tear-down */
s32 (*reset_hw)(struct fm10k_hw *);
s32 (*init_hw)(struct fm10k_hw *);
s32 (*start_hw)(struct fm10k_hw *);
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
bool (*is_slot_appropriate)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
u16, bool, u8);
s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool);
s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8);
void (*update_int_moderator)(struct fm10k_hw *);
s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool);
void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *);
s32 (*configure_dglort_map)(struct fm10k_hw *,
struct fm10k_dglort_cfg *);
void (*set_dma_mask)(struct fm10k_hw *, u64);
s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
void (*request_lport_map)(struct fm10k_hw *);
s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
u64 (*read_systime)(struct fm10k_hw *);
};
enum fm10k_mac_type {
fm10k_mac_unknown = 0,
fm10k_mac_pf,
fm10k_mac_vf,
fm10k_num_macs
};
struct fm10k_mac_info {
struct fm10k_mac_ops ops;
enum fm10k_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
u16 default_vid;
u16 max_msix_vectors;
u16 max_queues;
bool vlan_override;
bool get_host_state;
bool tx_ready;
u32 dglort_map;
};
struct fm10k_swapi_table_info {
u32 used;
u32 avail;
};
struct fm10k_swapi_info {
u32 status;
struct fm10k_swapi_table_info mac;
struct fm10k_swapi_table_info nexthop;
struct fm10k_swapi_table_info ffu;
};
enum fm10k_xcast_modes {
FM10K_XCAST_MODE_ALLMULTI = 0,
FM10K_XCAST_MODE_MULTI = 1,
FM10K_XCAST_MODE_PROMISC = 2,
FM10K_XCAST_MODE_NONE = 3,
FM10K_XCAST_MODE_DISABLE = 4
};
#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */
#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */
struct fm10k_vf_info {
/* mbx must be first field in struct unless all default IOV message
* handlers are redone as the assumption is that vf_info starts
* at the same offset as the mailbox
*/
struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
int rate; /* Tx BW cap as defined by OS */
u16 glort; /* resource tag for this VF */
u16 sw_vid; /* Switch API assigned VLAN */
u16 pf_vid; /* PF assigned Default VLAN */
u8 mac[ETH_ALEN]; /* PF Default MAC address */
	u8 vsi;			/* VSI identifier */
u8 vf_idx; /* which VF this is */
u8 vf_flags; /* flags indicating what modes
* are supported for the port
*/
};
#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI)
#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC)
#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE)
#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
#define FM10K_VF_FLAG_SET_MODE_NONE \
FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE)
#define FM10K_VF_FLAG_MULTI_ENABLED \
(FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \
FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \
FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC))
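/* The helpers above split vf_flags into two nibbles: bits 0-3 record which
 * xcast modes the VF is allowed to request (one bit per fm10k_xcast_modes
 * value), while bits 4-7 record the mode currently enabled on the port.
 */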
struct fm10k_iov_ops {
/* IOV related bring-up and tear-down */
s32 (*assign_resources)(struct fm10k_hw *, u16, u16);
s32 (*configure_tc)(struct fm10k_hw *, u16, int);
s32 (*assign_int_moderator)(struct fm10k_hw *, u16);
s32 (*assign_default_mac_vlan)(struct fm10k_hw *,
struct fm10k_vf_info *);
s32 (*reset_resources)(struct fm10k_hw *,
struct fm10k_vf_info *);
s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
};
struct fm10k_iov_info {
struct fm10k_iov_ops ops;
u16 total_vfs;
u16 num_vfs;
u16 num_pools;
};
enum fm10k_devices {
fm10k_device_pf,
fm10k_device_vf,
};
struct fm10k_info {
enum fm10k_mac_type mac;
s32 (*get_invariants)(struct fm10k_hw *);
struct fm10k_mac_ops *mac_ops;
struct fm10k_iov_ops *iov_ops;
};
struct fm10k_hw {
u32 __iomem *hw_addr;
u32 __iomem *sw_addr;
void *back;
struct fm10k_mac_info mac;
struct fm10k_bus_info bus;
struct fm10k_bus_info bus_caps;
struct fm10k_iov_info iov;
struct fm10k_mbx_info mbx;
struct fm10k_swapi_info swapi;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
u16 subsystem_vendor_id;
u8 revision_id;
};
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8
/* Transmit Descriptor */
struct fm10k_tx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
__le16 buflen; /* Length of data to be DMAed */
__le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */
__le16 mss; /* MSS for segmentation offload */
u8 hdrlen; /* Header size for segmentation offload */
u8 flags; /* Status and offload request flags */
};
/* Transmit Descriptor Cache Structure */
struct fm10k_tx_desc_cache {
struct fm10k_tx_desc tx_desc[256];
};
#define FM10K_TXD_FLAG_INT 0x01
#define FM10K_TXD_FLAG_TIME 0x02
#define FM10K_TXD_FLAG_CSUM 0x04
#define FM10K_TXD_FLAG_FTAG 0x10
#define FM10K_TXD_FLAG_RS 0x20
#define FM10K_TXD_FLAG_LAST 0x40
#define FM10K_TXD_FLAG_DONE 0x80
/* These macros are meant to enable optimal placement of the RS and INT
* bits. It will point us to the last descriptor in the cache for either the
* start of the packet, or the end of the packet. If the index is actually
* at the start of the FIFO it will point to the offset for the last index
* in the FIFO to prevent an unnecessary write.
*/
#define FM10K_TXD_WB_FIFO_SIZE 4
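/* Illustrative sketch (assumption about intent, not driver code): one way to
 * map a descriptor index onto the last entry of its 4-entry write-back FIFO
 * slot, matching the placement described in the comment above.
 */
static inline u16 fm10k_txd_wb_index(u16 index)
{
	return (index & ~(FM10K_TXD_WB_FIFO_SIZE - 1)) +
	       (FM10K_TXD_WB_FIFO_SIZE - 1);
}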
/* Receive Descriptor - 32B */
union fm10k_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 reserved; /* Empty space, RSS hash */
__le64 timestamp;
} q; /* Read, Writeback, 64b quad-words */
struct {
__le32 data; /* RSS and header data */
__le32 rss; /* RSS Hash */
__le32 staterr;
__le32 vlan_len;
__le32 glort; /* sglort/dglort */
} d; /* Writeback, 32b double-words */
struct {
__le16 pkt_info; /* RSS, Pkt type */
__le16 hdr_info; /* Splithdr, hdrlen, xC */
__le16 rss_lower;
__le16 rss_upper;
__le16 status; /* status/error */
__le16 csum_err; /* checksum or extended error value */
__le16 length; /* Packet length */
__le16 vlan; /* VLAN tag */
__le16 dglort;
__le16 sglort;
} w; /* Writeback, 16b words */
};
#define FM10K_RXD_RSSTYPE_MASK 0x000F
enum fm10k_rdesc_rss_type {
FM10K_RSSTYPE_NONE = 0x0,
FM10K_RSSTYPE_IPV4_TCP = 0x1,
FM10K_RSSTYPE_IPV4 = 0x2,
FM10K_RSSTYPE_IPV6_TCP = 0x3,
/* Reserved 0x4 */
FM10K_RSSTYPE_IPV6 = 0x5,
/* Reserved 0x6 */
FM10K_RSSTYPE_IPV4_UDP = 0x7,
FM10K_RSSTYPE_IPV6_UDP = 0x8
/* Reserved 0x9 - 0xF */
};
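/* Illustrative helper (assumption): the RSS type is carried in the low bits
 * of the 16-bit pkt_info word of the write-back descriptor and is extracted
 * with the mask above.
 */
static inline u8 fm10k_rx_desc_rsstype(const union fm10k_rx_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
}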
#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006
enum fm10k_rxdesc_xc {
FM10K_XC_UNICAST = 0x0,
FM10K_XC_MULTICAST = 0x4,
FM10K_XC_BROADCAST = 0x6
};
#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */
#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */
#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */
#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */
#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */
#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */
#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */
#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */
#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */
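/* Illustrative helper (assumption): Rx clean-up typically checks the
 * descriptor-done and end-of-packet bits in the write-back status word
 * before touching the rest of the descriptor.
 */
static inline bool fm10k_rx_desc_complete(const union fm10k_rx_desc *rx_desc)
{
	u16 status = le16_to_cpu(rx_desc->w.status);

	return (status & FM10K_RXD_STATUS_DD) &&
	       (status & FM10K_RXD_STATUS_EOP);
}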
struct fm10k_ftag {
__be16 swpri_type_user;
__be16 vlan;
__be16 sglort;
__be16 dglort;
};
#endif /* _FM10K_TYPE_H */


@ -0,0 +1,578 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k_vf.h"
/**
* fm10k_stop_hw_vf - Stop Tx/Rx units
* @hw: pointer to hardware structure
*
**/
static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
{
u8 *perm_addr = hw->mac.perm_addr;
u32 bal = 0, bah = 0;
s32 err;
u16 i;
/* we need to disable the queues before taking further steps */
err = fm10k_stop_hw_generic(hw);
if (err)
return err;
/* If the permanent address is set then we need to restore it */
if (is_valid_ether_addr(perm_addr)) {
bal = (((u32)perm_addr[3]) << 24) |
(((u32)perm_addr[4]) << 16) |
(((u32)perm_addr[5]) << 8);
bah = (((u32)0xFF) << 24) |
(((u32)perm_addr[0]) << 16) |
(((u32)perm_addr[1]) << 8) |
((u32)perm_addr[2]);
}
/* The queues have already been disabled so we just need to
* update their base address registers
*/
for (i = 0; i < hw->mac.max_queues; i++) {
fm10k_write_reg(hw, FM10K_TDBAL(i), bal);
fm10k_write_reg(hw, FM10K_TDBAH(i), bah);
fm10k_write_reg(hw, FM10K_RDBAL(i), bal);
fm10k_write_reg(hw, FM10K_RDBAH(i), bah);
}
return 0;
}
/**
* fm10k_reset_hw_vf - VF hardware reset
* @hw: pointer to hardware structure
*
* This function should return the hardware to a state similar to the
* one it is in after just being initialized.
**/
static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
{
s32 err;
/* shut down queues we own and reset DMA configuration */
err = fm10k_stop_hw_vf(hw);
if (err)
return err;
/* Initiate VF reset */
fm10k_write_reg(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST);
/* Flush write and allow 100us for reset to complete */
fm10k_write_flush(hw);
udelay(FM10K_RESET_TIMEOUT);
/* Clear reset bit and verify it was cleared */
fm10k_write_reg(hw, FM10K_VFCTRL, 0);
if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
err = FM10K_ERR_RESET_FAILED;
return err;
}
/**
* fm10k_init_hw_vf - VF hardware initialization
* @hw: pointer to hardware structure
*
**/
static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
{
u32 tqdloc, tqdloc0 = ~fm10k_read_reg(hw, FM10K_TQDLOC(0));
s32 err;
u16 i;
/* assume we always have at least 1 queue */
for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
/* verify the Descriptor cache offsets are increasing */
tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
if (!tqdloc || (tqdloc == tqdloc0))
break;
/* check to verify the PF doesn't own any of our queues */
if (!~fm10k_read_reg(hw, FM10K_TXQCTL(i)) ||
!~fm10k_read_reg(hw, FM10K_RXQCTL(i)))
break;
}
/* shut down queues we own and reset DMA configuration */
err = fm10k_disable_queues_generic(hw, i);
if (err)
return err;
/* record maximum queue count */
hw->mac.max_queues = i;
return 0;
}
/**
* fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
* @hw: pointer to hardware structure
*
* Looks at the PCIe bus info to confirm whether or not this slot can support
* the necessary bandwidth for this device. Since the VF has no control over
* the "slot" it is in, always indicate that the slot is appropriate.
**/
static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
{
return true;
}
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET),
FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC),
FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC),
FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST),
FM10K_TLV_ATTR_LAST
};
/**
* fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table
* @hw: pointer to hardware structure
* @vid: VLAN ID to add to table
* @vsi: Reserved, should always be 0
* @set: Indicates if this is a set or clear operation
*
* This function adds or removes the corresponding VLAN ID from the VLAN
* filter table for this VF.
**/
static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[4];
/* verify the index is not set */
if (vsi)
return FM10K_ERR_PARAM;
/* verify upper 4 bits of vid and length are 0 */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
/* encode set bit into the VLAN ID */
if (!set)
vid |= FM10K_VLAN_CLEAR;
/* generate VLAN request */
fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid);
/* load onto outgoing mailbox */
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
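/* Usage sketch (assumption): adding VLAN 100 to this VF's filter table would
 * be requested through the MAC ops table, e.g.
 *
 *	err = hw->mac.ops.update_vlan(hw, 100, 0, true);
 */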
/**
* fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message
* @hw: pointer to the HW structure
* @results: Attributes for message
* @mbx: unused mailbox data
*
* This function should determine the MAC address for the VF
**/
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
u8 perm_addr[ETH_ALEN];
u16 vid;
s32 err;
/* record MAC address requested */
err = fm10k_tlv_attr_get_mac_vlan(
results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC],
perm_addr, &vid);
if (err)
return err;
ether_addr_copy(hw->mac.perm_addr, perm_addr);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
return 0;
}
/**
* fm10k_read_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
*
* This function should determine the MAC address for the VF
**/
static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
{
u8 perm_addr[ETH_ALEN];
u32 base_addr;
base_addr = fm10k_read_reg(hw, FM10K_TDBAL(0));
/* last byte should be 0 */
if (base_addr << 24)
return FM10K_ERR_INVALID_MAC_ADDR;
perm_addr[3] = (u8)(base_addr >> 24);
perm_addr[4] = (u8)(base_addr >> 16);
perm_addr[5] = (u8)(base_addr >> 8);
base_addr = fm10k_read_reg(hw, FM10K_TDBAH(0));
/* first byte should be all 1's */
if ((~base_addr) >> 24)
return FM10K_ERR_INVALID_MAC_ADDR;
perm_addr[0] = (u8)(base_addr >> 16);
perm_addr[1] = (u8)(base_addr >> 8);
perm_addr[2] = (u8)(base_addr);
ether_addr_copy(hw->mac.perm_addr, perm_addr);
ether_addr_copy(hw->mac.addr, perm_addr);
return 0;
}
/**
* fm10k_update_uc_addr_vf - Update device unicast address
* @hw: pointer to the HW structure
* @glort: unused
* @mac: MAC address to add/remove from table
* @vid: VLAN ID to add/remove from table
* @add: Indicates if this is an add or remove operation
* @flags: flags field to indicate add and secure - unused
*
* This function is used to add or remove unicast MAC addresses for
* the VF.
**/
static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
const u8 *mac, u16 vid, bool add, u8 flags)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[7];
/* verify VLAN ID is valid */
if (vid >= FM10K_VLAN_TABLE_VID_MAX)
return FM10K_ERR_PARAM;
/* verify MAC address is valid */
if (!is_valid_ether_addr(mac))
return FM10K_ERR_PARAM;
/* verify we are not locked down on the MAC address */
if (is_valid_ether_addr(hw->mac.perm_addr) &&
memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
return FM10K_ERR_PARAM;
/* add bit to notify us if this is a set or clear operation */
if (!add)
vid |= FM10K_VLAN_CLEAR;
/* generate VLAN request */
fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid);
/* load onto outgoing mailbox */
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
* fm10k_update_mc_addr_vf - Update device multicast address
* @hw: pointer to the HW structure
* @glort: unused
* @mac: MAC address to add/remove from table
* @vid: VLAN ID to add/remove from table
* @add: Indicates if this is an add or remove operation
*
* This function is used to add or remove multicast MAC addresses for
* the VF.
**/
static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
const u8 *mac, u16 vid, bool add)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[7];
/* verify VLAN ID is valid */
if (vid >= FM10K_VLAN_TABLE_VID_MAX)
return FM10K_ERR_PARAM;
/* verify multicast address is valid */
if (!is_multicast_ether_addr(mac))
return FM10K_ERR_PARAM;
/* add bit to notify us if this is a set or clear operation */
if (!add)
vid |= FM10K_VLAN_CLEAR;
/* generate VLAN request */
fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST,
mac, vid);
/* load onto outgoing mailbox */
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
* fm10k_update_int_moderator_vf - Request update of interrupt moderator list
* @hw: pointer to hardware structure
*
* This function will issue a request to the PF to rescan our MSI-X table
* and to update the interrupt moderator linked list.
**/
static void fm10k_update_int_moderator_vf(struct fm10k_hw *hw)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[1];
/* generate MSI-X request */
fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX);
/* load onto outgoing mailbox */
mbx->ops.enqueue_tx(hw, mbx, msg);
}
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE),
FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE),
FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY),
FM10K_TLV_ATTR_LAST
};
/**
* fm10k_msg_lport_state_vf - Message handler for lport_state message from PF
* @hw: Pointer to hardware structure
* @results: pointer array containing parsed data
* @mbx: Pointer to mailbox information structure
*
* This handler is meant to capture the indication from the PF that we
* are ready to bring up the interface.
**/
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
return 0;
}
/**
* fm10k_update_lport_state_vf - Update device state in lower device
* @hw: pointer to the HW structure
* @glort: unused
* @count: number of logical ports to enable - unused (always 1)
* @enable: boolean value indicating if this is an enable or disable request
*
* Notify the lower device of a state change. If the lower device is
* enabled we can add filters; if it is disabled, all filters for this
* logical port are flushed.
**/
static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
u16 count, bool enable)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[2];
/* reset glort mask 0 as we have to wait to be enabled */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
/* generate port state request */
fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
if (!enable)
fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE);
/* load onto outgoing mailbox */
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
* fm10k_update_xcast_mode_vf - Request update of multicast mode
* @hw: pointer to hardware structure
* @glort: unused
* @mode: integer value indicating mode being requested
*
* This function will attempt to request a higher mode for the port
* so that it can enable either multicast, multicast promiscuous, or
* promiscuous mode of operation.
**/
static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[3];
if (mode > FM10K_XCAST_MODE_NONE)
return FM10K_ERR_PARAM;
/* generate message requesting to change xcast mode */
fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);
/* load onto outgoing mailbox */
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
FM10K_TLV_ATTR_LAST
};
/* currently there is no shared 1588 timestamp handler */
/**
* fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
*
* This function collects and aggregates per queue hardware statistics.
**/
static void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
struct fm10k_hw_stats *stats)
{
fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}
/**
* fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF
* @hw: pointer to hardware structure
* @stats: pointer to the stats structure to update
*
* This function resets the base for queue hardware statistics.
**/
static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
struct fm10k_hw_stats *stats)
{
/* Unbind Queue Statistics */
fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_vf(hw, stats);
}
/**
* fm10k_configure_dglort_map_vf - Configures GLORT entry and queues
* @hw: pointer to hardware structure
* @dglort: pointer to dglort configuration structure
*
* Reads the configuration structure contained in dglort_cfg and uses
* that information to then populate a DGLORTMAP/DEC entry and the queues
* to which it has been assigned.
**/
static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
struct fm10k_dglort_cfg *dglort)
{
/* verify the dglort pointer */
if (!dglort)
return FM10K_ERR_PARAM;
/* stub for now until we determine correct message for this */
return 0;
}
/**
* fm10k_adjust_systime_vf - Adjust systime frequency
* @hw: pointer to hardware structure
* @ppb: adjustment rate in parts per billion
*
* This function takes an adjustment rate in parts per billion and will
* verify that this value is 0 as the VF cannot support adjusting the
* systime clock.
*
* If the ppb value is non-zero, FM10K_ERR_PARAM is returned; otherwise success.
**/
static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
{
/* The VF cannot adjust the clock frequency, however it should
* already have a syntonic clock with whichever host interface is
* running as the master for the host interface clock domain so
* there should be no frequency adjustment necessary.
*/
return ppb ? FM10K_ERR_PARAM : 0;
}
/**
* fm10k_read_systime_vf - Reads value of systime registers
* @hw: pointer to the hardware structure
*
* Function reads the content of 2 registers, combined to represent a 64 bit
* value measured in nanoseconds. In order to guarantee the value is accurate
* we check the 32 most significant bits both before and after reading the
* 32 least significant bits to verify they didn't change as we were reading
* the registers.
**/
static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
{
u32 systime_l, systime_h, systime_tmp;
systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
do {
systime_tmp = systime_h;
systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
} while (systime_tmp != systime_h);
return ((u64)systime_h << 32) | systime_l;
}
static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static struct fm10k_mac_ops mac_ops_vf = {
.get_bus_info = &fm10k_get_bus_info_generic,
.reset_hw = &fm10k_reset_hw_vf,
.init_hw = &fm10k_init_hw_vf,
.start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_vf,
.is_slot_appropriate = &fm10k_is_slot_appropriate_vf,
.update_vlan = &fm10k_update_vlan_vf,
.read_mac_addr = &fm10k_read_mac_addr_vf,
.update_uc_addr = &fm10k_update_uc_addr_vf,
.update_mc_addr = &fm10k_update_mc_addr_vf,
.update_xcast_mode = &fm10k_update_xcast_mode_vf,
.update_int_moderator = &fm10k_update_int_moderator_vf,
.update_lport_state = &fm10k_update_lport_state_vf,
.update_hw_stats = &fm10k_update_hw_stats_vf,
.rebind_hw_stats = &fm10k_rebind_hw_stats_vf,
.configure_dglort_map = &fm10k_configure_dglort_map_vf,
.get_host_state = &fm10k_get_host_state_generic,
.adjust_systime = &fm10k_adjust_systime_vf,
.read_systime = &fm10k_read_systime_vf,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
{
fm10k_get_invariants_generic(hw);
return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
}
struct fm10k_info fm10k_vf_info = {
.mac = fm10k_mac_vf,
.get_invariants = &fm10k_get_invariants_vf,
.mac_ops = &mac_ops_vf,
};


@ -0,0 +1,78 @@
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_VF_H_
#define _FM10K_VF_H_
#include "fm10k_type.h"
#include "fm10k_common.h"
enum fm10k_vf_tlv_msg_id {
FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */
FM10K_VF_MSG_ID_MSIX,
FM10K_VF_MSG_ID_MAC_VLAN,
FM10K_VF_MSG_ID_LPORT_STATE,
FM10K_VF_MSG_ID_1588,
FM10K_VF_MSG_ID_MAX,
};
enum fm10k_tlv_mac_vlan_attr_id {
FM10K_MAC_VLAN_MSG_VLAN,
FM10K_MAC_VLAN_MSG_SET,
FM10K_MAC_VLAN_MSG_MAC,
FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
FM10K_MAC_VLAN_MSG_MULTICAST,
FM10K_MAC_VLAN_MSG_ID_MAX
};
enum fm10k_tlv_lport_state_attr_id {
FM10K_LPORT_STATE_MSG_DISABLE,
FM10K_LPORT_STATE_MSG_XCAST_MODE,
FM10K_LPORT_STATE_MSG_READY,
FM10K_LPORT_STATE_MSG_MAX
};
enum fm10k_tlv_1588_attr_id {
FM10K_1588_MSG_TIMESTAMP,
FM10K_1588_MSG_MAX
};
#define FM10K_VF_MSG_MSIX_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[];
#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \
fm10k_mac_vlan_msg_attr, func)
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
fm10k_lport_state_msg_attr, func)
extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
#define FM10K_VF_MSG_1588_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
extern struct fm10k_info fm10k_vf_info;
#endif /* _FM10K_VF_H_ */


@ -0,0 +1,47 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
#
obj-$(CONFIG_I40E) += i40e.o
i40e-objs := i40e_main.o \
i40e_ethtool.o \
i40e_adminq.o \
i40e_common.o \
i40e_hmc.o \
i40e_lan_hmc.o \
i40e_nvm.o \
i40e_debugfs.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o


@ -0,0 +1,726 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_H_
#define _I40E_H_
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#ifdef I40E_FCOE
#include "i40e_fcoe.h"
#endif
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
#include "i40e_dcb.h"
/* Useful i40e defaults */
#define I40E_BASE_PF_SEID 16
#define I40E_BASE_VSI_SEID 512
#define I40E_BASE_VEB_SEID 288
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 32
#define I40E_AQ_WORK_LIMIT 16
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
/* The values in here are decimal coded as hex as is the case in the NVM map */
#define I40E_CURRENT_NVM_VERSION_HI 0x2
#define I40E_CURRENT_NVM_VERSION_LO 0x40
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
#define XSTRINGIFY(bar) STRINGIFY(bar)
#define I40E_RX_DESC(R, i) \
((ring_is_16byte_desc_enabled(R)) \
? (union i40e_32byte_rx_desc *) \
(&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
: (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
#define I40E_TX_FDIRDESC(R, i) \
(&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
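/* Usage sketch (assumption; ring and index names are hypothetical): the
 * accessor macros above hide the 16B vs 32B Rx descriptor layout and the
 * Tx descriptor flavors from the hot path, e.g.
 *
 *	union i40e_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, ntc);
 *	struct i40e_tx_desc *tx_desc = I40E_TX_DESC(tx_ring, ntu);
 */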
/* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
/* driver state flags */
enum i40e_state_t {
__I40E_TESTING,
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
__I40E_DOWN,
__I40E_NEEDS_RESTART,
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_PTP_TX_IN_PROGRESS,
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
__I40E_RESET_FAILED,
};
enum i40e_interrupt_policy {
I40E_INTERRUPT_BEST_CASE,
I40E_INTERRUPT_MEDIUM,
I40E_INTERRUPT_LOWEST
};
struct i40e_lump_tracking {
u16 num_entries;
u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
};
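/* Usage sketch (assumption): each entry in the lump tracker stores the valid
 * bit alongside the identifier of the owner that claimed it, e.g.
 *
 *	bool in_use = !!(pile->list[i] & I40E_PILE_VALID_BIT);
 *	u16 owner = pile->list[i] & ~I40E_PILE_VALID_BIT;
 */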
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 32
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
I40E_FD_STAT_SB,
I40E_FD_STAT_PF_COUNT
};
#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
#define I40E_FD_ATR_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
#define I40E_FD_SB_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
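/* Usage sketch (assumption): the per-PF ATR and sideband filter counters sit
 * at fixed offsets in the shared statistics space, e.g.
 *
 *	u32 atr_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
 *	u32 sb_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 */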
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
u8 ip4_proto;
/* TX packet view of src and dst */
__be32 dst_ip[4];
__be32 src_ip[4];
__be16 src_port;
__be16 dst_port;
__be32 sctp_v_tag;
/* filter control */
u16 q_index;
u8 flex_off;
u8 pctype;
u16 dest_vsi;
u8 dest_ctl;
u8 fd_status;
u16 cnt_index;
u32 fd_id;
};
#define I40E_ETH_P_LLDP 0x88cc
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
#define I40E_MAX_USER_PRIORITY 8
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
u16 qcount; /* Total Queues */
u8 netdev_tc; /* Netdev TC index if netdev associated */
};
/* TC configuration data structure */
struct i40e_tc_configuration {
u8 numtc; /* Total number of enabled TCs */
u8 enabled_tc; /* TC map */
struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
};
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
struct i40e_hw hw;
unsigned long state;
unsigned long link_check_timeout;
struct msix_entry *msix_entries;
bool fc_autoneg_status;
u16 eeprom_version;
u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num VFs requested for this PF */
u16 num_vf_qps; /* num queue pairs per vf */
#ifdef I40E_FCOE
u16 num_fcoe_qps; /* num fcoe queues this pf has set up */
u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
u16 fd_sb_cnt_idx;
u16 fd_atr_cnt_idx;
unsigned long fd_flush_timestamp;
u32 fd_flush_cnt;
u32 fd_add_err;
u32 fd_atr_cnt;
u32 fd_tcp_rule;
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_vxlan_bitmap;
#endif
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
u16 msg_enable;
char misc_int_name[IFNAMSIZ + 9];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
int service_timer_period;
struct timer_list service_timer;
struct work_struct service_task;
u64 flags;
#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11)
#endif /* I40E_FCOE */
#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
#define I40E_FLAG_PTP (u64)(1 << 25)
#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
#ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
#ifdef I40E_FCOE
struct i40e_fcoe fcoe;
#endif /* I40E_FCOE */
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
u16 sw_int_count; /* SW interrupt count */
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
u16 lan_veb; /* initial relay, if exists */
#define I40E_NO_VEB 0xffff
#define I40E_NO_VSI 0xffff
u16 next_vsi; /* Next unallocated VSI - 0-based! */
struct i40e_vsi **vsi;
struct i40e_veb *veb[I40E_MAX_VEB];
struct i40e_lump_tracking *qp_pile;
struct i40e_lump_tracking *irq_pile;
/* switch config info */
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
u16 instance; /* A unique number per i40e_pf instance in the system */
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
* flavor of DCBx protocol (IEEE/CEE) is supported
* by the device. For now we're supporting IEEE
* mode only.
*/
u16 dcbx_cap;
u32 fcoe_hmc_filt_num;
u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size;
};
struct i40e_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
#define I40E_VLAN_ANY -1
s16 vlan;
u8 counter; /* number of instances of this filter */
bool is_vf; /* filter belongs to a VF */
bool is_netdev; /* filter belongs to a netdev */
bool changed; /* filter needs to be sync'd to the HW */
bool is_laa; /* filter is a Locally Administered Address */
};
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
bool is_abs_credits;
u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
struct kobject *kobj;
bool stat_offsets_loaded;
struct i40e_eth_stats stats;
struct i40e_eth_stats stats_offsets;
};
/* struct that defines a VSI, associated with a dev */
struct i40e_vsi {
struct net_device *netdev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
bool netdev_registered;
bool stat_offsets_loaded;
u32 current_netdev_flags;
unsigned long state;
#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
unsigned long flags;
struct list_head mac_filter_list;
/* VSI stats */
struct rtnl_link_stats64 net_stats;
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
#ifdef I40E_FCOE
struct i40e_fcoe_stats fcoe_stats;
struct i40e_fcoe_stats fcoe_stats_offsets;
bool fcoe_stat_offsets_loaded;
#endif
u32 tx_restart;
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 max_frame;
u16 rx_hdr_len;
u16 rx_buf_len;
u8 dtype;
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
bool irqs_ready;
u16 seid; /* HW index of this VSI (absolute index) */
u16 id; /* VSI number */
u16 uplink_seid;
u16 base_queue; /* vsi's first queue in hw array */
u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
u16 vf_id; /* Virtual function ID for SRIOV VSIs */
struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info;
/* VSI BW limit (absolute across all TCs) */
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */
/* Relative TC credits across VSIs */
u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
/* TC BW limit credits within VSI */
u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
/* TC BW limit max quanta within VSI */
u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
struct i40e_pf *back; /* Backreference to associated PF */
u16 idx; /* index in pf->vsi[] */
u16 veb_idx; /* index of VEB parent */
struct kobject *kobj; /* sysfs object */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
struct i40e_vsi *vsi;
};
/* struct that defines an interrupt vector */
struct i40e_q_vector {
struct i40e_vsi *vsi;
u16 v_idx; /* index in the vsi->q_vector array. */
u16 reg_idx; /* register index of the interrupt */
struct napi_struct napi;
struct i40e_ring_container rx;
struct i40e_ring_container tx;
u8 num_ringpairs; /* total number of ring pairs in vector */
cpumask_t affinity_mask;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;
/* lan device */
struct i40e_device {
struct list_head list;
struct i40e_pf *pf;
};
/**
* i40e_fw_version_str - format the FW and NVM version strings
* @hw: ptr to the hardware info
**/
static inline char *i40e_fw_version_str(struct i40e_hw *hw)
{
static char buf[32];
snprintf(buf, sizeof(buf),
"f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
return buf;
}
/**
* i40e_netdev_to_pf - Retrieve the PF struct for the given netdev
* @netdev: the corresponding netdev
*
* Return the PF struct for the given netdev
**/
static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
return vsi->back;
}
static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
irqreturn_t (*irq_handler)(int, void *))
{
vsi->irq_handler = irq_handler;
}
/**
* i40e_rx_is_programming_status - check for programming status descriptor
* @qw: the first quad word of the program status descriptor
*
* The value in the descriptor length field indicates whether this is a
* programming status descriptor for flow director or FCoE (it equals
* I40E_RX_PROG_STATUS_DESC_LENGTH); otherwise it is a packet descriptor.
**/
static inline bool i40e_rx_is_programming_status(u64 qw)
{
return I40E_RX_PROG_STATUS_DESC_LENGTH ==
(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
}
/**
* i40e_get_fd_cnt_all - get the total FD filter space available
* @pf: pointer to the pf struct
**/
static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
{
return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
}
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
int i40e_get_current_fd_count(struct i40e_pf *pf);
int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
int i40e_get_current_atr_cnt(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
void i40e_pf_reset_stats(struct i40e_pf *pf);
#ifdef CONFIG_DEBUG_FS
void i40e_dbg_pf_init(struct i40e_pf *pf);
void i40e_dbg_pf_exit(struct i40e_pf *pf);
void i40e_dbg_init(void);
void i40e_dbg_exit(void);
#else
static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
struct rtnl_link_stats64 *storage);
int i40e_set_mac(struct net_device *netdev, void *p);
void i40e_set_rx_mode(struct net_device *netdev);
#endif
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev);
int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_setup_tc(struct net_device *netdev, u8 tc);
void i40e_netpoll(struct net_device *netdev);
int i40e_fcoe_enable(struct net_device *netdev);
int i40e_fcoe_disable(struct net_device *netdev);
int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
int i40e_init_pf_fcoe(struct i40e_pf *pf);
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
struct sk_buff *skb);
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, u8 prog_id);
#endif /* I40E_FCOE */
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *new_cfg);
void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
void i40e_dcbnl_setup(struct i40e_vsi *vsi);
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
#endif /* CONFIG_I40E_DCB */
void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
#endif /* _I40E_H_ */

File diff suppressed because it is too large


@ -0,0 +1,149 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
struct i40e_virt_mem dma_head; /* space for dma structures */
struct i40e_dma_mem desc_buf; /* descriptor ring memory */
struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
};
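/* Usage sketch (assumption): the next free send-queue descriptor is fetched
 * by index from a ring like the one above using the I40E_ADMINQ_DESC macro,
 * e.g.
 *
 *	struct i40e_aq_desc *desc = I40E_ADMINQ_DESC(hw->aq.asq,
 *						     hw->aq.asq.next_to_use);
 */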
/* ASQ transaction details */
struct i40e_asq_cmd_details {
void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
};
#define I40E_ADMINQ_DETAILS(R, i) \
(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
u16 msg_size;
u8 *msg_buf;
};
/* Admin Queue information */
struct i40e_adminq_info {
struct i40e_adminq_ring arq; /* receive queue */
struct i40e_adminq_ring asq; /* send queue */
u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */
u16 arq_buf_size; /* receive queue buffer size */
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_busy;
bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
};
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
* @aq_rc: AdminQ error code to convert
**/
static inline int i40e_aq_rc_to_posix(u16 aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
-EPERM, /* I40E_AQ_RC_EPERM */
-ENOENT, /* I40E_AQ_RC_ENOENT */
-ESRCH, /* I40E_AQ_RC_ESRCH */
-EINTR, /* I40E_AQ_RC_EINTR */
-EIO, /* I40E_AQ_RC_EIO */
-ENXIO, /* I40E_AQ_RC_ENXIO */
-E2BIG, /* I40E_AQ_RC_E2BIG */
-EAGAIN, /* I40E_AQ_RC_EAGAIN */
-ENOMEM, /* I40E_AQ_RC_ENOMEM */
-EACCES, /* I40E_AQ_RC_EACCES */
-EFAULT, /* I40E_AQ_RC_EFAULT */
-EBUSY, /* I40E_AQ_RC_EBUSY */
-EEXIST, /* I40E_AQ_RC_EEXIST */
-EINVAL, /* I40E_AQ_RC_EINVAL */
-ENOTTY, /* I40E_AQ_RC_ENOTTY */
-ENOSPC, /* I40E_AQ_RC_ENOSPC */
-ENOSYS, /* I40E_AQ_RC_ENOSYS */
-ERANGE, /* I40E_AQ_RC_ERANGE */
-EPIPE, /* I40E_AQ_RC_EFLUSHED */
-ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
-EROFS, /* I40E_AQ_RC_EMODE */
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
	/* defensively range-check the firmware return code before indexing
	 * the table (assumption: out-of-range codes map to a range error)
	 */
	if (aq_rc >= ARRAY_SIZE(aq_to_posix))
		return -ERANGE;

	return aq_to_posix[aq_rc];
}
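/* Usage sketch (assumption): callers typically translate the last AdminQ
 * return code into an errno for user space, e.g.
 *
 *	ret = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
 */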
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */

File diff suppressed because it is too large


@ -0,0 +1,58 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
struct i40e_hw;
/* Memory allocation types */
enum i40e_memory_type {
i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
i40e_mem_asq_buf = 1,
i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
i40e_mem_pd = 5, /* Page Descriptor */
i40e_mem_bp = 6, /* Backing Page - 4KB */
i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
i40e_mem_reserved
};
/* prototype for functions used for dynamic memory allocation */
i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem,
enum i40e_memory_type type,
u64 size, u32 alignment);
i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem);
i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem,
u32 size);
i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem);
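/* Usage sketch (assumption): descriptor ring memory is obtained through these
 * wrappers rather than by calling the DMA API directly, e.g.
 *
 *	ret = i40e_allocate_dma_mem(hw, &ring->desc_buf, i40e_mem_atq_ring,
 *				    size, I40E_ADMINQ_DESC_ALIGNMENT);
 */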
#endif /* _I40E_ALLOC_H_ */

File diff suppressed because it is too large


@ -0,0 +1,472 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include "i40e_dcb.h"
/**
* i40e_get_dcbx_status
* @hw: pointer to the hw struct
* @status: Embedded DCBX Engine Status
*
* Get the DCBX status from the Firmware
**/
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
{
u32 reg;
if (!status)
return I40E_ERR_PARAM;
reg = rd32(hw, I40E_PRTDCB_GENS);
*status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
return 0;
}
/**
* i40e_parse_ieee_etscfg_tlv
* @tlv: IEEE 802.1Qaz ETS CFG TLV
* @dcbcfg: Local store to update ETS CFG data
*
* Parses IEEE 802.1Qaz ETS CFG TLV
**/
static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
struct i40e_ieee_ets_config *etscfg;
u8 *buf = tlv->tlvinfo;
u16 offset = 0;
u8 priority;
int i;
/* First Octet post subtype
* --------------------------
* |will-|CBS | Re- | Max |
* |ing | |served| TCs |
* --------------------------
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
I40E_IEEE_ETS_WILLING_SHIFT);
etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
I40E_IEEE_ETS_CBS_SHIFT);
etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
I40E_IEEE_ETS_MAXTC_SHIFT);
/* Move offset to Priority Assignment Table */
offset++;
/* Priority Assignment Table (4 octets)
* Octets:| 1 | 2 | 3 | 4 |
* -----------------------------------------
* |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
* -----------------------------------------
* Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
I40E_IEEE_ETS_PRIO_1_SHIFT);
etscfg->prioritytable[i * 2] = priority;
priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
I40E_IEEE_ETS_PRIO_0_SHIFT);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
/* TC Bandwidth Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* ---------------------------------
* |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
* ---------------------------------
*/
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
etscfg->tcbwtable[i] = buf[offset++];
/* TSA Assignment Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* ---------------------------------
* |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
* ---------------------------------
*/
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
etscfg->tsatable[i] = buf[offset++];
}
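/* Worked example of the priority-table decode above (illustrative values
* only, not from real hardware): if the four table octets buf[1]..buf[4]
* read 0x01 0x23 0x45 0x67, each octet yields two 3-bit TC assignments,
* high nibble first, so prioritytable[] becomes {0, 1, 2, 3, 4, 5, 6, 7},
* i.e. priority N is assigned to traffic class N in this example.
*/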
/**
* i40e_parse_ieee_etsrec_tlv
* @tlv: IEEE 802.1Qaz ETS REC TLV
* @dcbcfg: Local store to update ETS REC data
*
* Parses IEEE 802.1Qaz ETS REC TLV
**/
static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u8 *buf = tlv->tlvinfo;
u16 offset = 0;
u8 priority;
int i;
/* Move offset to priority table */
offset++;
/* Priority Assignment Table (4 octets)
* Octets:| 1 | 2 | 3 | 4 |
* -----------------------------------------
* |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
* -----------------------------------------
* Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
I40E_IEEE_ETS_PRIO_1_SHIFT);
dcbcfg->etsrec.prioritytable[i*2] = priority;
priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
I40E_IEEE_ETS_PRIO_0_SHIFT);
dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
offset++;
}
/* TC Bandwidth Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* ---------------------------------
* |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
* ---------------------------------
*/
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
/* TSA Assignment Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* ---------------------------------
* |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
* ---------------------------------
*/
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
dcbcfg->etsrec.tsatable[i] = buf[offset++];
}
/**
* i40e_parse_ieee_pfccfg_tlv
* @tlv: IEEE 802.1Qaz PFC CFG TLV
* @dcbcfg: Local store to update PFC CFG data
*
* Parses IEEE 802.1Qaz PFC CFG TLV
**/
static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u8 *buf = tlv->tlvinfo;
/* ----------------------------------------
* |will-|MBC | Re- | PFC | PFC Enable |
* |ing | |served| cap | |
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
I40E_IEEE_PFC_WILLING_SHIFT);
dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
I40E_IEEE_PFC_MBC_SHIFT);
dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
I40E_IEEE_PFC_CAP_SHIFT);
dcbcfg->pfc.pfcenable = buf[1];
}
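/* Worked example of the PFC decode above (illustrative values only): if the
* two PFC octets read 0x88 0x0F, then buf[0] = 0x88 (binary 1000 1000) gives
* willing = 1 (bit 7), mbc = 0 (bit 6) and pfccap = 8 (bits 3:0), while
* buf[1] = 0x0F enables PFC on priorities 0-3 (one enable bit per priority).
*/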
/**
* i40e_parse_ieee_app_tlv
* @tlv: IEEE 802.1Qaz APP TLV
* @dcbcfg: Local store to update APP PRIO data
*
* Parses IEEE 802.1Qaz APP PRIO TLV
**/
static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u16 typelength;
u16 offset = 0;
u16 length;
int i = 0;
u8 *buf;
typelength = ntohs(tlv->typelength);
length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
buf = tlv->tlvinfo;
/* The App priority table starts 5 octets after TLV header */
length -= (sizeof(tlv->ouisubtype) + 1);
/* Move offset to App Priority Table */
offset++;
/* Application Priority Table (3 octets)
* Octets:| 1 | 2 | 3 |
* -----------------------------------------
* |Priority|Rsrvd| Sel | Protocol ID |
* -----------------------------------------
* Bits:|23 21|20 19|18 16|15 0|
* -----------------------------------------
*/
while (offset < length) {
dcbcfg->app[i].priority = (u8)((buf[offset] &
I40E_IEEE_APP_PRIO_MASK) >>
I40E_IEEE_APP_PRIO_SHIFT);
dcbcfg->app[i].selector = (u8)((buf[offset] &
I40E_IEEE_APP_SEL_MASK) >>
I40E_IEEE_APP_SEL_SHIFT);
dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
offset += 3;
i++;
if (i >= I40E_DCBX_MAX_APPS)
break;
}
dcbcfg->numapps = i;
}
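/* Worked example of one APP entry (hypothetical values, for illustration
* only): a 3-octet entry of 0x69 0x89 0x06 decodes as
* priority = (0x69 & 0xE0) >> 5 = 3, selector = 0x69 & 0x7 = 1 (Ethertype),
* protocolid = (0x89 << 8) | 0x06 = 0x8906, the FCoE Ethertype.
*/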
/**
* i40e_parse_ieee_tlv
* @tlv: IEEE 802.1Qaz TLV
* @dcbcfg: Local store to update the DCBX configuration data
*
* Get the TLV subtype and send it to parsing function
* based on the subtype value
**/
static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u32 ouisubtype;
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
I40E_LLDP_TLV_SUBTYPE_SHIFT);
switch (subtype) {
case I40E_IEEE_SUBTYPE_ETS_CFG:
i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
break;
case I40E_IEEE_SUBTYPE_ETS_REC:
i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
break;
case I40E_IEEE_SUBTYPE_PFC_CFG:
i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
break;
case I40E_IEEE_SUBTYPE_APP_PRI:
i40e_parse_ieee_app_tlv(tlv, dcbcfg);
break;
default:
break;
}
}
/**
* i40e_parse_org_tlv
* @tlv: Organization specific TLV
* @dcbcfg: Local store to update the DCBX configuration data
*
* Currently only the IEEE 802.1Qaz TLV is supported; all other
* organizationally specific TLVs are ignored
**/
static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u32 ouisubtype;
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
I40E_LLDP_TLV_OUI_SHIFT);
switch (oui) {
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
break;
default:
break;
}
}
/**
* i40e_lldp_to_dcb_config
* @lldpmib: LLDPDU to be parsed
* @dcbcfg: store for LLDPDU data
*
* Parse DCB configuration from the LLDPDU
**/
i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
struct i40e_dcbx_config *dcbcfg)
{
i40e_status ret = 0;
struct i40e_lldp_org_tlv *tlv;
u16 type;
u16 length;
u16 typelength;
u16 offset = 0;
if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
tlv = (struct i40e_lldp_org_tlv *)lldpmib;
while (1) {
typelength = ntohs(tlv->typelength);
type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
I40E_LLDP_TLV_TYPE_SHIFT);
length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
offset += sizeof(typelength) + length;
/* END TLV or beyond LLDPDU size */
if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
break;
switch (type) {
case I40E_TLV_TYPE_ORG:
i40e_parse_org_tlv(tlv, dcbcfg);
break;
default:
break;
}
/* Move to next TLV */
tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
sizeof(tlv->typelength) +
length);
}
return ret;
}
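/* Worked example of the TLV walk above (illustrative values only): a
* typelength word of 0xFE18 gives type = 0xFE18 >> 9 = 127 (organization
* specific) and length = 0xFE18 & 0x1FF = 24 octets, so the next TLV header
* starts 2 + 24 bytes further on. A typelength of 0x0000 is the END TLV and
* terminates the walk.
*/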
/**
* i40e_aq_get_dcb_config
* @hw: pointer to the hw struct
* @mib_type: mib type for the query
* @bridgetype: bridge type for the query (remote)
* @dcbcfg: store for LLDPDU data
*
* Query DCB configuration from the Firmware
**/
i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg)
{
i40e_status ret = 0;
struct i40e_virt_mem mem;
u8 *lldpmib;
/* Allocate the LLDPDU */
ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
if (ret)
return ret;
lldpmib = (u8 *)mem.va;
ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
(void *)lldpmib, I40E_LLDPDU_SIZE,
NULL, NULL, NULL);
if (ret)
goto free_mem;
/* Parse LLDP MIB to get dcb configuration */
ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
free_mem:
i40e_free_virt_mem(hw, &mem);
return ret;
}
/**
* i40e_get_dcb_config
* @hw: pointer to the hw struct
*
* Get DCB configuration from the Firmware
**/
i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
{
i40e_status ret = 0;
/* Get Local DCB Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
&hw->local_dcbx_config);
if (ret)
goto out;
/* Get Remote DCB Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
out:
return ret;
}
/**
* i40e_init_dcb
* @hw: pointer to the hw struct
*
* Update DCB configuration from the Firmware
**/
i40e_status i40e_init_dcb(struct i40e_hw *hw)
{
i40e_status ret = 0;
if (!hw->func_caps.dcb)
return ret;
/* Get DCBX status */
ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
if (ret)
return ret;
/* Check the DCBX Status */
switch (hw->dcbx_status) {
case I40E_DCBX_STATUS_DONE:
case I40E_DCBX_STATUS_IN_PROGRESS:
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
break;
case I40E_DCBX_STATUS_DISABLED:
return ret;
case I40E_DCBX_STATUS_NOT_STARTED:
case I40E_DCBX_STATUS_MULTIPLE_PEERS:
default:
break;
}
/* Configure the LLDP MIB change event */
ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
if (ret)
return ret;
return ret;
}

View file

@ -0,0 +1,107 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
#include "i40e_type.h"
#define I40E_DCBX_STATUS_NOT_STARTED 0
#define I40E_DCBX_STATUS_IN_PROGRESS 1
#define I40E_DCBX_STATUS_DONE 2
#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
#define I40E_DCBX_STATUS_DISABLED 7
#define I40E_TLV_TYPE_END 0
#define I40E_TLV_TYPE_ORG 127
#define I40E_IEEE_8021QAZ_OUI 0x0080C2
#define I40E_IEEE_SUBTYPE_ETS_CFG 9
#define I40E_IEEE_SUBTYPE_ETS_REC 10
#define I40E_IEEE_SUBTYPE_PFC_CFG 11
#define I40E_IEEE_SUBTYPE_APP_PRI 12
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
#define I40E_LLDP_TLV_TYPE_SHIFT 9
#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6
#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7
#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
/* Defines for IEEE TSA types */
#define I40E_IEEE_TSA_STRICT 0
#define I40E_IEEE_TSA_ETS 2
/* Defines for IEEE PFC TLV */
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7
#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0
#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
#define I40E_IEEE_APP_PRIO_SHIFT 5
#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
#pragma pack(1)
/* IEEE 802.1AB LLDP Organization specific TLV */
struct i40e_lldp_org_tlv {
__be16 typelength;
__be32 ouisubtype;
u8 tlvinfo[1];
};
#pragma pack()
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
u16 *status);
i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
i40e_status i40e_init_dcb(struct i40e_hw *hw);
#endif /* _I40E_DCB_H_ */

View file

@ -0,0 +1,316 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifdef CONFIG_I40E_DCB
#include "i40e.h"
#include <net/dcbnl.h>
/**
* i40e_get_pfc_delay - retrieve PFC Link Delay
* @hw: pointer to hardware struct
* @delay: holds the PFC Link delay value
*
* Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
**/
static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
{
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
*delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
/**
* i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
* @netdev: the corresponding netdev
* @ets: structure to hold the ETS information
*
* Returns local IEEE ETS configuration
**/
static int i40e_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
dcbxcfg = &hw->local_dcbx_config;
ets->willing = dcbxcfg->etscfg.willing;
ets->ets_cap = dcbxcfg->etscfg.maxtcs;
ets->cbs = dcbxcfg->etscfg.cbs;
memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
sizeof(ets->tc_rx_bw));
memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
sizeof(ets->prio_tc));
memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
sizeof(ets->tc_reco_bw));
memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
sizeof(ets->tc_reco_tsa));
memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable,
sizeof(ets->reco_prio_tc));
return 0;
}
/**
* i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
* @netdev: the corresponding netdev
* @pfc: structure to hold the PFC information
*
* Returns local IEEE PFC configuration
**/
static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
int i;
if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
dcbxcfg = &hw->local_dcbx_config;
pfc->pfc_cap = dcbxcfg->pfc.pfccap;
pfc->pfc_en = dcbxcfg->pfc.pfcenable;
pfc->mbc = dcbxcfg->pfc.mbc;
i40e_get_pfc_delay(hw, &pfc->delay);
/* Get Requests/Indications */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
pfc->requests[i] = pf->stats.priority_xoff_tx[i];
pfc->indications[i] = pf->stats.priority_xoff_rx[i];
}
return 0;
}
/**
* i40e_dcbnl_getdcbx - retrieve current DCBx capability
* @netdev: the corresponding netdev
*
* Returns DCBx capability features
**/
static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
return pf->dcbx_cap;
}
/**
* i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
* @netdev: the corresponding netdev
*
* Returns the SAN MAC address used for LLDP exchange
**/
static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
u8 *perm_addr)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
for (i = 0; i < dev->addr_len; i++)
perm_addr[i] = pf->hw.mac.perm_addr[i];
for (j = 0; j < dev->addr_len; j++, i++)
perm_addr[i] = pf->hw.mac.san_addr[j];
}
static const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getets = i40e_dcbnl_ieee_getets,
.ieee_getpfc = i40e_dcbnl_ieee_getpfc,
.getdcbx = i40e_dcbnl_getdcbx,
.getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
};
/**
* i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
* @vsi: the corresponding vsi
*
* Set up all the IEEE APPs in the DCBNL App Table and generate an
* event for the other settings
**/
void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
{
struct net_device *dev = vsi->netdev;
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
struct dcb_app sapp;
u8 prio, tc_map;
int i;
/* DCB not enabled */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return;
dcbxcfg = &hw->local_dcbx_config;
/* Set up all the App TLVs if DCBx is negotiated */
for (i = 0; i < dcbxcfg->numapps; i++) {
prio = dcbxcfg->app[i].priority;
tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
/* Add APP only if the TC is enabled for this VSI */
if (tc_map & vsi->tc_config.enabled_tc) {
sapp.selector = dcbxcfg->app[i].selector;
sapp.protocol = dcbxcfg->app[i].protocolid;
sapp.priority = prio;
dcb_ieee_setapp(dev, &sapp);
}
}
/* Notify user-space of the changes */
dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
}
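/* Example of the tc_map check above (hypothetical configuration): if an APP
* entry has priority 3 and the ETS priority table maps priority 3 to TC 1,
* then tc_map = (1 << 1) = 0x2, and the APP is registered via
* dcb_ieee_setapp() only if bit 1 is also set in vsi->tc_config.enabled_tc.
*/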
/**
* i40e_dcbnl_vsi_del_app - Delete APP for given VSI
* @vsi: the corresponding vsi
* @app: APP to delete
*
* Delete given APP from the DCBNL APP table for given
* VSI
**/
static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
struct i40e_ieee_app_priority_table *app)
{
struct net_device *dev = vsi->netdev;
struct dcb_app sapp;
if (!dev)
return -EINVAL;
sapp.selector = app->selector;
sapp.protocol = app->protocolid;
sapp.priority = app->priority;
return dcb_ieee_delapp(dev, &sapp);
}
/**
* i40e_dcbnl_del_app - Delete APP on all VSIs
* @pf: the corresponding pf
* @app: APP to delete
*
* Delete given APP from all the VSIs for given PF
**/
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_ieee_app_priority_table *app)
{
int v, err;
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
if (err)
dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
__func__, pf->vsi[v]->seid,
err, app->selector,
app->protocolid, app->priority);
}
}
}
/**
* i40e_dcbnl_find_app - Search APP in given DCB config
* @cfg: DCBX configuration data
* @app: APP to search for
*
* Find given APP in the DCB configuration
**/
static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
struct i40e_ieee_app_priority_table *app)
{
int i;
for (i = 0; i < cfg->numapps; i++) {
if (app->selector == cfg->app[i].selector &&
app->protocolid == cfg->app[i].protocolid &&
app->priority == cfg->app[i].priority)
return true;
}
return false;
}
/**
* i40e_dcbnl_flush_apps - Delete all removed APPs
* @pf: the corresponding pf
* @new_cfg: new DCBX configuration data
*
* Find and delete all APPs that are not present in the passed
* DCB configuration
**/
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *new_cfg)
{
struct i40e_ieee_app_priority_table app;
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
int i;
dcbxcfg = &hw->local_dcbx_config;
for (i = 0; i < dcbxcfg->numapps; i++) {
app = dcbxcfg->app[i];
/* The APP is no longer available; delete it */
if (!i40e_dcbnl_find_app(new_cfg, &app))
i40e_dcbnl_del_app(pf, &app);
}
}
/**
* i40e_dcbnl_setup - DCBNL setup
* @vsi: the corresponding vsi
*
* Set up DCBNL ops and initial APP TLVs
**/
void i40e_dcbnl_setup(struct i40e_vsi *vsi)
{
struct net_device *dev = vsi->netdev;
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* Not DCB capable */
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;
/* Do not setup DCB NL ops for MFP mode */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
dev->dcbnl_ops = &dcbnl_ops;
/* Set initial IEEE DCB settings */
i40e_dcbnl_set_all(vsi);
}
#endif /* CONFIG_I40E_DCB */

File diff suppressed because it is too large

View file

@ -0,0 +1,154 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_diag.h"
#include "i40e_prototype.h"
/**
* i40e_diag_reg_pattern_test
* @hw: pointer to the hw struct
* @reg: reg to be tested
* @mask: bits to be touched
**/
static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
u32 reg, u32 mask)
{
const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
u32 pat, val, orig_val;
int i;
orig_val = rd32(hw, reg);
for (i = 0; i < ARRAY_SIZE(patterns); i++) {
pat = patterns[i];
wr32(hw, reg, (pat & mask));
val = rd32(hw, reg);
if ((val & mask) != (pat & mask)) {
i40e_debug(hw, I40E_DEBUG_DIAG,
"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
__func__, reg, pat, val);
return I40E_ERR_DIAG_TEST_FAILED;
}
}
wr32(hw, reg, orig_val);
val = rd32(hw, reg);
if (val != orig_val) {
i40e_debug(hw, I40E_DEBUG_DIAG,
"%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
__func__, reg, orig_val, val);
return I40E_ERR_DIAG_TEST_FAILED;
}
return 0;
}
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1,
I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
{I40E_PFINT_ITR0(0), 0x00000FFF, 3,
I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
{I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
{I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
{I40E_QINT_TQCTL(0), 0x000000FF, 1,
I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
{I40E_QINT_RQCTL(0), 0x000000FF, 1,
I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
{I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
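/* How the table above is consumed (see i40e_diag_reg_test below): each row
* is walked as reg = offset + j * stride for j < elements. For example the
* I40E_PFINT_ITR0 row tests three registers spaced one ITR0 register apart,
* while rows with stride 0 test a single register.
*/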
/**
* i40e_diag_reg_test
* @hw: pointer to the hw struct
*
* Perform the register diagnostic test
**/
i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u32 reg, mask;
u32 i, j;
for (i = 0; i40e_reg_list[i].offset != 0 &&
!ret_code; i++) {
/* set actual reg range for dynamically allocated resources */
if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
hw->func_caps.num_tx_qp != 0)
i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
hw->func_caps.num_msix_vectors != 0)
i40e_reg_list[i].elements =
hw->func_caps.num_msix_vectors - 1;
/* test register access */
mask = i40e_reg_list[i].mask;
for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
}
}
return ret_code;
}
/**
* i40e_diag_eeprom_test
* @hw: pointer to the hw struct
*
* Perform EEPROM diagnostic test
**/
i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
{
i40e_status ret_code;
u16 reg_val;
/* read NVM control word and if NVM valid, validate EEPROM checksum */
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
} else {
ret_code = I40E_ERR_DIAG_TEST_FAILED;
}
return ret_code;
}

View file

@ -0,0 +1,51 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
#include "i40e_type.h"
enum i40e_lb_mode {
I40E_LB_MODE_NONE = 0x0,
I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
};
struct i40e_diag_reg_test_info {
u32 offset; /* the base register */
u32 mask; /* bits that can be tested */
u32 elements; /* number of elements if array */
u32 stride; /* bytes between each element */
};
extern struct i40e_diag_reg_test_info i40e_reg_list[];
i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
#endif /* _I40E_DIAG_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,128 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_FCOE_H_
#define _I40E_FCOE_H_
/* FCoE HW context helper macros */
#define I40E_DDP_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i]))
#define I40E_QUEUE_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i]))
#define I40E_FILTER_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
/* receive queue descriptor filter status for FCoE */
#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3
#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */
#define I40E_RX_DESC_FLTSTAT_NODDP 0x1 /* no ddp due to error */
#define I40E_RX_DESC_FLTSTAT_DDP 0x2 /* DDPed payload, post header */
#define I40E_RX_DESC_FLTSTAT_FCPRSP 0x3 /* FCP_RSP */
/* receive queue descriptor error codes for FCoE */
#define I40E_RX_DESC_FCOE_ERROR_MASK \
(I40E_RX_DESC_ERROR_L3L4E_PROT | \
I40E_RX_DESC_ERROR_L3L4E_FC | \
I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR | \
I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN)
/* receive queue descriptor programming error */
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) \
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) \
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT \
I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT
/* FCoE DDP related definitions */
#define I40E_FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define I40E_FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
#define I40E_FCOE_DDP_BUFFCNT_MAX 512 /* 9 bits bufcnt */
#define I40E_FCOE_DDP_PTR_ALIGN 16
#define I40E_FCOE_DDP_PTR_MAX (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t))
#define I40E_FCOE_DDP_BUF_MIN 4096
#define I40E_FCOE_DDP_MAX 2048
#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
/* supported netdev features for FCoE */
#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_FILTER)
/* DDP context flags */
enum i40e_fcoe_ddp_flags {
__I40E_FCOE_DDP_NONE = 1,
__I40E_FCOE_DDP_TARGET,
__I40E_FCOE_DDP_INITALIZED,
__I40E_FCOE_DDP_PROGRAMMED,
__I40E_FCOE_DDP_DONE,
__I40E_FCOE_DDP_ABORTED,
__I40E_FCOE_DDP_UNMAPPED,
};
/* DDP SW context struct */
struct i40e_fcoe_ddp {
int len;
u16 xid;
u16 firstoff;
u16 lastsize;
u16 list_len;
u8 fcerr;
u8 prerr;
unsigned long flags;
unsigned int sgc;
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
struct dma_pool *pool;
};
struct i40e_fcoe_ddp_pool {
struct dma_pool *pool;
};
struct i40e_fcoe {
unsigned long mode;
atomic_t refcnt;
struct i40e_fcoe_ddp_pool __percpu *ddp_pool;
struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX];
};
#endif /* _I40E_FCOE_H_ */

View file

@ -0,0 +1,360 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"
/**
* i40e_add_sd_table_entry - Adds a segment descriptor to the table
* @hw: pointer to our hw struct
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: segment descriptor index to manipulate
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
**/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 sd_index,
enum i40e_sd_entry_type type,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
i40e_status ret_code = 0;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
ret_code = I40E_ERR_BAD_PTR;
hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
goto exit;
}
if (sd_index >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_SD_INDEX;
hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
goto exit;
}
sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
if (!sd_entry->valid) {
if (I40E_SD_TYPE_PAGED == type) {
mem_type = i40e_mem_pd;
alloc_len = I40E_HMC_PAGED_BP_SIZE;
} else {
mem_type = i40e_mem_bp_jumbo;
alloc_len = direct_mode_sz;
}
/* allocate a 4K pd page or 2M backing page */
ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
I40E_HMC_PD_BP_BUF_ALIGNMENT);
if (ret_code)
goto exit;
dma_mem_alloc_done = true;
if (I40E_SD_TYPE_PAGED == type) {
ret_code = i40e_allocate_virt_mem(hw,
&sd_entry->u.pd_table.pd_entry_virt_mem,
sizeof(struct i40e_hmc_pd_entry) * 512);
if (ret_code)
goto exit;
sd_entry->u.pd_table.pd_entry =
(struct i40e_hmc_pd_entry *)
sd_entry->u.pd_table.pd_entry_virt_mem.va;
sd_entry->u.pd_table.pd_page_addr = mem;
} else {
sd_entry->u.bp.addr = mem;
sd_entry->u.bp.sd_pd_index = sd_index;
}
/* initialize the sd entry */
hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
/* increment the ref count */
I40E_INC_SD_REFCNT(&hmc_info->sd_table);
}
/* Increment backing page reference count */
if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
if (ret_code)
if (dma_mem_alloc_done)
i40e_free_dma_mem(hw, &mem);
return ret_code;
}
/**
* i40e_add_pd_table_entry - Adds page descriptor to the specified table
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
*
* This function:
* 1. Initializes the pd entry
* 2. Adds pd_entry in the pd_table
* 3. Marks the entry valid in the i40e_hmc_pd_entry structure
* 4. Initializes the pd_entry's ref count to 1
* assumptions:
* 1. The memory for the pd should be pinned down, physically contiguous,
* aligned on a 4K boundary and zeroed.
* 2. It should be 4K in size.
**/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 pd_index)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
goto exit;
}
/* find corresponding sd */
sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
if (I40E_SD_TYPE_PAGED !=
hmc_info->sd_table.sd_entry[sd_idx].entry_type)
goto exit;
rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
/* allocate a 4K backing page */
ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
I40E_HMC_PAGED_BP_SIZE,
I40E_HMC_PD_BP_BUF_ALIGNMENT);
if (ret_code)
goto exit;
pd_entry->bp.addr = mem;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
page_desc = mem.pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
/* Add the backing page physical address in the pd entry */
memcpy(pd_addr, &page_desc, sizeof(u64));
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
I40E_INC_PD_REFCNT(pd_table);
}
I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
return ret_code;
}
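/* Note on the page descriptor written above: the backing page is allocated
* with 4 KB alignment, so the low 12 bits of mem.pa are zero and OR-ing in
* 0x1 only sets the descriptor's valid bit. For example (hypothetical
* address) a page at 0x1f2e3000 is stored as the descriptor value
* 0x1f2e3001.
*/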
/**
* i40e_remove_pd_bp - remove a backing page from a page descriptor
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*
* This function:
* 1. Marks the entry in the pd table (for paged address mode) or in the
* sd table (for direct address mode) as invalid.
* 2. Writes to the PMPDINV register to invalidate the backing page in the
* FV cache.
* 3. Decrements the ref count for the pd_entry.
* assumptions:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
**/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
/* calculate index */
sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
if (sd_idx >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
goto exit;
}
sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
ret_code = I40E_ERR_INVALID_SD_TYPE;
hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
goto exit;
}
/* get the entry and decrease its ref counter */
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
I40E_DEC_BP_REFCNT(&pd_entry->bp);
if (pd_entry->bp.ref_cnt)
goto exit;
/* mark the entry invalid */
pd_entry->valid = false;
I40E_DEC_PD_REFCNT(pd_table);
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
if (ret_code)
goto exit;
if (!pd_table->ref_cnt)
i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
return ret_code;
}
/**
* i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
**/
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
if (sd_entry->u.bp.ref_cnt) {
ret_code = I40E_ERR_NOT_READY;
goto exit;
}
I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
/* mark the entry invalid */
sd_entry->valid = false;
exit:
return ret_code;
}
/**
* i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
* @hw: pointer to our hw struct
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
* @is_pf: used to distinguish between VF and PF
**/
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
i40e_status ret_code = 0;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (is_pf) {
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
} else {
ret_code = I40E_NOT_SUPPORTED;
goto exit;
}
ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
if (ret_code)
goto exit;
exit:
return ret_code;
}
/**
* i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
**/
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (sd_entry->u.pd_table.ref_cnt) {
ret_code = I40E_ERR_NOT_READY;
goto exit;
}
/* mark the entry invalid */
sd_entry->valid = false;
I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
return ret_code;
}
/**
* i40e_remove_pd_page_new - Removes a PD page from sd entry.
* @hw: pointer to our hw struct
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
* @is_pf: used to distinguish between VF and PF
**/
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (is_pf) {
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
} else {
ret_code = I40E_NOT_SUPPORTED;
goto exit;
}
/* free memory here */
ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
if (ret_code)
goto exit;
exit:
return ret_code;
}

View file

@ -0,0 +1,236 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
#define I40E_HMC_MAX_BP_COUNT 512
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define I40E_HMC_PD_CNT_IN_SD 512
#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define I40E_HMC_PAGED_BP_SIZE 4096
#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
#define I40E_FIRST_VF_FPM_ID 16
struct i40e_hmc_obj_info {
u64 base; /* base addr in FPM */
u32 max_cnt; /* max count available for this hmc func */
u32 cnt; /* count of objects driver actually wants to create */
u64 size; /* size in bytes of one object */
};
enum i40e_sd_entry_type {
I40E_SD_TYPE_INVALID = 0,
I40E_SD_TYPE_PAGED = 1,
I40E_SD_TYPE_DIRECT = 2
};
struct i40e_hmc_bp {
enum i40e_sd_entry_type entry_type;
struct i40e_dma_mem addr; /* populate to be used by hw */
u32 sd_pd_index;
u32 ref_cnt;
};
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
bool valid;
};
struct i40e_hmc_pd_table {
struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
u32 ref_cnt;
u32 sd_index;
};
struct i40e_hmc_sd_entry {
enum i40e_sd_entry_type entry_type;
bool valid;
union {
struct i40e_hmc_pd_table pd_table;
struct i40e_hmc_bp bp;
} u;
};
struct i40e_hmc_sd_table {
struct i40e_virt_mem addr; /* used to track sd_entry allocations */
u32 sd_cnt;
u32 ref_cnt;
struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
};
struct i40e_hmc_info {
u32 signature;
/* equals the PCI function number for the PF and is dynamically allocated for VFs */
u8 hmc_fn_id;
u16 first_sd_index; /* index of the first available SD */
/* hmc objects */
struct i40e_hmc_obj_info *hmc_obj;
struct i40e_virt_mem hmc_obj_virt_mem;
struct i40e_hmc_sd_table sd_table;
};
#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
{ \
u32 val1, val2, val3; \
val1 = (u32)(upper_32_bits(pa)); \
val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
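/* Note on the macro above: the 64-bit physical address is split across two
* registers - upper_32_bits(pa) goes to SDDATAHIGH and the low 32 bits to
* SDDATALOW. The backing memory is allocated with 4 KB alignment
* (I40E_HMC_PD_BP_BUF_ALIGNMENT), so the PMSDBPCOUNT/PMSDTYPE/PMSDVALID
* fields OR-ed into the low bits do not overlap the address bits.
*/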
/**
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
{ \
u32 val2, val3; \
val2 = (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @index: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
{ \
u64 fpm_addr, fpm_limit; \
fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (index); \
fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(sd_limit) += 1; \
}
/**
* I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_index: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
{ \
u64 fpm_adr, fpm_limit; \
fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (idx); \
fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
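/* Worked example of the two helpers above (hypothetical object layout): for
* an object with base 0, size 128 bytes, starting index 0 and cnt 40000,
* the span is 5,120,000 bytes, so
* sd_idx = 0 and sd_limit = (5119999 / 2M) + 1 = 3 (SDs 0..2 are used),
* pd_index = 0 and pd_limit = (5119999 / 4K) + 1 = 1250 backing pages.
*/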
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 sd_index,
enum i40e_sd_entry_type type,
u64 direct_mode_sz);
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,181 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
/* HMC element context information */
/* Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_rxq {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
u16 dbuff; /* bigger than needed, see above for reason */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
u16 hbuff; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 fc_ena;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
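/* Note on the shift values above (usage assumed from the shift defines):
* dbuff and hbuff carry buffer sizes in 128-byte and 64-byte units
* respectively. For example (hypothetical sizes) a 2048-byte data buffer
* would be programmed as dbuff = 2048 >> 7 = 16 and a 256-byte header
* buffer as hbuff = 256 >> 6 = 4.
*/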
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_txq {
u16 head;
u8 new_context;
u64 base;
u8 fc_ena;
u8 timesync_ena;
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u8 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
u8 tphrpacket_ena;
u8 tphwdesc_ena;
u64 head_wb_addr;
u32 crc;
u16 rdylist;
u8 rdylist_act;
};
/* for hsplit_0 field of Rx HMC context */
enum i40e_hmc_obj_rx_hsplit_0 {
I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
};
/* fcoe_cntx and fcoe_filt are for debugging purpose only */
struct i40e_hmc_obj_fcoe_cntx {
u32 rsv[32];
};
struct i40e_hmc_obj_fcoe_filt {
u32 rsv[8];
};
/* Context sizes for LAN objects */
enum i40e_hmc_lan_object_size {
I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
};
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
I40E_HMC_LAN_TX = 1,
I40E_HMC_LAN_RX = 2,
I40E_HMC_FCOE_CTX = 3,
I40E_HMC_FCOE_FILT = 4,
I40E_HMC_LAN_MAX = 5
};
enum i40e_hmc_model {
I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
I40E_HMC_MODEL_DIRECT_ONLY = 1,
I40E_HMC_MODEL_PAGED_ONLY = 2,
I40E_HMC_MODEL_UNKNOWN,
};
struct i40e_hmc_lan_create_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
enum i40e_sd_entry_type entry_type;
u64 direct_mode_sz;
};
struct i40e_hmc_lan_delete_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
};
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
u32 rxq_num, u32 fcoe_cntx_num,
u32 fcoe_filt_num);
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
enum i40e_hmc_model model);
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_txq *s);
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_rxq *s);
#endif /* _I40E_LAN_HMC_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,859 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_prototype.h"
/**
* i40e_init_nvm - Initialize the NVM info structure
* @hw: pointer to the HW structure
*
* Set up the NVM info structure. Should be called once per NVM
* initialization, e.g. inside i40e_init_shared_code().
* Note that the term NVM is used here (and in all functions covered in this
* file) as an equivalent of the flash part mapped into the Shadow RAM (SR);
* the flash is always accessed through the Shadow RAM.
**/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
struct i40e_nvm_info *nvm = &hw->nvm;
i40e_status ret_code = 0;
u32 fla, gens;
u8 sr_size;
/* The SR size is stored regardless of the nvm programming mode
* as the blank mode may be used in the factory line.
*/
gens = rd32(hw, I40E_GLNVM_GENS);
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Convert to words (sr_size holds the Shadow RAM size as a power of two, in KB) */
nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
/* Max NVM timeout */
nvm->timeout = I40E_MAX_NVM_TIMEOUT;
nvm->blank_nvm_mode = false;
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
}
return ret_code;
}
/**
* i40e_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure
* @access: NVM access type (read or write)
*
* This function will request NVM ownership, for reading or writing as
* indicated by @access, via the proper Admin Command.
**/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access)
{
i40e_status ret_code = 0;
u64 gtime, timeout;
u64 time = 0;
if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
0, &time, NULL);
/* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
/* Store the timeout */
hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
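/* If the request failed, the 'time' value returned by the AQ indicates
* how long (in ms) the current owner may still hold the resource, so the
* code below keeps retrying until ownership is granted or that window
* (capped at I40E_MAX_NVM_TIMEOUT) expires.
*/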
if (ret_code) {
/* Set the polling timeout */
if (time > I40E_MAX_NVM_TIMEOUT)
timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ gtime;
else
timeout = hw->nvm.hw_semaphore_timeout;
/* Poll until the current NVM owner times out */
while (gtime < timeout) {
usleep_range(10000, 20000);
ret_code = i40e_aq_request_resource(hw,
I40E_NVM_RESOURCE_ID,
access, 0, &time,
NULL);
if (!ret_code) {
hw->nvm.hw_semaphore_timeout =
I40E_MS_TO_GTIME(time) + gtime;
break;
}
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
}
if (ret_code) {
hw->nvm.hw_semaphore_timeout = 0;
hw->nvm.hw_semaphore_wait =
I40E_MS_TO_GTIME(time) + gtime;
hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
time);
}
}
i40e_i40e_acquire_nvm_exit:
return ret_code;
}
/**
* i40e_release_nvm - Generic request for releasing the NVM ownership
* @hw: pointer to the HW structure
*
* This function will release NVM resource via the proper Admin Command.
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
if (!hw->nvm.blank_nvm_mode)
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}
/**
* i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
* @hw: pointer to the HW structure
*
* Polls the SRCTL Shadow RAM register done bit.
**/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
srctl = rd32(hw, I40E_GLNVM_SRCTL);
if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
ret_code = 0;
break;
}
udelay(5);
}
if (ret_code == I40E_ERR_TIMEOUT)
hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
return ret_code;
}
/**
* i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
ret_code = I40E_ERR_PARAM;
goto read_nvm_exit;
}
/* Poll the done bit first */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
/* Write the address and start reading */
sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
(1 << I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
*data = (u16)((sr_reg &
I40E_GLNVM_SRDATA_RDDATA_MASK)
>> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
}
}
if (ret_code)
hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
offset);
read_nvm_exit:
return ret_code;
}
/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR one word at a time, using
* i40e_read_nvm_word(). Taking and releasing NVM ownership around the read
* is left to the caller.
**/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code = 0;
u16 index, word;
/* Loop through the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
ret_code = i40e_read_nvm_word(hw, index, &data[word]);
if (ret_code)
break;
}
/* Update the number of words read from the Shadow RAM */
*words = word;
return ret_code;
}
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
* @words: number of words to write
* @data: buffer with words to write to the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
**/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 words, void *data,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
* Firmware will check the module-based model.
*/
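/* Worked example of the sector check below: with a 4KB sector
* (I40E_SR_SECTOR_SIZE_IN_WORDS == 2048 words), offset == 2040 and
* words == 16 would end at word 2055, and (2040 + 15) / 2048 == 1 while
* 2040 / 2048 == 0, so the request spans two sectors and is rejected.
*/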
if ((offset + words) > hw->nvm.sr_size)
hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can write only up to 4KB (one sector), in one AQ write */
hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single write cannot spread over two sectors */
hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
else
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
data, last_command, NULL);
return ret_code;
}
/**
* i40e_calc_nvm_checksum - Calculates and returns the checksum
* @hw: pointer to hardware structure
* @checksum: pointer to the checksum
*
* This function calculates the SW checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules. The structure and size of the
* VPD area are customer specific and unknown, so this function skips the
* maximum possible size of the VPD area (1kB).
**/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
i40e_status ret_code = 0;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
u16 word = 0;
u32 i = 0;
/* read pointer to VPD area */
ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* Calculate SW checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules
*/
for (i = 0; i < hw->nvm.sr_size; i++) {
/* Skip Checksum word */
if (i == I40E_SR_SW_CHECKSUM_WORD)
i++;
/* Skip VPD module (convert byte size to word count) */
if (i == (u32)vpd_module) {
i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
if (i >= hw->nvm.sr_size)
break;
}
/* Skip PCIe ALT module (convert byte size to word count) */
if (i == (u32)pcie_alt_module) {
i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
if (i >= hw->nvm.sr_size)
break;
}
ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
checksum_local += word;
}
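/* The stored checksum word is chosen so that the 16-bit sum of all
* non-skipped SR words plus the checksum word equals
* I40E_SR_SW_CHECKSUM_BASE.
*/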
*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
i40e_calc_nvm_checksum_exit:
return ret_code;
}
/**
* i40e_update_nvm_checksum - Updates the NVM checksum
* @hw: pointer to hardware structure
*
* NVM ownership must be acquired before calling this function and released
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
**/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u16 checksum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
if (!ret_code)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &checksum, true);
return ret_code;
}
/**
* i40e_validate_nvm_checksum - Validate EEPROM checksum
* @hw: pointer to hardware structure
* @checksum: calculated checksum
*
* Performs checksum calculation and validates the NVM SW checksum. If the
* caller does not need the checksum value, the pointer can be NULL.
**/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
i40e_status ret_code = 0;
u16 checksum_sr = 0;
u16 checksum_local = 0;
ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
if (ret_code)
goto i40e_validate_nvm_checksum_exit;
/* Do not use i40e_read_nvm_word() because we do not want to take
* the synchronization semaphores twice here.
*/
i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (checksum_local != checksum_sr)
ret_code = I40E_ERR_NVM_CHECKSUM;
/* If the user cares, return the calculated checksum */
if (checksum)
*checksum = checksum_local;
i40e_validate_nvm_checksum_exit:
return ret_code;
}
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *errno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *errno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
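/* Summary of the NVM update state machine implemented by the handlers
* below: READ_SNT moves INIT to READING and READ_LCB moves back to INIT;
* WRITE_SNT moves INIT to WRITING and WRITE_LCB or CSUM_LCB moves back to
* INIT. The *_SA, WRITE_ERA and CSUM_SA commands are standalone: they are
* handled entirely from INIT, acquiring the NVM resource around a single
* command and releasing it either immediately or on ARQ completion.
*/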
/**
* i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* Dispatches command depending on what update state is current
**/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno)
{
i40e_status status;
/* assume success */
*errno = 0;
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
break;
case I40E_NVMUPD_STATE_READING:
status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
break;
case I40E_NVMUPD_STATE_WRITING:
status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
break;
default:
/* invalid state, should never happen */
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
}
return status;
}
/**
* i40e_nvmupd_state_init - Handle NVM update state Init
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno)
{
i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
i40e_release_nvm(hw);
}
break;
case I40E_NVMUPD_READ_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
}
break;
case I40E_NVMUPD_WRITE_ERA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
if (status)
i40e_release_nvm(hw);
else
hw->aq.nvm_release_on_done = true;
}
break;
case I40E_NVMUPD_WRITE_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
if (status)
i40e_release_nvm(hw);
else
hw->aq.nvm_release_on_done = true;
}
break;
case I40E_NVMUPD_WRITE_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
}
break;
case I40E_NVMUPD_CSUM_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
*errno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
-EIO;
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = true;
}
}
break;
default:
status = I40E_ERR_NVM;
*errno = -ESRCH;
break;
}
return status;
}
/**
* i40e_nvmupd_state_reading - Handle NVM update state Reading
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno)
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
case I40E_NVMUPD_READ_CON:
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
break;
case I40E_NVMUPD_READ_LCB:
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
i40e_release_nvm(hw);
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
default:
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
}
return status;
}
/**
* i40e_nvmupd_state_writing - Handle NVM update state Writing
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno)
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
break;
case I40E_NVMUPD_WRITE_LCB:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
if (!status) {
hw->aq.nvm_release_on_done = true;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
break;
case I40E_NVMUPD_CSUM_CON:
status = i40e_update_nvm_checksum(hw);
if (status)
*errno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
-EIO;
break;
case I40E_NVMUPD_CSUM_LCB:
status = i40e_update_nvm_checksum(hw);
if (status) {
*errno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
-EIO;
} else {
hw->aq.nvm_release_on_done = true;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
break;
default:
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
}
return status;
}
/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @errno: pointer to return error code
*
* Return one of the valid command types or I40E_NVMUPD_INVALID
**/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *errno)
{
enum i40e_nvmupd_cmd upd_cmd;
u8 transaction, module;
/* anything that doesn't match a recognized case is an error */
upd_cmd = I40E_NVMUPD_INVALID;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
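/* cmd->config packs the module pointer in its low bits
* (I40E_NVM_MOD_PNT_MASK) and the transaction type in the field selected
* by I40E_NVM_TRANS_MASK/I40E_NVM_TRANS_SHIFT; the inline helpers above
* decode the two parts.
*/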
/* limits on data size */
if ((cmd->data_size < 1) ||
(cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
cmd->data_size);
*errno = -EFAULT;
return I40E_NVMUPD_INVALID;
}
switch (cmd->command) {
case I40E_NVM_READ:
switch (transaction) {
case I40E_NVM_CON:
upd_cmd = I40E_NVMUPD_READ_CON;
break;
case I40E_NVM_SNT:
upd_cmd = I40E_NVMUPD_READ_SNT;
break;
case I40E_NVM_LCB:
upd_cmd = I40E_NVMUPD_READ_LCB;
break;
case I40E_NVM_SA:
upd_cmd = I40E_NVMUPD_READ_SA;
break;
}
break;
case I40E_NVM_WRITE:
switch (transaction) {
case I40E_NVM_CON:
upd_cmd = I40E_NVMUPD_WRITE_CON;
break;
case I40E_NVM_SNT:
upd_cmd = I40E_NVMUPD_WRITE_SNT;
break;
case I40E_NVM_LCB:
upd_cmd = I40E_NVMUPD_WRITE_LCB;
break;
case I40E_NVM_SA:
upd_cmd = I40E_NVMUPD_WRITE_SA;
break;
case I40E_NVM_ERA:
upd_cmd = I40E_NVMUPD_WRITE_ERA;
break;
case I40E_NVM_CSUM:
upd_cmd = I40E_NVMUPD_CSUM_CON;
break;
case (I40E_NVM_CSUM|I40E_NVM_SA):
upd_cmd = I40E_NVMUPD_CSUM_SA;
break;
case (I40E_NVM_CSUM|I40E_NVM_LCB):
upd_cmd = I40E_NVMUPD_CSUM_LCB;
break;
}
break;
}
if (upd_cmd == I40E_NVMUPD_INVALID) {
*errno = -EFAULT;
hw_dbg(hw,
"i40e_nvmupd_validate_command returns %d errno: %d\n",
upd_cmd, *errno);
}
return upd_cmd;
}
/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* cmd structure contains identifiers and data buffer
**/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno)
{
i40e_status status;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
module, cmd->offset, cmd->data_size);
status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
bytes, last, NULL);
hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
if (status)
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
return status;
}
/**
* i40e_nvmupd_nvm_erase - Erase an NVM module
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @errno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *errno)
{
i40e_status status = 0;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
module, cmd->offset, cmd->data_size);
status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
last, NULL);
hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
if (status)
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
return status;
}
/**
* i40e_nvmupd_nvm_write - Write NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @errno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno)
{
i40e_status status = 0;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
module, cmd->offset, cmd->data_size);
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last, NULL);
hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
if (status)
*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
return status;
}

View file

@ -0,0 +1,84 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/pci.h>
#include <linux/highuid.h>
/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <asm-generic/io-64-nonatomic-lo-hi.h>
/* This file is the glue between the shared code and the actual OS
* primitives.
*/
#define hw_dbg(hw, S, A...) do {} while (0)
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
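/* wr32()/rd32() and wr64()/rd64() are thin MMIO accessors: for example,
* rd32(hw, I40E_GLNVM_SRCTL) performs a readl() at hw->hw_addr +
* I40E_GLNVM_SRCTL. i40e_flush() issues a read solely to flush posted
* writes out to the device.
*/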
/* memory allocation tracking */
struct i40e_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
} __packed;
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40e_allocate_dma_mem_d(h, m, s, a)
#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
struct i40e_virt_mem {
void *va;
u32 size;
} __packed;
#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
#define i40e_debug(h, m, s, ...) \
do { \
if (((m) & (h)->debug_mask)) \
pr_info("i40e %02x.%x " s, \
(h)->bus.device, (h)->bus.func, \
##__VA_ARGS__); \
} while (0)
typedef enum i40e_status_code i40e_status;
#ifdef CONFIG_I40E_FCOE
#define I40E_FCOE
#endif
#endif /* _I40E_OSDEP_H_ */

View file

@ -0,0 +1,288 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
#include "i40e_type.h"
#include "i40e_alloc.h"
#include "i40e_virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
i40e_status i40e_init_adminq(struct i40e_hw *hw);
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
/* admin send queue commands */
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_aq_set_phy_config *config,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
bool atomic_reset);
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool enable_link,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
u64 advt_reg,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
bool default_port, bool enable_l2_filtering,
u16 *pveb_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
u16 *statistic_index, u16 *vebs_used,
u16 *vebs_free,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_aqc_get_switch_config_resp *buf,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
enum i40e_aq_resource_access_type access,
u8 sdp_number, u64 *timeout,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
u8 sdp_number,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
void *buff, u16 buff_size, u16 *data_size,
enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile profile,
u8 pe_vf_enabled_count,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_port_ets_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
#ifdef I40E_FCOE
i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
#endif
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
u16 *data);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40e_ptype_lookup[ptype];
}
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers */
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u8 *mac_addr, u16 ethtype, u16 flags,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */

View file

@ -0,0 +1,694 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e.h"
#include <linux/ptp_classify.h>
/* The XL710 timesync is very much like Intel's 82599 design when it comes to
* the fundamental clock design. However, the clock operations are much simpler
* in the XL710 because the device supports a full 64 bits of nanoseconds.
* Because the field is so wide, we can forgo the cycle counter and just
* operate with the nanosecond field directly without fear of overflow.
*
* Much like the 82599, the update period is dependent upon the link speed:
* At 40Gb link or no link, the period is 1.6ns.
* At 10Gb link, the period is multiplied by 2. (3.2ns)
* At 1Gb link, the period is multiplied by 20. (32ns)
* 1588 functionality is not supported at 100Mbps.
*/
#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
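/* The increment registers effectively hold nanoseconds in 32.32 fixed
* point: the values above divided by 2^32 give ~1.6ns, ~3.2ns and 32ns
* per tick, matching the periods listed in the comment at the top of
* this file.
*/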
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
/**
* i40e_ptp_read - Read the PHC time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
*
* This function reads the PRTTSYN_TIME registers and stores them in a
* timespec. However, since the registers are 64 bits of nanoseconds, we must
* convert the result to a timespec before we can return.
**/
static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
{
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
u64 ns;
/* The timer latches on the lowest register read. */
lo = rd32(hw, I40E_PRTTSYN_TIME_L);
hi = rd32(hw, I40E_PRTTSYN_TIME_H);
ns = (((u64)hi) << 32) | lo;
*ts = ns_to_timespec(ns);
}
/**
* i40e_ptp_write - Write the PHC time to the device
* @pf: Board private structure
* @ts: timespec structure that holds the new time value
*
* This function writes the PRTTSYN_TIME registers with the user value. Since
* we receive a timespec from the stack, we must convert that timespec into
* nanoseconds before programming the registers.
**/
static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
{
struct i40e_hw *hw = &pf->hw;
u64 ns = timespec_to_ns(ts);
/* The timer will not update until the high register is written, so
* write the low register first.
*/
wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
}
/**
* i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
* @hwtstamps: Timestamp structure to update
* @timestamp: Timestamp from the hardware
*
* We need to convert the NIC clock value into a hwtstamp which can be used by
* the upper level timestamping functions. Since the timestamp is simply a 64-
* bit nanosecond value, we can call ns_to_ktime directly to handle this.
**/
static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
u64 timestamp)
{
memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ns_to_ktime(timestamp);
}
/**
* i40e_ptp_adjfreq - Adjust the PHC frequency
* @ptp: The PTP clock structure
* @ppb: Parts per billion adjustment from the base
*
* Adjust the frequency of the PHC by the indicated parts per billion from the
* base frequency.
**/
static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct i40e_hw *hw = &pf->hw;
u64 adj, freq, diff;
int neg_adj = 0;
if (ppb < 0) {
neg_adj = 1;
ppb = -ppb;
}
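/* Scale the base increment by ppb parts per billion:
* new increment = base * (1 +/- ppb / 1e9). For example, with the 40Gb
* base value 0x199999999 (6871947673) and ppb == 1000, diff is
* 6871947673 * 1000 / 1000000000 ~= 6871, which shifts the ~1.6ns
* per-tick increment by roughly one part per million, as requested.
*/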
smp_mb(); /* Force any pending update before accessing. */
adj = ACCESS_ONCE(pf->ptp_base_adj);
freq = adj;
freq *= ppb;
diff = div_u64(freq, 1000000000ULL);
if (neg_adj)
adj -= diff;
else
adj += diff;
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
return 0;
}
/**
* i40e_ptp_adjtime - Adjust the PHC time
* @ptp: The PTP clock structure
* @delta: Offset in nanoseconds to adjust the PHC time by
*
* Adjust the time of the PHC by the indicated delta, in nanoseconds, from
* its current value.
**/
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec now, then = ns_to_timespec(delta);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
now = timespec_add(now, then);
i40e_ptp_write(pf, (const struct timespec *)&now);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
return 0;
}
/**
* i40e_ptp_gettime - Get the time of the PHC
* @ptp: The PTP clock structure
* @ts: timespec structure to hold the current time value
*
* Read the device clock and return the current value in ns, after converting
* it into a timespec struct.
**/
static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, ts);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
return 0;
}
/**
* i40e_ptp_settime - Set the time of the PHC
* @ptp: The PTP clock structure
* @ts: timespec structure that holds the new time value
*
* Set the device clock to the user input value. The conversion from timespec
* to ns happens in the write function.
**/
static int i40e_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_write(pf, ts);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
return 0;
}
/**
* i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
* @ptp: The PTP clock structure
* @rq: The requested feature to change
* @on: Enable/disable flag
*
* The XL710 does not support any of the ancillary features of the PHC
* subsystem, so this function simply returns -EOPNOTSUPP.
**/
static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect the error case where hardware has
* dropped an Rx packet that was timestamped while the ring was full. The
* error is rare but leaves the device in a state unable to timestamp
* any future packets.
**/
void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_ring *rx_ring;
unsigned long rx_event;
u32 prttsyn_stat;
int n;
if (!(pf->flags & I40E_FLAG_PTP))
return;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
/* If none of the four receive timestamp registers is latched, a PTP Rx
* hang cannot be in progress, so just update the last-check time and
* exit.
*/
if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT1_MASK <<
I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT2_MASK <<
I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT3_MASK <<
I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
pf->last_rx_ptp_check = jiffies;
return;
}
/* Determine the most recent watchdog or rx_timestamp event. */
rx_event = pf->last_rx_ptp_check;
for (n = 0; n < vsi->num_queue_pairs; n++) {
rx_ring = vsi->rx_rings[n];
if (time_after(rx_ring->last_rx_timestamp, rx_event))
rx_event = rx_ring->last_rx_timestamp;
}
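/* If more than 5 seconds have passed since the last Rx timestamp event
* while latch bits are still set, assume the latches are stuck and clear
* them by reading the corresponding high registers.
*/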
/* Only need to read the high RXSTMP register to clear the lock */
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
dev_warn(&vsi->back->pdev->dev,
"%s: clearing Rx timestamp hang\n",
__func__);
}
}
/**
* i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
* @pf: Board private structure
*
* Read the value of the Tx timestamp from the registers, convert it into a
* value consumable by the stack, and store that result into the shhwtstamps
* struct before returning it up the stack.
**/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
struct skb_shared_hwtstamps shhwtstamps;
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
u64 ns;
lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
}
/**
* i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
* @pf: Board private structure
* @skb: Particular skb to send timestamp with
* @index: Index into the receive timestamp registers for the timestamp
*
* The XL710 receives a notification in the receive descriptor with an offset
* into the set of RXTIME registers where the timestamp is for that skb. This
* function fetches the receive timestamp from that offset, if a valid
* one exists. The RXTIME registers are in ns, so we must convert the result
* first.
**/
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
{
u32 prttsyn_stat, hi, lo;
struct i40e_hw *hw;
u64 ns;
/* Since we cannot turn off the Rx timestamp logic if the device is
* doing Tx timestamping, check if Rx timestamping is configured.
*/
if (!pf->ptp_rx)
return;
hw = &pf->hw;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
if (!(prttsyn_stat & (1 << index)))
return;
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
}
/**
* i40e_ptp_set_increment - Utility function to update clock increment rate
* @pf: Board private structure
*
* During a link change, the DMA frequency that drives the 1588 logic will
* change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
* we must update the increment value per clock tick.
**/
void i40e_ptp_set_increment(struct i40e_pf *pf)
{
struct i40e_link_status *hw_link_info;
struct i40e_hw *hw = &pf->hw;
u64 incval;
hw_link_info = &hw->phy.link_info;
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
switch (hw_link_info->link_speed) {
case I40E_LINK_SPEED_10GB:
incval = I40E_PTP_10GB_INCVAL;
break;
case I40E_LINK_SPEED_1GB:
incval = I40E_PTP_1GB_INCVAL;
break;
case I40E_LINK_SPEED_100MB:
dev_warn(&pf->pdev->dev,
"%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
__func__);
incval = 0;
break;
case I40E_LINK_SPEED_40GB:
default:
incval = I40E_PTP_40GB_INCVAL;
break;
}
/* Write the new increment value into the increment register. The
* hardware will not update the clock until both registers have been
* written.
*/
wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustment value. */
ACCESS_ONCE(pf->ptp_base_adj) = incval;
smp_mb(); /* Force the above update. */
}
/**
* i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
* @pf: Board private structure
* @ifreq: ioctl data
*
* Obtain the current hardware timestamping settings as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
* deconstruct them from the registers.
**/
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config = &pf->tstamp_config;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
/**
* i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
*
* Control hardware registers to enter the specific mode requested by the
* user. Also used during reset path to ensure that timestamp settings are
* maintained.
*
* Note: modifies config in place, and may update the requested mode to be
* broader if the specific filter is not directly supported.
**/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
struct hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
u32 pf_id, tsyntype, regval;
/* Reserved for future extensions. */
if (config->flags)
return -EINVAL;
/* Confirm that 1588 is supported on this PF. */
pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
if (hw->pf_id != pf_id) {
dev_err(&pf->pdev->dev,
"PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n",
hw->pf_id, hw->port, pf_id);
return -EPERM;
}
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
break;
case HWTSTAMP_TX_ON:
pf->ptp_tx = true;
break;
default:
return -ERANGE;
}
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
pf->ptp_rx = false;
tsyntype = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_ALL:
default:
return -ERANGE;
}
/* Clear out all 1588-related registers to clear and unlatch them. */
rd32(hw, I40E_PRTTSYN_STAT_0);
rd32(hw, I40E_PRTTSYN_TXTIME_H);
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
/* Enable/disable the Tx timestamp interrupt based on user input. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
if (pf->ptp_tx)
regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
else
regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
wr32(hw, I40E_PRTTSYN_CTL0, regval);
regval = rd32(hw, I40E_PFINT_ICR0_ENA);
if (pf->ptp_tx)
regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
else
regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, regval);
/* There is no simple on/off switch for Rx. To "disable" Rx support,
* ignore any received timestamps, rather than turn off the clock.
*/
if (pf->ptp_rx) {
regval = rd32(hw, I40E_PRTTSYN_CTL1);
/* clear everything but the enable bit */
regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
/* now enable bits for desired Rx timestamps */
regval |= tsyntype;
wr32(hw, I40E_PRTTSYN_CTL1, regval);
}
return 0;
}
/**
* i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
* @pf: Board private structure
* @ifreq: ioctl data
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
* logic, so keep track in software of whether to indicate these timestamps
* or not.
*
* It is permissible to "upgrade" the user request to a broader filter, as long
* as the user receives the timestamps they care about and the user is notified
* the filter has been broadened.
**/
int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config config;
int err;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
err = i40e_ptp_set_timestamp_mode(pf, &config);
if (err)
return err;
/* save these settings for future reference */
pf->tstamp_config = config;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
/**
* i40e_ptp_create_clock - Create PTP clock device for userspace
* @pf: Board private structure
*
* This function creates a new PTP clock device if one does not already
* exist, so it is safe to call repeatedly. It returns an error if the clock
* cannot be created and success if a device already exists. It is used by
* i40e_ptp_init to create the clock initially, and to prevent global resets
* from creating duplicate clock devices.
**/
static long i40e_ptp_create_clock(struct i40e_pf *pf)
{
/* no need to create a clock device if we already have one */
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
pf->ptp_caps.n_ext_ts = 0;
pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettime = i40e_ptp_gettime;
pf->ptp_caps.settime = i40e_ptp_settime;
pf->ptp_caps.enable = i40e_ptp_feature_enable;
/* Attempt to register the clock before enabling the hardware. */
pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
if (IS_ERR(pf->ptp_clock)) {
return PTR_ERR(pf->ptp_clock);
}
/* clear the hwtstamp settings here during clock create, instead of
* during regular init, so that we can maintain settings across a
* reset or suspend.
*/
pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
return 0;
}
/**
* i40e_ptp_init - Initialize the 1588 support after device probe or reset
* @pf: Board private structure
*
* This function sets the device up for 1588 support. The first time it is run, it
* will create a PHC clock device. It does not create a clock device if one
* already exists. It also reconfigures the device after a reset.
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
struct i40e_hw *hw = &pf->hw;
long err;
/* we have to initialize the lock first, since we can't control
* when the user will enter the PHC device entry points
*/
spin_lock_init(&pf->tmreg_lock);
/* ensure we have a clock device */
err = i40e_ptp_create_clock(pf);
if (err) {
pf->ptp_clock = NULL;
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
} else {
struct timespec ts;
u32 regval;
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
netdev->name);
pf->flags |= I40E_FLAG_PTP;
/* Ensure the clocks are running. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
wr32(hw, I40E_PRTTSYN_CTL0, regval);
regval = rd32(hw, I40E_PRTTSYN_CTL1);
regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
wr32(hw, I40E_PRTTSYN_CTL1, regval);
/* Set the increment value per clock tick. */
i40e_ptp_set_increment(pf);
/* reset timestamping mode */
i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
/* Set the clock value. */
ts = ktime_to_timespec(ktime_get_real());
i40e_ptp_settime(&pf->ptp_caps, &ts);
}
}
/**
* i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
* @pf: Board private structure
*
* This function handles the cleanup work required from the initialization by
* clearing out the important information and unregistering the PHC.
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
pf->flags &= ~I40E_FLAG_PTP;
pf->ptp_tx = false;
pf->ptp_rx = false;
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
}
if (pf->ptp_clock) {
ptp_clock_unregister(pf->ptp_clock);
pf->ptp_clock = NULL;
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
pf->vsi[pf->lan_vsi]->netdev->name);
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,100 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
/* Error Codes */
enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
I40E_ERR_LINK_SETUP = -8,
I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
I40E_ERR_MASTER_REQUESTS_PENDING = -12,
I40E_ERR_INVALID_LINK_SETTINGS = -13,
I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
I40E_ERR_RING_FULL = -20,
I40E_ERR_INVALID_PD_ID = -21,
I40E_ERR_INVALID_QP_ID = -22,
I40E_ERR_INVALID_CQ_ID = -23,
I40E_ERR_INVALID_CEQ_ID = -24,
I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
I40E_ERR_INVALID_ARP_INDEX = -27,
I40E_ERR_INVALID_FPM_FUNC_ID = -28,
I40E_ERR_QP_INVALID_MSG_SIZE = -29,
I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
I40E_ERR_INVALID_ALIGNMENT = -33,
I40E_ERR_FLUSHED_QUEUE = -34,
I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
I40E_ERR_OPCODE_MISMATCH = -38,
I40E_ERR_CQP_COMPL_ERROR = -39,
I40E_ERR_INVALID_VF_ID = -40,
I40E_ERR_INVALID_HMCFN_ID = -41,
I40E_ERR_BACKING_PAGE_ERROR = -42,
I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
};
#endif /* _I40E_STATUS_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,303 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
#define I40E_MAX_IRATE 0x03F
#define I40E_MIN_IRATE 0x001
#define I40E_IRATE_USEC_RESOLUTION 4
#define I40E_ITR_100K 0x0005
#define I40E_ITR_20K 0x0019
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_ITR_RX_DEF I40E_ITR_8K
#define I40E_ITR_TX_DEF I40E_ITR_4K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK 256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
#define I40E_QUEUE_END_OF_LIST 0x7FF
/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
I40E_IDX_ITR0 = 0,
I40E_IDX_ITR1 = 1,
I40E_IDX_ITR2 = 2,
I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
#define I40E_RXBUFFER_8192 8192
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to roughly 512 bytes of extra data, meaning the smallest
 * allocation we could have is 1K.
 * i.e. RXBUFFER_512 --> size-1024 slab
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define I40E_RX_NEXT_DESC(r, i, n) \
do { \
(i)++; \
if ((i) == (r)->count) \
i = 0; \
(n) = I40E_RX_DESC((r), (i)); \
} while (0)
#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
do { \
I40E_RX_NEXT_DESC((r), (i), (n)); \
prefetch((n)); \
} while (0)
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
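/* Worked example (illustrative): with I40E_MAX_DATA_PER_TXD = 8192, a
 * 9000-byte linear buffer needs TXD_USE_COUNT(9000) =
 * DIV_ROUND_UP(9000, 8192) = 2 data descriptors, while DESC_NEEDED
 * reserves room for a maximally fragmented skb plus a few extra
 * descriptors (context, etc.) before the queue is stopped.
 */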
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
};
unsigned int bytecount;
unsigned short gso_segs;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
};
struct i40e_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
unsigned int page_offset;
};
struct i40e_queue_stats {
u64 packets;
u64 bytes;
};
struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
};
struct i40e_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
};
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
#define ring_is_ps_enabled(ring) \
test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define set_ring_ps_enabled(ring) \
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define check_for_tx_hang(ring) \
test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define clear_ring_16byte_desc_enabled(ring) \
clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
struct i40e_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
union {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
};
unsigned long state;
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_hdr_len;
u16 rx_buf_len;
u8 dtype;
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
#define I40E_RX_DTYPE_HEADER_SPLIT 2
u8 hsplit;
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
u8 atr_sample_rate;
u8 atr_count;
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */
/* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
union {
struct i40e_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats;
};
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
};
struct i40e_ring_container {
/* array of pointers to rings */
struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 count;
enum i40e_latency_range latency_range;
u16 itr;
};
/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
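/* Usage sketch, assuming a q_vector with a populated Tx ring container and
 * the per-ring cleanup helper i40e_clean_tx_irq() from the NAPI poll path:
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		clean_complete &= i40e_clean_tx_irq(ring, budget);
 *
 * The Rx container is walked the same way from i40e_napi_poll().
 */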
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
#endif /* _I40E_TXRX_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,363 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_VIRTCHNL_H_
#define _I40E_VIRTCHNL_H_
#include "i40e_type.h"
/* Description:
* This header file describes the VF-PF communication protocol used
* by the various i40e drivers.
*
* Admin queue buffer usage:
* desc->opcode is always i40e_aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* Firmware copies the cookie fields when sending messages between the PF and
* VF, but uses all other fields internally. Due to this limitation, we
* must send all messages as "indirect", i.e. using an external buffer.
*
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value is of
 * type i40e_status_code, defined in i40e_status.h.
*
* In general, VF driver initialization should roughly follow the order of these
* opcodes. The VF driver must first validate the API version of the PF driver,
* then request a reset, then get resources, then configure queues and
* interrupts. After these operations are complete, the VF driver may start
* its queues, optionally add MAC and VLAN filters, and process traffic.
*/
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
/* VF sends req. to pf for the following
* ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
I40E_VIRTCHNL_OP_RESET_VF,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_VIRTCHNL_OP_ENABLE_QUEUES,
I40E_VIRTCHNL_OP_DISABLE_QUEUES,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
I40E_VIRTCHNL_OP_ADD_VLAN,
I40E_VIRTCHNL_OP_DEL_VLAN,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
/* PF sends status change events to VFs using
* the following op.
*/
I40E_VIRTCHNL_OP_EVENT,
};
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct i40e_virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
i40e_status v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
/* Message descriptions and data structures. */
/* I40E_VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
#define I40E_VIRTCHNL_VERSION_MINOR 0
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
};
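/* Sketch of the initial handshake. The send helper name is illustrative
 * (the VF driver provides its own AdminQ message helper):
 *
 *	struct i40e_virtchnl_version_info vvi = {
 *		.major = I40E_VIRTCHNL_VERSION_MAJOR,
 *		.minor = I40E_VIRTCHNL_VERSION_MINOR,
 *	};
 *
 *	err = i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION,
 *				 (u8 *)&vvi, sizeof(vvi));
 *
 * The PF answers with its own i40e_virtchnl_version_info; a major-version
 * mismatch means the VF must not continue.
 */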
/* I40E_VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
* VF sends this request to PF with no parameters
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
*/
struct i40e_virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_offload_flags;
u32 max_fcoe_contexts;
u32 max_fcoe_filters;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of i40e_virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct i40e_virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled;
u64 dma_ring_addr;
u64 dma_headwb_addr;
};
/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of i40e_virtchnl_rxq_info.
* PF configures requested queue and returns a status code.
*/
/* Rx queue config info */
struct i40e_virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled;
u32 databuffer_size;
u32 max_pkt_size;
u64 dma_ring_addr;
enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
};
/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for all active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
*/
struct i40e_virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct i40e_virtchnl_txq_info txq;
struct i40e_virtchnl_rxq_info rxq;
};
struct i40e_virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
struct i40e_virtchnl_queue_pair_info qpair[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0.
* PF configures interrupt mapping and returns status.
*/
struct i40e_virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
struct i40e_virtchnl_irq_map_info {
u16 num_vectors;
struct i40e_virtchnl_vector_map vecmap[1];
};
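/* Illustrative fill (the IDs and bitmaps are made up): mapping Rx and Tx
 * queue 0 of VSI 1 onto MSI-X vector 1, with the conventional Rx/Tx ITR
 * indices (I40E_RX_ITR / I40E_TX_ITR from i40e_txrx.h):
 *
 *	struct i40e_virtchnl_vector_map vm = {
 *		.vsi_id    = 1,
 *		.vector_id = 1,
 *		.rxq_map   = 0x1,
 *		.txq_map   = 0x1,
 *		.rxitr_idx = I40E_RX_ITR,
 *		.txitr_idx = I40E_TX_ITR,
 *	};
 *
 * One such entry per vector is placed in i40e_virtchnl_irq_map_info.
 */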
/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
* I40E_VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
*/
struct i40e_virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
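/* Illustrative example (the VSI id and bitmaps are made up): enabling the
 * first four Tx/Rx queue pairs of VSI 1 would be encoded as
 *
 *	struct i40e_virtchnl_queue_select vqs = {
 *		.vsi_id    = 1,
 *		.rx_queues = 0xf,
 *		.tx_queues = 0xf,
 *	};
 *
 * and sent with opcode I40E_VIRTCHNL_OP_ENABLE_QUEUES.
 */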
/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct i40e_virtchnl_ether_addr {
u8 addr[ETH_ALEN];
u8 pad[2];
};
struct i40e_virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct i40e_virtchnl_ether_addr list[1];
};
/* I40E_VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* I40E_VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct i40e_virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct i40e_virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
/* I40E_VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
 * the i40e_virtchnl_queue_select struct to specify the VSI. The queue
 * bitmap fields are ignored by the PF.
*
* PF replies with struct i40e_eth_stats in an external buffer.
*/
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum i40e_virtchnl_event_codes {
I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
I40E_VIRTCHNL_EVENT_LINK_CHANGE,
I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define I40E_PF_EVENT_SEVERITY_INFO 0
#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct i40e_virtchnl_pf_event {
enum i40e_virtchnl_event_codes event;
union {
struct {
enum i40e_aq_link_speed link_speed;
bool link_status;
} link_event;
} event_data;
int severity;
};
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return 0xDEADBEEF, which, when masked,
 * will result in 3 (I40E_VFR_UNKNOWN).
*/
enum i40e_vfr_states {
I40E_VFR_INPROGRESS = 0,
I40E_VFR_COMPLETED,
I40E_VFR_VFACTIVE,
I40E_VFR_UNKNOWN,
};
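/* Polling sketch, assuming the VF register names used elsewhere in this
 * driver (I40E_VFGEN_RSTAT, I40E_VFGEN_RSTAT_VFR_STATE_MASK):
 *
 *	u32 rstat;
 *	int i;
 *
 *	for (i = 0; i < 100; i++) {
 *		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
 *			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
 *		if (rstat == I40E_VFR_VFACTIVE ||
 *		    rstat == I40E_VFR_COMPLETED)
 *			break;
 *		usleep_range(10, 20);
 *	}
 *
 * A masked read of the 0xDEADBEEF "in reset" pattern yields 3
 * (I40E_VFR_UNKNOWN), so the loop keeps waiting until a real state appears.
 */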
#endif /* _I40E_VIRTCHNL_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,130 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_VIRTCHNL_PF_H_
#define _I40E_VIRTCHNL_PF_H_
#include "i40e.h"
#define I40E_MAX_MACVLAN_FILTERS 256
#define I40E_MAX_VLAN_FILTERS 256
#define I40E_MAX_VLANID 4095
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
#define I40E_VLAN_PRIORITY_SHIFT 12
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
I40E_QUEUE_CTRL_ENABLE,
I40E_QUEUE_CTRL_ENABLECHECK,
I40E_QUEUE_CTRL_DISABLE,
I40E_QUEUE_CTRL_DISABLECHECK,
I40E_QUEUE_CTRL_FASTDISABLE,
I40E_QUEUE_CTRL_FASTDISABLECHECK,
};
/* VF states */
enum i40e_vf_states {
I40E_VF_STAT_INIT = 0,
I40E_VF_STAT_ACTIVE,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
};
/* VF capabilities */
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
};
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
/* vf id in the pf space */
u16 vf_id;
/* all vf vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
/* vf Port Extender (PE) stag if used */
u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
* the main LAN VSI for the PF.
*/
u8 lan_vsi_index; /* index into PF struct */
u8 lan_vsi_id; /* ID as used by firmware */
u8 num_queue_pairs; /* num of qps assigned to vf vsis */
u64 num_mdd_events; /* num of mdd events detected */
u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
bool spoofchk;
};
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* vf configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
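/* These ndo_set_vf_* handlers back the PF's SR-IOV administration from
 * userspace; e.g. an (illustrative) invocation such as
 * "ip link set <pf-ifname> vf 0 mac 52:54:00:12:34:56" ends up in
 * i40e_ndo_set_vf_mac().
 */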
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
#endif /* _I40E_VIRTCHNL_PF_H_ */

View file

@ -0,0 +1,36 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
## Makefile for the Intel(R) 40GbE VF driver
#
#
obj-$(CONFIG_I40EVF) += i40evf.o
i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
i40e_txrx.o i40e_common.o i40e_adminq.o

View file

@ -0,0 +1,990 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
/**
* i40e_is_nvm_update_op - return true if this is an NVM update operation
* @desc: API request descriptor
**/
static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
(desc->opcode == i40e_aqc_opc_nvm_update);
}
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.asq.bal = I40E_VF_ATQBAL1;
hw->aq.asq.bah = I40E_VF_ATQBAH1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
hw->aq.arq.len = I40E_VF_ARQLEN1;
hw->aq.arq.bal = I40E_VF_ARQBAL1;
hw->aq.arq.bah = I40E_VF_ARQBAH1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.asq.bal = I40E_PF_ATQBAL;
hw->aq.asq.bah = I40E_PF_ATQBAH;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
hw->aq.arq.len = I40E_PF_ARQLEN;
hw->aq.arq.bal = I40E_PF_ARQBAL;
hw->aq.arq.bah = I40E_PF_ARQBAH;
}
}
/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
return ret_code;
}
/**
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
/**
* i40e_free_adminq_asq - Free Admin Queue send rings
* @hw: pointer to the hardware structure
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
* i40e_free_adminq_arq - Free Admin Queue receive rings
* @hw: pointer to the hardware structure
*
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
int i;
/* We'll be allocating the buffer info memory first, then we can
* allocate the mapped buffers for the event processing
*/
/* buffer_info structures do not need alignment */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi,
i40e_mem_arq_buf,
hw->aq.arq_buf_size,
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_arq_bufs;
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16((u16)bi->size);
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
desc->params.external.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
desc->params.external.param0 = 0;
desc->params.external.param1 = 0;
}
alloc_arq_bufs:
return ret_code;
unwind_alloc_arq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi,
i40e_mem_asq_buf,
hw->aq.asq_buf_size,
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_asq_bufs;
}
alloc_asq_bufs:
return ret_code;
unwind_alloc_asq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* i40e_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
int i;
/* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
/* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
/* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
int i;
/* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
/* free the buffer info list */
i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
/* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
/* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* i40e_config_asq_regs - configure ASQ registers
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
/* set starting point */
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.asq.bal);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
* i40e_config_arq_regs - ARQ register configuration
* @hw: pointer to the hardware structure
*
 * Configure base address and length registers for the receive (event) queue
**/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
/* set starting point */
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.arq.bal);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
* i40e_init_asq - main initialization routine for ASQ
* @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
ret_code = I40E_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
hw->aq.asq.count = hw->aq.num_asq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_asq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = i40e_alloc_asq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
ret_code = i40e_config_asq_regs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
init_adminq_free_rings:
i40e_free_adminq_asq(hw);
init_adminq_exit:
return ret_code;
}
/**
* i40e_init_arq - initialize ARQ
* @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
 * - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
ret_code = I40E_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
hw->aq.arq.count = hw->aq.num_arq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_arq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
ret_code = i40e_alloc_arq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
ret_code = i40e_config_arq_regs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
init_adminq_free_rings:
i40e_free_adminq_arq(hw);
init_adminq_exit:
return ret_code;
}
/**
* i40e_shutdown_asq - shutdown the ASQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.asq.count == 0)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, hw->aq.asq.len, 0);
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
mutex_unlock(&hw->aq.asq_mutex);
return ret_code;
}
/**
* i40e_shutdown_arq - shutdown ARQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (hw->aq.arq.count == 0)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, hw->aq.arq.len, 0);
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
}
/**
* i40evf_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
* - hw->aq.num_asq_entries
* - hw->aq.num_arq_entries
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
{
i40e_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
/* initialize locks */
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
/* Set up register offsets */
i40e_adminq_init_regs(hw);
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
/* allocate the ASQ */
ret_code = i40e_init_asq(hw);
if (ret_code)
goto init_adminq_destroy_locks;
/* allocate the ARQ */
ret_code = i40e_init_arq(hw);
if (ret_code)
goto init_adminq_free_asq;
/* success! */
goto init_adminq_exit;
init_adminq_free_asq:
i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);
init_adminq_exit:
return ret_code;
}
/**
* i40evf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (i40evf_check_asq_alive(hw))
i40evf_aq_queue_shutdown(hw, true);
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
	/* destroy the locks */
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);
return ret_code;
}
/**
* i40e_clean_asq - cleans Admin send queue
* @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
struct i40e_aq_desc desc_cb;
struct i40e_aq_desc *desc;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"%s: ntc %d head %d.\n", __func__, ntc,
rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
(I40E_ADMINQ_CALLBACK)details->callback;
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
memset((void *)details, 0,
sizeof(struct i40e_asq_cmd_details));
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
return I40E_DESC_UNUSED(asq);
}
/**
* i40evf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
bool i40evf_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
/**
* i40evf_asq_send_command - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
u32 val = 0;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_exit;
}
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_exit;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
/* If the cmd_details are defined copy the cookie. The
* cpu_to_le32 is not needed here because the data is ignored
* by the FW, only used by the driver
*/
if (details->cookie) {
desc->cookie_high =
cpu_to_le32(upper_32_bits(details->cookie));
desc->cookie_low =
cpu_to_le32(lower_32_bits(details->cookie));
}
} else {
memset(details, 0, sizeof(struct i40e_asq_cmd_details));
}
/* clear requested flags and then set additional flags if defined */
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
mutex_lock(&hw->aq.asq_mutex);
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
status = I40E_ERR_INVALID_SIZE;
goto asq_send_command_error;
}
if (details->postpone && !details->async) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
status = I40E_ERR_PARAM;
goto asq_send_command_error;
}
/* call clean and check queue available function to reclaim the
* descriptors that were processed by FW, the function returns the
* number of desc available
*/
/* the clean function called here could be called in a separate thread
* in case of asynchronous completions
*/
if (i40e_clean_asq(hw) == 0) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
/* initialize the temp desc pointer with the right desc */
desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
/* if the desc is available copy the temp desc to the right place */
*desc_on_ring = *desc;
/* if buff is not NULL assume indirect command */
if (buff != NULL) {
dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
/* copy the user buff into the respective DMA buff */
memcpy(dma_buff->va, buff, buff_size);
desc_on_ring->datalen = cpu_to_le16(buff_size);
/* Update the address values in the desc with the pa value
* for respective buffer
*/
desc_on_ring->params.external.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
desc_on_ring->params.external.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
u32 delay_len = 10;
do {
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
if (i40evf_asq_done(hw))
break;
/* ugh! delay while spin_lock */
udelay(delay_len);
total_delay += delay_len;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
/* if ready, copy the desc back to temp */
if (i40evf_asq_done(hw)) {
*desc = *desc_on_ring;
if (buff != NULL)
memcpy(buff, dma_buff->va, buff_size);
retval = le16_to_cpu(desc->retval);
if (retval != 0) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
/* strip off FW internal code */
retval &= 0xff;
}
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
if (i40e_is_nvm_update_op(desc))
hw->aq.nvm_busy = true;
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
buff_size);
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
return status;
}
/**
* i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
*
* Fill the desc with default values
**/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
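/* Usage sketch: a direct command with no indirect buffer is prepared and
 * posted as below. The i40e_aqc_opc_queue_shutdown opcode is assumed to
 * come from i40e_adminq_cmd.h, as it does for the PF driver:
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands additionally pass a buffer and its length so the send
 * routine can copy it into the ring's DMA buffer.
 */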
/**
* i40evf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *pending)
{
i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
u16 desc_idx;
u16 datalen;
u16 flags;
u16 ntu;
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
/* now clean the next descriptor */
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
}
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_size);
if (i40e_is_nvm_update_op(&e->desc))
hw->aq.nvm_busy = false;
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, hw->aq.arq.tail, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
ntc = 0;
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
}
void i40evf_resume_aq(struct i40e_hw *hw)
{
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
}

View file

@ -0,0 +1,149 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
struct i40e_virt_mem dma_head; /* space for dma structures */
struct i40e_dma_mem desc_buf; /* descriptor ring memory */
struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
};
/* ASQ transaction details */
struct i40e_asq_cmd_details {
void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
};
#define I40E_ADMINQ_DETAILS(R, i) \
(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
u16 msg_size;
u8 *msg_buf;
};
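/* Example (hypothetical helper, for illustration only): a caller typically
 * sizes msg_buf to the ARQ buffer size and lets i40evf_clean_arq_element(),
 * declared in i40e_prototype.h, trim msg_size down to the real event length.
 * kzalloc()/kfree() would come from <linux/slab.h>.
 */
#if 0	/* illustrative sketch, not compiled */
static i40e_status example_poll_arq(struct i40e_hw *hw)
{
	struct i40e_arq_event_info event;
	u16 pending = 0;
	i40e_status ret;

	event.msg_buf = kzalloc(hw->aq.arq_buf_size, GFP_KERNEL);
	if (!event.msg_buf)
		return I40E_ERR_NO_MEMORY;

	do {
		/* reset capacity; the call shrinks it to the event size */
		event.msg_size = hw->aq.arq_buf_size;
		ret = i40evf_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;	/* receive queue is empty */
		/* event.desc and event.msg_buf now describe one event */
	} while (pending);

	kfree(event.msg_buf);
	return 0;
}
#endif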
/* Admin Queue information */
struct i40e_adminq_info {
struct i40e_adminq_ring arq; /* receive queue */
struct i40e_adminq_ring asq; /* send queue */
u32 asq_cmd_timeout; /* send queue cmd write back timeout */
u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */
u16 arq_buf_size; /* receive queue buffer size */
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_busy;
bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
};
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
* @aq_rc: AdminQ error code to convert
**/
static inline int i40e_aq_rc_to_posix(u16 aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
-EPERM, /* I40E_AQ_RC_EPERM */
-ENOENT, /* I40E_AQ_RC_ENOENT */
-ESRCH, /* I40E_AQ_RC_ESRCH */
-EINTR, /* I40E_AQ_RC_EINTR */
-EIO, /* I40E_AQ_RC_EIO */
-ENXIO, /* I40E_AQ_RC_ENXIO */
-E2BIG, /* I40E_AQ_RC_E2BIG */
-EAGAIN, /* I40E_AQ_RC_EAGAIN */
-ENOMEM, /* I40E_AQ_RC_ENOMEM */
-EACCES, /* I40E_AQ_RC_EACCES */
-EFAULT, /* I40E_AQ_RC_EFAULT */
-EBUSY, /* I40E_AQ_RC_EBUSY */
-EEXIST, /* I40E_AQ_RC_EEXIST */
-EINVAL, /* I40E_AQ_RC_EINVAL */
-ENOTTY, /* I40E_AQ_RC_ENOTTY */
-ENOSPC, /* I40E_AQ_RC_ENOSPC */
-ENOSYS, /* I40E_AQ_RC_ENOSYS */
-ERANGE, /* I40E_AQ_RC_ERANGE */
-EPIPE, /* I40E_AQ_RC_EFLUSHED */
-ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
-EROFS, /* I40E_AQ_RC_EMODE */
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
/* aq_rc comes from firmware; guard against codes outside the table */
if (aq_rc >= (sizeof(aq_to_posix) / sizeof(aq_to_posix[0])))
return -ERANGE;
return aq_to_posix[aq_rc];
}
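/* Example (for illustration): after a failed admin queue command, the
 * firmware return code latched in asq_last_status can be mapped to a
 * normal kernel errno before being handed further up, e.g.:
 *
 *	if (status == I40E_ERR_ADMIN_QUEUE_ERROR)
 *		err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
 */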
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */

File diff suppressed because it is too large.

View file

@@ -0,0 +1,58 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
struct i40e_hw;
/* Memory allocation types */
enum i40e_memory_type {
i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
i40e_mem_asq_buf = 1,
i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
i40e_mem_pd = 5, /* Page Descriptor */
i40e_mem_bp = 6, /* Backing Page - 4KB */
i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
i40e_mem_reserved
};
/* prototype for functions used for dynamic memory allocation */
i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem,
enum i40e_memory_type type,
u64 size, u32 alignment);
i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem);
i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem,
u32 size);
i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem);
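/* Example (hypothetical, for illustration): the allocators above are used in
 * matched pairs; the memory type, size and alignment values in this sketch
 * are made up.
 */
#if 0	/* illustrative sketch, not compiled */
static i40e_status example_alloc_arq_buf(struct i40e_hw *hw,
					 struct i40e_dma_mem *mem)
{
	i40e_status ret;

	ret = i40e_allocate_dma_mem(hw, mem, i40e_mem_arq_buf,
				    4096, 4096);
	if (ret)
		return ret;
	/* ... mem->va / mem->pa can now be handed to the hardware ... */
	return i40e_free_dma_mem(hw, mem);
}
#endif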
#endif /* _I40E_ALLOC_H_ */

View file

@@ -0,0 +1,628 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
/**
* i40e_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
*
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
i40e_status status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
break;
default:
hw->mac.type = I40E_MAC_GENERIC;
break;
}
} else {
status = I40E_ERR_DEVICE_NOT_SUPPORTED;
}
hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
hw->mac.type, status);
return status;
}
/**
* i40evf_debug_aq
* @hw: pointer to the hw struct
* @mask: debug mask
* @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
* @buf_len: max length of buffer
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = le16_to_cpu(aq_desc->datalen);
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
aq_desc->retval);
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
aq_desc->cookie_high, aq_desc->cookie_low);
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
aq_desc->params.internal.param0,
aq_desc->params.internal.param1);
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
aq_desc->params.external.addr_high,
aq_desc->params.external.addr_low);
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
for (i = 0; i < len; i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
i40e_debug(hw, mask,
"\t0x%04X %08X %08X %08X %08X\n",
i - 15, data[0], data[1], data[2],
data[3]);
memset(data, 0, sizeof(data));
}
}
if ((i % 16) != 0)
i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
i - (i % 16), data[0], data[1], data[2],
data[3]);
}
}
/**
* i40evf_check_asq_alive
* @hw: pointer to the hw struct
*
* Returns true if Queue is enabled else false.
**/
bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
return !!(rd32(hw, hw->aq.asq.len) &
I40E_PF_ATQLEN_ATQENABLE_MASK);
else
return false;
}
/**
* i40evf_aq_queue_shutdown
* @hw: pointer to the hw struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
}
/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
*
* Macros are used to shorten the table lines and make this table human
* readable.
*
* We store the PTYPE in the top byte of the bit field - this is just so that
* we can check that the table doesn't have a row missing, as the index into
* the table should be the PTYPE.
*
* Typical work flow:
*
* IF NOT i40evf_ptype_lookup[ptype].known
* THEN
* Packet is unknown
* ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
* Use the rest of the fields to look at the tunnels, inner protocols, etc
* ELSE
* Use the enum i40e_rx_l2_ptype to decode the packet type
* ENDIF
*/
/* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
{ PTYPE, \
1, \
I40E_RX_PTYPE_OUTER_##OUTER_IP, \
I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
I40E_RX_PTYPE_##OUTER_FRAG, \
I40E_RX_PTYPE_TUNNEL_##T, \
I40E_RX_PTYPE_TUNNEL_END_##TE, \
I40E_RX_PTYPE_##TEF, \
I40E_RX_PTYPE_INNER_PROT_##I, \
I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
/* L2 Packet types */
I40E_PTT_UNUSED_ENTRY(0),
I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT_UNUSED_ENTRY(4),
I40E_PTT_UNUSED_ENTRY(5),
I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT_UNUSED_ENTRY(8),
I40E_PTT_UNUSED_ENTRY(9),
I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
/* Non Tunneled IPv4 */
I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(25),
I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
/* IPv4 --> IPv4 */
I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(32),
I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> IPv6 */
I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(39),
I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT */
I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT --> IPv4 */
I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(47),
I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> IPv6 */
I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(54),
I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> MAC */
I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(62),
I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(69),
I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> MAC/VLAN */
I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT -> MAC/VLAN --> IPv4 */
I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(77),
I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(84),
I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
/* IPv6 --> IPv4 */
I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(98),
I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> IPv6 */
I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(105),
I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT */
I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> IPv4 */
I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(113),
I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> IPv6 */
I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(120),
I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC */
I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(128),
I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(135),
I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC/VLAN */
I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(143),
I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(150),
I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* unused entries */
I40E_PTT_UNUSED_ENTRY(154),
I40E_PTT_UNUSED_ENTRY(155),
I40E_PTT_UNUSED_ENTRY(156),
I40E_PTT_UNUSED_ENTRY(157),
I40E_PTT_UNUSED_ENTRY(158),
I40E_PTT_UNUSED_ENTRY(159),
I40E_PTT_UNUSED_ENTRY(160),
I40E_PTT_UNUSED_ENTRY(161),
I40E_PTT_UNUSED_ENTRY(162),
I40E_PTT_UNUSED_ENTRY(163),
I40E_PTT_UNUSED_ENTRY(164),
I40E_PTT_UNUSED_ENTRY(165),
I40E_PTT_UNUSED_ENTRY(166),
I40E_PTT_UNUSED_ENTRY(167),
I40E_PTT_UNUSED_ENTRY(168),
I40E_PTT_UNUSED_ENTRY(169),
I40E_PTT_UNUSED_ENTRY(170),
I40E_PTT_UNUSED_ENTRY(171),
I40E_PTT_UNUSED_ENTRY(172),
I40E_PTT_UNUSED_ENTRY(173),
I40E_PTT_UNUSED_ENTRY(174),
I40E_PTT_UNUSED_ENTRY(175),
I40E_PTT_UNUSED_ENTRY(176),
I40E_PTT_UNUSED_ENTRY(177),
I40E_PTT_UNUSED_ENTRY(178),
I40E_PTT_UNUSED_ENTRY(179),
I40E_PTT_UNUSED_ENTRY(180),
I40E_PTT_UNUSED_ENTRY(181),
I40E_PTT_UNUSED_ENTRY(182),
I40E_PTT_UNUSED_ENTRY(183),
I40E_PTT_UNUSED_ENTRY(184),
I40E_PTT_UNUSED_ENTRY(185),
I40E_PTT_UNUSED_ENTRY(186),
I40E_PTT_UNUSED_ENTRY(187),
I40E_PTT_UNUSED_ENTRY(188),
I40E_PTT_UNUSED_ENTRY(189),
I40E_PTT_UNUSED_ENTRY(190),
I40E_PTT_UNUSED_ENTRY(191),
I40E_PTT_UNUSED_ENTRY(192),
I40E_PTT_UNUSED_ENTRY(193),
I40E_PTT_UNUSED_ENTRY(194),
I40E_PTT_UNUSED_ENTRY(195),
I40E_PTT_UNUSED_ENTRY(196),
I40E_PTT_UNUSED_ENTRY(197),
I40E_PTT_UNUSED_ENTRY(198),
I40E_PTT_UNUSED_ENTRY(199),
I40E_PTT_UNUSED_ENTRY(200),
I40E_PTT_UNUSED_ENTRY(201),
I40E_PTT_UNUSED_ENTRY(202),
I40E_PTT_UNUSED_ENTRY(203),
I40E_PTT_UNUSED_ENTRY(204),
I40E_PTT_UNUSED_ENTRY(205),
I40E_PTT_UNUSED_ENTRY(206),
I40E_PTT_UNUSED_ENTRY(207),
I40E_PTT_UNUSED_ENTRY(208),
I40E_PTT_UNUSED_ENTRY(209),
I40E_PTT_UNUSED_ENTRY(210),
I40E_PTT_UNUSED_ENTRY(211),
I40E_PTT_UNUSED_ENTRY(212),
I40E_PTT_UNUSED_ENTRY(213),
I40E_PTT_UNUSED_ENTRY(214),
I40E_PTT_UNUSED_ENTRY(215),
I40E_PTT_UNUSED_ENTRY(216),
I40E_PTT_UNUSED_ENTRY(217),
I40E_PTT_UNUSED_ENTRY(218),
I40E_PTT_UNUSED_ENTRY(219),
I40E_PTT_UNUSED_ENTRY(220),
I40E_PTT_UNUSED_ENTRY(221),
I40E_PTT_UNUSED_ENTRY(222),
I40E_PTT_UNUSED_ENTRY(223),
I40E_PTT_UNUSED_ENTRY(224),
I40E_PTT_UNUSED_ENTRY(225),
I40E_PTT_UNUSED_ENTRY(226),
I40E_PTT_UNUSED_ENTRY(227),
I40E_PTT_UNUSED_ENTRY(228),
I40E_PTT_UNUSED_ENTRY(229),
I40E_PTT_UNUSED_ENTRY(230),
I40E_PTT_UNUSED_ENTRY(231),
I40E_PTT_UNUSED_ENTRY(232),
I40E_PTT_UNUSED_ENTRY(233),
I40E_PTT_UNUSED_ENTRY(234),
I40E_PTT_UNUSED_ENTRY(235),
I40E_PTT_UNUSED_ENTRY(236),
I40E_PTT_UNUSED_ENTRY(237),
I40E_PTT_UNUSED_ENTRY(238),
I40E_PTT_UNUSED_ENTRY(239),
I40E_PTT_UNUSED_ENTRY(240),
I40E_PTT_UNUSED_ENTRY(241),
I40E_PTT_UNUSED_ENTRY(242),
I40E_PTT_UNUSED_ENTRY(243),
I40E_PTT_UNUSED_ENTRY(244),
I40E_PTT_UNUSED_ENTRY(245),
I40E_PTT_UNUSED_ENTRY(246),
I40E_PTT_UNUSED_ENTRY(247),
I40E_PTT_UNUSED_ENTRY(248),
I40E_PTT_UNUSED_ENTRY(249),
I40E_PTT_UNUSED_ENTRY(250),
I40E_PTT_UNUSED_ENTRY(251),
I40E_PTT_UNUSED_ENTRY(252),
I40E_PTT_UNUSED_ENTRY(253),
I40E_PTT_UNUSED_ENTRY(254),
I40E_PTT_UNUSED_ENTRY(255)
};
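/* Example (hypothetical helper, for illustration) of the work flow described
 * above the table, using decode_rx_desc_ptype() from i40e_prototype.h and
 * the i40e_rx_ptype_decoded field names assumed from i40e_type.h.
 */
#if 0	/* illustrative sketch, not compiled */
static bool example_ptype_is_ipv4_tcp(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return false;	/* packet type not recognized */
	if (decoded.outer_ip != I40E_RX_PTYPE_OUTER_IP)
		return false;	/* pure L2 frame, decode via l2 ptype enum */
	return decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4 &&
	       decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP;
}
#endif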
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
*
* Send message to PF driver using admin queue. By default, this message
* is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
* completion before returning.
**/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_asq_cmd_details details;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
| I40E_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
memset(&details, 0, sizeof(details));
details.async = true;
cmd_details = &details;
}
status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
return status;
}
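/* Example (hypothetical, for illustration): a typical message sent through
 * i40e_aq_send_msg_to_pf() is the VERSION handshake; the opcode, payload
 * structure and version constants are assumed from i40e_virtchnl.h. With
 * cmd_details left NULL the message goes out asynchronously, as noted above.
 */
#if 0	/* illustrative sketch, not compiled */
static i40e_status example_send_api_ver(struct i40e_hw *hw)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_VERSION, 0,
				      (u8 *)&vvi, sizeof(vvi), NULL);
}
#endif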
/**
* i40e_vf_parse_hw_config
* @hw: pointer to the hardware structure
* @msg: pointer to the virtual channel VF resource structure
*
* Given a VF resource message from the PF, populate the hw struct
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg)
{
struct i40e_virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
hw->dev_caps.num_vsis = msg->num_vsis;
hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.fcoe = (msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
ETH_ALEN);
memcpy(hw->mac.addr, vsi_res->default_mac_addr,
ETH_ALEN);
}
vsi_res++;
}
}
/**
* i40e_vf_reset
* @hw: pointer to the hardware structure
*
* Send a VF_RESET message to the PF. Does not wait for response from PF
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
i40e_status i40e_vf_reset(struct i40e_hw *hw)
{
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
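/* Example (hypothetical helper, for illustration) of the sequence described
 * above: request the reset, then shut the admin queue down and bring it back
 * up before talking to the PF again. Error handling is omitted.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_reset_vf(struct i40e_hw *hw)
{
	i40e_vf_reset(hw);		/* no response will come back */
	i40evf_shutdown_adminq(hw);	/* declared in i40e_prototype.h */
	i40evf_init_adminq(hw);		/* optional re-initialization */
}
#endif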

View file

@@ -0,0 +1,236 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
#define I40E_HMC_MAX_BP_COUNT 512
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define I40E_HMC_PD_CNT_IN_SD 512
#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define I40E_HMC_PAGED_BP_SIZE 4096
#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
#define I40E_FIRST_VF_FPM_ID 16
struct i40e_hmc_obj_info {
u64 base; /* base addr in FPM */
u32 max_cnt; /* max count available for this hmc func */
u32 cnt; /* count of objects driver actually wants to create */
u64 size; /* size in bytes of one object */
};
enum i40e_sd_entry_type {
I40E_SD_TYPE_INVALID = 0,
I40E_SD_TYPE_PAGED = 1,
I40E_SD_TYPE_DIRECT = 2
};
struct i40e_hmc_bp {
enum i40e_sd_entry_type entry_type;
struct i40e_dma_mem addr; /* populate to be used by hw */
u32 sd_pd_index;
u32 ref_cnt;
};
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
bool valid;
};
struct i40e_hmc_pd_table {
struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
u32 ref_cnt;
u32 sd_index;
};
struct i40e_hmc_sd_entry {
enum i40e_sd_entry_type entry_type;
bool valid;
union {
struct i40e_hmc_pd_table pd_table;
struct i40e_hmc_bp bp;
} u;
};
struct i40e_hmc_sd_table {
struct i40e_virt_mem addr; /* used to track sd_entry allocations */
u32 sd_cnt;
u32 ref_cnt;
struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
};
struct i40e_hmc_info {
u32 signature;
/* equals to pci func num for PF and dynamically allocated for VFs */
u8 hmc_fn_id;
u16 first_sd_index; /* index of the first available SD */
/* hmc objects */
struct i40e_hmc_obj_info *hmc_obj;
struct i40e_virt_mem hmc_obj_virt_mem;
struct i40e_hmc_sd_table sd_table;
};
#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
{ \
u32 val1, val2, val3; \
val1 = (u32)(upper_32_bits(pa)); \
val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
{ \
u32 val2, val3; \
val2 = (I40E_HMC_MAX_BP_COUNT << \
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
}
/**
* I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @index: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
{ \
u64 fpm_addr, fpm_limit; \
fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (index); \
fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(sd_limit) += 1; \
}
/**
* I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_index: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by i40e_hmc_rsrc_type.
**/
#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
{ \
u64 fpm_adr, fpm_limit; \
fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (idx); \
fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
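/* Worked example (illustrative numbers): for an object table with base 0,
 * an object size of 128 bytes, a start index of 0 and a count of 16384, the
 * byte range is [0, 0x200000). The SD macro above then gives sd_idx = 0 and
 * sd_limit = 1 (one 2MB direct backing page), while the PD macro gives
 * pd_index = 0 and pd_limit = 512, i.e. I40E_HMC_PD_CNT_IN_SD pages of
 * I40E_HMC_PAGED_BP_SIZE bytes each.
 */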
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 sd_index,
enum i40e_sd_entry_type type,
u64 direct_mode_sz);
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */

View file

@@ -0,0 +1,181 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
/* HMC element context information */
/* Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_rxq {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
u16 dbuff; /* bigger than needed, see above for reason */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
u16 hbuff; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 fc_ena;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct i40e_hmc_obj_txq {
u16 head;
u8 new_context;
u64 base;
u8 fc_ena;
u8 timesync_ena;
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u8 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
u8 tphrpacket_ena;
u8 tphwdesc_ena;
u64 head_wb_addr;
u32 crc;
u16 rdylist;
u8 rdylist_act;
};
/* for hsplit_0 field of Rx HMC context */
enum i40e_hmc_obj_rx_hsplit_0 {
I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
};
/* fcoe_cntx and fcoe_filt are for debugging purpose only */
struct i40e_hmc_obj_fcoe_cntx {
u32 rsv[32];
};
struct i40e_hmc_obj_fcoe_filt {
u32 rsv[8];
};
/* Context sizes for LAN objects */
enum i40e_hmc_lan_object_size {
I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
};
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
I40E_HMC_LAN_TX = 1,
I40E_HMC_LAN_RX = 2,
I40E_HMC_FCOE_CTX = 3,
I40E_HMC_FCOE_FILT = 4,
I40E_HMC_LAN_MAX = 5
};
enum i40e_hmc_model {
I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
I40E_HMC_MODEL_DIRECT_ONLY = 1,
I40E_HMC_MODEL_PAGED_ONLY = 2,
I40E_HMC_MODEL_UNKNOWN,
};
struct i40e_hmc_lan_create_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
enum i40e_sd_entry_type entry_type;
u64 direct_mode_sz;
};
struct i40e_hmc_lan_delete_obj_info {
struct i40e_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
};
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
u32 rxq_num, u32 fcoe_cntx_num,
u32 fcoe_filt_num);
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
enum i40e_hmc_model model);
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_txq *s);
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue);
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
u16 queue,
struct i40e_hmc_obj_rxq *s);
#endif /* _I40E_LAN_HMC_H_ */

View file

@@ -0,0 +1,75 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/pci.h>
/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <asm-generic/io-64-nonatomic-lo-hi.h>
/* File to be the magic between shared code and
* actual OS primitives
*/
#define hw_dbg(hw, S, A...) do {} while (0)
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
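/* Example (for illustration): the accessors above take the hw struct plus a
 * register offset, e.g.
 *
 *	u32 rstat = rd32(hw, I40E_VFGEN_RSTAT);	poll VF reset status
 *	i40e_flush(hw);				flush posted writes
 */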
/* memory allocation tracking */
struct i40e_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
} __packed;
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40evf_allocate_dma_mem_d(h, m, s, a)
#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
struct i40e_virt_mem {
void *va;
u32 size;
} __packed;
#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
__attribute__ ((format(gnu_printf, 3, 4)));
typedef enum i40e_status_code i40e_status;
#endif /* _I40E_OSDEP_H_ */

View file

@@ -0,0 +1,92 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
#include "i40e_type.h"
#include "i40e_alloc.h"
#include "i40e_virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
i40e_status i40evf_init_adminq(struct i40e_hw *hw);
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
bool i40evf_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40evf_ptype_lookup[ptype];
}
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u8 *mac_addr, u16 ethtype, u16 flags,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */

File diff suppressed because it is too large.

View file

@@ -0,0 +1,100 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
/* Error Codes */
enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
I40E_ERR_LINK_SETUP = -8,
I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
I40E_ERR_MASTER_REQUESTS_PENDING = -12,
I40E_ERR_INVALID_LINK_SETTINGS = -13,
I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
I40E_ERR_RING_FULL = -20,
I40E_ERR_INVALID_PD_ID = -21,
I40E_ERR_INVALID_QP_ID = -22,
I40E_ERR_INVALID_CQ_ID = -23,
I40E_ERR_INVALID_CEQ_ID = -24,
I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
I40E_ERR_INVALID_ARP_INDEX = -27,
I40E_ERR_INVALID_FPM_FUNC_ID = -28,
I40E_ERR_QP_INVALID_MSG_SIZE = -29,
I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
I40E_ERR_INVALID_ALIGNMENT = -33,
I40E_ERR_FLUSHED_QUEUE = -34,
I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
I40E_ERR_OPCODE_MISMATCH = -38,
I40E_ERR_CQP_COMPL_ERROR = -39,
I40E_ERR_INVALID_VF_ID = -40,
I40E_ERR_INVALID_HMCFN_ID = -41,
I40E_ERR_BACKING_PAGE_ERROR = -42,
I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
};
#endif /* _I40E_STATUS_H_ */

Some files were not shown because too many files have changed in this diff.