Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,7 @@
#
# Chelsio T4 SR-IOV Virtual Function Driver
#
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o


@@ -0,0 +1,541 @@
/*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file should not be included directly. Include t4vf_common.h instead.
*/
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include "../cxgb4/t4_hw.h"
/*
* Constants of the implementation.
*/
enum {
MAX_NPORTS = 1, /* max # of "ports" */
MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */
MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,
/*
* MSI-X interrupt index usage.
*/
MSIX_FW = 0, /* MSI-X index for firmware Q */
MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
MSIX_EXTRAS = 1,
MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
/*
* The maximum number of Ingress and Egress Queues is determined by
* the maximum number of "Queue Sets" which we support plus any
* ancillary queues. Each "Queue Set" requires one Ingress Queue
* for RX Packet Ingress Event notifications and two Egress Queues for
* a Free List and an Ethernet TX list.
*/
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
MAX_EGRQ = MAX_ETH_QSETS*2,
};
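/* A quick sanity check on the arithmetic above: MAX_ETH_QSETS = 1 * 8 = 8 and
 * MSIX_ENTRIES = 8 + 1 = 9, i.e. at most nine MSI-X vectors per VF -- MSIX_FW
 * for the firmware event queue and, starting at MSIX_IQFLINT, one vector per
 * Ethernet "Queue Set".
 */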
/*
* Forward structure definition references.
*/
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;
/*
* Per-"port" information. This is really per-Virtual Interface information
* but the use of the "port" nomenclature makes it easier to go back and forth
* between the PF and VF drivers ...
*/
struct port_info {
struct adapter *adapter; /* our adapter */
u16 viid; /* virtual interface ID */
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
u8 port_id; /* physical port ID */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
struct link_config link_cfg; /* physical port configuration */
};
/*
* Scatter Gather Engine resources for the "adapter". Our ingress and egress
* queues are organized into "Queue Sets" with one ingress and one egress
* queue per Queue Set. These Queue Sets are apportionable between the "ports"
* (Virtual Interfaces). One extra ingress queue is used to receive
* asynchronous messages from the firmware. Note that the "Queue IDs" that we
* use here are really "Relative Queue IDs" which are returned as part of the
* firmware command to allocate queues. These queue IDs are relative to the
* absolute Queue ID base of the section of the Queue ID space allocated to
* the PF/VF.
*/
/*
* SGE free-list queue state.
*/
struct rx_sw_desc;
struct sge_fl {
unsigned int avail; /* # of available RX buffers */
unsigned int pend_cred; /* new buffers since last FL DB ring */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned long alloc_failed; /* # of buffer allocation failures */
unsigned long large_alloc_failed; /* # of large buffer allocation failures */
unsigned long starving; /* # of times FL was found starving */
/*
* Write-once/infrequently fields.
* -------------------------------
*/
unsigned int cntxt_id; /* SGE relative QID for the free list */
unsigned int abs_id; /* SGE absolute QID for the free list */
unsigned int size; /* capacity of free list */
struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
__be64 *desc; /* address of HW RX descriptor ring */
dma_addr_t addr; /* PCI bus address of hardware ring */
};
/*
* An ingress packet gather list.
*/
struct pkt_gl {
struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
};
typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
const struct pkt_gl *);
/*
* State for an SGE Response Queue.
*/
struct sge_rspq {
struct napi_struct napi; /* NAPI scheduling control */
const __be64 *cur_desc; /* current descriptor in queue */
unsigned int cidx; /* consumer index */
u8 gen; /* current generation bit */
u8 next_intr_params; /* holdoff params for next interrupt */
int offset; /* offset into current FL buffer */
unsigned int unhandled_irqs; /* bogus interrupts */
/*
* Write-once/infrequently fields.
* -------------------------------
*/
u8 intr_params; /* interrupt holdoff parameters */
u8 pktcnt_idx; /* interrupt packet threshold */
u8 idx; /* queue index within its group */
u16 cntxt_id; /* SGE rel QID for the response Q */
u16 abs_id; /* SGE abs QID for the response Q */
__be64 *desc; /* address of hardware response ring */
dma_addr_t phys_addr; /* PCI bus address of ring */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response Q */
struct adapter *adapter; /* our adapter */
struct net_device *netdev; /* associated net device */
rspq_handler_t handler; /* the handler for this response Q */
};
/*
* Ethernet queue statistics
*/
struct sge_eth_stats {
unsigned long pkts; /* # of ethernet packets */
unsigned long lro_pkts; /* # of LRO super packets */
unsigned long lro_merged; /* # of wire packets merged by LRO */
unsigned long rx_cso; /* # of Rx checksum offloads */
unsigned long vlan_ex; /* # of Rx VLAN extractions */
unsigned long rx_drops; /* # of packets dropped due to no mem */
};
/*
* State for an Ethernet Receive Queue.
*/
struct sge_eth_rxq {
struct sge_rspq rspq; /* Response Queue */
struct sge_fl fl; /* Free List */
struct sge_eth_stats stats; /* receive statistics */
};
/*
* SGE Transmit Queue state. This contains all of the resources associated
* with the hardware status of a TX Queue which is a circular ring of hardware
* TX Descriptors. For convenience, it also contains a pointer to a parallel
* "Software Descriptor" array but we don't know anything about it here other
* than its type name.
*/
struct tx_desc {
/*
* Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
* hardware: Sizes, Producer and Consumer indices, etc.
*/
__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
struct tx_sw_desc;
struct sge_txq {
unsigned int in_use; /* # of in-use TX descriptors */
unsigned int size; /* # of descriptors */
unsigned int cidx; /* SW consumer index */
unsigned int pidx; /* producer index */
unsigned long stops; /* # of times queue has been stopped */
unsigned long restarts; /* # of queue restarts */
/*
* Write-once/infrequently fields.
* -------------------------------
*/
unsigned int cntxt_id; /* SGE relative QID for the TX Q */
unsigned int abs_id; /* SGE absolute QID for the TX Q */
struct tx_desc *desc; /* address of HW TX descriptor ring */
struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */
struct sge_qstat *stat; /* queue status entry */
dma_addr_t phys_addr; /* PCI bus address of hardware ring */
};
/*
* State for an Ethernet Transmit Queue.
*/
struct sge_eth_txq {
struct sge_txq q; /* SGE TX Queue */
struct netdev_queue *txq; /* associated netdev TX queue */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of TX checksum offloads */
unsigned long vlan_ins; /* # of TX VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
};
/*
* The complete set of Scatter/Gather Engine resources.
*/
struct sge {
/*
* Our "Queue Sets" ...
*/
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
/*
* Extra ingress queues for asynchronous firmware events and
* forwarded interrupts (when in MSI mode).
*/
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
/*
* State for managing "starving Free Lists" -- Free Lists which have
* fallen below a certain threshold of buffers available to the
* hardware and attempts to refill them up to that threshold have
* failed. We have a regular "slow tick" timer process which will
* make periodic attempts to refill these starving Free Lists ...
*/
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
struct timer_list rx_timer;
/*
* State for cleaning up completed TX descriptors.
*/
struct timer_list tx_timer;
/*
* Write-once/infrequently fields.
* -------------------------------
*/
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
/* Decoded Adapter Parameters.
*/
u32 fl_pg_order; /* large page allocation size */
u32 stat_len; /* length of status page at ring end */
u32 pktshift; /* padding between CPL & packet data */
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
/*
* Reverse maps from Absolute Queue IDs to associated queue pointers.
* The absolute Queue IDs are in a compact range which start at a
* [potentially large] Base Queue ID. We perform the reverse map by
* first converting the Absolute Queue ID into a Relative Queue ID by
* subtracting off the Base Queue ID and then use a Relative Queue ID
* indexed table to get the pointer to the corresponding software
* queue structure.
*/
unsigned int egr_base;
unsigned int ingr_base;
void *egr_map[MAX_EGRQ];
struct sge_rspq *ingr_map[MAX_INGQ];
};
/*
* Utility macros to convert Absolute- to Relative-Queue indices and Egress-
* and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros which provide
* pointers to Ingress- and Egress-Queues can be used as both L- and R-values
*/
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))
#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
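/* A minimal sketch of how the reverse maps are used; the helper name is
 * purely illustrative and not part of the driver interface. Given an
 * absolute ingress Queue ID reported by the hardware, recover the software
 * response queue, or NULL if the ID is outside the range owned by this VF.
 */
static inline struct sge_rspq *abs_iqid_to_rspq(struct sge *s,
						unsigned int abs_id)
{
	if (IQ_IDX(s, abs_id) >= MAX_INGQ)
		return NULL;
	return IQ_MAP(s, abs_id);
}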
/*
* Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
*/
#define for_each_ethrxq(sge, iter) \
for (iter = 0; iter < (sge)->ethqsets; iter++)
/*
* Per-"adapter" (Virtual Function) information.
*/
struct adapter {
/* PCI resources */
void __iomem *regs;
struct pci_dev *pdev;
struct device *pdev_dev;
/* "adapter" resources */
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
struct adapter_params params;
/* queue and interrupt resources */
struct {
unsigned short vec;
char desc[22];
} msix_info[MSIX_ENTRIES];
struct sge sge;
/* Linux network device resources */
struct net_device *port[MAX_NPORTS];
const char *name;
unsigned int msg_enable;
/* debugfs resources */
struct dentry *debugfs_root;
/* various locks */
spinlock_t stats_lock;
};
enum { /* adapter flags */
FULL_INIT_DONE = (1UL << 0),
USING_MSI = (1UL << 1),
USING_MSIX = (1UL << 2),
QUEUES_BOUND = (1UL << 3),
};
/*
* The following register read/write routine definitions are required by
* the common code.
*/
/**
* t4_read_reg - read a HW register
* @adapter: the adapter
* @reg_addr: the register address
*
* Returns the 32-bit value of the given HW register.
*/
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
return readl(adapter->regs + reg_addr);
}
/**
* t4_write_reg - write a HW register
* @adapter: the adapter
* @reg_addr: the register address
* @val: the value to write
*
* Write a 32-bit value into the given HW register.
*/
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
writel(val, adapter->regs + reg_addr);
}
#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
return readl(addr) + ((u64)readl(addr + 4) << 32);
}
static inline void writeq(u64 val, volatile void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#endif
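/* Worked example for the fallback above: if the low word at addr reads
 * 0x11223344 and the high word at addr + 4 reads 0x55667788, readq()
 * returns 0x5566778811223344. Note that the two 32-bit accesses are not
 * atomic with respect to the hardware.
 */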
/**
* t4_read_reg64 - read a 64-bit HW register
* @adapter: the adapter
* @reg_addr: the register address
*
* Returns the 64-bit value of the given HW register.
*/
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
return readq(adapter->regs + reg_addr);
}
/**
* t4_write_reg64 - write a 64-bit HW register
* @adapter: the adapter
* @reg_addr: the register address
* @val: the value to write
*
* Write a 64-bit value into the given HW register.
*/
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
u64 val)
{
writeq(val, adapter->regs + reg_addr);
}
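/* Illustrative use of the accessors, assuming the VF register offsets from
 * t4vf_defs.h (e.g. reading the PL "Who Am I" register):
 *
 *	u32 whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI);
 */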
/**
* port_name - return the string name of a port
* @adapter: the adapter
* @pidx: the port index
*
* Return the string name of the selected port.
*/
static inline const char *port_name(struct adapter *adapter, int pidx)
{
return adapter->port[pidx]->name;
}
/**
* t4_os_set_hw_addr - store a port's MAC address in SW
* @adapter: the adapter
* @pidx: the port index
* @hw_addr: the Ethernet address
*
* Store the Ethernet address of the given port in SW. Called by the common
* code when it retrieves a port's Ethernet address from EEPROM.
*/
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
}
/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
* Return the struct port_info associated with a net_device
*/
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
return netdev_priv(dev);
}
/**
* adap2pinfo - return the port_info of a port
* @adap: the adapter
* @pidx: the port index
*
* Return the port_info structure for the adapter.
*/
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
return netdev_priv(adapter->port[pidx]);
}
/**
* netdev2adap - return the adapter structure associated with a net_device
* @dev: the netdev
*
* Return the struct adapter associated with a net_device
*/
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
return netdev2pinfo(dev)->adapter;
}
/*
* OS "Callback" function declarations. These are functions that the OS code
* is "contracted" to provide for the common code.
*/
void t4vf_os_link_changed(struct adapter *, int, int);
/*
* SGE function prototype declarations.
*/
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
struct net_device *, int,
struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
struct net_device *, struct netdev_queue *,
unsigned int);
void t4vf_free_sge_resources(struct adapter *);
int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
const struct pkt_gl *);
irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);
int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);
#endif /* __CXGB4VF_ADAPTER_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,313 @@
/*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4VF_COMMON_H__
#define __T4VF_COMMON_H__
#include "../cxgb4/t4fw_api.h"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
*
* V = "4" for T4; "5" for T5, etc. or
* = "a" for T4 FPGA; "b" for T4 FPGA, etc.
* F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
* PP = adapter product designation
*/
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
T4_FIRST_REV = T4_A1,
T4_LAST_REV = T4_A2,
T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
};
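/* Worked example: T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1) = (0x5 << 4) | 1
 * = 0x51, so CHELSIO_CHIP_VERSION(T5_A1) = 0x5 and
 * CHELSIO_CHIP_RELEASE(T5_A1) = 1.
 */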
/*
* The "len16" field of a Firmware Command Structure ...
*/
#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
/*
* Per-VF statistics.
*/
struct t4vf_port_stats {
/*
* TX statistics.
*/
u64 tx_bcast_bytes; /* broadcast */
u64 tx_bcast_frames;
u64 tx_mcast_bytes; /* multicast */
u64 tx_mcast_frames;
u64 tx_ucast_bytes; /* unicast */
u64 tx_ucast_frames;
u64 tx_drop_frames; /* TX dropped frames */
u64 tx_offload_bytes; /* offload */
u64 tx_offload_frames;
/*
* RX statistics.
*/
u64 rx_bcast_bytes; /* broadcast */
u64 rx_bcast_frames;
u64 rx_mcast_bytes; /* multicast */
u64 rx_mcast_frames;
u64 rx_ucast_bytes;
u64 rx_ucast_frames; /* unicast */
u64 rx_err_frames; /* RX error frames */
};
/*
* Per-"port" (Virtual Interface) link configuration ...
*/
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
};
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
/*
* General device parameters ...
*/
struct dev_params {
u32 fwrev; /* firmware version */
u32 tprev; /* TP Microcode Version */
};
/*
* Scatter Gather Engine parameters. These are almost all determined by the
* Physical Function Driver. We just need to grab them to see within which
* environment we're playing ...
*/
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
u32 sge_control2; /* T5: more of the same */
u32 sge_host_page_size; /* RDMA page sizes */
u32 sge_queues_per_page; /* RDMA queues/page */
u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
u32 sge_congestion_control; /* congestion thresholds, etc. */
u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
u32 sge_timer_value_2_and_3;
u32 sge_timer_value_4_and_5;
};
/*
* Vital Product Data parameters.
*/
struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
struct rss_params {
unsigned int mode; /* RSS mode */
union {
struct {
unsigned int synmapen:1; /* SYN Map Enable */
unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */
unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */
unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */
unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */
unsigned int ofdmapen:1; /* Offload Map Enable */
unsigned int tnlmapen:1; /* Tunnel Map Enable */
unsigned int tnlalllookup:1; /* Tunnel All Lookup */
unsigned int hashtoeplitz:1; /* use Toeplitz hash */
} basicvirtual;
} u;
};
/*
* Virtual Interface RSS Configuration in host-native format.
*/
union rss_vi_config {
struct {
u16 defaultq; /* Ingress Queue ID for !tnlalllookup */
unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */
unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */
unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */
unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */
int udpen; /* hash 4-tuple UDP ingress packets */
} basicvirtual;
};
/*
* Maximum resources provisioned for a PCI VF.
*/
struct vf_resources {
unsigned int nvi; /* N virtual interfaces */
unsigned int neq; /* N egress Qs */
unsigned int nethctrl; /* N egress ETH or CTRL Qs */
unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
unsigned int niq; /* N ingress Qs */
unsigned int tc; /* PCI-E traffic class */
unsigned int pmask; /* port access rights mask */
unsigned int nexactf; /* N exact MPS filters */
unsigned int r_caps; /* read capabilities */
unsigned int wx_caps; /* write/execute capabilities */
};
/*
* Per-"adapter" (Virtual Function) parameters.
*/
struct adapter_params {
struct dev_params dev; /* general device parameters */
struct sge_params sge; /* Scatter Gather Engine */
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; iter++)
static inline bool is_10g_port(const struct link_config *lc)
{
return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
{
return adapter->params.vpd.cclk / 1000;
}
static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
unsigned int us)
{
return (us * adapter->params.vpd.cclk) / 1000;
}
static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
unsigned int ticks)
{
return (ticks * 1000) / adapter->params.vpd.cclk;
}
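/* Worked example for the conversions above, assuming a hypothetical core
 * clock of 250000 KHz (250 MHz): core_ticks_per_usec() returns 250,
 * us_to_core_ticks(adapter, 4) returns 1000, and
 * core_ticks_to_us(adapter, 1000) returns 4.
 */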
int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
int size, void *rpl)
{
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
}
static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int size, void *rpl)
{
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
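/* A minimal sketch of issuing a firmware command through the wrappers above,
 * assuming a command structure "cmd" already built in big-endian form and a
 * matching reply buffer "rpl":
 *
 *	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
 *	if (ret)
 *		return ret;
 *
 * t4vf_wr_mbox_ns() is the non-sleeping variant for contexts that cannot
 * block.
 */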
static inline int is_t4(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}
int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
int t4vf_get_dev_params(struct adapter *);
int t4vf_get_rss_glb_config(struct adapter *);
int t4vf_get_vfres(struct adapter *);
int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
union rss_vi_config *);
int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
union rss_vi_config *);
int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
const u16 *, int);
int t4vf_alloc_vi(struct adapter *, int);
int t4vf_free_vi(struct adapter *, int);
int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
bool);
int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
const u8 **, u16 *, u64 *, bool);
int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
unsigned int);
int t4vf_eth_eq_free(struct adapter *, unsigned int);
int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
#endif /* __T4VF_COMMON_H__ */


@@ -0,0 +1,121 @@
/*
* This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
* driver for Linux.
*
* Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4VF_DEFS_H__
#define __T4VF_DEFS_H__
#include "../cxgb4/t4_regs.h"
/*
* The VF Register Map.
*
* The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
* bus module (PL) and CPU Interface Module (CIM) components are mapped via
* the Slice to Module Map Table (see below) in the Physical Function Register
* Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
* and Offset registers in the PF Register Map. The MBDATA base address is
* quite constrained as it determines the Mailbox Data addresses for both PFs
* and VFs, and therefore must fit in both the VF and PF Register Maps without
* overlapping other registers.
*/
#define T4VF_SGE_BASE_ADDR 0x0000
#define T4VF_MPS_BASE_ADDR 0x0100
#define T4VF_PL_BASE_ADDR 0x0200
#define T4VF_MBDATA_BASE_ADDR 0x0240
#define T4VF_CIM_BASE_ADDR 0x0300
#define T4VF_REGMAP_START 0x0000
#define T4VF_REGMAP_SIZE 0x0400
/*
* There's no hardware limitation which requires that the addresses of the
* Mailbox Data in the fixed CIM PF map and the programmable VF map must
* match. However, it's a useful convention ...
*/
#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
#endif
/*
* Virtual Function "Slice to Module Map Table" definitions.
*
* This table allows us to map subsets of the various module register sets
* into the T4VF Register Map. Each table entry identifies the index of the
* module whose registers are being mapped, the offset within the module's
* register set that the mapping should start at, the limit of the mapping,
* and the offset within the T4VF Register Map to which the module's registers
* are being mapped. All addresses and quantities are in terms of 32-bit
* words. The "limit" value is also in terms of 32-bit words and is equal to
* the last address mapped in the T4VF Register Map (i.e. it's a "<="
* relation rather than a "<").
*/
#define T4VF_MOD_MAP(module, index, first, last) \
T4VF_MOD_MAP_##module##_INDEX = (index), \
T4VF_MOD_MAP_##module##_FIRST = (first), \
T4VF_MOD_MAP_##module##_LAST = (last), \
T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
T4VF_MOD_MAP_##module##_BASE = \
(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
T4VF_MOD_MAP_##module##_LIMIT = \
(T4VF_##module##_BASE_ADDR/4 + (last)/4),
#define SGE_VF_KDOORBELL 0x0
#define SGE_VF_GTS 0x4
#define MPS_VF_CTL 0x0
#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
#define PL_VF_WHOAMI 0x0
#define CIM_VF_EXT_MAILBOX_CTRL 0x0
#define CIM_VF_EXT_MAILBOX_STATUS 0x4
enum {
T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
};
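/* Worked expansion of the SGE entry above: T4VF_MOD_MAP(SGE, 2,
 * SGE_VF_KDOORBELL, SGE_VF_GTS) defines T4VF_MOD_MAP_SGE_INDEX = 2,
 * _FIRST = 0x0, _LAST = 0x4, _OFFSET = 0, _BASE = 0x0000/4 + 0 = 0 and
 * _LIMIT = 0x0000/4 + 1 = 1, i.e. the VF's doorbell and GTS registers occupy
 * 32-bit words 0 and 1 of the VF Register Map.
 */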
/*
* There isn't a Slice to Module Map Table entry for the Mailbox Data
* registers, but it's convenient to use similar names as above. There are 8
* little-endian 64-bit Mailbox Data registers. Note that the "instances"
* value below is in terms of 32-bit words which matches the "word" addressing
* space we use above for the Slice to Module Map Space.
*/
#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
#define T4VF_MBDATA_FIRST 0
#define T4VF_MBDATA_LAST ((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
#endif /* __T4VF_DEFS_H__ */

File diff suppressed because it is too large