Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,23 @@
#
# Brocade device configuration
#
config NET_VENDOR_BROCADE
bool "Brocade devices"
default y
depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Brocade cards. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_BROCADE
source "drivers/net/ethernet/brocade/bna/Kconfig"
endif # NET_VENDOR_BROCADE

View file

@ -0,0 +1,5 @@
#
# Makefile for the Brocade device drivers.
#
obj-$(CONFIG_BNA) += bna/

View file

@ -0,0 +1,17 @@
#
# Brocade network device configuration
#
config BNA
tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
depends on PCI
---help---
This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
cards.
To compile this driver as a module, choose M here: the module
will be called bna.
For general information and support, go to the Brocade support
website at:
<http://support.brocade.com>

View file

@ -0,0 +1,12 @@
#
# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
# All rights reserved.
#

obj-$(CONFIG_BNA) += bna.o

bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o

# EXTRA_CFLAGS is deprecated in kbuild; use ccflags-y.  The include path
# must match the driver's current location under drivers/net/ethernet/.
ccflags-y := -Idrivers/net/ethernet/brocade/bna

View file

@ -0,0 +1,287 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include "bfa_cee.h"
#include "bfi_cna.h"
#include "bfa_ioc.h"
/* Forward declarations for the local byte-order fixup helpers. */
static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
static void bfa_cee_format_cee_cfg(void *buffer);

/**
 * bfa_cee_format_cee_cfg - Convert a raw CEE attribute buffer to host order
 * @buffer: DMA buffer holding a struct bfa_cee_attr as returned by the f/w
 *
 * Only the remote LLDP configuration is swapped here; the remaining
 * fields of struct bfa_cee_attr are byte-wide and need no conversion.
 */
static void
bfa_cee_format_cee_cfg(void *buffer)
{
	struct bfa_cee_attr *cee_cfg = buffer;

	bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
}
static void
bfa_cee_stats_swap(struct bfa_cee_stats *stats)
{
u32 *buffer = (u32 *)stats;
int i;
for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
i++) {
buffer[i] = ntohl(buffer[i]);
}
}
/*
 * bfa_cee_format_lldp_cfg - Swap the multi-byte LLDP fields (TTL and
 * enabled system capabilities) from network to host byte order.
 */
static void
bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
{
	lldp_cfg->time_to_live = ntohs(lldp_cfg->time_to_live);
	lldp_cfg->enabled_system_cap = ntohs(lldp_cfg->enabled_system_cap);
}
/**
 * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
 *
 * Return: sizeof(struct bfa_cee_attr) rounded up to the DMA alignment
 * boundary (BFA_DMA_ALIGN_SZ).
 */
static u32
bfa_cee_attr_meminfo(void)
{
	return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
}
/**
 * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
 *
 * Return: sizeof(struct bfa_cee_stats) rounded up to the DMA alignment
 * boundary (BFA_DMA_ALIGN_SZ).
 */
static u32
bfa_cee_stats_meminfo(void)
{
	return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
}
/**
 * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
 *
 * @cee: Pointer to the CEE module
 * @status: Return status from the f/w
 *
 * On success, copies the attributes out of the attribute DMA area into
 * the caller-supplied buffer and byte-swaps them to host order, then
 * clears the pending flag and runs the completion callback (if any).
 */
static void
bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
{
	cee->get_attr_status = status;
	if (status == BFA_STATUS_OK) {
		memcpy(cee->attr, cee->attr_dma.kva,
		    sizeof(struct bfa_cee_attr));
		bfa_cee_format_cee_cfg(cee->attr);
	}
	cee->get_attr_pending = false;
	if (cee->cbfn.get_attr_cbfn)
		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
}
/**
 * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w
 *
 * @cee: Pointer to the CEE module
 * @status: Return status from the f/w
 *
 * On success, copies the statistics out of the stats DMA area and
 * byte-swaps them to host order before running the completion callback.
 */
static void
bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
{
	cee->get_stats_status = status;
	if (status == BFA_STATUS_OK) {
		memcpy(cee->stats, cee->stats_dma.kva,
			sizeof(struct bfa_cee_stats));
		bfa_cee_stats_swap(cee->stats);
	}
	cee->get_stats_pending = false;
	if (cee->cbfn.get_stats_cbfn)
		cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
}
/**
 * bfa_cee_reset_stats_isr - CEE ISR for reset-stats responses from f/w
 *
 * @cee: Pointer to the CEE module
 * @status: Return status from the f/w
 *
 * Records the result, clears the pending flag and runs the completion
 * callback (if one was registered).
 */
static void
bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
{
	cee->reset_stats_status = status;
	cee->reset_stats_pending = false;
	if (cee->cbfn.reset_stats_cbfn)
		cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}
/**
 * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
 *
 * Return: combined size of the attribute and statistics DMA regions.
 */
u32
bfa_nw_cee_meminfo(void)
{
	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
}
/**
 * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory
 *
 * @cee: CEE module pointer
 * @dma_kva: Kernel Virtual Address of CEE DMA Memory
 * @dma_pa: Physical Address of CEE DMA Memory
 *
 * Carves the single DMA area into an attribute region followed by a
 * statistics region.
 */
void
bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
{
	u32 attr_sz = bfa_cee_attr_meminfo();

	cee->attr_dma.kva = dma_kva;
	cee->attr_dma.pa = dma_pa;
	cee->stats_dma.kva = dma_kva + attr_sz;
	cee->stats_dma.pa = dma_pa + attr_sz;
	cee->attr = (struct bfa_cee_attr *)dma_kva;
	cee->stats = (struct bfa_cee_stats *)(dma_kva + attr_sz);
}
/**
 * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
 *
 * @cee: Pointer to the CEE module data structure.
 * @attr: Caller buffer that receives the attributes on completion.
 * @cbfn: Completion callback invoked from the mailbox ISR.
 * @cbarg: Opaque argument passed back to @cbfn.
 *
 * Only one get-attr request may be outstanding at a time; a second
 * request while one is pending fails with BFA_STATUS_DEVBUSY.
 *
 * Return: BFA_STATUS_OK if the mailbox command was queued, otherwise
 * BFA_STATUS_IOC_FAILURE (IOC not operational) or BFA_STATUS_DEVBUSY.
 */
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
		    bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req *cmd;

	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
	if (!bfa_nw_ioc_is_operational(cee->ioc))
		return BFA_STATUS_IOC_FAILURE;
	if (cee->get_attr_pending)
		return BFA_STATUS_DEVBUSY;
	cee->get_attr_pending = true;
	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
	cee->attr = attr;
	cee->cbfn.get_attr_cbfn = cbfn;
	cee->cbfn.get_attr_cbarg = cbarg;
	/* Hand the attribute DMA address to the f/w and queue the command. */
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
		    bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
	bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
	return BFA_STATUS_OK;
}
/**
 * bfa_cee_isr - Handles Mail-box interrupts for CEE module.
 *
 * @cbarg: Registered callback argument; the CEE module pointer.
 * @m: Incoming mailbox message from the f/w.
 *
 * Dispatches the response to the matching per-request ISR.  An unknown
 * message id indicates a driver/firmware protocol mismatch (BUG_ON).
 */
static void
bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
{
	union bfi_cee_i2h_msg_u *msg;
	struct bfi_cee_get_rsp *get_rsp;
	struct bfa_cee *cee = (struct bfa_cee *) cbarg;

	msg = (union bfi_cee_i2h_msg_u *) m;
	get_rsp = (struct bfi_cee_get_rsp *) m;
	switch (msg->mh.msg_id) {
	case BFI_CEE_I2H_GET_CFG_RSP:
		bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
		break;
	case BFI_CEE_I2H_GET_STATS_RSP:
		bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
		break;
	case BFI_CEE_I2H_RESET_STATS_RSP:
		bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
		break;
	default:
		BUG_ON(1);
	}
}
/**
 * bfa_cee_notify - CEE module heart-beat failure handler.
 *
 * @arg: Notification argument; the CEE module pointer.
 * @event: IOC event type
 *
 * On IOC disable or failure, fails every outstanding request and runs
 * its completion callback with BFA_STATUS_FAILED.  All other IOC
 * events are ignored.
 */
static void
bfa_cee_notify(void *arg, enum bfa_ioc_event event)
{
	struct bfa_cee *cee;

	cee = (struct bfa_cee *) arg;
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (cee->get_attr_pending) {
			cee->get_attr_status = BFA_STATUS_FAILED;
			cee->get_attr_pending = false;
			if (cee->cbfn.get_attr_cbfn) {
				cee->cbfn.get_attr_cbfn(
					cee->cbfn.get_attr_cbarg,
					BFA_STATUS_FAILED);
			}
		}
		if (cee->get_stats_pending) {
			cee->get_stats_status = BFA_STATUS_FAILED;
			cee->get_stats_pending = false;
			if (cee->cbfn.get_stats_cbfn) {
				cee->cbfn.get_stats_cbfn(
					cee->cbfn.get_stats_cbarg,
					BFA_STATUS_FAILED);
			}
		}
		if (cee->reset_stats_pending) {
			cee->reset_stats_status = BFA_STATUS_FAILED;
			cee->reset_stats_pending = false;
			if (cee->cbfn.reset_stats_cbfn) {
				cee->cbfn.reset_stats_cbfn(
					cee->cbfn.reset_stats_cbarg,
					BFA_STATUS_FAILED);
			}
		}
		break;
	default:
		break;
	}
}
/**
 * bfa_nw_cee_attach - CEE module-attach API
 *
 * @cee: Pointer to the CEE module data structure
 * @ioc: Pointer to the ioc module data structure
 * @dev: Pointer to the device driver module data structure.
 *       The device driver specific mbox ISR functions have
 *       this pointer as one of the parameters.
 *
 * Registers the CEE mailbox ISR and the IOC event notifier with the
 * IOC so that CEE responses and IOC failures reach this module.
 */
void
bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
		void *dev)
{
	BUG_ON(!(cee != NULL));
	cee->dev = dev;
	cee->ioc = ioc;
	bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
	bfa_q_qe_init(&cee->ioc_notify);
	bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
	bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}

View file

@ -0,0 +1,65 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_CEE_H__
#define __BFA_CEE_H__
#include "bfa_defs_cna.h"
#include "bfa_ioc.h"
/* Completion callback prototypes; @dev is the caller-supplied cbarg. */
typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);

/* Per-request completion callbacks and their arguments. */
struct bfa_cee_cbfn {
	bfa_cee_get_attr_cbfn_t get_attr_cbfn;
	void *get_attr_cbarg;
	bfa_cee_get_stats_cbfn_t get_stats_cbfn;
	void *get_stats_cbarg;
	bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
	void *reset_stats_cbarg;
};

/* CEE module instance state. */
struct bfa_cee {
	void *dev;			/*!< driver/device back-pointer */
	bool get_attr_pending;		/*!< get-attr request in flight */
	bool get_stats_pending;		/*!< get-stats request in flight */
	bool reset_stats_pending;	/*!< reset-stats request in flight */
	enum bfa_status get_attr_status;	/*!< last get-attr result */
	enum bfa_status get_stats_status;	/*!< last get-stats result */
	enum bfa_status reset_stats_status;	/*!< last reset-stats result */
	struct bfa_cee_cbfn cbfn;	/*!< completion callbacks */
	struct bfa_ioc_notify ioc_notify;	/*!< IOC event notifier */
	struct bfa_cee_attr *attr;	/*!< destination buffer for attributes */
	struct bfa_cee_stats *stats;	/*!< destination buffer for statistics */
	struct bfa_dma attr_dma;	/*!< DMA region for attribute responses */
	struct bfa_dma stats_dma;	/*!< DMA region for statistics responses */
	struct bfa_ioc *ioc;		/*!< IOC this CEE module is bound to */
	struct bfa_mbox_cmd get_cfg_mb;	/*!< mailbox cmd: get configuration */
	struct bfa_mbox_cmd get_stats_mb;	/*!< mailbox cmd: get statistics */
	struct bfa_mbox_cmd reset_stats_mb;	/*!< mailbox cmd: reset statistics */
};
u32 bfa_nw_cee_meminfo(void);
void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
u64 dma_pa);
void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
struct bfa_cee_attr *attr,
bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
#endif /* __BFA_CEE_H__ */

View file

@ -0,0 +1,124 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/* BFA common services */
#ifndef __BFA_CS_H__
#define __BFA_CS_H__
#include "cna.h"
/* BFA state machine interfaces */
typedef void (*bfa_sm_t)(void *sm, int event);

/* oc - object class eg. bfa_ioc
 * st - state, eg. reset
 * otype - object type, eg. struct bfa_ioc
 * etype - object type, eg. enum ioc_event
 */
#define bfa_sm_state_decl(oc, st, otype, etype)			\
	static void oc ## _sm_ ## st(otype * fsm, etype event)

/* Set/dispatch/query/compare the current state.  Unlike the fsm
 * variants below, bfa_sm_set_state() runs no entry action.
 */
#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
#define bfa_sm_get_state(_sm)		((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
int state; /*!< state machine encoding */
char *name; /*!< state name for display */
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
*/
#define bfa_fsm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event); \
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
(_fsm)->fsm = (bfa_fsm_t)(_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
/* Look up the integer encoding of state-machine function @sm in table
 * @smt; the table is terminated by a NULL sm entry, whose .state is
 * returned when @sm is not found.
 */
static inline int
bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
{
	int idx;

	for (idx = 0; smt[idx].sm && smt[idx].sm != sm; idx++)
		;
	return smt[idx].state;
}
/* Generic wait counter: invokes wc_resume(wc_cbarg) when the count
 * drops to zero.
 */
typedef void (*bfa_wc_resume_t) (void *cbarg);

struct bfa_wc {
	bfa_wc_resume_t wc_resume;	/* fired when count hits zero */
	void		*wc_cbarg;	/* argument for wc_resume */
	int		wc_count;	/* outstanding references */
};

/* Take a reference on the wait counter. */
static inline void
bfa_wc_up(struct bfa_wc *wc)
{
	++wc->wc_count;
}

/* Drop a reference; fire the resume callback on the last one. */
static inline void
bfa_wc_down(struct bfa_wc *wc)
{
	if (--wc->wc_count == 0)
		wc->wc_resume(wc->wc_cbarg);
}

/* Initialize a waiting counter; starts holding one reference. */
static inline void
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
	wc->wc_resume = wc_resume;
	wc->wc_cbarg = wc_cbarg;
	wc->wc_count = 0;
	bfa_wc_up(wc);
}

/* Drop the initial reference and wait for the counter to reach zero. */
static inline void
bfa_wc_wait(struct bfa_wc *wc)
{
	bfa_wc_down(wc);
}
#endif /* __BFA_CS_H__ */

View file

@ -0,0 +1,295 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_DEFS_H__
#define __BFA_DEFS_H__
#include "cna.h"
#include "bfa_defs_status.h"
#include "bfa_defs_mfg_comm.h"
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
/* ---------------------- adapter definitions ------------ */
/* BFA adapter level attributes. */
enum {
BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
/*
*!< adapter serial num length
*/
BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
};
struct bfa_adapter_attr {
char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
u32 card_type;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
u64 pwwn;
char node_symname[FC_SYMNAME_MAX];
char hw_ver[BFA_VERSION_LEN];
char fw_ver[BFA_VERSION_LEN];
char optrom_ver[BFA_VERSION_LEN];
char os_type[BFA_ADAPTER_OS_TYPE_LEN];
struct bfa_mfg_vpd vpd;
struct mac mac;
u8 nports;
u8 max_speed;
u8 prototype;
char asic_rev;
u8 pcie_gen;
u8 pcie_lanes_orig;
u8 pcie_lanes;
u8 cna_capable;
u8 is_mezz;
u8 trunk_capable;
};
/* ---------------------- IOC definitions ------------ */
enum {
BFA_IOC_DRIVER_LEN = 16,
BFA_IOC_CHIP_REV_LEN = 8,
};
/* Driver and firmware versions. */
struct bfa_ioc_driver_attr {
char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
};
/* IOC PCI device attributes */
struct bfa_ioc_pci_attr {
u16 vendor_id; /*!< PCI vendor ID */
u16 device_id; /*!< PCI device ID */
u16 ssid; /*!< subsystem ID */
u16 ssvid; /*!< subsystem vendor ID */
u32 pcifn; /*!< PCI device function */
u32 rsvd; /* padding */
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
};
/* IOC states */
enum bfa_ioc_state {
BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
BFA_IOC_RESET = 2, /*!< IOC is in reset state */
BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */
BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */
BFA_IOC_GETATTR = 5, /*!< IOC is being configured */
BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */
BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */
BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */
BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */
BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
};
/* IOC firmware stats */
struct bfa_fw_ioc_stats {
u32 enable_reqs;
u32 disable_reqs;
u32 get_attr_reqs;
u32 dbg_sync;
u32 dbg_dump;
u32 unknown_reqs;
};
/* IOC driver stats */
struct bfa_ioc_drv_stats {
u32 ioc_isrs;
u32 ioc_enables;
u32 ioc_disables;
u32 ioc_hbfails;
u32 ioc_boots;
u32 stats_tmos;
u32 hb_count;
u32 disable_reqs;
u32 enable_reqs;
u32 disable_replies;
u32 enable_replies;
u32 rsvd;
};
/* IOC statistics */
struct bfa_ioc_stats {
struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
};
enum bfa_ioc_type {
BFA_IOC_TYPE_FC = 1,
BFA_IOC_TYPE_FCoE = 2,
BFA_IOC_TYPE_LL = 3,
};
/* IOC attributes returned in queries */
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
enum bfa_ioc_state state; /*!< IOC state */
struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
struct bfa_ioc_pci_attr pci_attr;
u8 port_id; /*!< port number */
u8 port_mode; /*!< enum bfa_mode */
u8 cap_bm; /*!< capability */
u8 port_mode_cfg; /*!< enum bfa_mode */
u8 def_fn; /*!< 1 if default fn */
u8 rsvd[3]; /*!< 64bit align */
};
/* Adapter capability mask definition */
enum {
BFA_CM_HBA = 0x01,
BFA_CM_CNA = 0x02,
BFA_CM_NIC = 0x04,
};
/* ---------------------- mfg definitions ------------ */
/* Checksum size */
#define BFA_MFG_CHKSUM_SIZE 16
#define BFA_MFG_PARTNUM_SIZE 14
#define BFA_MFG_SUPPLIER_ID_SIZE 10
#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
#pragma pack(1)
/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block {
u8 version; /* manufacturing block version */
u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
u16 mfgsize; /* mfg block size */
u16 u16_chksum; /* old u16 checksum */
char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
u8 mfg_day; /* manufacturing day */
u8 mfg_month; /* manufacturing month */
u16 mfg_year; /* manufacturing year */
u64 mfg_wwn; /* wwn base for this adapter */
u8 num_wwn; /* number of wwns assigned */
u8 mfg_speeds; /* speeds allowed for this adapter */
u8 rsv[2];
char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
mac_t mfg_mac; /* base mac address */
u8 num_mac; /* number of mac addresses */
u8 rsv2;
u32 card_type; /* card type */
char cap_nic; /* capability nic */
char cap_cna; /* capability cna */
char cap_hba; /* capability hba */
char cap_fc16g; /* capability fc 16g */
char cap_sriov; /* capability sriov */
char cap_mezz; /* capability mezz */
u8 rsv3;
u8 mfg_nports; /* number of ports */
char media[8]; /* xfi/xaui */
char initial_mode[8]; /* initial mode: hba/cna/nic */
u8 rsv4[84];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
/* ---------------------- pci definitions ------------ */
/*
* PCI device ID information
*/
enum {
BFA_PCI_DEVICE_ID_CT2 = 0x22,
};
#define bfa_asic_id_ct(device) \
((device) == PCI_DEVICE_ID_BROCADE_CT || \
(device) == PCI_DEVICE_ID_BROCADE_CT_FC)
#define bfa_asic_id_ct2(device) \
((device) == BFA_PCI_DEVICE_ID_CT2)
#define bfa_asic_id_ctc(device) \
(bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
/* PCI sub-system device and vendor ID information */
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
BFA_PCI_CT2_SSID_FCoE = 0x22,
BFA_PCI_CT2_SSID_ETH = 0x23,
BFA_PCI_CT2_SSID_FC = 0x24,
};
enum bfa_mode {
BFA_MODE_HBA = 1,
BFA_MODE_CNA = 2,
BFA_MODE_NIC = 3
};
/*
* Flash module specific
*/
#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
#define BFA_TOTAL_FLASH_SIZE 0x400000
#define BFA_FLASH_PART_FWIMG 2
#define BFA_FLASH_PART_MFG 7
/*
* flash partition attributes
*/
struct bfa_flash_part_attr {
u32 part_type; /* partition type */
u32 part_instance; /* partition instance */
u32 part_off; /* partition offset */
u32 part_size; /* partition size */
u32 part_len; /* partition content length */
u32 part_status; /* partition status */
char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
};
/*
* flash attributes
*/
struct bfa_flash_attr {
u32 status; /* flash overall status */
u32 npart; /* num of partitions */
struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
};
#endif /* __BFA_DEFS_H__ */

View file

@ -0,0 +1,220 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_DEFS_CNA_H__
#define __BFA_DEFS_CNA_H__
#include "bfa_defs.h"
/* FC physical port statistics. */
struct bfa_port_fc_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 tx_frames; /*!< Tx frames */
u64 tx_words; /*!< Tx words */
u64 tx_lip; /*!< Tx LIP */
u64 tx_nos; /*!< Tx NOS */
u64 tx_ols; /*!< Tx OLS */
u64 tx_lr; /*!< Tx LR */
u64 tx_lrr; /*!< Tx LRR */
u64 rx_frames; /*!< Rx frames */
u64 rx_words; /*!< Rx words */
u64 lip_count; /*!< Rx LIP */
u64 nos_count; /*!< Rx NOS */
u64 ols_count; /*!< Rx OLS */
u64 lr_count; /*!< Rx LR */
u64 lrr_count; /*!< Rx LRR */
u64 invalid_crcs; /*!< Rx CRC err frames */
u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
u64 undersized_frm; /*!< Rx undersized frames */
u64 oversized_frm; /*!< Rx oversized frames */
u64 bad_eof_frm; /*!< Rx frames with bad EOF */
u64 error_frames; /*!< Errored frames */
u64 dropped_frames; /*!< Dropped frames */
u64 link_failures; /*!< Link Failure (LF) count */
u64 loss_of_syncs; /*!< Loss of sync count */
u64 loss_of_signals; /*!< Loss of signal count */
u64 primseq_errs; /*!< Primitive sequence protocol err. */
u64 bad_os_count; /*!< Invalid ordered sets */
u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
u64 err_enc; /*!< Encoding err frame_8b10b */
u64 bbsc_frames_lost; /*!< Credit Recovery-Frames Lost */
u64 bbsc_credits_lost; /*!< Credit Recovery-Credits Lost */
u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
};
/* Eth Physical Port statistics. */
struct bfa_port_eth_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 frame_64; /*!< Frames 64 bytes */
u64 frame_65_127; /*!< Frames 65-127 bytes */
u64 frame_128_255; /*!< Frames 128-255 bytes */
u64 frame_256_511; /*!< Frames 256-511 bytes */
u64 frame_512_1023; /*!< Frames 512-1023 bytes */
u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
u64 tx_bytes; /*!< Tx bytes */
u64 tx_packets; /*!< Tx packets */
u64 tx_mcast_packets; /*!< Tx multicast packets */
u64 tx_bcast_packets; /*!< Tx broadcast packets */
u64 tx_control_frame; /*!< Tx control frame */
u64 tx_drop; /*!< Tx drops */
u64 tx_jabber; /*!< Tx jabber */
u64 tx_fcs_error; /*!< Tx FCS errors */
u64 tx_fragments; /*!< Tx fragments */
u64 rx_bytes; /*!< Rx bytes */
u64 rx_packets; /*!< Rx packets */
u64 rx_mcast_packets; /*!< Rx multicast packets */
u64 rx_bcast_packets; /*!< Rx broadcast packets */
u64 rx_control_frames; /*!< Rx control frames */
u64 rx_unknown_opcode; /*!< Rx unknown opcode */
u64 rx_drop; /*!< Rx drops */
u64 rx_jabber; /*!< Rx jabber */
u64 rx_fcs_error; /*!< Rx FCS errors */
u64 rx_alignment_error; /*!< Rx alignment errors */
u64 rx_frame_length_error; /*!< Rx frame len errors */
u64 rx_code_error; /*!< Rx code errors */
u64 rx_fragments; /*!< Rx fragments */
u64 rx_pause; /*!< Rx pause */
u64 rx_zero_pause; /*!< Rx zero pause */
u64 tx_pause; /*!< Tx pause */
u64 tx_zero_pause; /*!< Tx zero pause */
u64 rx_fcoe_pause; /*!< Rx FCoE pause */
u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
u64 tx_fcoe_pause; /*!< Tx FCoE pause */
u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
u64 rx_iscsi_pause; /*!< Rx iSCSI pause */
u64 rx_iscsi_zero_pause; /*!< Rx iSCSI zero pause */
u64 tx_iscsi_pause; /*!< Tx iSCSI pause */
u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
};
/* Port statistics. */
union bfa_port_stats_u {
struct bfa_port_fc_stats fc;
struct bfa_port_eth_stats eth;
};
#pragma pack(1)
#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
#define BFA_CEE_DCBX_MAX_PRIORITY (8)
#define BFA_CEE_DCBX_MAX_PGID (8)
#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
/* LLDP string type */
struct bfa_cee_lldp_str {
u8 sub_type;
u8 len;
u8 rsvd[2];
u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
};
/* LLDP parameters */
struct bfa_cee_lldp_cfg {
	struct bfa_cee_lldp_str chassis_id;
	struct bfa_cee_lldp_str port_id;
	struct bfa_cee_lldp_str port_desc;
	struct bfa_cee_lldp_str sys_name;
	struct bfa_cee_lldp_str sys_desc;
	struct bfa_cee_lldp_str mgmt_addr;
	u16 time_to_live;	/* big-endian from f/w; driver swaps (ntohs) */
	u16 enabled_system_cap;	/* big-endian from f/w; driver swaps (ntohs) */
};
enum bfa_cee_dcbx_version {
DCBX_PROTOCOL_PRECEE = 1,
DCBX_PROTOCOL_CEE = 2,
};
enum bfa_cee_lls {
/* LLS is down because the TLV not sent by the peer */
CEE_LLS_DOWN_NO_TLV = 0,
/* LLS is down as advertised by the peer */
CEE_LLS_DOWN = 1,
CEE_LLS_UP = 2,
};
/* CEE/DCBX parameters */
struct bfa_cee_dcbx_cfg {
	u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
	u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
	u8 pfc_primap; /* bitmap of priorities with PFC enabled */
	u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
	u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
	u8 dcbx_version; /* operating version: CEE or preCEE */
	u8 lls_fcoe; /* FCoE Logical Link Status */
	u8 lls_lan; /* LAN Logical Link Status */
	u8 rsvd[2];
};
/* CEE status */
/* Making this to tri-state for the benefit of port list command */
enum bfa_cee_status {
CEE_UP = 0,
CEE_PHY_UP = 1,
CEE_LOOPBACK = 2,
CEE_PHY_DOWN = 3,
};
/* CEE Query */
struct bfa_cee_attr {
u8 cee_status;
u8 error_reason;
struct bfa_cee_lldp_cfg lldp_remote;
struct bfa_cee_dcbx_cfg dcbx_remote;
mac_t src_mac;
u8 link_speed;
u8 nw_priority;
u8 filler[2];
};
/* LLDP/DCBX/CEE Statistics */
struct bfa_cee_stats {
u32 lldp_tx_frames; /*!< LLDP Tx Frames */
u32 lldp_rx_frames; /*!< LLDP Rx Frames */
u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
u32 cee_status_down; /*!< CEE status down */
u32 cee_status_up; /*!< CEE status up */
u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
};
#pragma pack()
#endif /* __BFA_DEFS_CNA_H__ */

View file

@ -0,0 +1,154 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_DEFS_MFG_COMM_H__
#define __BFA_DEFS_MFG_COMM_H__
#include "bfa_defs.h"
/* Manufacturing block version */
#define BFA_MFG_VERSION 3
#define BFA_MFG_VERSION_UNINIT 0xFF
/* Manufacturing block encrypted version */
#define BFA_MFG_ENC_VER 2
/* Manufacturing block version 1 length */
#define BFA_MFG_VER1_LEN 128
/* Manufacturing block header length */
#define BFA_MFG_HDR_LEN 4
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
/* Manufacturing card type */
enum {
BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
BFA_MFG_TYPE_PROWLER_F = 1560, /*!< Prowler FC only cards */
BFA_MFG_TYPE_PROWLER_N = 1410, /*!< Prowler NIC only cards */
BFA_MFG_TYPE_PROWLER_C = 1710, /*!< Prowler CNA only cards */
BFA_MFG_TYPE_PROWLER_D = 1860, /*!< Prowler Dual cards */
BFA_MFG_TYPE_CHINOOK = 1867, /*!< Chinook cards */
BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
};
#pragma pack(1)
/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE || \
(type) == BFA_MFG_TYPE_ASTRA || \
(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
(type) == BFA_MFG_TYPE_LIGHTNING || \
(type) == BFA_MFG_TYPE_CHINOOK))
enum {
CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
};
/* Decode the Crossbow GPIO strap value (gpio) into adapter properties
 * (prop) and a manufacturing card type (card_type).
 * Side effect: the prototype flag bit is cleared from (gpio) when set.
 */
#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop)	\
do {								\
	if ((gpio) & CB_GPIO_PROTO) {				\
		(prop) |= BFI_ADAPTER_PROTO;			\
		(gpio) &= ~CB_GPIO_PROTO;			\
	}							\
	switch ((gpio)) {					\
	case CB_GPIO_TTV:					\
		(prop) |= BFI_ADAPTER_TTV; /* fall through: TTV is 8G 2port */ \
	case CB_GPIO_DFLY:					\
	case CB_GPIO_FC8P2:					\
		(prop) |= BFI_ADAPTER_SETP(NPORTS, 2);		\
		(prop) |= BFI_ADAPTER_SETP(SPEED, 8);		\
		(card_type) = BFA_MFG_TYPE_FC8P2;		\
		break;						\
	case CB_GPIO_FC8P1:					\
		(prop) |= BFI_ADAPTER_SETP(NPORTS, 1);		\
		(prop) |= BFI_ADAPTER_SETP(SPEED, 8);		\
		(card_type) = BFA_MFG_TYPE_FC8P1;		\
		break;						\
	case CB_GPIO_FC4P2:					\
		(prop) |= BFI_ADAPTER_SETP(NPORTS, 2);		\
		(prop) |= BFI_ADAPTER_SETP(SPEED, 4);		\
		(card_type) = BFA_MFG_TYPE_FC4P2;		\
		break;						\
	case CB_GPIO_FC4P1:					\
		(prop) |= BFI_ADAPTER_SETP(NPORTS, 1);		\
		(prop) |= BFI_ADAPTER_SETP(SPEED, 4);		\
		(card_type) = BFA_MFG_TYPE_FC4P1;		\
		break;						\
	default:						\
		(prop) |= BFI_ADAPTER_UNSUPP;			\
		(card_type) = BFA_MFG_TYPE_INVALID;		\
	}							\
} while (0)
/* VPD data length */
#define BFA_MFG_VPD_LEN 512
#define BFA_MFG_VPD_LEN_INVALID 0
/* Byte offset of the version/vendor byte within the PCI VPD image */
#define BFA_MFG_VPD_PCI_HDR_OFF 137
#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
/* VPD vendor tag.
 * Low values are the driver's own vendor ids; the BFA_MFG_VPD_PCI_*
 * values are the raw encodings found in the PCI VPD byte (see the
 * VER/VDR masks above).
 */
enum {
	BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
	BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
	BFA_MFG_VPD_HP = 2, /*!< vendor HP */
	BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
	BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
	BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
	BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
	BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
/* BFA adapter flash vpd data definition.
 *
 * All numerical fields are in big-endian format.
 * Byte-packed (see the pack(1) pragma above); this mirrors the
 * on-flash layout exactly, so do not reorder fields.
 */
struct bfa_mfg_vpd {
	u8 version; /*!< vpd data version */
	u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
	u8 chksum; /*!< u8 checksum */
	u8 vendor; /*!< vendor */
	u8 len; /*!< vpd data length excluding header */
	u8 rsv;
	u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
};
#pragma pack()
#endif /* __BFA_DEFS_MFG_H__ */

View file

@ -0,0 +1,215 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_DEFS_STATUS_H__
#define __BFA_DEFS_STATUS_H__
/* API status return values
 *
 * NOTE: The error msgs are auto generated from the comments. Only single
 * line comments are supported
 */
/* NOTE(review): these values look like a stable external ABI shared with
 * firmware/management tools - avoid renumbering existing entries.
 */
enum bfa_status {
	BFA_STATUS_OK = 0,
	BFA_STATUS_FAILED = 1,
	BFA_STATUS_EINVAL = 2,
	BFA_STATUS_ENOMEM = 3,
	BFA_STATUS_ENOSYS = 4,
	BFA_STATUS_ETIMER = 5,
	BFA_STATUS_EPROTOCOL = 6,
	BFA_STATUS_ENOFCPORTS = 7,
	BFA_STATUS_NOFLASH = 8,
	BFA_STATUS_BADFLASH = 9,
	BFA_STATUS_SFP_UNSUPP = 10,
	BFA_STATUS_UNKNOWN_VFID = 11,
	BFA_STATUS_DATACORRUPTED = 12,
	BFA_STATUS_DEVBUSY = 13,
	BFA_STATUS_ABORTED = 14,
	BFA_STATUS_NODEV = 15,
	BFA_STATUS_HDMA_FAILED = 16,
	BFA_STATUS_FLASH_BAD_LEN = 17,
	BFA_STATUS_UNKNOWN_LWWN = 18,
	BFA_STATUS_UNKNOWN_RWWN = 19,
	BFA_STATUS_FCPT_LS_RJT = 20,
	BFA_STATUS_VPORT_EXISTS = 21,
	BFA_STATUS_VPORT_MAX = 22,
	BFA_STATUS_UNSUPP_SPEED = 23,
	BFA_STATUS_INVLD_DFSZ = 24,
	BFA_STATUS_CNFG_FAILED = 25,
	BFA_STATUS_CMD_NOTSUPP = 26,
	BFA_STATUS_NO_ADAPTER = 27,
	BFA_STATUS_LINKDOWN = 28,
	BFA_STATUS_FABRIC_RJT = 29,
	BFA_STATUS_UNKNOWN_VWWN = 30,
	BFA_STATUS_NSLOGIN_FAILED = 31,
	BFA_STATUS_NO_RPORTS = 32,
	BFA_STATUS_NSQUERY_FAILED = 33,
	BFA_STATUS_PORT_OFFLINE = 34,
	BFA_STATUS_RPORT_OFFLINE = 35,
	BFA_STATUS_TGTOPEN_FAILED = 36,
	BFA_STATUS_BAD_LUNS = 37,
	BFA_STATUS_IO_FAILURE = 38,
	BFA_STATUS_NO_FABRIC = 39,
	BFA_STATUS_EBADF = 40,
	BFA_STATUS_EINTR = 41,
	BFA_STATUS_EIO = 42,
	BFA_STATUS_ENOTTY = 43,
	BFA_STATUS_ENXIO = 44,
	BFA_STATUS_EFOPEN = 45,
	BFA_STATUS_VPORT_WWN_BP = 46,
	BFA_STATUS_PORT_NOT_DISABLED = 47,
	BFA_STATUS_BADFRMHDR = 48,
	BFA_STATUS_BADFRMSZ = 49,
	BFA_STATUS_MISSINGFRM = 50,
	BFA_STATUS_LINKTIMEOUT = 51,
	BFA_STATUS_NO_FCPIM_NEXUS = 52,
	BFA_STATUS_CHECKSUM_FAIL = 53,
	BFA_STATUS_GZME_FAILED = 54,
	BFA_STATUS_SCSISTART_REQD = 55,
	BFA_STATUS_IOC_FAILURE = 56,
	BFA_STATUS_INVALID_WWN = 57,
	BFA_STATUS_MISMATCH = 58,
	BFA_STATUS_IOC_ENABLED = 59,
	BFA_STATUS_ADAPTER_ENABLED = 60,
	BFA_STATUS_IOC_NON_OP = 61,
	BFA_STATUS_ADDR_MAP_FAILURE = 62,
	BFA_STATUS_SAME_NAME = 63,
	BFA_STATUS_PENDING = 64,
	BFA_STATUS_8G_SPD = 65,
	BFA_STATUS_4G_SPD = 66,
	BFA_STATUS_AD_IS_ENABLE = 67,
	BFA_STATUS_EINVAL_TOV = 68,
	BFA_STATUS_EINVAL_QDEPTH = 69,
	BFA_STATUS_VERSION_FAIL = 70,
	BFA_STATUS_DIAG_BUSY = 71,
	BFA_STATUS_BEACON_ON = 72,
	BFA_STATUS_BEACON_OFF = 73,
	BFA_STATUS_LBEACON_ON = 74,
	BFA_STATUS_LBEACON_OFF = 75,
	BFA_STATUS_PORT_NOT_INITED = 76,
	BFA_STATUS_RPSC_ENABLED = 77,
	BFA_STATUS_ENOFSAVE = 78,
	BFA_STATUS_BAD_FILE = 79,
	BFA_STATUS_RLIM_EN = 80,
	BFA_STATUS_RLIM_DIS = 81,
	BFA_STATUS_IOC_DISABLED = 82,
	BFA_STATUS_ADAPTER_DISABLED = 83,
	BFA_STATUS_BIOS_DISABLED = 84,
	BFA_STATUS_AUTH_ENABLED = 85,
	BFA_STATUS_AUTH_DISABLED = 86,
	BFA_STATUS_ERROR_TRL_ENABLED = 87,
	BFA_STATUS_ERROR_QOS_ENABLED = 88,
	BFA_STATUS_NO_SFP_DEV = 89,
	BFA_STATUS_MEMTEST_FAILED = 90,
	BFA_STATUS_INVALID_DEVID = 91,
	BFA_STATUS_QOS_ENABLED = 92,
	BFA_STATUS_QOS_DISABLED = 93,
	BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
	BFA_STATUS_REG_FAIL = 95,
	/* IM_*: NIC teaming / VLAN management error codes */
	BFA_STATUS_IM_INV_CODE = 96,
	BFA_STATUS_IM_INV_VLAN = 97,
	BFA_STATUS_IM_INV_ADAPT_NAME = 98,
	BFA_STATUS_IM_LOW_RESOURCES = 99,
	BFA_STATUS_IM_VLANID_IS_PVID = 100,
	BFA_STATUS_IM_VLANID_EXISTS = 101,
	BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
	BFA_STATUS_PORTLOG_ENABLED = 103,
	BFA_STATUS_PORTLOG_DISABLED = 104,
	BFA_STATUS_FILE_NOT_FOUND = 105,
	BFA_STATUS_QOS_FC_ONLY = 106,
	BFA_STATUS_RLIM_FC_ONLY = 107,
	BFA_STATUS_CT_SPD = 108,
	BFA_STATUS_LEDTEST_OP = 109,
	BFA_STATUS_CEE_NOT_DN = 110,
	BFA_STATUS_10G_SPD = 111,
	BFA_STATUS_IM_INV_TEAM_NAME = 112,
	BFA_STATUS_IM_DUP_TEAM_NAME = 113,
	BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
	BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
	BFA_STATUS_IM_PVID_MISMATCH = 116,
	BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
	BFA_STATUS_IM_MTU_MISMATCH = 118,
	BFA_STATUS_IM_RSS_MISMATCH = 119,
	BFA_STATUS_IM_HDS_MISMATCH = 120,
	BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
	BFA_STATUS_IM_PORT_PARAMS = 122,
	BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
	BFA_STATUS_IM_CANNOT_REM_PRI = 124,
	BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
	BFA_STATUS_IM_LAST_PORT_DELETE = 126,
	BFA_STATUS_IM_NO_DRIVER = 127,
	BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
	BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
	BFA_STATUS_NO_MINPORT_DRIVER = 130,
	BFA_STATUS_CARD_TYPE_MISMATCH = 131,
	BFA_STATUS_BAD_ASICBLK = 132,
	BFA_STATUS_NO_DRIVER = 133,
	BFA_STATUS_INVALID_MAC = 134,
	BFA_STATUS_IM_NO_VLAN = 135,
	BFA_STATUS_IM_ETH_LB_FAILED = 136,
	BFA_STATUS_IM_PVID_REMOVE = 137,
	BFA_STATUS_IM_PVID_EDIT = 138,
	BFA_STATUS_CNA_NO_BOOT = 139,
	BFA_STATUS_IM_PVID_NON_ZERO = 140,
	BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
	BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
	BFA_STATUS_IM_NOT_BOUND = 143,
	BFA_STATUS_INSUFFICIENT_PERMS = 144,
	BFA_STATUS_IM_INV_VLAN_NAME = 145,
	BFA_STATUS_CMD_NOTSUPP_CNA = 146,
	BFA_STATUS_IM_PASSTHRU_EDIT = 147,
	BFA_STATUS_IM_BIND_FAILED = 148,
	BFA_STATUS_IM_UNBIND_FAILED = 149,
	BFA_STATUS_IM_PORT_IN_TEAM = 150,
	BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
	BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
	BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
	BFA_STATUS_PBC = 154,
	BFA_STATUS_DEVID_MISSING = 155,
	BFA_STATUS_BAD_FWCFG = 156,
	BFA_STATUS_CREATE_FILE = 157,
	BFA_STATUS_INVALID_VENDOR = 158,
	BFA_STATUS_SFP_NOT_READY = 159,
	BFA_STATUS_FLASH_UNINIT = 160,
	BFA_STATUS_FLASH_EMPTY = 161,
	BFA_STATUS_FLASH_CKFAIL = 162,
	BFA_STATUS_TRUNK_UNSUPP = 163,
	BFA_STATUS_TRUNK_ENABLED = 164,
	BFA_STATUS_TRUNK_DISABLED = 165,
	BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
	BFA_STATUS_BOOT_CODE_UPDATED = 167,
	BFA_STATUS_BOOT_VERSION = 168,
	BFA_STATUS_CARDTYPE_MISSING = 169,
	BFA_STATUS_INVALID_CARDTYPE = 170,
	BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
	BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
	BFA_STATUS_ETHBOOT_ENABLED = 173,
	BFA_STATUS_ETHBOOT_DISABLED = 174,
	BFA_STATUS_IOPROFILE_OFF = 175,
	BFA_STATUS_NO_PORT_INSTANCE = 176,
	BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
	BFA_STATUS_NO_VPORT_LOCK = 178,
	BFA_STATUS_VPORT_NO_CNFG = 179,
	BFA_STATUS_MAX_VAL /* sentinel: one past the last valid status */
};
/* Sub-status for protocol-level errors in received responses */
enum bfa_eproto_status {
	BFA_EPROTO_BAD_ACCEPT = 0,
	BFA_EPROTO_UNKNOWN_RSP = 1
};
#endif /* __BFA_DEFS_STATUS_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,368 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_IOC_H__
#define __BFA_IOC_H__
#include "bfa_cs.h"
#include "bfi.h"
#include "cna.h"
/* IOC state machine / heartbeat / semaphore timeouts */
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_POLL_TOV 200 /* msecs */
/* Size of the saved firmware trace buffer (header + all entries) */
#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
	BFI_IOC_TRC_HDR_SZ)
/* PCI device information required by IOC */
struct bfa_pcidev {
	int pci_slot;
	u8 pci_func;
	u16 device_id;
	u16 ssid;	/* subsystem id */
	void __iomem *pci_bar_kva;	/* mapped BAR0, base for all registers */
};
/* Structure used to remember the DMA-able memory block's KVA and Physical
 * Address
 */
struct bfa_dma {
	void *kva; /* ! Kernel virtual address */
	u64 pa; /* ! Physical address */
};
#define BFA_DMA_ALIGN_SZ 256
/* smem size for Crossbow and Catapult */
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
/* BFA dma address assignment macro. (big endian format) */
#define bfa_dma_be_addr_set(dma_addr, pa) \
	__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
	/* htonl() takes a u32, so the first store keeps only the low 32
	 * bits of pa; the upper half goes into addr_hi. Both halves are
	 * stored in big-endian byte order for the firmware.
	 */
	dma_addr->a32.addr_lo = (u32) htonl(pa);
	dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
}
/* Fill a bfi_alen (address+length) descriptor, big-endian */
#define bfa_alen_set(__alen, __len, __pa)	\
	__bfa_alen_set(__alen, __len, (u64)__pa)
static inline void
__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
{
	alen->al_len = cpu_to_be32(len);
	bfa_dma_be_addr_set(alen->al_addr, pa);
}
/* Resolved register addresses for one IOC instance. Filled in by the
 * ASIC-specific ioc_reg_init handler (see struct bfa_ioc_hwif), so the
 * common code never needs per-ASIC offsets.
 */
struct bfa_ioc_regs {
	void __iomem *hfn_mbox_cmd;
	void __iomem *hfn_mbox;
	void __iomem *lpu_mbox_cmd;
	void __iomem *lpu_mbox;
	void __iomem *lpu_read_stat;
	void __iomem *pss_ctl_reg;
	void __iomem *pss_err_status_reg;
	void __iomem *app_pll_fast_ctl_reg;
	void __iomem *app_pll_slow_ctl_reg;
	void __iomem *ioc_sem_reg;
	void __iomem *ioc_usage_sem_reg;
	void __iomem *ioc_init_sem_reg;
	void __iomem *ioc_usage_reg;
	void __iomem *host_page_num_fn;
	void __iomem *heartbeat;
	void __iomem *ioc_fwstate;
	void __iomem *alt_ioc_fwstate;	/* fwstate of the partner IOC */
	void __iomem *ll_halt;
	void __iomem *alt_ll_halt;
	void __iomem *err_set;
	void __iomem *ioc_fail_sync;
	void __iomem *shirq_isr_next;
	void __iomem *shirq_msk_next;
	void __iomem *smem_page_start;
	u32 smem_pg0;
};
/* IOC Mailbox structures */
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
	struct list_head qe;	/* queue element on cmd_q */
	bfa_mbox_cmd_cbfn_t cbfn;	/* invoked when the command is posted */
	void *cbarg;
	u32 msg[BFI_IOC_MSGSZ];
};
/* IOC mailbox module */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
struct bfa_ioc_mbox_mod {
	struct list_head cmd_q; /*!< pending mbox queue */
	int nmclass; /*!< number of handlers */
	struct {
		bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
		void *cbarg;
	} mbhdlr[BFI_MC_MAX];	/* one handler slot per message class */
};
/* IOC callback function interfaces */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
struct bfa_ioc_cbfn {
	bfa_ioc_enable_cbfn_t enable_cbfn;
	bfa_ioc_disable_cbfn_t disable_cbfn;
	bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
	bfa_ioc_reset_cbfn_t reset_cbfn;
};
/* IOC event notification mechanism. */
enum bfa_ioc_event {
	BFA_IOC_E_ENABLED = 1,
	BFA_IOC_E_DISABLED = 2,
	BFA_IOC_E_FAILED = 3,
};
typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);
struct bfa_ioc_notify {
	struct list_head qe;	/* queue element on the IOC's notify_q */
	bfa_ioc_notify_cbfn_t cbfn;
	void *cbarg;
};
/* Initialize a IOC event notification structure */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {	\
	(__notify)->cbfn = (__cbfn);	\
	(__notify)->cbarg = (__cbarg);	\
} while (0)
/* Per-PCI-function state machine context, nested inside struct bfa_ioc */
struct bfa_iocpf {
	bfa_fsm_t fsm;
	struct bfa_ioc *ioc;	/* back pointer to owning IOC */
	bool fw_mismatch_notified;
	bool auto_recover;
	u32 poll_time;
};
/* Main IOC (I/O Controller) context: timers, register map, mailbox,
 * statistics and the ASIC-specific hardware-interface dispatch table.
 */
struct bfa_ioc {
	bfa_fsm_t fsm;
	struct bfa *bfa;
	struct bfa_pcidev pcidev;
	struct timer_list ioc_timer;
	struct timer_list iocpf_timer;
	struct timer_list sem_timer;
	struct timer_list hb_timer;	/* heartbeat poll timer */
	u32 hb_count;
	struct list_head notify_q;	/* registered bfa_ioc_notify entries */
	void *dbg_fwsave;	/* saved firmware trace buffer */
	int dbg_fwsave_len;
	bool dbg_fwsave_once;
	enum bfi_pcifn_class clscode;
	struct bfa_ioc_regs ioc_regs;
	struct bfa_ioc_drv_stats stats;
	bool fcmode;
	bool pllinit;
	bool stats_busy; /*!< outstanding stats */
	u8 port_id;
	struct bfa_dma attr_dma;
	struct bfi_ioc_attr *attr;
	struct bfa_ioc_cbfn *cbfn;
	struct bfa_ioc_mbox_mod mbox_mod;
	const struct bfa_ioc_hwif *ioc_hwif;	/* ASIC-specific ops */
	struct bfa_iocpf iocpf;
	enum bfi_asic_gen asic_gen;
	enum bfi_asic_mode asic_mode;
	enum bfi_port_mode port0_mode;
	enum bfi_port_mode port1_mode;
	enum bfa_mode port_mode;
	u8 ad_cap_bm; /*!< adapter cap bit mask */
	u8 port_mode_cfg; /*!< config port mode */
};
/* ASIC-specific hardware interface. Optional hooks (ioc_isr_mode_set,
 * ioc_lpu_read_stat) may be NULL; the dispatch macros below NULL-check
 * them before calling.
 */
struct bfa_ioc_hwif {
	enum bfa_status (*ioc_pll_init) (void __iomem *rb,
						enum bfi_asic_mode m);
	bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
	void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
	void (*ioc_reg_init) (struct bfa_ioc *ioc);
	void (*ioc_map_port) (struct bfa_ioc *ioc);
	void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
					bool msix);
	void (*ioc_notify_fail) (struct bfa_ioc *ioc);
	void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
	bool (*ioc_sync_start) (struct bfa_ioc *ioc);
	void (*ioc_sync_join) (struct bfa_ioc *ioc);
	void (*ioc_sync_leave) (struct bfa_ioc *ioc);
	void (*ioc_sync_ack) (struct bfa_ioc *ioc);
	bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
	bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
	void (*ioc_set_fwstate) (struct bfa_ioc *ioc,
					enum bfi_ioc_state fwstate);
	enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
	void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc,
					enum bfi_ioc_state fwstate);
	enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
};
/* Simple field accessors for struct bfa_ioc */
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
#define bfa_ioc_is_default(__ioc) \
	(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
#define bfa_ioc_fetch_stats(__ioc, __stats) \
	(((__stats)->drv_stats) = (__ioc)->stats)
#define bfa_ioc_clr_stats(__ioc) \
	memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc) \
	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
	((_ioc)->stats.hb_count = (_hb_count))
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \
	? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
/* Flash offset <-> chunk arithmetic. Arguments are parenthesized so
 * that expression arguments (e.g. "base + off") associate correctly.
 */
#define BFA_IOC_FLASH_CHUNK_NO(off) ((off) / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) ((off) % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
/* IOC mailbox interface */
bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
			struct bfa_mbox_cmd *cmd,
			bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
/* IOC interfaces - thin dispatch wrappers over the hwif ops table.
 * isr_mode_set and lpu_read_stat are optional hooks and are NULL-checked.
 */
#define bfa_ioc_pll_init_asic(__ioc) \
	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
			(__ioc)->asic_mode))
#define bfa_ioc_isr_mode_set(__ioc, __msix) do {		\
	if ((__ioc)->ioc_hwif->ioc_isr_mode_set)		\
		((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
} while (0)
#define bfa_ioc_ownership_reset(__ioc) \
	((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
#define bfa_ioc_lpu_read_stat(__ioc) do {	\
	if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)	\
		((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
} while (0)
/* hwif selection (CT = Catapult, CT2 = 2nd generation ASIC) */
void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc);
void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc);
/* IOC lifecycle */
void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
		struct bfa_ioc_cbfn *cbfn);
void bfa_nw_ioc_auto_recover(bool auto_recover);
void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		enum bfi_pcifn_class clscode);
u32 bfa_nw_ioc_meminfo(void);
void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
	struct bfa_ioc_notify *notify);
/* Hardware semaphore helpers (bfa_nw_ioc_sem_get spins until acquired) */
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *fwhdr);
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *fwhdr);
mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
 * Timeout APIs
 */
void bfa_nw_ioc_timeout(void *ioc);
void bfa_nw_ioc_hb_check(void *ioc);
void bfa_nw_iocpf_timeout(void *ioc);
void bfa_nw_iocpf_sem_timeout(void *ioc);
/*
 * F/W Image Size & Chunk
 */
u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
/*
 * Flash module specific
 */
typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
/* State for one asynchronous flash read/update operation */
struct bfa_flash {
	struct bfa_ioc *ioc;		/* back pointer to ioc */
	u32 type;			/* partition type */
	u8 instance;		/* partition instance */
	u8 rsv[3];
	u32 op_busy;		/* operation busy flag */
	u32 residue;		/* residual length */
	u32 offset;		/* offset */
	enum bfa_status status;		/* status */
	u8 *dbuf_kva;		/* dma buf virtual address */
	u64 dbuf_pa;		/* dma buf physical address */
	bfa_cb_flash cbfn;		/* user callback function */
	void *cbarg;		/* user callback arg */
	u8 *ubuf;			/* user supplied buffer */
	u32 addr_off;		/* partition address offset */
	struct bfa_mbox_cmd mb;		/* mailbox */
	struct bfa_ioc_notify ioc_notify;	/* ioc event notify */
};
enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
			struct bfa_flash_attr *attr,
			bfa_cb_flash cbfn, void *cbarg);
enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
			u32 type, u8 instance, void *buf, u32 len, u32 offset,
			bfa_cb_flash cbfn, void *cbarg);
enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
			u32 type, u8 instance, void *buf, u32 len, u32 offset,
			bfa_cb_flash cbfn, void *cbarg);
u32 bfa_nw_flash_meminfo(void);
void bfa_nw_flash_attach(struct bfa_flash *flash,
			 struct bfa_ioc *ioc, void *dev);
void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
#endif /* __BFA_IOC_H__ */

View file

@ -0,0 +1,944 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
/* The ioc_fail_sync register appears to be split into two 16-bit halves:
 * bits 15..0 hold the "sync acked" bitmask and bits 31..16 the "sync
 * required" bitmask, with one bit per PCI function in each half.
 */
#define bfa_ioc_ct_sync_pos(__ioc) \
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
/* Hardware-interface dispatch table for Catapult (CT) ASICs */
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct_reg_init,
	.ioc_map_port = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Dispatch table for CT2 ASICs. ioc_isr_mode_set is intentionally NULL
 * (the dispatch macro in bfa_ioc.h NULL-checks it); CT2 additionally
 * provides ioc_lpu_read_stat.
 */
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct2_reg_init,
	.ioc_map_port = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set = NULL,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}
/* Select the CT2 (2nd generation ASIC) hardware-interface table. */
void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
/* Return true if firmware of current driver matches the running firmware.
 * On success (shared-use allowed) the usage count register is incremented
 * (or initialized to 1 for the first user); on mismatch it is left as-is.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/* If bios boot (flash based) -- do not increment usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/* If usage count is 0, always return TRUE. */
	if (usecnt == 0) {
		/* First user: take ownership and clear the fail-sync state */
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* Use count cannot be non-zero and chip in uninitialized state. */
	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/* Check if another driver with a different firmware is active */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/* Same firmware version. Increment the reference count. */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}
/* Drop the shared firmware reference taken by bfa_ioc_ct_firmware_lock(). */
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/* If bios boot (flash based) -- do not decrement usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/* Decrement usage count under the usage semaphore */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(usecnt == 0);	/* unlock without a matching lock */
	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect: reading the registers back
	 * forces the posted writes out to the device.
	 */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}
/* Host to LPU mailbox message addresses, indexed by PCI function number */
static const struct {
	u32 hfn_mbox;
	u32 lpu_mbox;
	u32 hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/* Host <-> LPU mailbox command/status registers - port 0
 * (indexed by PCI function number)
 */
static const struct {
	u32 hfn;
	u32 lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32 hfn;
	u32 lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
/* CT2 register offsets, indexed by port id (one entry per port) */
static const struct {
	u32 hfn_mbox;
	u32 lpu_mbox;
	u32 hfn_pgn;
	u32 hfn;
	u32 lpu;
	u32 lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
/* Resolve all CT register addresses for this IOC into ioc->ioc_regs.
 * Mailbox registers are selected by PCI function; heartbeat/fwstate/halt
 * registers are selected by port id, with the "alt_" entries pointing at
 * the partner port's registers.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);
	rb = bfa_ioc_bar0(ioc);
	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}
	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/* CT2 variant of register-address resolution. Unlike CT, all mailbox
 * registers are selected by port id (see ct2_reg[]), and CT2 also has a
 * per-port lpu_read_stat register.
 */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);
	rb = bfa_ioc_bar0(ioc);
	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}
	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;
	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;
	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
/* Initialize IOC to port mapping. */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/* CT2: port id is a field of the host-function personality reg. */
	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	/* Extract this PCI function's interrupt-mode bits from the shared
	 * personality register.
	 */
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* Read-modify-write only this function's field. */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	writel(r32, rb + FNC_PERS_REG);
}
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 stat;

	/* Return true if the LPU read-status register is non-zero,
	 * acknowledging it by writing 1 back before returning.
	 */
	stat = readl(ioc->ioc_regs.lpu_read_stat);
	if (!stat)
		return false;

	writel(1, ioc->ioc_regs.lpu_read_stat);
	return true;
}
/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT 64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
#define __MSIX_VT_NUMVT__MK 0x003ff800
#define __MSIX_VT_NUMVT__SH 11
#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_ 0x000007ff
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		/* Vector count already programmed (presumably by an earlier
		 * init); only (re)write the mailbox-error vector index.
		 */
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	/* Program the default MSI-X vector count/offset for this PCI
	 * function, then the mailbox-error vector index.
	 */
	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	/* Zero the firmware use count under the usage semaphore. */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		/* Reset sync state, usage count, and both fw states. */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	/* Mark this PCI fn as requiring sync in the shared register. */
	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
		bfa_ioc_ct_sync_pos(ioc);

	/* Clear both this fn's sync-required and sync-ack bits. */
	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	/* Set this fn's sync-acknowledge bit. */
	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	/* No fn has acked yet -- nothing to wait for. */
	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* All participating fns have acked: clear acks and mark
		 * both fw states failed so reinit can proceed.
		 */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
/* Write this IOC's firmware state register. */
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}
/* Read this IOC's firmware state register. */
static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}
/* Write the peer IOC's firmware state register. */
static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}
/* Read the peer IOC's firmware state register. */
static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}
/* Initialize the CT ASIC PLLs and on-chip memory.
 * NOTE(review): the write/delay ordering below is a hardware bring-up
 * sequence; do not reorder statements.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	/* Compose s_clk and l_clk PLL control values. */
	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	/* Select FC vs FCoE/Ethernet operating mode. */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}

	/* Reset both IOC fw states and mask/clear host interrupts. */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* Release PLL resets, then enable the PLLs. */
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	/* Flush posted writes, then allow the PLLs to settle. */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* Drop the logic soft resets, keeping the PLLs enabled. */
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Release local memory reset. */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Run the memory BIST; result register is read but not checked. */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
/* Bring up the CT2 s_clk PLL; order of register writes is significant. */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropiately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
	       (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
	       (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	/* 0x1061731b: s_clk PLL configuration value -- magic constant,
	 * presumably from the ASIC datasheet; TODO confirm/name it.
	 */
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Dont do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
/* Bring up the CT2 l_clk PLL; order of register writes is significant. */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 * NOTE(review): read-then-write-back of the same value; presumably
	 * intentional (register side effects) -- confirm before changing.
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	/* 0x20c1731b: l_clk PLL configuration value -- magic constant,
	 * presumably from the ASIC datasheet; TODO confirm/name it.
	 */
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
/* Release CT2 local-memory reset and pulse the memory BIST. */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	/* Start then stop the eDRAM BIST; result is not checked here. */
	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
/* Re-init both PLLs and hold the port MACs/AHB in reset. */
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}
#define CT2_NFC_MAX_DELAY 1000
#define CT2_NFC_VER_VALID 0x143
#define BFA_IOC_PLL_POLL 1000000
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	/* True when the NFC controller reports itself halted. */
	return (readl(rb + CT2_NFC_CSR_SET_REG) &
		__NFC_CONTROLLER_HALTED) != 0;
}
/* Clear the NFC halt request and wait (up to CT2_NFC_MAX_DELAY ms)
 * for the controller to leave the halted state.
 */
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32 r32;	/* 'volatile' dropped: readl() already forces the access */
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	/* NFC never resumed -- unrecoverable; BUG() instead of BUG_ON(1). */
	BUG();
}
/* Initialize CT2 PLLs and memory. Two paths: if NFC firmware is present
 * and recent enough, delegate the PLL bring-up to it; otherwise halt the
 * NFC and program the PLLs directly. Register ordering is significant.
 */
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
	    (nfc_ver >= CT2_NFC_VER_VALID)) {
		/* NFC firmware path: resume it if halted, then ask it to
		 * reset and start both PLLs, polling for the request to be
		 * latched and then cleared.
		 */
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
		       rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		/* Direct path: halt the NFC controller ourselves, then
		 * reset the MACs and program both PLLs from the host.
		 */
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		/* Acknowledge any stale LPU->hostfn command status. */
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	/* Reset both IOC firmware states. */
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}

View file

@ -0,0 +1,667 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/* MSGQ module source file. */
#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"
/* Invoke a command entry's completion callback (if set) with _status,
 * clearing cbfn/cbarg first so the callback can never fire twice.
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
{									\
	bfa_msgq_cmdcbfn_t	cbfn;					\
	void			*cbarg;					\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
}
static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
enum cmdq_event {
CMDQ_E_START = 1,
CMDQ_E_STOP = 2,
CMDQ_E_FAIL = 3,
CMDQ_E_POST = 4,
CMDQ_E_INIT_RESP = 5,
CMDQ_E_DB_READY = 6,
};
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
enum cmdq_event);
/* Stopped-state entry: reset all queue indices/bookkeeping and fail
 * every command still waiting on the pending queue.
 */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
/* Stopped state: START begins init; a POST while stopped is remembered
 * via the DB_UPDATE flag so the doorbell fires once the queue is up.
 */
static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Init-wait entry: signal the msgq init wait-counter that the cmdq
 * side is ready for the INIT request to go out.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
/* Init-wait state: on INIT_RESP go ready (or straight to dbell_wait if
 * a POST arrived while waiting).
 */
static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Ready-state entry: nothing to do. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
/* Ready state: a POST rings the doorbell (dbell_wait); STOP/FAIL tears
 * the queue down.
 */
static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	if (event == CMDQ_E_POST)
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
	else if (event == CMDQ_E_STOP || event == CMDQ_E_FAIL)
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
	else
		bfa_sm_fault(event);
}
/* Dbell-wait entry: queue the producer-index doorbell to firmware. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
/* Dbell-wait state: POSTs arriving while the doorbell is in flight set
 * DB_UPDATE; when the doorbell completes, ring again or go ready.
 */
static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Mailbox completion callback: the doorbell write has been consumed. */
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}
/* Send the cmdq producer index to firmware via the mailbox; if the
 * mailbox was free (queue call returns false-ish), complete immediately.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
/* Copy a command into the circular command queue in
 * BFI_MSGQ_CMD_ENTRY_SIZE slices, advancing (and wrapping) the
 * producer index by one entry per slice. Caller must have verified
 * there is enough free space (BFA_MSGQ_FREE_CNT).
 */
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
		/* dead local 'num_entries' (written, never read) removed */
	}
}
/* Firmware advanced its consumer index: record it, then drain as many
 * pending commands as now fit. Commands must be posted in order, so the
 * walk stops at the first command that does not fit.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd =
		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
/* Mailbox completion for a copy-response chunk: send the next chunk if
 * firmware still expects more bytes.
 */
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}
/* Firmware asked us to re-send part of the command queue (offset/len);
 * set up the copy state and start streaming chunks back.
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
/* Send one BFI_CMD_COPY_SZ-bounded chunk of the command queue back to
 * firmware, advancing token/offset/bytes_to_copy; chains the next chunk
 * via bfa_msgq_cmdq_copy_next().
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
/* One-time cmdq setup: fixed depth, empty pending list, stopped FSM. */
static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
enum rspq_event {
RSPQ_E_START = 1,
RSPQ_E_STOP = 2,
RSPQ_E_FAIL = 3,
RSPQ_E_RESP = 4,
RSPQ_E_INIT_RESP = 5,
RSPQ_E_DB_READY = 6,
};
bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
enum rspq_event);
/* Stopped-state entry: reset response-queue indices and flags. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
/* Stopped state: only START transitions out; STOP/FAIL are no-ops. */
static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Init-wait entry: signal the msgq init wait-counter that the rspq
 * side is ready for the INIT request to go out.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
/* Init-wait state: INIT_RESP moves to ready; STOP/FAIL back to stopped. */
static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Ready-state entry: nothing to do. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
/* Ready state: a consumed response rings the CI doorbell (dbell_wait);
 * STOP/FAIL tears the queue down.
 */
static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	if (event == RSPQ_E_RESP)
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
	else if (event == RSPQ_E_STOP || event == RSPQ_E_FAIL)
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
	else
		bfa_sm_fault(event);
}
/* Dbell-wait entry: ring the CI doorbell unless the IOC is disabled. */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
/* Dbell-wait state: responses arriving while the doorbell is in flight
 * set DB_UPDATE; when it completes, ring again or go ready.
 */
static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Mailbox completion callback: the CI doorbell write has been consumed. */
static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}
/* Send the rspq consumer index to firmware via the mailbox; if the
 * mailbox was free, complete immediately.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
/* Firmware advanced its producer index: dispatch every queued response
 * to its per-message-class handler, advancing the consumer index by the
 * entry count of each message. Stops early on an unknown class or a
 * class with no registered handler.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	/* Always notify the FSM so the new consumer index is doorbelled. */
	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
/* One-time rspq setup: fixed depth, stopped FSM. */
static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}
/* INIT response from firmware: forward to both queue FSMs. */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
/* Wait-counter callback: both queues are ready, so send the msgq INIT
 * request (queue DMA addresses and depths) to firmware.
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
/* Mailbox ISR for the MSGQ message class: demultiplex i2h messages. */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}
/* IOC event notification: start both queues on enable (gated by the
 * init wait-counter so INIT goes out only after both are ready), stop
 * or fail them on the corresponding IOC events.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
/* Total DMA memory needed for both queues, each rounded up to the DMA
 * alignment boundary. Must stay in sync with bfa_msgq_memclaim().
 */
u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}
/* Carve the caller-provided DMA region (sized by bfa_msgq_meminfo())
 * into the cmdq area followed by the rspq area.
 */
void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}
/* Wire the msgq to an IOC: attach both queues, register the mailbox
 * ISR for the MSGQ class, and subscribe to IOC state notifications.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc    = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
/* Register a response handler for one message class (no unregister;
 * callers overwrite the slot).
 */
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn	= cbfn;
	msgq->rspq.rsphdlr[mc].cbarg	= cbarg;
}
/* Post a command: copy it into the queue immediately if it fits
 * (invoking its callback and ringing the doorbell), otherwise park it
 * on the pending queue for bfa_msgq_cmdq_ci_update() to drain.
 */
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq,  struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
/* Copy buf_len bytes of the current response out of the circular
 * response queue, starting at the (unmodified) consumer index and
 * wrapping in BFI_MSGQ_RSP_ENTRY_SIZE slices. Uses a local index so
 * the queue's own consumer_index is left untouched.
 */
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}

View file

@ -0,0 +1,130 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFA_MSGQ_H__
#define __BFA_MSGQ_H__
#include "bfa_defs.h"
#include "bfi.h"
#include "bfa_ioc.h"
#include "bfa_cs.h"
/* Free entries in a circular queue; the (depth - 1) mask requires the
 * queue depth to be a power of two (128 below satisfies this).
 */
#define BFA_MSGQ_FREE_CNT(_q)						\
	(((_q)->consumer_index - (_q)->producer_index - 1) & ((_q)->depth - 1))

/* Advance an index by _qe_num entries, wrapping at _q_depth. */
#define BFA_MSGQ_INDX_ADD(_q_indx, _qe_num, _q_depth)			\
	((_q_indx) = (((_q_indx)  + (_qe_num)) & ((_q_depth) - 1)))

#define BFA_MSGQ_CMDQ_NUM_ENTRY		128
#define BFA_MSGQ_CMDQ_SIZE						\
	(BFI_MSGQ_CMD_ENTRY_SIZE * BFA_MSGQ_CMDQ_NUM_ENTRY)

#define BFA_MSGQ_RSPQ_NUM_ENTRY		128
#define BFA_MSGQ_RSPQ_SIZE						\
	(BFI_MSGQ_RSP_ENTRY_SIZE * BFA_MSGQ_RSPQ_NUM_ENTRY)

/* Fill in a command entry's callback and message fields before posting. */
#define bfa_msgq_cmd_set(_cmd, _cbfn, _cbarg, _msg_size, _msg_hdr)	\
	do {								\
		(_cmd)->cbfn = (_cbfn);					\
		(_cmd)->cbarg = (_cbarg);				\
		(_cmd)->msg_size = (_msg_size);				\
		(_cmd)->msg_hdr = (_msg_hdr);				\
	} while (0)
struct bfa_msgq;

/* Completion callback invoked when a posted command has been copied
 * into the command queue.
 */
typedef void (*bfa_msgq_cmdcbfn_t)(void *cbarg, enum bfa_status status);

/* One host-to-firmware command awaiting (or undergoing) posting. */
struct bfa_msgq_cmd_entry {
	struct list_head				qe;
	bfa_msgq_cmdcbfn_t				cbfn;
	void						*cbarg;
	size_t						msg_size;
	struct bfi_msgq_mhdr *msg_hdr;
};

enum bfa_msgq_cmdq_flags {
	BFA_MSGQ_CMDQ_F_DB_UPDATE	= 1,	/* doorbell pending while one is in flight */
};

/* Host-to-firmware command queue (producer: host). */
struct bfa_msgq_cmdq {
	bfa_fsm_t			fsm;
	enum bfa_msgq_cmdq_flags flags;

	u16			producer_index;
	u16			consumer_index;
	u16			depth; /* FW Q depth is 16 bits */
	struct bfa_dma addr;
	struct bfa_mbox_cmd dbell_mb;

	/* State for re-sending queue contents on a firmware copy request. */
	u16			token;
	int			offset;
	int			bytes_to_copy;
	struct bfa_mbox_cmd copy_mb;

	struct list_head		pending_q; /* pending command queue */

	struct bfa_msgq *msgq;
};

enum bfa_msgq_rspq_flags {
	BFA_MSGQ_RSPQ_F_DB_UPDATE	= 1,	/* doorbell pending while one is in flight */
};

/* Per-message-class response handler. */
typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);

/* Firmware-to-host response queue (producer: firmware). */
struct bfa_msgq_rspq {
	bfa_fsm_t			fsm;
	enum bfa_msgq_rspq_flags flags;

	u16			producer_index;
	u16			consumer_index;
	u16			depth; /* FW Q depth is 16 bits */
	struct bfa_dma addr;
	struct bfa_mbox_cmd dbell_mb;

	int			nmclass;
	struct {
		bfa_msgq_mcfunc_t	cbfn;
		void			*cbarg;
	} rsphdlr[BFI_MC_MAX];

	struct bfa_msgq *msgq;
};

/* Top-level message-queue pair bound to one IOC. */
struct bfa_msgq {
	struct bfa_msgq_cmdq cmdq;
	struct bfa_msgq_rspq rspq;

	struct bfa_wc			init_wc;	/* gates the INIT request */
	struct bfa_mbox_cmd init_mb;

	struct bfa_ioc_notify ioc_notify;
	struct bfa_ioc *ioc;
};

u32 bfa_msgq_meminfo(void);
void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa);
void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc);
void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		     bfa_msgq_mcfunc_t cbfn, void *cbarg);
void bfa_msgq_cmd_post(struct bfa_msgq *msgq,
		       struct bfa_msgq_cmd_entry *cmd);
void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len);

#endif

View file

@ -0,0 +1,570 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFI_H__
#define __BFI_H__
#include "bfa_defs.h"
#pragma pack(1)
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
#define BFI_FLASH_IMAGE_SZ 0x100000
/* Msg header common to all msgs */
struct bfi_mhdr {
	u8		msg_class;	/*!< @ref enum bfi_mclass	    */
	u8		msg_id;		/*!< msg opcode with in the class   */
	union {
		struct {
			u8	qid;
			u8	fn_lpu;	/*!< msg destination		    */
		} h2i;
		u16	i2htok;	/*!< token in msgs to host	    */
	} mtag;
};
/* fn_lpu packs PCI function (upper 7 bits) and LPU id (low bit). */
#define bfi_fn_lpu(__fn, __lpu)	((__fn) << 1 | (__lpu))
#define bfi_mhdr_2_fn(_mh)	((_mh)->mtag.h2i.fn_lpu >> 1)
#define bfi_mhdr_2_qid(_mh)	((_mh)->mtag.h2i.qid)
/* Fill a host-to-IOC message header. */
#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {		\
	(_mh).msg_class			= (_mc);		\
	(_mh).msg_id			= (_op);		\
	(_mh).mtag.h2i.fn_lpu	= (_fn_lpu);			\
} while (0)
/* Fill an IOC-to-host message header. */
#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {		\
	(_mh).msg_class			= (_mc);		\
	(_mh).msg_id			= (_op);		\
	(_mh).mtag.i2htok		= (_i2htok);		\
} while (0)
/*
 * Message opcodes: 0-127 to firmware, 128-255 to host
 */
#define BFI_I2H_OPCODE_BASE	128
#define BFA_I2HM(_x)		((_x) + BFI_I2H_OPCODE_BASE)
/****************************************************************************
 *
 * Scatter Gather Element and Page definition
 *
 ****************************************************************************
 */
/* DMA addresses */
union bfi_addr_u {
	struct {
		u32	addr_lo;
		u32	addr_hi;
	} a32;
};
/* Generic DMA addr-len pair. */
struct bfi_alen {
	union bfi_addr_u	al_addr;	/* DMA addr of buffer	*/
	u32			al_len;		/* length of buffer	*/
};
/*
 * Large Message structure - 128 Bytes size Msgs
 */
#define BFI_LMSG_SZ		128
#define BFI_LMSG_PL_WSZ	\
			((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
/* Mailbox message structure */
#define BFI_MBMSG_SZ		7
struct bfi_mbmsg {
	struct bfi_mhdr mh;
	u32		pl[BFI_MBMSG_SZ];
};
/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
	BFI_PCIFN_CLASS_FC	= 0x0c04,
	BFI_PCIFN_CLASS_ETH	= 0x0200,
};
/* Message Classes */
enum bfi_mclass {
	BFI_MC_IOC		= 1,	/*!< IO Controller (IOC)	    */
	BFI_MC_DIAG		= 2,	/*!< Diagnostic Msgs		    */
	BFI_MC_FLASH		= 3,	/*!< Flash message class	    */
	BFI_MC_CEE		= 4,	/*!< CEE			    */
	BFI_MC_FCPORT		= 5,	/*!< FC port			    */
	BFI_MC_IOCFC		= 6,	/*!< FC - IO Controller (IOC)	    */
	BFI_MC_LL		= 7,	/*!< Link Layer			    */
	BFI_MC_UF		= 8,	/*!< Unsolicited frame receive	    */
	BFI_MC_FCXP		= 9,	/*!< FC Transport		    */
	BFI_MC_LPS		= 10,	/*!< lport fc login services	    */
	BFI_MC_RPORT		= 11,	/*!< Remote port		    */
	BFI_MC_ITNIM		= 12,	/*!< I-T nexus (Initiator mode)	    */
	BFI_MC_IOIM_READ	= 13,	/*!< read IO (Initiator mode)	    */
	BFI_MC_IOIM_WRITE	= 14,	/*!< write IO (Initiator mode)	    */
	BFI_MC_IOIM_IO		= 15,	/*!< IO (Initiator mode)	    */
	BFI_MC_IOIM		= 16,	/*!< IO (Initiator mode)	    */
	BFI_MC_IOIM_IOCOM	= 17,	/*!< good IO completion		    */
	BFI_MC_TSKIM		= 18,	/*!< Initiator Task management	    */
	BFI_MC_SBOOT		= 19,	/*!< SAN boot services		    */
	BFI_MC_IPFC		= 20,	/*!< IP over FC Msgs		    */
	BFI_MC_PORT		= 21,	/*!< Physical port		    */
	BFI_MC_SFP		= 22,	/*!< SFP module			    */
	BFI_MC_MSGQ		= 23,	/*!< MSGQ			    */
	BFI_MC_ENET		= 24,	/*!< ENET commands/responses	    */
	BFI_MC_PHY		= 25,	/*!< External PHY message class	    */
	BFI_MC_NBOOT		= 26,	/*!< Network Boot		    */
	BFI_MC_TIO_READ		= 27,	/*!< read IO (Target mode)	    */
	BFI_MC_TIO_WRITE	= 28,	/*!< write IO (Target mode)	    */
	BFI_MC_TIO_DATA_XFERED	= 29,	/*!< ds transferred (target mode)   */
	BFI_MC_TIO_IO		= 30,	/*!< IO (Target mode)		    */
	BFI_MC_TIO		= 31,	/*!< IO (target mode)		    */
	BFI_MC_MFG		= 32,	/*!< MFG/ASIC block commands	    */
	BFI_MC_EDMA		= 33,	/*!< EDMA copy commands		    */
	BFI_MC_MAX		= 34
};
#define BFI_IOC_MSGLEN_MAX	32	/* 32 bytes */
#define BFI_FWBOOT_ENV_OS 0
/*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
*/
/* Different asic generations */
enum bfi_asic_gen {
	BFI_ASIC_GEN_CB		= 1,	/* crossbow 8G FC		*/
	BFI_ASIC_GEN_CT		= 2,	/* catapult 8G FC or 10G CNA	*/
	BFI_ASIC_GEN_CT2	= 3,	/* catapult-2 16G FC or 10G CNA	*/
};
enum bfi_asic_mode {
	BFI_ASIC_MODE_FC	= 1,	/* FC upto 8G speed		*/
	BFI_ASIC_MODE_FC16	= 2,	/* FC upto 16G speed		*/
	BFI_ASIC_MODE_ETH	= 3,	/* Ethernet ports		*/
	BFI_ASIC_MODE_COMBO	= 4,	/* FC 16G and Ethernet 10G port	*/
};
enum bfi_ioc_h2i_msgs {
	BFI_IOC_H2I_ENABLE_REQ		= 1,
	BFI_IOC_H2I_DISABLE_REQ		= 2,
	BFI_IOC_H2I_GETATTR_REQ		= 3,
	BFI_IOC_H2I_DBG_SYNC		= 4,
	BFI_IOC_H2I_DBG_DUMP		= 5,
};
enum bfi_ioc_i2h_msgs {
	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
	BFI_IOC_I2H_DISABLE_REPLY	= BFA_I2HM(2),
	BFI_IOC_I2H_GETATTR_REPLY	= BFA_I2HM(3),
	BFI_IOC_I2H_HBEAT		= BFA_I2HM(4),
};
/* BFI_IOC_H2I_GETATTR_REQ message */
struct bfi_ioc_getattr_req {
	struct bfi_mhdr mh;
	union bfi_addr_u	attr_addr;	/* DMA addr fw writes attrs to */
};
/* Adapter attributes returned by firmware via DMA (see getattr_req). */
struct bfi_ioc_attr {
	u64		mfg_pwwn;	/*!< Mfg port wwn	   */
	u64		mfg_nwwn;	/*!< Mfg node wwn	   */
	mac_t		mfg_mac;	/*!< Mfg mac		   */
	u8		port_mode;	/* enum bfi_port_mode	   */
	u8		rsvd_a;
	u64		pwwn;
	u64		nwwn;
	mac_t		mac;		/*!< PBC or Mfg mac	   */
	u16		rsvd_b;
	mac_t		fcoe_mac;
	u16		rsvd_c;
	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
	u8		pcie_gen;
	u8		pcie_lanes_orig;
	u8		pcie_lanes;
	u8		rx_bbcredit;	/*!< receive buffer credits */
	u32		adapter_prop;	/*!< adapter properties     */
	u16		maxfrsize;	/*!< max receive frame size */
	char		asic_rev;
	u8		rsvd_d;
	char		fw_version[BFA_VERSION_LEN];
	char		optrom_version[BFA_VERSION_LEN];
	struct bfa_mfg_vpd	vpd;
	u32		card_type;	/*!< card type		   */
};
/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
	struct bfi_mhdr mh;	/*!< Common msg header		*/
	u8		status;	/*!< cfg reply status		*/
	u8		rsvd[3];
};
/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB	(0x40)
#define BFI_IOC_SMEM_PG0_CT	(0x180)
/* Firmware statistic offset */
#define BFI_IOC_FWSTATS_OFF	(0x6B40)
#define BFI_IOC_FWSTATS_SZ	(4096)
/* Firmware trace offset */
#define BFI_IOC_TRC_OFF		(0x4b00)
#define BFI_IOC_TRC_ENTS	256
#define BFI_IOC_TRC_ENT_SZ	16
#define BFI_IOC_TRC_HDR_SZ	32
#define BFI_IOC_FW_SIGNATURE	(0xbfadbfad)
#define BFI_IOC_FW_INV_SIGN	(0xdeaddead)
#define BFI_IOC_MD5SUM_SZ	4
/* Firmware version; byte order depends on host endianness so that the
 * on-flash layout is the same either way.
 */
struct bfi_ioc_fwver {
#ifdef __BIG_ENDIAN
	u8 patch;
	u8 maint;
	u8 minor;
	u8 major;
	u8 rsvd[2];
	u8 build;
	u8 phase;
#else
	u8 major;
	u8 minor;
	u8 maint;
	u8 patch;
	u8 phase;
	u8 build;
	u8 rsvd[2];
#endif
};
struct bfi_ioc_image_hdr {
	u32	signature;	/*!< constant signature		*/
	u8	asic_gen;	/*!< asic generation		*/
	u8	asic_mode;
	u8	port0_mode;	/*!< device mode for port 0	*/
	u8	port1_mode;	/*!< device mode for port 1	*/
	u32	exec;		/*!< exec vector		*/
	u32	bootenv;	/*!< firmware boot env		*/
	u32	rsvd_b[2];
	struct bfi_ioc_fwver	fwver;
	u32	md5sum[BFI_IOC_MD5SUM_SZ];
};
/* Result of comparing a flash image version against the running one. */
enum bfi_ioc_img_ver_cmp {
	BFI_IOC_IMG_VER_INCOMP,
	BFI_IOC_IMG_VER_OLD,
	BFI_IOC_IMG_VER_SAME,
	BFI_IOC_IMG_VER_BETTER
};
#define BFI_FWBOOT_DEVMODE_OFF		4
#define BFI_FWBOOT_TYPE_OFF		8
#define BFI_FWBOOT_ENV_OFF		12
/* Pack asic gen/mode and per-port modes into one 32-bit devmode word. */
#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
	(((u32)(__asic_gen)) << 24 |		\
	 ((u32)(__asic_mode)) << 16 |		\
	 ((u32)(__p0_mode)) << 8 |		\
	 ((u32)(__p1_mode)))
enum bfi_fwboot_type {
	BFI_FWBOOT_TYPE_NORMAL	= 0,
	BFI_FWBOOT_TYPE_FLASH	= 1,
	BFI_FWBOOT_TYPE_MEMTEST	= 2,
};
enum bfi_port_mode {
	BFI_PORT_MODE_FC	= 1,
	BFI_PORT_MODE_ETH	= 2,
};
struct bfi_ioc_hbeat {
	struct bfi_mhdr mh;		/*!< common msg header		*/
	u32	   hb_count;	/*!< current heart beat count	*/
};
/* IOC hardware/firmware state */
enum bfi_ioc_state {
	BFI_IOC_UNINIT		= 0,	/*!< not initialized		     */
	BFI_IOC_INITING		= 1,	/*!< h/w is being initialized	     */
	BFI_IOC_HWINIT		= 2,	/*!< h/w is initialized		     */
	BFI_IOC_CFG		= 3,	/*!< IOC configuration in progress   */
	BFI_IOC_OP		= 4,	/*!< IOC is operational		     */
	BFI_IOC_DISABLING	= 5,	/*!< IOC is being disabled	     */
	BFI_IOC_DISABLED	= 6,	/*!< IOC is disabled		     */
	BFI_IOC_CFG_DISABLED	= 7,	/*!< IOC is being disabled;transient */
	BFI_IOC_FAIL		= 8,	/*!< IOC heart-beat failure	     */
	BFI_IOC_MEMTEST		= 9,	/*!< IOC is doing memtest	     */
};
#define BFI_IOC_ENDIAN_SIG  0x12345678
/* Bit layout of bfi_ioc_attr.adapter_prop. */
enum {
	BFI_ADAPTER_TYPE_FC	= 0x01,		/*!< FC adapters	   */
	BFI_ADAPTER_TYPE_MK	= 0x0f0000,	/*!< adapter type mask     */
	BFI_ADAPTER_TYPE_SH	= 16,		/*!< adapter type shift    */
	BFI_ADAPTER_NPORTS_MK	= 0xff00,	/*!< number of ports mask  */
	BFI_ADAPTER_NPORTS_SH	= 8,		/*!< number of ports shift */
	BFI_ADAPTER_SPEED_MK	= 0xff,		/*!< adapter speed mask    */
	BFI_ADAPTER_SPEED_SH	= 0,		/*!< adapter speed shift   */
	BFI_ADAPTER_PROTO	= 0x100000,	/*!< prototype adapters    */
	BFI_ADAPTER_TTV		= 0x200000,	/*!< TTV debug capable     */
	BFI_ADAPTER_UNSUPP	= 0x400000,	/*!< unknown adapter type  */
};
#define BFI_ADAPTER_GETP(__prop, __adap_prop)			\
	(((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >>	\
		BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_SETP(__prop, __val)				\
	((__val) << BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_IS_PROTO(__adap_type)			\
	((__adap_type) & BFI_ADAPTER_PROTO)
#define BFI_ADAPTER_IS_TTV(__adap_type)				\
	((__adap_type) & BFI_ADAPTER_TTV)
#define BFI_ADAPTER_IS_UNSUPP(__adap_type)			\
	((__adap_type) & BFI_ADAPTER_UNSUPP)
#define BFI_ADAPTER_IS_SPECIAL(__adap_type)			\
	((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO |	\
			BFI_ADAPTER_UNSUPP))
/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
struct bfi_ioc_ctrl_req {
	struct bfi_mhdr mh;
	u16			clscode;	/* enum bfi_pcifn_class */
	u16			rsvd;
	u32		tv_sec;
};
/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
	struct bfi_mhdr mh;		/*!< Common msg header     */
	u8			status;		/*!< enable/disable status */
	u8			port_mode;	/*!< enum bfa_mode */
	u8			cap_bm;		/*!< capability bit mask */
	u8			rsvd;
};
#define BFI_IOC_MSGSZ   8
/* H2I Messages */
union bfi_ioc_h2i_msg_u {
	struct bfi_mhdr mh;
	struct bfi_ioc_ctrl_req enable_req;
	struct bfi_ioc_ctrl_req disable_req;
	struct bfi_ioc_getattr_req getattr_req;
	u32			mboxmsg[BFI_IOC_MSGSZ];
};
/* I2H Messages */
union bfi_ioc_i2h_msg_u {
	struct bfi_mhdr mh;
	struct bfi_ioc_ctrl_reply fw_event;
	u32			mboxmsg[BFI_IOC_MSGSZ];
};
/*----------------------------------------------------------------------
* MSGQ
*----------------------------------------------------------------------
*/
enum bfi_msgq_h2i_msgs {
	BFI_MSGQ_H2I_INIT_REQ	   = 1,
	BFI_MSGQ_H2I_DOORBELL_PI   = 2,
	BFI_MSGQ_H2I_DOORBELL_CI   = 3,
	BFI_MSGQ_H2I_CMDQ_COPY_RSP = 4,
};
enum bfi_msgq_i2h_msgs {
	BFI_MSGQ_I2H_INIT_RSP	   = BFA_I2HM(BFI_MSGQ_H2I_INIT_REQ),
	BFI_MSGQ_I2H_DOORBELL_PI   = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_PI),
	BFI_MSGQ_I2H_DOORBELL_CI   = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_CI),
	BFI_MSGQ_I2H_CMDQ_COPY_REQ = BFA_I2HM(BFI_MSGQ_H2I_CMDQ_COPY_RSP),
};
/* Messages (commands/responses/AENs) will have the following header */
struct bfi_msgq_mhdr {
	u8	msg_class;
	u8	msg_id;
	u16	msg_token;
	u16	num_entries;
	u8	enet_id;
	u8	rsvd[1];
};
#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do {	\
	(_mh).msg_class		= (_mc);			\
	(_mh).msg_id		= (_mid);			\
	(_mh).msg_token		= (_tok);			\
	(_mh).enet_id		= (_enet_id);			\
} while (0)
/*
 * Mailbox for messaging interface
 */
#define BFI_MSGQ_CMD_ENTRY_SIZE		(64)	/* TBD */
#define BFI_MSGQ_RSP_ENTRY_SIZE		(64)	/* TBD */
/* Number of fixed-size command entries a message of _size bytes occupies. */
#define bfi_msgq_num_cmd_entries(_size)				\
	(((_size) + BFI_MSGQ_CMD_ENTRY_SIZE - 1) / BFI_MSGQ_CMD_ENTRY_SIZE)
/* Queue descriptor handed to firmware: DMA base and depth. */
struct bfi_msgq {
	union bfi_addr_u addr;
	u16 q_depth;	/* Total num of entries in the queue */
	u8 rsvd[2];
};
/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
struct bfi_msgq_cfg_req {
	struct bfi_mhdr mh;
	struct bfi_msgq cmdq;
	struct bfi_msgq rspq;
};
/* BFI_ENET_MSGQ_CFG_RSP */
struct bfi_msgq_cfg_rsp {
	struct bfi_mhdr mh;
	u8 cmd_status;
	u8 rsvd[3];
};
/* BFI_MSGQ_H2I_DOORBELL */
struct bfi_msgq_h2i_db {
	struct bfi_mhdr mh;
	union {
		u16 cmdq_pi;	/* host advanced cmdq producer index */
		u16 rspq_ci;	/* host advanced rspq consumer index */
	} idx;
};
/* BFI_MSGQ_I2H_DOORBELL */
struct bfi_msgq_i2h_db {
	struct bfi_mhdr mh;
	union {
		u16 rspq_pi;	/* fw advanced rspq producer index */
		u16 cmdq_ci;	/* fw advanced cmdq consumer index */
	} idx;
};
#define BFI_CMD_COPY_SZ 28
/* BFI_MSGQ_H2I_CMD_COPY_RSP */
struct bfi_msgq_h2i_cmdq_copy_rsp {
	struct bfi_mhdr mh;
	u8	      data[BFI_CMD_COPY_SZ];
};
/* BFI_MSGQ_I2H_CMD_COPY_REQ */
struct bfi_msgq_i2h_cmdq_copy_req {
	struct bfi_mhdr mh;
	u16     offset;
	u16     len;
};
/*
* FLASH module specific
*/
enum bfi_flash_h2i_msgs {
	BFI_FLASH_H2I_QUERY_REQ = 1,
	BFI_FLASH_H2I_ERASE_REQ = 2,
	BFI_FLASH_H2I_WRITE_REQ = 3,
	BFI_FLASH_H2I_READ_REQ = 4,
	BFI_FLASH_H2I_BOOT_VER_REQ = 5,
};
enum bfi_flash_i2h_msgs {
	BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
	BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
	BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
	BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
	BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
	BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
};
/*
 * Flash query request
 */
struct bfi_flash_query_req {
	struct bfi_mhdr mh;   /* Common msg header */
	struct bfi_alen alen;	/* DMA buffer receiving flash attributes */
};
/*
 * Flash write request
 */
struct bfi_flash_write_req {
	struct bfi_mhdr mh;	/* Common msg header */
	struct bfi_alen alen;	/* DMA buffer holding data to write */
	u32	type;	/* partition type */
	u8	instance; /* partition instance */
	u8	last;	/* non-zero on the final chunk of a multi-part write */
	u8	rsv[2];
	u32	offset;
	u32	length;
};
/*
 * Flash read request
 */
struct bfi_flash_read_req {
	struct bfi_mhdr mh;	/* Common msg header */
	u32	type;		/* partition type */
	u8	instance;	/* partition instance */
	u8	rsv[3];
	u32	offset;
	u32	length;
	struct bfi_alen alen;	/* DMA buffer firmware fills with data */
};
/*
 * Flash query response
 */
struct bfi_flash_query_rsp {
	struct bfi_mhdr mh;	/* Common msg header */
	u32	status;
};
/*
 * Flash read response
 */
struct bfi_flash_read_rsp {
	struct bfi_mhdr mh;	/* Common msg header */
	u32	type;		/* partition type */
	u8	instance;	/* partition instance */
	u8	rsv[3];
	u32	status;
	u32	length;
};
/*
 * Flash write response
 */
struct bfi_flash_write_rsp {
	struct bfi_mhdr mh;	/* Common msg header */
	u32	type;		/* partition type */
	u8	instance;	/* partition instance */
	u8	rsv[3];
	u32	status;
	u32	length;
};
#pragma pack()
#endif /* __BFI_H__ */

View file

@ -0,0 +1,163 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BFI_CNA_H__
#define __BFI_CNA_H__
#include "bfi.h"
#include "bfa_defs_cna.h"
#pragma pack(1)
enum bfi_port_h2i {
	BFI_PORT_H2I_ENABLE_REQ		= (1),
	BFI_PORT_H2I_DISABLE_REQ	= (2),
	BFI_PORT_H2I_GET_STATS_REQ	= (3),
	BFI_PORT_H2I_CLEAR_STATS_REQ	= (4),
};
enum bfi_port_i2h {
	BFI_PORT_I2H_ENABLE_RSP		= BFA_I2HM(1),
	BFI_PORT_I2H_DISABLE_RSP	= BFA_I2HM(2),
	BFI_PORT_I2H_GET_STATS_RSP	= BFA_I2HM(3),
	BFI_PORT_I2H_CLEAR_STATS_RSP	= BFA_I2HM(4),
};
/* Generic REQ type */
struct bfi_port_generic_req {
	struct bfi_mhdr mh;		/*!< msg header		*/
	u32	msgtag;		/*!< msgtag for reply	*/
	u32	rsvd;
};
/* Generic RSP type */
struct bfi_port_generic_rsp {
	struct bfi_mhdr mh;		/*!< common msg header	*/
	u8		status;		/*!< port enable status	*/
	u8		rsvd[3];
	u32	msgtag;		/*!< msgtag for reply	*/
};
/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
	struct bfi_mhdr mh;		/*!< common msg header	*/
	union bfi_addr_u   dma_addr;	/* buffer fw DMAs stats into */
};
union bfi_port_h2i_msg_u {
	struct bfi_mhdr mh;
	struct bfi_port_generic_req enable_req;
	struct bfi_port_generic_req disable_req;
	struct bfi_port_get_stats_req getstats_req;
	struct bfi_port_generic_req clearstats_req;
};
union bfi_port_i2h_msg_u {
	struct bfi_mhdr mh;
	struct bfi_port_generic_rsp enable_rsp;
	struct bfi_port_generic_rsp disable_rsp;
	struct bfi_port_generic_rsp getstats_rsp;
	struct bfi_port_generic_rsp clearstats_rsp;
};
/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
enum bfi_cee_h2i_msgs {
	BFI_CEE_H2I_GET_CFG_REQ = 1,
	BFI_CEE_H2I_RESET_STATS = 2,
	BFI_CEE_H2I_GET_STATS_REQ = 3,
};
/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
enum bfi_cee_i2h_msgs {
	BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
	BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
	BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
};
/* Data structures */
/*
 * @brief H2I command structure for resetting the stats.
 * BFI_CEE_H2I_RESET_STATS
 */
struct bfi_lldp_reset_stats {
	struct bfi_mhdr mh;
};
/*
 * @brief H2I command structure for resetting the stats.
 * BFI_CEE_H2I_RESET_STATS
 */
struct bfi_cee_reset_stats {
	struct bfi_mhdr mh;
};
/*
 * @brief get configuration command from host
 * BFI_CEE_H2I_GET_CFG_REQ
 */
struct bfi_cee_get_req {
	struct bfi_mhdr mh;
	union bfi_addr_u   dma_addr;	/* buffer fw DMAs the config into */
};
/*
 * @brief reply message from firmware
 * BFI_CEE_I2H_GET_CFG_RSP
 */
struct bfi_cee_get_rsp {
	struct bfi_mhdr mh;
	u8			cmd_status;
	u8			rsvd[3];
};
/*
 * @brief get configuration command from host
 * BFI_CEE_H2I_GET_STATS_REQ
 */
struct bfi_cee_stats_req {
	struct bfi_mhdr mh;
	union bfi_addr_u   dma_addr;	/* buffer fw DMAs the stats into */
};
/*
 * @brief reply message from firmware
 * BFI_CEE_I2H_GET_STATS_RSP
 */
struct bfi_cee_stats_rsp {
	struct bfi_mhdr mh;
	u8			cmd_status;
	u8			rsvd[3];
};
/* @brief mailbox command structures from host to firmware */
union bfi_cee_h2i_msg_u {
	struct bfi_mhdr mh;
	struct bfi_cee_get_req get_req;
	struct bfi_cee_stats_req stats_req;
};
/* @brief mailbox message structures from firmware to host */
union bfi_cee_i2h_msg_u {
	struct bfi_mhdr mh;
	struct bfi_cee_get_rsp get_rsp;
	struct bfi_cee_stats_rsp stats_rsp;
};
#pragma pack()
#endif /* __BFI_CNA_H__ */

View file

@ -0,0 +1,858 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/* BNA Hardware and Firmware Interface */
/* Skipping statistics collection to avoid clutter.
* Command is no longer needed:
* MTU
* TxQ Stop
* RxQ Stop
* RxF Enable/Disable
*
* HDS-off request is dynamic
* keep structures as multiple of 32-bit fields for alignment.
* All values must be written in big-endian.
*/
#ifndef __BFI_ENET_H__
#define __BFI_ENET_H__
#include "bfa_defs.h"
#include "bfi.h"
#pragma pack(1)
#define BFI_ENET_CFG_MAX		32	/* Max resources per PF */
#define BFI_ENET_TXQ_PRIO_MAX		8
#define BFI_ENET_RX_QSET_MAX		16
#define BFI_ENET_TXQ_WI_VECT_MAX	4
#define BFI_ENET_VLAN_ID_MAX		4096
#define BFI_ENET_VLAN_BLOCK_SIZE	512	/* in bits */
#define BFI_ENET_VLAN_BLOCKS_MAX					\
	(BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE)
#define BFI_ENET_VLAN_WORD_SIZE		32	/* in bits */
#define BFI_ENET_VLAN_WORDS_MAX		\
	(BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE)
#define BFI_ENET_RSS_RIT_MAX		64	/* entries */
#define BFI_ENET_RSS_KEY_LEN		10	/* 32-bit words */
/* Big-endian DMA address, as required by the Tx/Rx queue hardware. */
union bfi_addr_be_u {
	struct {
		u32	addr_hi;	/* Most Significant 32-bits */
		u32	addr_lo;	/* Least Significant 32-Bits */
	} a32;
};
/*	T X   Q U E U E   D E F I N E S      */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
/* TxQ Entry Opcodes */
#define BFI_ENET_TXQ_WI_SEND		(0x402)	/* Single Frame Transmission */
#define BFI_ENET_TXQ_WI_SEND_LSO	(0x403)	/* Multi-Frame Transmission */
#define BFI_ENET_TXQ_WI_EXTENSION	(0x104)	/* Extension WI */
/* TxQ Entry Control Flags */
#define BFI_ENET_TXQ_WI_CF_FCOE_CRC	(1 << 8)
#define BFI_ENET_TXQ_WI_CF_IPID_MODE	(1 << 5)
#define BFI_ENET_TXQ_WI_CF_INS_PRIO	(1 << 4)
#define BFI_ENET_TXQ_WI_CF_INS_VLAN	(1 << 3)
#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM	(1 << 2)
#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM	(1 << 1)
#define BFI_ENET_TXQ_WI_CF_IP_CKSUM	(1 << 0)
/* First (header) part of a Tx work item. */
struct bfi_enet_txq_wi_base {
	u8			reserved;
	u8			num_vectors;	/* number of vectors present */
	u16			opcode;
			/* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */
	u16			flags;		/* OR of all the flags */
	u16			l4_hdr_size_n_offset;
	u16			vlan_tag;
	u16			lso_mss;	/* Only 14 LSB are valid */
	u32			frame_length;	/* Only 24 LSB are valid */
};
/* Continuation work item carrying extra buffer vectors. */
struct bfi_enet_txq_wi_ext {
	u16			reserved;
	u16			opcode;		/* BFI_ENET_TXQ_WI_EXTENSION */
	u32			reserved2[3];
};
struct bfi_enet_txq_wi_vector {			/* Tx Buffer Descriptor */
	u16			reserved;
	u16			length;		/* Only 14 LSB are valid */
	union bfi_addr_be_u	addr;
};
/*  TxQ Entry Structure  */
struct bfi_enet_txq_entry {
	union {
		struct bfi_enet_txq_wi_base	base;
		struct bfi_enet_txq_wi_ext	ext;
	} wi;
	struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
};
#define wi_hdr		wi.base
#define wi_ext_hdr	wi.ext
/* Pack L4 header size (upper 6 bits) and offset (lower 10 bits). */
#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
		(((_hdr_size) << 10) | ((_offset) & 0x3FF))
/*   R X   Q U E U E   D E F I N E S   */
struct bfi_enet_rxq_entry {
	union bfi_addr_be_u  rx_buffer;
};
/*   R X   C O M P L E T I O N   Q U E U E   D E F I N E S   */
/* CQ Entry Flags */
#define BFI_ENET_CQ_EF_MAC_ERROR	(1 <<  0)
#define BFI_ENET_CQ_EF_FCS_ERROR	(1 <<  1)
#define BFI_ENET_CQ_EF_TOO_LONG		(1 <<  2)
#define BFI_ENET_CQ_EF_FC_CRC_OK	(1 <<  3)
#define BFI_ENET_CQ_EF_RSVD1		(1 <<  4)
#define BFI_ENET_CQ_EF_L4_CKSUM_OK	(1 <<  5)
#define BFI_ENET_CQ_EF_L3_CKSUM_OK	(1 <<  6)
#define BFI_ENET_CQ_EF_HDS_HEADER	(1 <<  7)
#define BFI_ENET_CQ_EF_UDP		(1 <<  8)
#define BFI_ENET_CQ_EF_TCP		(1 <<  9)
#define BFI_ENET_CQ_EF_IP_OPTIONS	(1 << 10)
#define BFI_ENET_CQ_EF_IPV6		(1 << 11)
#define BFI_ENET_CQ_EF_IPV4		(1 << 12)
#define BFI_ENET_CQ_EF_VLAN		(1 << 13)
#define BFI_ENET_CQ_EF_RSS		(1 << 14)
#define BFI_ENET_CQ_EF_RSVD2		(1 << 15)
#define BFI_ENET_CQ_EF_MCAST_MATCH	(1 << 16)
#define BFI_ENET_CQ_EF_MCAST		(1 << 17)
#define BFI_ENET_CQ_EF_BCAST		(1 << 18)
#define BFI_ENET_CQ_EF_REMOTE		(1 << 19)
#define BFI_ENET_CQ_EF_LOCAL		(1 << 20)
/* CQ Entry Structure */
struct bfi_enet_cq_entry {
	u32 flags;
	u16	vlan_tag;
	u16	length;
	u32	rss_hash;
	u8	valid;
	u8	reserved1;
	u8	reserved2;
	u8	rxq_id;
};
/*   E N E T   C O N T R O L   P A T H   C O M M A N D S   */
/* Generic queue descriptor: page table, first page and geometry. */
struct bfi_enet_q {
	union bfi_addr_u	pg_tbl;
	union bfi_addr_u	first_entry;
	u16		pages;	/* # of pages */
	u16		page_sz;
};
struct bfi_enet_txq {
	struct bfi_enet_q	q;
	u8			priority;
	u8			rsvd[3];
};
struct bfi_enet_rxq {
	struct bfi_enet_q	q;
	u16		rx_buffer_size;
	u16		rsvd;
};
struct bfi_enet_cq {
	struct bfi_enet_q	q;
};
/* Interrupt coalescing configuration for an interrupt block. */
struct bfi_enet_ib_cfg {
	u8		int_pkt_dma;
	u8		int_enabled;
	u8		int_pkt_enabled;
	u8		continuous_coalescing;
	u8		msix;
	u8		rsvd[3];
	u32	coalescing_timeout;
	u32	inter_pkt_timeout;
	u8		inter_pkt_count;
	u8		rsvd1[3];
};
/* Interrupt block: index DMA address and MSI-X/INTx routing. */
struct bfi_enet_ib {
	union bfi_addr_u	index_addr;
	union {
		u16	msix_index;
		u16	intx_bitmask;
	} intr;
	u16		rsvd;
};
/* ENET command messages */
enum bfi_enet_h2i_msgs {
	/* Rx Commands */
	BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
	BFI_ENET_H2I_RX_CFG_CLR_REQ = 2,
	BFI_ENET_H2I_RIT_CFG_REQ = 3,
	BFI_ENET_H2I_RSS_CFG_REQ = 4,
	BFI_ENET_H2I_RSS_ENABLE_REQ = 5,
	BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6,
	BFI_ENET_H2I_RX_DEFAULT_REQ = 7,
	BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8,
	BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9,
	BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10,
	BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11,
	BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12,
	BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13,
	BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14,
	BFI_ENET_H2I_RX_VLAN_SET_REQ = 15,
	BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16,
	/* Tx Commands */
	BFI_ENET_H2I_TX_CFG_SET_REQ = 17,
	BFI_ENET_H2I_TX_CFG_CLR_REQ = 18,
	/* Port Commands */
	BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19,
	BFI_ENET_H2I_SET_PAUSE_REQ = 20,
	BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21,
	/* Get Attributes Command */
	BFI_ENET_H2I_GET_ATTR_REQ = 22,
	/* Statistics Commands */
	BFI_ENET_H2I_STATS_GET_REQ = 23,
	BFI_ENET_H2I_STATS_CLR_REQ = 24,
	BFI_ENET_H2I_WOL_MAGIC_REQ = 25,
	BFI_ENET_H2I_WOL_FRAME_REQ = 26,
	BFI_ENET_H2I_MAX = 27,
};
/* Responses mirror the request opcodes via BFA_I2HM(); AENs follow after. */
enum bfi_enet_i2h_msgs {
	/* Rx Responses */
	BFI_ENET_I2H_RX_CFG_SET_RSP =
		BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ),
	BFI_ENET_I2H_RX_CFG_CLR_RSP =
		BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ),
	BFI_ENET_I2H_RIT_CFG_RSP =
		BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ),
	BFI_ENET_I2H_RSS_CFG_RSP =
		BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ),
	BFI_ENET_I2H_RSS_ENABLE_RSP =
		BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ),
	BFI_ENET_I2H_RX_PROMISCUOUS_RSP =
		BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ),
	BFI_ENET_I2H_RX_DEFAULT_RSP =
		BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ),
	BFI_ENET_I2H_MAC_UCAST_SET_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ),
	BFI_ENET_I2H_MAC_UCAST_CLR_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ),
	BFI_ENET_I2H_MAC_UCAST_ADD_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ),
	BFI_ENET_I2H_MAC_UCAST_DEL_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ),
	BFI_ENET_I2H_MAC_MCAST_ADD_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ),
	BFI_ENET_I2H_MAC_MCAST_DEL_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ),
	BFI_ENET_I2H_MAC_MCAST_FILTER_RSP =
		BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ),
	BFI_ENET_I2H_RX_VLAN_SET_RSP =
		BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ),
	BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP =
		BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ),
	/* Tx Responses */
	BFI_ENET_I2H_TX_CFG_SET_RSP =
		BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ),
	BFI_ENET_I2H_TX_CFG_CLR_RSP =
		BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ),
	/* Port Responses */
	BFI_ENET_I2H_PORT_ADMIN_RSP =
		BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ),
	BFI_ENET_I2H_SET_PAUSE_RSP =
		BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ),
	BFI_ENET_I2H_DIAG_LOOPBACK_RSP =
		BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ),
	/* Attributes Response */
	BFI_ENET_I2H_GET_ATTR_RSP =
		BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ),
	/* Statistics Responses */
	BFI_ENET_I2H_STATS_GET_RSP =
		BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ),
	BFI_ENET_I2H_STATS_CLR_RSP =
		BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ),
	BFI_ENET_I2H_WOL_MAGIC_RSP =
		BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ),
	BFI_ENET_I2H_WOL_FRAME_RSP =
		BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ),
	/* AENs */
	BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX),
	BFI_ENET_I2H_LINK_UP_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 1),
	BFI_ENET_I2H_PORT_ENABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 2),
	BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3),
	BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
};
/* The following error codes can be returned by the enet commands */
enum bfi_enet_err {
	BFI_ENET_CMD_OK		= 0,
	BFI_ENET_CMD_FAIL	= 1,
	BFI_ENET_CMD_DUP_ENTRY	= 2,	/* !< Duplicate entry in CAM */
	BFI_ENET_CMD_CAM_FULL	= 3,	/* !< CAM is full */
	BFI_ENET_CMD_NOT_OWNER	= 4,	/* !< Not permitted, b'cos not owner */
	BFI_ENET_CMD_NOT_EXEC	= 5,	/* !< Was not sent to f/w at all */
	BFI_ENET_CMD_WAITING	= 6,	/* !< Waiting for completion */
	BFI_ENET_CMD_PORT_DISABLED = 7,	/* !< port in disabled state */
};
/* Generic Request
 *
 * bfi_enet_req is used by:
 *	BFI_ENET_H2I_RX_CFG_CLR_REQ
 *	BFI_ENET_H2I_TX_CFG_CLR_REQ
 */
struct bfi_enet_req {
	struct bfi_msgq_mhdr mh;
};
/* Enable/Disable Request
 *
 * bfi_enet_enable_req is used by:
 *	BFI_ENET_H2I_RSS_ENABLE_REQ	(enet_id must be zero)
 *	BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero)
 *	BFI_ENET_H2I_RX_DEFAULT_REQ	(enet_id must be zero)
 *	BFI_ENET_H2I_RX_MAC_MCAST_FILTER_REQ
 *	BFI_ENET_H2I_PORT_ADMIN_UP_REQ	(enet_id must be zero)
 */
struct bfi_enet_enable_req {
	struct		bfi_msgq_mhdr mh;
	u8		enable;		/* 1 = enable;	0 = disable */
	u8		rsvd[3];
};
/* Generic Response */
struct bfi_enet_rsp {
	struct bfi_msgq_mhdr mh;
	u8		error;		/*!< if error see cmd_offset */
	u8		rsvd;
	u16		cmd_offset;	/*!< offset to invalid parameter */
};
/* GLOBAL CONFIGURATION */
/* bfi_enet_attr_req is used by:
 *	BFI_ENET_H2I_GET_ATTR_REQ
 */
struct bfi_enet_attr_req {
	struct bfi_msgq_mhdr	mh;
};
/* bfi_enet_attr_rsp is used by:
 *	BFI_ENET_I2H_GET_ATTR_RSP
 */
struct bfi_enet_attr_rsp {
	struct bfi_msgq_mhdr mh;
	u8		error;		/*!< if error see cmd_offset */
	u8		rsvd;
	u16		cmd_offset;	/*!< offset to invalid parameter */
	u32		max_cfg;
	u32		max_ucmac;
	u32		rit_size;
};
/* Tx Configuration
 *
 * bfi_enet_tx_cfg is used by:
 *	BFI_ENET_H2I_TX_CFG_SET_REQ
 */
enum bfi_enet_tx_vlan_mode {
	BFI_ENET_TX_VLAN_NOP	= 0,	/* no VLAN processing		*/
	BFI_ENET_TX_VLAN_INS	= 1,	/* insert configured vlan_id	*/
	BFI_ENET_TX_VLAN_WI	= 2,	/* take VLAN from the work item	*/
};
struct bfi_enet_tx_cfg {
	u8		vlan_mode;	/*!< processing mode */
	u8		rsvd;
	u16		vlan_id;
	u8		admit_tagged_frame;
	u8		apply_vlan_filter;
	u8		add_to_vswitch;
	u8		rsvd1[1];
};
struct bfi_enet_tx_cfg_req {
	struct bfi_msgq_mhdr mh;
	u8			num_queues;	/* # of Tx Queues */
	u8			rsvd[3];
	struct {
		struct bfi_enet_txq	q;
		struct bfi_enet_ib	ib;
	} q_cfg[BFI_ENET_TXQ_PRIO_MAX];
	struct bfi_enet_ib_cfg	ib_cfg;
	struct bfi_enet_tx_cfg	tx_cfg;
};
/* Firmware reply: per-queue doorbell offsets relative to the PCI BAR. */
struct bfi_enet_tx_cfg_rsp {
	struct		bfi_msgq_mhdr mh;
	u8		error;
	u8		hw_id;		/* For debugging */
	u8		rsvd[2];
	struct {
		u32	q_dbell;	/* PCI base address offset */
		u32	i_dbell;	/* PCI base address offset */
		u8	hw_qid;		/* For debugging */
		u8	rsvd[3];
	} q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
/* Rx Configuration
 *
 * bfi_enet_rx_cfg is used by:
 *	BFI_ENET_H2I_RX_CFG_SET_REQ
 */
enum bfi_enet_rxq_type {
	BFI_ENET_RXQ_SINGLE		= 1,
	BFI_ENET_RXQ_LARGE_SMALL	= 2,
	BFI_ENET_RXQ_HDS		= 3,
	BFI_ENET_RXQ_HDS_OPT_BASED	= 4,
};
/* Header-data-split trigger conditions (bit mask). */
enum bfi_enet_hds_type {
	BFI_ENET_HDS_FORCED	= 0x01,
	BFI_ENET_HDS_IPV6_UDP	= 0x02,
	BFI_ENET_HDS_IPV6_TCP	= 0x04,
	BFI_ENET_HDS_IPV4_TCP	= 0x08,
	BFI_ENET_HDS_IPV4_UDP	= 0x10,
};
struct bfi_enet_rx_cfg {
	u8		rxq_type;	/* enum bfi_enet_rxq_type */
	u8		rsvd[1];
	u16		frame_size;
	struct {
		u8			max_header_size;
		u8			force_offset;
		u8			type;
		u8			rsvd1;
	} hds;
	u8		multi_buffer;
	u8		strip_vlan;
	u8		drop_untagged;
	u8		rsvd2;
};
/*
 * Multicast frames are received on the "ql" queue of q-set index zero.
 * On the completion queue, RxQ ID = even is for large/data buffer queues
 * and RxQ ID = odd is for small/header buffer queues.
 */
struct bfi_enet_rx_cfg_req {
	struct bfi_msgq_mhdr mh;
	u8			num_queue_sets;	/* # of Rx Queue Sets */
	u8			rsvd[3];
	struct {
		struct bfi_enet_rxq	ql;	/* large/data/single buffers */
		struct bfi_enet_rxq	qs;	/* small/header buffers */
		struct bfi_enet_cq	cq;
		struct bfi_enet_ib	ib;
	} q_cfg[BFI_ENET_RX_QSET_MAX];
	struct bfi_enet_ib_cfg	ib_cfg;
	struct bfi_enet_rx_cfg	rx_cfg;
};
/* Firmware reply: per-queue-set doorbell offsets relative to the PCI BAR. */
struct bfi_enet_rx_cfg_rsp {
	struct bfi_msgq_mhdr mh;
	u8		error;
	u8		hw_id;	 /* For debugging */
	u8		rsvd[2];
	struct {
		u32	ql_dbell; /* PCI base address offset */
		u32	qs_dbell; /* PCI base address offset */
		u32	i_dbell;  /* PCI base address offset */
		u8		hw_lqid;  /* For debugging */
		u8		hw_sqid;  /* For debugging */
		u8		hw_cqid;  /* For debugging */
		u8		rsvd;
	} q_handles[BFI_ENET_RX_QSET_MAX];
};
/* RIT
*
* bfi_enet_rit_req is used by:
* BFI_ENET_H2I_RIT_CFG_REQ
*/
struct bfi_enet_rit_req {
struct bfi_msgq_mhdr mh;
u16 size; /* number of table-entries used */
u8 rsvd[2];
u8 table[BFI_ENET_RSS_RIT_MAX];
};
/* RSS
*
* bfi_enet_rss_cfg_req is used by:
* BFI_ENET_H2I_RSS_CFG_REQ
*/
enum bfi_enet_rss_type {
BFI_ENET_RSS_IPV6 = 0x01,
BFI_ENET_RSS_IPV6_TCP = 0x02,
BFI_ENET_RSS_IPV4 = 0x04,
BFI_ENET_RSS_IPV4_TCP = 0x08
};
struct bfi_enet_rss_cfg {
u8 type;
u8 mask;
u8 rsvd[2];
u32 key[BFI_ENET_RSS_KEY_LEN];
};
struct bfi_enet_rss_cfg_req {
struct bfi_msgq_mhdr mh;
struct bfi_enet_rss_cfg cfg;
};
/* MAC Unicast
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_MAC_UCAST_SET_REQ
* BFI_ENET_H2I_MAC_UCAST_CLR_REQ
* BFI_ENET_H2I_MAC_UCAST_ADD_REQ
* BFI_ENET_H2I_MAC_UCAST_DEL_REQ
*/
struct bfi_enet_ucast_req {
struct bfi_msgq_mhdr mh;
mac_t mac_addr;
u8 rsvd[2];
};
/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
mac_t mac_addr;
};
/* MAC Multicast
*
* bfi_enet_mac_mfilter_add_req is used by:
* BFI_ENET_H2I_MAC_MCAST_ADD_REQ
*/
struct bfi_enet_mcast_add_req {
struct bfi_msgq_mhdr mh;
mac_t mac_addr;
u8 rsvd[2];
};
/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
*/
struct bfi_enet_mcast_add_rsp {
struct bfi_msgq_mhdr mh;
u8 error;
u8 rsvd;
u16 cmd_offset;
u16 handle;
u8 rsvd1[2];
};
/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
*/
struct bfi_enet_mcast_del_req {
struct bfi_msgq_mhdr mh;
u16 handle;
u8 rsvd[2];
};
/* VLAN
 *
 * bfi_enet_rx_vlan_req is used by:
 *	BFI_ENET_H2I_RX_VLAN_SET_REQ
 */
/* Programs one block of the VLAN filter bitmap: block_idx selects which
 * window of VLAN ids bit_mask[] covers.
 */
struct bfi_enet_rx_vlan_req {
	struct bfi_msgq_mhdr mh;
	u8 block_idx;
	u8 rsvd[3];
	u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
};
/* PAUSE
 *
 * bfi_enet_set_pause_req is used by:
 *	BFI_ENET_H2I_SET_PAUSE_REQ
 */
/* Enables/disables Ethernet pause-frame handling per direction. */
struct bfi_enet_set_pause_req {
	struct bfi_msgq_mhdr mh;
	u8 rsvd[2];
	u8 tx_pause; /* 1 = enable; 0 = disable */
	u8 rx_pause; /* 1 = enable; 0 = disable */
};
/* DIAGNOSTICS
 *
 * bfi_enet_diag_lb_req is used by:
 *	BFI_ENET_H2I_DIAG_LOOPBACK
 */
/* Enters/exits diagnostic loopback; "mode" is one of the
 * BFI_ENET_DIAG_LB_OPMODE_* values below.
 */
struct bfi_enet_diag_lb_req {
	struct bfi_msgq_mhdr mh;
	u8 rsvd[2];
	u8 mode; /* cable or Serdes */
	u8 enable; /* 1 = enable; 0 = disable */
};
/* enum for Loopback opmodes */
enum {
	BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
	BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
};
/* STATISTICS
 *
 * bfi_enet_stats_req is used by:
 *	BFI_ENET_H2I_STATS_GET_REQ
 *	BFI_ENET_I2H_STATS_CLR_REQ
 */
/* Requests firmware to get (or clear) statistics.  stats_mask selects
 * the counter groups (BFI_ENET_STATS_* below); rx/tx_enet_mask select
 * which Rx/Tx functions to include; host_buffer is the DMA address
 * firmware writes the struct bfi_enet_stats snapshot into.
 */
struct bfi_enet_stats_req {
	struct bfi_msgq_mhdr mh;
	u16 stats_mask;
	u8 rsvd[2];
	u32 rx_enet_mask;
	u32 tx_enet_mask;
	union bfi_addr_u host_buffer;
};
/* defines for "stats_mask" above. */
#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
/* All five groups above. */
#define BFI_ENET_STATS_ALL 0x1f
/* Per-block counter groups DMAed by firmware (see struct bfi_enet_stats
 * at the end of this file).  All counters are u64.
 */
/* TxF Frame Statistics */
struct bfi_enet_stats_txf {
	u64 ucast_octets;
	u64 ucast;
	u64 ucast_vlan;
	u64 mcast_octets;
	u64 mcast;
	u64 mcast_vlan;
	u64 bcast_octets;
	u64 bcast;
	u64 bcast_vlan;
	u64 errors;
	u64 filter_vlan; /* frames filtered due to VLAN */
	u64 filter_mac_sa; /* frames filtered due to SA check */
};
/* RxF Frame Statistics */
struct bfi_enet_stats_rxf {
	u64 ucast_octets;
	u64 ucast;
	u64 ucast_vlan;
	u64 mcast_octets;
	u64 mcast;
	u64 mcast_vlan;
	u64 bcast_octets;
	u64 bcast;
	u64 bcast_vlan;
	u64 frame_drops;
};
/* FC Tx Frame Statistics */
struct bfi_enet_stats_fc_tx {
	u64 txf_ucast_octets;
	u64 txf_ucast;
	u64 txf_ucast_vlan;
	u64 txf_mcast_octets;
	u64 txf_mcast;
	u64 txf_mcast_vlan;
	u64 txf_bcast_octets;
	u64 txf_bcast;
	u64 txf_bcast_vlan;
	u64 txf_parity_errors;
	u64 txf_timeout;
	u64 txf_fid_parity_errors;
};
/* FC Rx Frame Statistics */
struct bfi_enet_stats_fc_rx {
	u64 rxf_ucast_octets;
	u64 rxf_ucast;
	u64 rxf_ucast_vlan;
	u64 rxf_mcast_octets;
	u64 rxf_mcast;
	u64 rxf_mcast_vlan;
	u64 rxf_bcast_octets;
	u64 rxf_bcast;
	u64 rxf_bcast_vlan;
};
/* RAD (Rx admission) Frame Statistics */
struct bfi_enet_stats_rad {
	u64 rx_frames;
	u64 rx_octets;
	u64 rx_vlan_frames;
	u64 rx_ucast;
	u64 rx_ucast_octets;
	u64 rx_ucast_vlan;
	u64 rx_mcast;
	u64 rx_mcast_octets;
	u64 rx_mcast_vlan;
	u64 rx_bcast;
	u64 rx_bcast_octets;
	u64 rx_bcast_vlan;
	u64 rx_drops;
};
/* BPC Tx Registers */
/* Pause-frame counters, one slot per priority (8 priorities). */
struct bfi_enet_stats_bpc {
	/* transmit stats */
	u64 tx_pause[8];
	u64 tx_zero_pause[8]; /*!< Pause cancellation */
	/*!<Pause initiation rather than retention */
	u64 tx_first_pause[8];
	/* receive stats */
	u64 rx_pause[8];
	u64 rx_zero_pause[8]; /*!< Pause cancellation */
	/*!<Pause initiation rather than retention */
	u64 rx_first_pause[8];
};
/* MAC Rx Statistics */
/* Standard MAC counters; the frame_* size-bucket counters cover both
 * directions, the rest are split into rx_* and tx_* groups.
 */
struct bfi_enet_stats_mac {
	u64 stats_clr_cnt; /* times this stats cleared */
	u64 frame_64; /* both rx and tx counter */
	u64 frame_65_127; /* both rx and tx counter */
	u64 frame_128_255; /* both rx and tx counter */
	u64 frame_256_511; /* both rx and tx counter */
	u64 frame_512_1023; /* both rx and tx counter */
	u64 frame_1024_1518; /* both rx and tx counter */
	u64 frame_1519_1522; /* both rx and tx counter */
	/* receive stats */
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_fcs_error;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_control_frames;
	u64 rx_pause;
	u64 rx_unknown_opcode;
	u64 rx_alignment_error;
	u64 rx_frame_length_error;
	u64 rx_code_error;
	u64 rx_carrier_sense_error;
	u64 rx_undersize;
	u64 rx_oversize;
	u64 rx_fragments;
	u64 rx_jabber;
	u64 rx_drop;
	/* transmit stats */
	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_multicast;
	u64 tx_broadcast;
	u64 tx_pause;
	u64 tx_deferral;
	u64 tx_excessive_deferral;
	u64 tx_single_collision;
	u64 tx_muliple_collision;	/* sic: field name kept as-is (ABI) */
	u64 tx_late_collision;
	u64 tx_excessive_collision;
	u64 tx_total_collision;
	u64 tx_pause_honored;
	u64 tx_drop;
	u64 tx_jabber;
	u64 tx_fcs_error;
	u64 tx_control_frame;
	u64 tx_oversize;
	u64 tx_undersize;
	u64 tx_fragments;
};
/* Complete statistics, DMAed from fw to host followed by
 * BFI_ENET_I2H_STATS_GET_RSP
 */
/* Layout must exactly match what firmware writes — do not reorder. */
struct bfi_enet_stats {
	struct bfi_enet_stats_mac mac_stats;
	struct bfi_enet_stats_bpc bpc_stats;
	struct bfi_enet_stats_rad rad_stats;
	struct bfi_enet_stats_rad rlb_stats;
	struct bfi_enet_stats_fc_rx fc_rx_stats;
	struct bfi_enet_stats_fc_tx fc_tx_stats;
	struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
	struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
};
#pragma pack()
#endif /* __BFI_ENET_H__ */

/* ---- end of bfi_enet.h; the next section is bfi_reg.h ---- */
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/*
* bfi_reg.h ASIC register defines for all Brocade adapter ASICs
*/
#ifndef __BFI_REG_H__
#define __BFI_REG_H__
/* Register offsets below are relative to the PCI BAR.  The "cb"/"ct"
 * tags appear to mark which adapter ASIC generation implements the
 * register — TODO confirm against the hardware spec.
 */
/* Per-PCI-function interrupt status and mask registers. */
#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
#define HOSTFN2_INT_MSK 0x00014304 /* ct */
#define HOSTFN3_INT_MSK 0x00014404 /* ct */
#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
/* LPU-clock PLL control register and its bit fields. */
#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
#define __P_LCLK_PLL_LOCK 0x80000000
#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
#define __APP_PLL_LCLK_RESET_TIMER_SH 17
#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
#define __APP_PLL_LCLK_JITLMT0_1_SH 12
#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
#define __APP_PLL_LCLK_HREF 0x00000800
#define __APP_PLL_LCLK_HDIV 0x00000400
#define __APP_PLL_LCLK_P0_1_MK 0x00000300
#define __APP_PLL_LCLK_P0_1_SH 8
#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
#define __APP_PLL_LCLK_Z0_2_SH 5
#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
#define __APP_PLL_LCLK_RSEL200500 0x00000010
#define __APP_PLL_LCLK_ENARST 0x00000008
#define __APP_PLL_LCLK_BYPASS 0x00000004
#define __APP_PLL_LCLK_LRESETN 0x00000002
#define __APP_PLL_LCLK_ENABLE 0x00000001
/* System-clock PLL control register; bit layout mirrors the LCLK PLL. */
#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
#define __P_SCLK_PLL_LOCK 0x80000000
#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
#define __APP_PLL_SCLK_RESET_TIMER_SH 17
#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
#define __APP_PLL_SCLK_JITLMT0_1_SH 12
#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
#define __APP_PLL_SCLK_HREF 0x00000800
#define __APP_PLL_SCLK_HDIV 0x00000400
#define __APP_PLL_SCLK_P0_1_MK 0x00000300
#define __APP_PLL_SCLK_P0_1_SH 8
#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
#define __APP_PLL_SCLK_Z0_2_SH 5
#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
#define __APP_PLL_SCLK_RSEL200500 0x00000010
#define __APP_PLL_SCLK_ENARST 0x00000008
#define __APP_PLL_SCLK_BYPASS 0x00000004
#define __APP_PLL_SCLK_LRESETN 0x00000002
#define __APP_PLL_SCLK_ENABLE 0x00000001
/* Per-port MAC enable bits (ct only). */
#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
#define __ENABLE_MAC_1 0x00200000 /* ct */
#define __ENABLE_MAC_0 0x00100000 /* ct */
/* Hardware semaphore registers and their scratch "info" companions
 * (named for specific uses near the end of this file).
 */
#define HOST_SEM0_REG 0x00014230 /* cb/ct */
#define HOST_SEM1_REG 0x00014234 /* cb/ct */
#define HOST_SEM2_REG 0x00014238 /* cb/ct */
#define HOST_SEM3_REG 0x0001423c /* cb/ct */
#define HOST_SEM4_REG 0x00014610 /* cb/ct */
#define HOST_SEM5_REG 0x00014614 /* cb/ct */
#define HOST_SEM6_REG 0x00014618 /* cb/ct */
#define HOST_SEM7_REG 0x0001461c /* cb/ct */
#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
/* Host-function <-> LPU mailbox command/status registers, one pair per
 * (function, LPU) combination and per direction.
 */
#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
/* PSS control/status, GPIO, mailbox first-word, MSI-X error index,
 * memory BIST and Ethernet-MAC SerDes registers.
 */
#define PSS_CTL_REG 0x00018800 /* cb/ct */
#define __PSS_I2C_CLK_DIV_MK 0x007f0000
#define __PSS_I2C_CLK_DIV_SH 16
#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
#define __PSS_LMEM_INIT_DONE 0x00001000
#define __PSS_LMEM_RESET 0x00000200
#define __PSS_LMEM_INIT_EN 0x00000100
#define __PSS_LPU1_RESET 0x00000002
#define __PSS_LPU0_RESET 0x00000001
#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
#define ERR_SET_REG 0x00018818 /* cb/ct */
#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
#define __PSS_GPIO_OUT_REG 0x00000fff
#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
#define __PSS_GPIO_OE_REG 0x000000ff
#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
#define MBIST_CTL_REG 0x00014220 /* ct */
#define __EDRAM_BISTR_START 0x00000004
#define MBIST_STAT_REG 0x00014224 /* ct */
#define ETH_MAC_SER_REG 0x00014288 /* ct */
#define __APP_EMS_CKBUFAMPIN 0x00000020
#define __APP_EMS_REFCLKSEL 0x00000010
#define __APP_EMS_CMLCKSEL 0x00000008
#define __APP_EMS_REFCKBUFEN2 0x00000004
#define __APP_EMS_REFCKBUFEN1 0x00000002
#define __APP_EMS_CHANNEL_SEL 0x00000001
/* Function-personality register: one identical field group per PCI
 * function (F0..F3), packed at 12-bit-ish strides as masked below.
 */
#define FNC_PERS_REG 0x00014604 /* ct */
#define __F3_FUNCTION_ACTIVE 0x80000000
#define __F3_FUNCTION_MODE 0x40000000
#define __F3_PORT_MAP_MK 0x30000000
#define __F3_PORT_MAP_SH 28
#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
#define __F3_VM_MODE 0x08000000
#define __F3_INTX_STATUS_MK 0x07000000
#define __F3_INTX_STATUS_SH 24
#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
#define __F2_FUNCTION_ACTIVE 0x00800000
#define __F2_FUNCTION_MODE 0x00400000
#define __F2_PORT_MAP_MK 0x00300000
#define __F2_PORT_MAP_SH 20
#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
#define __F2_VM_MODE 0x00080000
#define __F2_INTX_STATUS_MK 0x00070000
#define __F2_INTX_STATUS_SH 16
#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
#define __F1_FUNCTION_ACTIVE 0x00008000
#define __F1_FUNCTION_MODE 0x00004000
#define __F1_PORT_MAP_MK 0x00003000
#define __F1_PORT_MAP_SH 12
#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
#define __F1_VM_MODE 0x00000800
#define __F1_INTX_STATUS_MK 0x00000700
#define __F1_INTX_STATUS_SH 8
#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
#define __F0_FUNCTION_ACTIVE 0x00000080
#define __F0_FUNCTION_MODE 0x00000040
#define __F0_PORT_MAP_MK 0x00000030
#define __F0_PORT_MAP_SH 4
#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
#define __F0_VM_MODE 0x00000008
#define __F0_INTX_STATUS 0x00000007
/* Decoded values of the per-function INTX_STATUS field. */
enum {
	__F0_INTX_STATUS_MSIX = 0x0,
	__F0_INTX_STATUS_INTA = 0x1,
	__F0_INTX_STATUS_INTB = 0x2,
	__F0_INTX_STATUS_INTC = 0x3,
	__F0_INTX_STATUS_INTD = 0x4,
};
#define OP_MODE 0x0001460c
#define __APP_ETH_CLK_LOWSPEED 0x00000004
#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
#define __GLOBAL_FCOE_MODE 0x00000001
/* Firmware init-halt handshake, one register per port. */
#define FW_INIT_HALT_P0 0x000191ac
#define __FW_INIT_HALT_P 0x00000001
#define FW_INIT_HALT_P1 0x000191bc
#define PMM_1T_RESET_REG_P0 0x0002381c
#define __PMM_1T_RESET_P 0x00000001
#define PMM_1T_RESET_REG_P1 0x00023c1c
/* Brocade 1860 Adapter specific defines */
/* Sub-block base offsets within the 1860 (CT2) BAR. */
#define CT2_PCI_CPQ_BASE 0x00030000
#define CT2_PCI_APP_BASE 0x00030100
#define CT2_PCI_ETH_BASE 0x00030400
/*
 * APP block registers
 */
#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
/* Per-function personality word 0 and its bit fields. */
#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
#define __PME_STATUS_ 0x00200000
#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
#define __PF_VF_BAR_SIZE_MODE__SH 19
#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
#define __FC_LL_PORT_MAP__MK 0x00060000
#define __FC_LL_PORT_MAP__SH 17
#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
#define __PF_VF_ACTIVE_ 0x00010000
#define __PF_VF_CFG_RDY_ 0x00008000
#define __PF_VF_ENABLE_ 0x00004000
#define __PF_DRIVER_ACTIVE_ 0x00002000
#define __PF_PME_SEND_ENABLE_ 0x00001000
#define __PF_EXROM_OFFSET__MK 0x00000ff0
#define __PF_EXROM_OFFSET__SH 4
#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
#define __FC_LL_MODE_ 0x00000008
#define __PF_INTX_PIN_ 0x00000007
/* Personality word 1: queue counts/offsets for the PF and its VFs. */
#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
#define __PF_NUM_QUEUES1__MK 0xff000000
#define __PF_NUM_QUEUES1__SH 24
#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
#define __PF_VF_QUE_OFFSET1__SH 16
#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
#define __PF_VF_NUM_QUEUES__SH 8
#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
#define __PF_VF_QUE_OFFSET_ 0x000000ff
#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
/*
 * Brocade 1860 adapter CPQ block registers
 */
#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
/* CT2 semaphore and semaphore-info registers (new offsets vs cb/ct). */
#define CT2_HOST_SEM0_REG 0x000148f0
#define CT2_HOST_SEM1_REG 0x000148f4
#define CT2_HOST_SEM2_REG 0x000148f8
#define CT2_HOST_SEM3_REG 0x000148fc
#define CT2_HOST_SEM4_REG 0x00014900
#define CT2_HOST_SEM5_REG 0x00014904
#define CT2_HOST_SEM6_REG 0x00014908
#define CT2_HOST_SEM7_REG 0x0001490c
#define CT2_HOST_SEM0_INFO_REG 0x000148b0
#define CT2_HOST_SEM1_INFO_REG 0x000148b4
#define CT2_HOST_SEM2_INFO_REG 0x000148b8
#define CT2_HOST_SEM3_INFO_REG 0x000148bc
#define CT2_HOST_SEM4_INFO_REG 0x000148c0
#define CT2_HOST_SEM5_INFO_REG 0x000148c4
#define CT2_HOST_SEM6_INFO_REG 0x000148c8
#define CT2_HOST_SEM7_INFO_REG 0x000148cc
/* CT2 (Brocade 1860) LCLK PLL control register and bit-field helpers.
 * NOTE(review): __APP_PLL_LCLK_FBCNT() originally shifted by
 * __APP_PLL_SCLK_FBCNT_SH (20), which cannot produce a value inside
 * this register's own mask __APP_PLL_LCLK_FBCNT_MK (0x1fe00000 =
 * bits 21..28).  Use the matching LCLK shift so value and mask agree.
 */
#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
#define __APP_LPUCLK_HALFSPEED 0x40000000
#define __APP_PLL_LCLK_LOAD 0x20000000
#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
#define __APP_PLL_LCLK_FBCNT_SH 21
#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_LCLK_FBCNT_SH)
/* Feedback-count values for the supported LPU clock rates. */
enum {
	__APP_PLL_LCLK_FBCNT_425_MHZ = 6,
	__APP_PLL_LCLK_FBCNT_468_MHZ = 4,
};
#define __APP_PLL_LCLK_EXTFB 0x00000800
#define __APP_PLL_LCLK_ENOUTS 0x00000400
#define __APP_PLL_LCLK_RATE 0x00000010
/* CT2 SCLK PLL control, misc clock/BIST/NFC/CSI registers, and the
 * usage-based names for the semaphore scratch registers.
 */
#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
#define __P_SCLK_PLL_LOCK 0x80000000
#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
#define __APP_PLL_SCLK_LOAD 0x10000000
#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
#define __APP_PLL_SCLK_FBCNT_SH 20
#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
enum {
	__APP_PLL_SCLK_FBCNT_NORM = 6,
	__APP_PLL_SCLK_FBCNT_10G_FC = 10,
};
#define __APP_PLL_SCLK_EXTFB 0x00000800
#define __APP_PLL_SCLK_ENOUTS 0x00000400
#define __APP_PLL_SCLK_RATE 0x00000010
#define CT2_PCIE_MISC_REG 0x00014804
#define __ETH_CLK_ENABLE_PORT1 0x00000010
#define CT2_CHIP_MISC_PRG 0x000148a4
#define __ETH_CLK_ENABLE_PORT0 0x00004000
#define __APP_LPU_SPEED 0x00000002
#define CT2_MBIST_STAT_REG 0x00014818
#define CT2_MBIST_CTL_REG 0x0001481c
#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
#define __PMM_1T_PNDB_P 0x00000002
#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
#define CT2_WGN_STATUS 0x00014990
#define __A2T_AHB_LOAD 0x00000800
#define __WGN_READY 0x00000400
#define __GLBL_PF_VF_CFG_RDY 0x00000200
#define CT2_NFC_CSR_CLR_REG 0x00027420
#define CT2_NFC_CSR_SET_REG 0x00027424
#define __HALT_NFC_CONTROLLER 0x00000002
#define __NFC_CONTROLLER_HALTED 0x00001000
#define CT2_RSC_GPR15_REG 0x0002765c
#define CT2_CSI_FW_CTL_REG 0x00027080
#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
#define CT2_CSI_FW_CTL_SET_REG 0x00027088
/* Per-port CSI MAC control; the macro below indexes by port number. */
#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
#define __CSI_MAC_RESET 0x00000010
#define __CSI_MAC_AHB_RESET 0x00000008
#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
#define CT2_CSI_MAC_CONTROL_REG(__n) \
	(CT2_CSI_MAC0_CONTROL_REG + \
	(__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
/*
 * Name semaphore registers based on usage
 */
#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
/*
 * CT2 semaphore register locations changed
 */
#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
/* Map (function, queue) to a global CPE/RME queue number (4 per fn). */
#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
/*
 * And corresponding host interrupt status bit field defines
 */
/* cb/ct layout: bits 0-7 CPE queues, 8-15 RME queues, 16-24 errors
 * and mailboxes.
 */
#define __HFN_INT_CPE_Q0 0x00000001U
#define __HFN_INT_CPE_Q1 0x00000002U
#define __HFN_INT_CPE_Q2 0x00000004U
#define __HFN_INT_CPE_Q3 0x00000008U
#define __HFN_INT_CPE_Q4 0x00000010U
#define __HFN_INT_CPE_Q5 0x00000020U
#define __HFN_INT_CPE_Q6 0x00000040U
#define __HFN_INT_CPE_Q7 0x00000080U
#define __HFN_INT_RME_Q0 0x00000100U
#define __HFN_INT_RME_Q1 0x00000200U
#define __HFN_INT_RME_Q2 0x00000400U
#define __HFN_INT_RME_Q3 0x00000800U
#define __HFN_INT_RME_Q4 0x00001000U
#define __HFN_INT_RME_Q5 0x00002000U
#define __HFN_INT_RME_Q6 0x00004000U
#define __HFN_INT_RME_Q7 0x00008000U
#define __HFN_INT_ERR_EMC 0x00010000U
#define __HFN_INT_ERR_LPU0 0x00020000U
#define __HFN_INT_ERR_LPU1 0x00040000U
#define __HFN_INT_ERR_PSS 0x00080000U
#define __HFN_INT_MBOX_LPU0 0x00100000U
#define __HFN_INT_MBOX_LPU1 0x00200000U
#define __HFN_INT_MBOX1_LPU0 0x00400000U
#define __HFN_INT_MBOX1_LPU1 0x00800000U
#define __HFN_INT_LL_HALT 0x01000000U
#define __HFN_INT_CPE_MASK 0x000000ffU
#define __HFN_INT_RME_MASK 0x0000ff00U
#define __HFN_INT_ERR_MASK \
	(__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
	 __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
/* Even functions use queues 0-3/LPU0, odd functions queues 4-7/LPU1. */
#define __HFN_INT_FN0_MASK \
	(__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
	 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
	 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
#define __HFN_INT_FN1_MASK \
	(__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
	 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
	 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
/*
 * Host interrupt status defines for 1860
 */
/* CT2 relocates the mailbox/error bits; queue bits are unchanged. */
#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
#define __HFN_INT_ERR_PSS_CT2 0x00040000U
#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
#define __HFN_INT_ERR_WGN_CT2 0x00400000U
#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
#define __HFN_INT_ERR_MASK_CT2 \
	(__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
	 __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
	 __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
	 __HFN_INT_ERR_LEHTX_CT2)
#define __HFN_INT_FN0_MASK_CT2 \
	(__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
	 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
	 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
#define __HFN_INT_FN1_MASK_CT2 \
	(__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
	 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
	 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
/*
 * asic memory map.
 */
/* Shared memory paging: 32 KiB pages; split an address into page
 * number and in-page offset.
 */
#define PSS_SMEM_PAGE_START 0x8000
#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
#endif /* __BFI_REG_H__ */

/* ---- end of bfi_reg.h; the next section is bna.h ---- */
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BNA_H__
#define __BNA_H__
#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Macros and constants */
#define BNA_IOC_TIMER_FREQ 200
/* Log string size */
#define BNA_MESSAGE_SIZE 256
/* Odd-numbered RxQ ids are the "small buffer" queues. */
#define bna_is_small_rxq(_id) ((_id) & 0x1)
#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))
#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
/* Rounds x DOWN to the nearest power of two (in place); x == 0 stays 0. */
#define BNA_TO_POWER_OF_2(x) \
do { \
	int _shift = 0; \
	while ((x) && (x) != 1) { \
		(x) >>= 1; \
		_shift++; \
	} \
	(x) <<= _shift; \
} while (0)
/* Rounds x UP to the nearest power of two (in place). */
#define BNA_TO_POWER_OF_2_HIGH(x) \
do { \
	int n = 1; \
	while (n < (x)) \
		n <<= 1; \
	(x) = n; \
} while (0)
/*
 * input : _addr-> os dma addr in host endian format,
 * output : _bna_dma_addr-> pointer to hw dma addr
 */
/* Hardware expects DMA addresses big-endian, split into msb/lsb words. */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
do { \
	u64 tmp_addr = \
	cpu_to_be64((u64)(_addr)); \
	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)
/*
 * input : _bna_dma_addr-> pointer to hw dma addr
 * output : _addr-> os dma addr in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
do { \
	(_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
	| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
/* container_of-style helper: recover the enclosing struct from a
 * pointer to one of its fields.
 */
#define containing_rec(addr, type, field) \
	((type *)((unsigned char *)(addr) - \
	(unsigned char *)(&((type *)0)->field)))
/* One 64-byte Tx work item holds up to 4 scatter/gather vectors. */
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
/* Translate a logical queue index into a pointer within the queue's
 * page table, also returning how many elements remain in that page.
 */
#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
	unsigned int page_index;	/* index within a page */ \
	void *page_addr; \
	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
	page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}
/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
	unsigned int page_index;	/* index within a page */ \
	void *page_addr; \
	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
	page_addr = (_qpt_ptr)[((_qe_idx) >> \
	BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}
/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
	unsigned int page_index;	/* index within a page */ \
	void *page_addr; \
	\
	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
	page_addr = (_qpt_ptr)[((_qe_idx) >> \
	BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
}
/* Circular-queue index helpers.  Queue depths are powers of two, so
 * wrap-around arithmetic is done with "& (depth - 1)".
 * NOTE(review): BNA_QE_IN_USE_CNT previously used an unparenthesized
 * "_q_depth - 1"; all macro arguments are now fully parenthesized
 * (standard macro hygiene; no behavior change for existing callers).
 */
#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
	(&((_cast *)(_q_base))[(_qe_idx)])
/* Number of elements from _qe_idx to the end of the ring. */
#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)
/* Elements advanced between two index snapshots, modulo depth. */
#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
/* One slot is always kept empty to distinguish full from empty. */
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
	 ((_q_depth) - 1))
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
	 ((_q_depth) - 1))
#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
#define BNA_Q_PI_ADD(_q_ptr, _num) \
	(_q_ptr)->q.producer_index = \
	(((_q_ptr)->q.producer_index + (_num)) & \
	((_q_ptr)->q.q_depth - 1))
#define BNA_Q_CI_ADD(_q_ptr, _num) \
	(_q_ptr)->q.consumer_index = \
	(((_q_ptr)->q.consumer_index + (_num)) \
	& ((_q_ptr)->q.q_depth - 1))
#define BNA_Q_FREE_COUNT(_q_ptr) \
	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
#define BNA_Q_IN_USE_COUNT(_q_ptr) \
	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
#define BNA_LARGE_PKT_SIZE 1000
#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
do { \
if ((_len) > BNA_LARGE_PKT_SIZE) { \
(_pkt)->large_pkt_cnt++; \
} else { \
(_pkt)->small_pkt_cnt++; \
} \
} while (0)
#define call_rxf_stop_cbfn(rxf) \
do { \
if ((rxf)->stop_cbfn) { \
void (*cbfn)(struct bna_rx *); \
struct bna_rx *cbarg; \
cbfn = (rxf)->stop_cbfn; \
cbarg = (rxf)->stop_cbarg; \
(rxf)->stop_cbfn = NULL; \
(rxf)->stop_cbarg = NULL; \
cbfn(cbarg); \
} \
} while (0)
#define call_rxf_start_cbfn(rxf) \
do { \
if ((rxf)->start_cbfn) { \
void (*cbfn)(struct bna_rx *); \
struct bna_rx *cbarg; \
cbfn = (rxf)->start_cbfn; \
cbarg = (rxf)->start_cbarg; \
(rxf)->start_cbfn = NULL; \
(rxf)->start_cbarg = NULL; \
cbfn(cbarg); \
} \
} while (0)
#define call_rxf_cam_fltr_cbfn(rxf) \
do { \
if ((rxf)->cam_fltr_cbfn) { \
void (*cbfn)(struct bnad *, struct bna_rx *); \
struct bnad *cbarg; \
cbfn = (rxf)->cam_fltr_cbfn; \
cbarg = (rxf)->cam_fltr_cbarg; \
(rxf)->cam_fltr_cbfn = NULL; \
(rxf)->cam_fltr_cbarg = NULL; \
cbfn(cbarg, rxf->rx); \
} \
} while (0)
#define call_rxf_pause_cbfn(rxf) \
do { \
if ((rxf)->oper_state_cbfn) { \
void (*cbfn)(struct bnad *, struct bna_rx *); \
struct bnad *cbarg; \
cbfn = (rxf)->oper_state_cbfn; \
cbarg = (rxf)->oper_state_cbarg; \
(rxf)->oper_state_cbfn = NULL; \
(rxf)->oper_state_cbarg = NULL; \
cbfn(cbarg, rxf->rx); \
} \
} while (0)
#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
/* Generic Rx-mode bookkeeping used by the promisc/default/allmulti
 * wrappers below: "mode" holds the currently active modes, "bitmask"
 * marks modes this function has requested.  A mode is "enabled" when
 * requested and active, "disabled" when requested but inactive, and
 * "inactive" clears both.
 * NOTE(review): macro arguments were previously unparenthesized; they
 * are now fully parenthesized (macro hygiene; existing callers pass
 * simple identifiers, so behavior is unchanged).
 */
#define is_xxx_enable(mode, bitmask, xxx) \
	(((bitmask) & (xxx)) && ((mode) & (xxx)))
#define is_xxx_disable(mode, bitmask, xxx) \
	(((bitmask) & (xxx)) && !((mode) & (xxx)))
#define xxx_enable(mode, bitmask, xxx) \
do { \
	(bitmask) |= (xxx); \
	(mode) |= (xxx); \
} while (0)
#define xxx_disable(mode, bitmask, xxx) \
do { \
	(bitmask) |= (xxx); \
	(mode) &= ~(xxx); \
} while (0)
#define xxx_inactive(mode, bitmask, xxx) \
do { \
	(bitmask) &= ~(xxx); \
	(mode) &= ~(xxx); \
} while (0)
/* Mode-specific wrappers over the generic xxx_* macros above. */
#define is_promisc_enable(mode, bitmask) \
	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
#define is_promisc_disable(mode, bitmask) \
	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
#define promisc_enable(mode, bitmask) \
	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
#define promisc_disable(mode, bitmask) \
	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
#define promisc_inactive(mode, bitmask) \
	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
#define is_default_enable(mode, bitmask) \
	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
#define is_default_disable(mode, bitmask) \
	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
#define default_enable(mode, bitmask) \
	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
#define default_disable(mode, bitmask) \
	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
#define default_inactive(mode, bitmask) \
	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
#define is_allmulti_enable(mode, bitmask) \
	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
#define is_allmulti_disable(mode, bitmask) \
	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
#define allmulti_enable(mode, bitmask) \
	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
#define allmulti_disable(mode, bitmask) \
	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
#define allmulti_inactive(mode, bitmask) \
	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
/* Resolve an Rx path's queue pair by its type: q0 is the (only/large/
 * data) queue, q1 the (small/header) queue or NULL for single.
 */
#define GET_RXQS(rxp, q0, q1) do { \
	switch ((rxp)->type) { \
	case BNA_RXP_SINGLE: \
		(q0) = rxp->rxq.single.only; \
		(q1) = NULL; \
		break; \
	case BNA_RXP_SLR: \
		(q0) = rxp->rxq.slr.large; \
		(q1) = rxp->rxq.slr.small; \
		break; \
	case BNA_RXP_HDS: \
		(q0) = rxp->rxq.hds.data; \
		(q1) = rxp->rxq.hds.hdr; \
		break; \
	} \
} while (0)
#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)
#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
/* Linear search of the active Tx/Rx lists by resource id; yields NULL
 * in _tx/_rx when no match is found.
 */
#define bna_tx_from_rid(_bna, _rid, _tx) \
do { \
	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
	struct bna_tx *__tx; \
	struct list_head *qe; \
	_tx = NULL; \
	list_for_each(qe, &__tx_mod->tx_active_q) { \
		__tx = (struct bna_tx *)qe; \
		if (__tx->rid == (_rid)) { \
			(_tx) = __tx; \
			break; \
		} \
	} \
} while (0)
#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
	struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
	struct bna_rx *__rx; \
	struct list_head *qe; \
	_rx = NULL; \
	list_for_each(qe, &__rx_mod->rx_active_q) { \
		__rx = (struct bna_rx *)qe; \
		if (__rx->rid == (_rid)) { \
			(_rx) = __rx; \
			break; \
		} \
	} \
} while (0)
/* Accessors for the unicast/multicast CAM free and delete queues. */
#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
/* Inline functions */

/**
 * bna_mac_find - look up a MAC address on a list of bna_mac entries
 * @q: list head of struct bna_mac entries (qe is their first member)
 * @addr: MAC address to match (compared via BNA_MAC_IS_EQUAL)
 *
 * Return: the first matching entry, or NULL if @addr is not on the list.
 */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
	struct list_head *entry;

	list_for_each(entry, q) {
		struct bna_mac *candidate = (struct bna_mac *)entry;

		if (BNA_MAC_IS_EQUAL(candidate->addr, addr))
			return candidate;
	}
	return NULL;
}
/* Shorthand for the adapter attributes cached under the IOC-Ethernet object */
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
/* Function prototypes */
/* BNA */
/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
struct bfa_pcidev *pcidev,
struct bna_res_info *res_info);
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
/* MBOX */
/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
/* ETHPORT */
/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
/* TX MODULE AND TX */
/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);
/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
/* APIs for ENET */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_config *tx_cfg,
const struct bna_tx_event_cbfn *tx_cbfn,
struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
/* RX MODULE, RX, RXF */
/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
struct bfi_msgq_mhdr *msghdr);
/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
/* APIs for ENET */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rx_config *rx_cfg,
const struct bna_rx_event_cbfn *rx_cbfn,
struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
void
bna_rx_mcast_delall(struct bna_rx *rx,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
enum bna_rxmode bitmask,
void (*cbfn)(struct bnad *, struct bna_rx *));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
void bna_rx_vlan_strip_enable(struct bna_rx *rx);
void bna_rx_vlan_strip_disable(struct bna_rx *rx);
/* ENET */
/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);
/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);
/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
struct bna_pause_config *pause_config,
void (*cbfn)(struct bnad *));
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
/* IOCETH */
/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
enum bna_cleanup_type type);
/* BNAD */
/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
enum bna_link_status status);
/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);
/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
struct bna_stats *stats);
#endif /* __BNA_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,410 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/* File for interrupt macros and functions */
#ifndef __BNA_HW_DEFS_H__
#define __BNA_HW_DEFS_H__
#include "bfi_reg.h"
/* SW imposed limits */
/* Default per-function queue counts and RIT size */
#define BFI_ENET_DEF_TXQ		1
#define BFI_ENET_DEF_RXP		1
#define BFI_ENET_DEF_UCAM		1
#define BFI_ENET_DEF_RITSZ		1
#define BFI_ENET_MAX_MCAM		256
#define BFI_INVALID_RID			-1
#define BFI_IBIDX_SIZE			4
/* VLAN filter table indexing: 32-bit words, grouped in 512-bit blocks */
#define BFI_VLAN_WORD_SHIFT		5	/* 32 bits */
#define BFI_VLAN_WORD_MASK		0x1F
#define BFI_VLAN_BLOCK_SHIFT		9	/* 512 bits */
#define BFI_VLAN_BMASK_ALL		0xFF
/* Interrupt coalescing limits and per-direction defaults */
#define BFI_COALESCING_TIMER_UNIT	5	/* 5us */
#define BFI_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
#define BFI_MAX_INTERPKT_COUNT		0xFF
#define BFI_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
#define BFI_TX_COALESCING_TIMEO		20	/* 20 * 5 = 100us */
#define BFI_TX_INTERPKT_COUNT		12	/* Pkt Cnt = 12 */
#define BFI_TX_INTERPKT_TIMEO		15	/* 15 * 0.5 = 7.5us */
#define BFI_RX_COALESCING_TIMEO		12	/* 12 * 5 = 60us */
#define BFI_RX_INTERPKT_COUNT		6	/* Pkt Cnt = 6 */
#define BFI_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
/* Work-item sizes for the TxQ / RxQ / CQ rings */
#define BFI_TXQ_WI_SIZE			64	/* bytes */
#define BFI_RXQ_WI_SIZE			8	/* bytes */
#define BFI_CQ_WI_SIZE			16	/* bytes */
#define BFI_TX_MAX_WRR_QUOTA		0xFFF
/* Scatter-gather limits per work item / packet */
#define BFI_TX_MAX_VECTORS_PER_WI	4
#define BFI_TX_MAX_VECTORS_PER_PKT	0xFF
#define BFI_TX_MAX_DATA_PER_VECTOR	0xFFFF
#define BFI_TX_MAX_DATA_PER_PKT		0xFFFFFF
/* Small Q buffer size */
#define BFI_SMALL_RXBUF_SIZE		128
#define BFI_TX_MAX_PRIO			8
#define BFI_TX_PRIO_MAP_ALL		0xFF
/*
 *
 * Register definitions and macros
 *
 */
/* Size of the CT ASIC PCI register window */
#define BNA_PCI_REG_CT_ADDRSZ	(0x40000)
/*
 * CT ASIC: pick this PCI function's interrupt status/mask register
 * addresses from a per-function offset table and cache the mapped
 * addresses in bna->regs.
 */
#define ct_reg_addr_init(_bna, _pcidev)					\
{									\
	struct bna_reg_offset reg_offset[] =				\
	{{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},				\
	 {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK},				\
	 {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK},				\
	 {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} };			\
									\
	(_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva +		\
				reg_offset[(_pcidev)->pci_func].fn_int_status;\
	(_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva +		\
				reg_offset[(_pcidev)->pci_func].fn_int_mask;\
}
/* CT ASIC: cache the mailbox / error / halt status and mask bit groups */
#define ct_bit_defn_init(_bna, _pcidev)					\
{									\
	(_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 |		\
					__HFN_INT_MBOX_LPU1);		\
	(_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 |		\
					__HFN_INT_MBOX_LPU1);		\
	(_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK);		\
	(_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK);		\
	(_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT;		\
	(_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT;		\
}
/* CT2 ASIC: single per-function interrupt status/mask register pair */
#define ct2_reg_addr_init(_bna, _pcidev)				\
{									\
	(_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva +		\
				CT2_HOSTFN_INT_STATUS;			\
	(_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva +		\
				CT2_HOSTFN_INTR_MASK;			\
}
/* CT2 ASIC: bit groups differ from CT (LPU/CPQ encodings) */
#define ct2_bit_defn_init(_bna, _pcidev)				\
{									\
	(_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 |	\
					__HFN_INT_MBOX_LPU1_CT2);	\
	(_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 |	\
					__HFN_INT_MBOX_LPU1_CT2);	\
	(_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2);	\
	(_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2);	\
	(_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2;		\
	(_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2;		\
}
/* Dispatch register/bit initialization on the PCI device id (CT vs CT2) */
#define bna_reg_addr_init(_bna, _pcidev)				\
{									\
	switch ((_pcidev)->device_id) {					\
	case PCI_DEVICE_ID_BROCADE_CT:					\
		ct_reg_addr_init((_bna), (_pcidev));			\
		ct_bit_defn_init((_bna), (_pcidev));			\
		break;							\
	case BFA_PCI_DEVICE_ID_CT2:					\
		ct2_reg_addr_init((_bna), (_pcidev));			\
		ct2_bit_defn_init((_bna), (_pcidev));			\
		break;							\
	}								\
}
/* Physical port id as reported by the IOC */
#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
/* Interrupt related bits, flags and macros */
/* Low 16 bits of the interrupt status word carry IB (data path) events */
#define IB_STATUS_BITS		0x0000ffff
/* Classify an interrupt status word against the cached bit groups */
#define BNA_IS_MBOX_INTR(_bna, _intr_status)		\
	((_intr_status) & (_bna)->bits.mbox_status_bits)
#define BNA_IS_HALT_INTR(_bna, _intr_status)		\
	((_intr_status) & (_bna)->bits.halt_status_bits)
#define BNA_IS_ERR_INTR(_bna, _intr_status)	\
	((_intr_status) & (_bna)->bits.error_status_bits)
#define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status)	\
	(BNA_IS_MBOX_INTR(_bna, _intr_status) |		\
	BNA_IS_ERR_INTR(_bna, _intr_status))
#define BNA_IS_INTX_DATA_INTR(_intr_status)		\
	((_intr_status) & IB_STATUS_BITS)
/*
 * Clear the f/w init-halt bit in the LL halt register.  The final read
 * back is presumably to flush/post the MMIO write — confirm against the
 * h/w programming guide.
 */
#define bna_halt_clear(_bna)						\
do {									\
	u32 init_halt;							\
	init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt);		\
	init_halt &= ~__FW_INIT_HALT_P;					\
	writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt);		\
	init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt);	\
} while (0)
/* Mask all INTx sources, returning the previous mask in _cur_mask */
#define bna_intx_disable(_bna, _cur_mask)		\
{							\
	(_cur_mask) = readl((_bna)->regs.fn_int_mask);	\
	writel(0xffffffff, (_bna)->regs.fn_int_mask);	\
}
/* Program a new INTx mask (typically one saved by bna_intx_disable()) */
#define bna_intx_enable(bna, new_mask)			\
	writel((new_mask), (bna)->regs.fn_int_mask)
/* Mask mailbox + error interrupts; trailing read posts the write */
#define bna_mbox_intr_disable(bna)					\
do {									\
	u32 mask;							\
	mask = readl((bna)->regs.fn_int_mask);				\
	writel((mask | (bna)->bits.mbox_mask_bits |			\
		(bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \
	mask = readl((bna)->regs.fn_int_mask);				\
} while (0)
/* Unmask mailbox + error interrupts; trailing read posts the write */
#define bna_mbox_intr_enable(bna)					\
do {									\
	u32 mask;							\
	mask = readl((bna)->regs.fn_int_mask);				\
	writel((mask & ~((bna)->bits.mbox_mask_bits |			\
		(bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\
	mask = readl((bna)->regs.fn_int_mask);				\
} while (0)
/*
 * Read the interrupt status and acknowledge everything except the mailbox
 * bits (mailbox events are acked later by the mailbox handler).
 */
#define bna_intr_status_get(_bna, _status)				\
{									\
	(_status) = readl((_bna)->regs.fn_int_status);			\
	if (_status) {							\
		writel(((_status) & ~(_bna)->bits.mbox_status_bits),	\
			(_bna)->regs.fn_int_status);			\
	}								\
}
/*
 * MAX ACK EVENTS : No. of acks that can be accumulated in driver,
 * before acking to h/w. The no. of bits is 16 in the doorbell register,
 * however we keep this limited to 15 bits.
 * This is because around the edge of 64K boundary (16 bits), one
 * single poll can make the accumulated ACK counter cross the 64K boundary,
 * causing problems, when we try to ack with a value greater than 64K.
 * 15 bits (32K) should be large enough to accumulate, anyways, and the max.
 * acked events to h/w can be (32K + max poll weight) (currently 64).
 */
#define BNA_IB_MAX_ACK_EVENTS		(1 << 15)
/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi)	(0x80000000 | (_pi))
#define BNA_DOORBELL_Q_STOP		(0x40000000)

/* These macros build the data portion of the IB doorbell */
#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events)		\
	(0x80000000 | ((_timeout) << 16) | (_events))
#define BNA_DOORBELL_IB_INT_DISABLE	(0x40000000)

/*
 * Set the coalescing timer for the given ib.
 *
 * NOTE: the statement-like macros below deliberately carry no trailing
 * semicolon (callers supply it), so they stay valid inside unbraced
 * if/else bodies; the original definitions embedded a stray ';'.
 */
#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer)		\
	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))

/* Acks 'events' # of events for a given ib while disabling interrupts */
#define bna_ib_ack_disable_irq(_i_dbell, _events)			\
	writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)),			\
		(_i_dbell)->doorbell_addr)

/* Acks 'events' # of events for a given ib */
#define bna_ib_ack(_i_dbell, _events)					\
	writel(((_i_dbell)->doorbell_ack | (_events)),			\
		(_i_dbell)->doorbell_addr)

/*
 * Start interrupt generation on an IB: unmask its INTx vector when
 * running in INTx mode, program the coalescing timer and, when
 * _is_regular is set, ack zero events to arm the doorbell.
 * Wrapped in do { } while (0) so the macro behaves as one statement.
 */
#define bna_ib_start(_bna, _ib, _is_regular)				\
do {									\
	u32 intx_mask;							\
	struct bna_ib *ib = _ib;					\
	if (ib->intr_type == BNA_INTR_T_INTX) {				\
		bna_intx_disable((_bna), intx_mask);			\
		intx_mask &= ~(ib->intr_vector);			\
		bna_intx_enable((_bna), intx_mask);			\
	}								\
	bna_ib_coalescing_timer_set(&ib->door_bell,			\
			ib->coalescing_timeo);				\
	if (_is_regular)						\
		bna_ib_ack(&ib->door_bell, 0);				\
} while (0)

/*
 * Stop interrupt generation on an IB: disable its doorbell and mask its
 * INTx vector when running in INTx mode.
 */
#define bna_ib_stop(_bna, _ib)						\
do {									\
	u32 intx_mask;							\
	struct bna_ib *ib = _ib;					\
	writel(BNA_DOORBELL_IB_INT_DISABLE,				\
		ib->door_bell.doorbell_addr);				\
	if (ib->intr_type == BNA_INTR_T_INTX) {				\
		bna_intx_disable((_bna), intx_mask);			\
		intx_mask |= ib->intr_vector;				\
		bna_intx_enable((_bna), intx_mask);			\
	}								\
} while (0)

/* Ring the TxQ / RxQ doorbell with the current producer index */
#define bna_txq_prod_indx_doorbell(_tcb)				\
	writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index),		\
		(_tcb)->q_dbell)
#define bna_rxq_prod_indx_doorbell(_rcb)				\
	writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index),		\
		(_rcb)->q_dbell)
/* TxQ, RxQ, CQ related bits, offsets, macros */
/* TxQ Entry Opcodes */
#define BNA_TXQ_WI_SEND			(0x402)	/* Single Frame Transmission */
#define BNA_TXQ_WI_SEND_LSO		(0x403)	/* Multi-Frame Transmission */
#define BNA_TXQ_WI_EXTENSION		(0x104)	/* Extension WI */
/* TxQ Entry Control Flags */
#define BNA_TXQ_WI_CF_FCOE_CRC		(1 << 8)
#define BNA_TXQ_WI_CF_IPID_MODE		(1 << 5)
#define BNA_TXQ_WI_CF_INS_PRIO		(1 << 4)
#define BNA_TXQ_WI_CF_INS_VLAN		(1 << 3)
#define BNA_TXQ_WI_CF_UDP_CKSUM		(1 << 2)
#define BNA_TXQ_WI_CF_TCP_CKSUM		(1 << 1)
#define BNA_TXQ_WI_CF_IP_CKSUM		(1 << 0)
/* Pack L4 header size into the high 6 bits, offset into the low 10 bits */
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
		(((_hdr_size) << 10) | ((_offset) & 0x3FF))
/*
 * Completion Q defines
 */
/*
 * CQ Entry Flags.
 *
 * Expressed as unsigned (1U) shifts: BNA_CQ_EF_EOP lives in bit 31 and
 * left-shifting a signed 1 into the sign bit is undefined behavior in C
 * (CERT INT34-C).  The resulting u32 values are unchanged.
 */
#define	BNA_CQ_EF_MAC_ERROR	(1U <<  0)
#define	BNA_CQ_EF_FCS_ERROR	(1U <<  1)
#define	BNA_CQ_EF_TOO_LONG	(1U <<  2)
#define	BNA_CQ_EF_FC_CRC_OK	(1U <<  3)
#define	BNA_CQ_EF_RSVD1		(1U <<  4)
#define	BNA_CQ_EF_L4_CKSUM_OK	(1U <<  5)
#define	BNA_CQ_EF_L3_CKSUM_OK	(1U <<  6)
#define	BNA_CQ_EF_HDS_HEADER	(1U <<  7)
#define	BNA_CQ_EF_UDP		(1U <<  8)
#define	BNA_CQ_EF_TCP		(1U <<  9)
#define	BNA_CQ_EF_IP_OPTIONS	(1U << 10)
#define	BNA_CQ_EF_IPV6		(1U << 11)
#define	BNA_CQ_EF_IPV4		(1U << 12)
#define	BNA_CQ_EF_VLAN		(1U << 13)
#define	BNA_CQ_EF_RSS		(1U << 14)
#define	BNA_CQ_EF_RSVD2		(1U << 15)
#define	BNA_CQ_EF_MCAST_MATCH	(1U << 16)
#define	BNA_CQ_EF_MCAST		(1U << 17)
#define	BNA_CQ_EF_BCAST		(1U << 18)
#define	BNA_CQ_EF_REMOTE	(1U << 19)
#define	BNA_CQ_EF_LOCAL		(1U << 20)
/* CAT2 ASIC does not use bit 21 as per the SPEC.
 * Bit 31 is set in every end of frame completion
 */
#define	BNA_CQ_EF_EOP		(1U << 31)
/* Data structures */
/* Per-PCI-function interrupt register offsets (CT ASIC offset table) */
struct bna_reg_offset {
	u32 fn_int_status;
	u32 fn_int_mask;
};
/* Interrupt bit groups cached at init by the *_bit_defn_init() macros */
struct bna_bit_defn {
	u32 mbox_status_bits;
	u32 mbox_mask_bits;
	u32 error_status_bits;
	u32 error_mask_bits;
	u32 halt_status_bits;
	u32 halt_mask_bits;
};
/* Mapped interrupt status/mask register addresses for this function */
struct bna_reg {
	void __iomem *fn_int_status;
	void __iomem *fn_int_mask;
};
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
/* 64-bit DMA address split into two 32-bit halves */
struct bna_dma_addr {
	u32		msb;
	u32		lsb;
};
/* One scatter-gather element of a Tx work item */
struct bna_txq_wi_vector {
	u16		reserved;
	u16		length;		/* Only 14 LSB are valid */
	struct bna_dma_addr host_addr;	/* Tx-Buf DMA addr */
};
/* TxQ Entry Structure
 *
 * BEWARE: Load values into this structure with correct endianness.
 */
struct bna_txq_entry {
	union {
		/* Regular send / LSO work-item header */
		struct {
			u8 reserved;
			u8 num_vectors;	/* number of vectors present */
			u16 opcode;	/* Either */
					/* BNA_TXQ_WI_SEND or */
					/* BNA_TXQ_WI_SEND_LSO */
			u16 flags;	/* OR of all the flags */
			u16 l4_hdr_size_n_offset;
			u16 vlan_tag;
			u16 lso_mss;	/* Only 14 LSB are valid */
			u32 frame_length;	/* Only 24 LSB are valid */
		} wi;
		/* Extension work item: carries extra vectors only */
		struct {
			u16 reserved;
			u16 opcode;	/* Must be */
					/* BNA_TXQ_WI_EXTENSION */
			u32 reserved2[3];	/* Place holder for */
						/* removed vector (12 bytes) */
		} wi_ext;
	} hdr;
	struct bna_txq_wi_vector vector[4];
};
/* RxQ Entry Structure */
struct bna_rxq_entry {		/* Rx-Buffer */
	struct bna_dma_addr host_addr;	/* Rx-Buffer DMA address */
};
/* CQ Entry Structure */
/* Completion written by h/w; flags is a mask of BNA_CQ_EF_* bits */
struct bna_cq_entry {
	u32 flags;
	u16 vlan_tag;
	u16 length;
	u32 rss_hash;
	u8 valid;
	u8 reserved1;
	u8 reserved2;
	u8 rxq_id;
};
#endif	/* __BNA_HW_DEFS_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,956 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BNA_TYPES_H__
#define __BNA_TYPES_H__
#include "cna.h"
#include "bna_hw_defs.h"
#include "bfa_cee.h"
#include "bfa_msgq.h"
/* Forward declarations */
struct bna_mcam_handle;
struct bna_txq;
struct bna_tx;
struct bna_rxq;
struct bna_cq;
struct bna_rx;
struct bna_rxf;
struct bna_enet;
struct bna;
struct bnad;
/* Enums, primitive data types */
/* Generic enabled/disabled state */
enum bna_status {
	BNA_STATUS_T_DISABLED	= 0,
	BNA_STATUS_T_ENABLED	= 1
};
/* Hard vs soft (graceful) cleanup of queues */
enum bna_cleanup_type {
	BNA_HARD_CLEANUP	= 0,
	BNA_SOFT_CLEANUP	= 1
};
/* Status codes returned to / passed into BNAD callbacks */
enum bna_cb_status {
	BNA_CB_SUCCESS		= 0,
	BNA_CB_FAIL		= 1,
	BNA_CB_INTERRUPT	= 2,
	BNA_CB_BUSY		= 3,
	BNA_CB_INVALID_MAC	= 4,
	BNA_CB_MCAST_LIST_FULL	= 5,
	BNA_CB_UCAST_CAM_FULL	= 6,
	BNA_CB_WAITING		= 7,
	BNA_CB_NOT_EXEC		= 8
};
/* Kind of resource described by a bna_res_info entry */
enum bna_res_type {
	BNA_RES_T_MEM		= 1,
	BNA_RES_T_INTR		= 2
};
/* Kernel virtual vs DMA-able memory */
enum bna_mem_type {
	BNA_MEM_T_KVA		= 1,
	BNA_MEM_T_DMA		= 2
};
/* Interrupt delivery mode */
enum bna_intr_type {
	BNA_INTR_T_INTX		= 1,
	BNA_INTR_T_MSIX		= 2
};
/* Indices into the device-level resource request array */
enum bna_res_req_type {
	BNA_RES_MEM_T_COM		= 0,
	BNA_RES_MEM_T_ATTR		= 1,
	BNA_RES_MEM_T_FWTRC		= 2,
	BNA_RES_MEM_T_STATS		= 3,
	BNA_RES_T_MAX
};
/* Indices into the module-level resource request array */
enum bna_mod_res_req_type {
	BNA_MOD_RES_MEM_T_TX_ARRAY	= 0,
	BNA_MOD_RES_MEM_T_TXQ_ARRAY	= 1,
	BNA_MOD_RES_MEM_T_RX_ARRAY	= 2,
	BNA_MOD_RES_MEM_T_RXP_ARRAY	= 3,
	BNA_MOD_RES_MEM_T_RXQ_ARRAY	= 4,
	BNA_MOD_RES_MEM_T_UCMAC_ARRAY	= 5,
	BNA_MOD_RES_MEM_T_MCMAC_ARRAY	= 6,
	BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
	BNA_MOD_RES_T_MAX
};
/* Indices into the per-Tx-object resource request array */
enum bna_tx_res_req_type {
	BNA_TX_RES_MEM_T_TCB	= 0,
	BNA_TX_RES_MEM_T_UNMAPQ	= 1,
	BNA_TX_RES_MEM_T_QPT	= 2,
	BNA_TX_RES_MEM_T_SWQPT	= 3,
	BNA_TX_RES_MEM_T_PAGE	= 4,
	BNA_TX_RES_MEM_T_IBIDX	= 5,
	BNA_TX_RES_INTR_T_TXCMPL = 6,
	BNA_TX_RES_T_MAX,
};
/* Indices into the per-Rx-object resource request array */
enum bna_rx_mem_type {
	BNA_RX_RES_MEM_T_CCB		= 0,	/* CQ context */
	/* NOTE(review): comment below duplicates CCB's — RCB is the RxQ
	 * control block; confirm intended wording.
	 */
	BNA_RX_RES_MEM_T_RCB		= 1,	/* CQ context */
	BNA_RX_RES_MEM_T_UNMAPHQ	= 2,
	BNA_RX_RES_MEM_T_UNMAPDQ	= 3,
	BNA_RX_RES_MEM_T_CQPT		= 4,
	BNA_RX_RES_MEM_T_CSWQPT		= 5,
	BNA_RX_RES_MEM_T_CQPT_PAGE	= 6,
	BNA_RX_RES_MEM_T_HQPT		= 7,
	BNA_RX_RES_MEM_T_DQPT		= 8,
	BNA_RX_RES_MEM_T_HSWQPT		= 9,
	BNA_RX_RES_MEM_T_DSWQPT		= 10,
	BNA_RX_RES_MEM_T_DPAGE		= 11,
	BNA_RX_RES_MEM_T_HPAGE		= 12,
	BNA_RX_RES_MEM_T_IBIDX		= 13,
	BNA_RX_RES_MEM_T_RIT		= 14,
	BNA_RX_RES_T_INTR		= 15,
	BNA_RX_RES_T_MAX		= 16
};
/* Regular vs loopback Tx object */
enum bna_tx_type {
	BNA_TX_T_REGULAR	= 0,
	BNA_TX_T_LOOPBACK	= 1,
};
/* Tx object state flags (bitmask) */
enum bna_tx_flags {
	BNA_TX_F_ENET_STARTED	= 1,
	BNA_TX_F_ENABLED	= 2,
	BNA_TX_F_PRIO_CHANGED	= 4,
	BNA_TX_F_BW_UPDATED	= 8,
};
/* Tx module state flags (bitmask) */
enum bna_tx_mod_flags {
	BNA_TX_MOD_F_ENET_STARTED	= 1,
	BNA_TX_MOD_F_ENET_LOOPBACK	= 2,
};
/* Regular vs loopback Rx object */
enum bna_rx_type {
	BNA_RX_T_REGULAR	= 0,
	BNA_RX_T_LOOPBACK	= 1,
};
/* Rx path queue topology: single queue, small/large, or header/data split */
enum bna_rxp_type {
	BNA_RXP_SINGLE		= 1,
	BNA_RXP_SLR		= 2,
	BNA_RXP_HDS		= 3
};
/* Rx filtering modes (bitmask) */
enum bna_rxmode {
	BNA_RXMODE_PROMISC	= 1,
	BNA_RXMODE_DEFAULT	= 2,
	BNA_RXMODE_ALLMULTI	= 4
};
/* Events consumed by the Rx state machine */
enum bna_rx_event {
	RX_E_START			= 1,
	RX_E_STOP			= 2,
	RX_E_FAIL			= 3,
	RX_E_STARTED			= 4,
	RX_E_STOPPED			= 5,
	RX_E_RXF_STARTED		= 6,
	RX_E_RXF_STOPPED		= 7,
	RX_E_CLEANUP_DONE		= 8,
};
/* Rx object state flags (bitmask) */
enum bna_rx_flags {
	BNA_RX_F_ENET_STARTED	= 1,
	BNA_RX_F_ENABLED	= 2,
};
/* Rx module state flags (bitmask) */
enum bna_rx_mod_flags {
	BNA_RX_MOD_F_ENET_STARTED	= 1,
	BNA_RX_MOD_F_ENET_LOOPBACK	= 2,
};
/* Rx function (RxF) state flags */
enum bna_rxf_flags {
	BNA_RXF_F_PAUSED		= 1,
};
/* Events consumed by the RxF state machine */
enum bna_rxf_event {
	RXF_E_START			= 1,
	RXF_E_STOP			= 2,
	RXF_E_FAIL			= 3,
	RXF_E_CONFIG			= 4,
	RXF_E_PAUSE			= 5,
	RXF_E_RESUME			= 6,
	RXF_E_FW_RESP			= 7,
};
/* Ethernet port operating mode */
enum bna_enet_type {
	BNA_ENET_T_REGULAR		= 0,
	BNA_ENET_T_LOOPBACK_INTERNAL	= 1,
	BNA_ENET_T_LOOPBACK_EXTERNAL	= 2,
};
/* Link state reported to BNAD; CEE_UP implies link up with CEE */
enum bna_link_status {
	BNA_LINK_DOWN		= 0,
	BNA_LINK_UP		= 1,
	BNA_CEE_UP		= 2
};
/* Ethport state flags (bitmask) */
enum bna_ethport_flags {
	BNA_ETHPORT_F_ADMIN_UP		= 1,
	BNA_ETHPORT_F_PORT_ENABLED	= 2,
	BNA_ETHPORT_F_RX_STARTED	= 4,
};
/* Enet state flags (bitmask) */
enum bna_enet_flags {
	BNA_ENET_F_IOCETH_READY	= 1,
	BNA_ENET_F_ENABLED	= 2,
	BNA_ENET_F_PAUSE_CHANGED = 4,
	BNA_ENET_F_MTU_CHANGED	= 8
};
/* Pending RSS (re)configuration work (bitmask) */
enum bna_rss_flags {
	BNA_RSS_F_RIT_PENDING		= 1,
	BNA_RSS_F_CFG_PENDING		= 2,
	BNA_RSS_F_STATUS_PENDING	= 4,
};
/* Module init bookkeeping */
enum bna_mod_flags {
	BNA_MOD_F_INIT_DONE		= 1,
};
/* Packet-rate thresholds (pkts/sec) used by dynamic intr moderation */
enum bna_pkt_rates {
	BNA_PKT_RATE_10K		= 10000,
	BNA_PKT_RATE_20K		= 20000,
	BNA_PKT_RATE_30K		= 30000,
	BNA_PKT_RATE_40K		= 40000,
	BNA_PKT_RATE_50K		= 50000,
	BNA_PKT_RATE_60K		= 60000,
	BNA_PKT_RATE_70K		= 70000,
	BNA_PKT_RATE_80K		= 80000,
};
/* Load buckets indexing the DIM coalescing vector (r = pkt rate) */
enum bna_dim_load_types {
	BNA_LOAD_T_HIGH_4	= 0, /* 80K <= r */
	BNA_LOAD_T_HIGH_3	= 1, /* 60K <= r < 80K */
	BNA_LOAD_T_HIGH_2	= 2, /* 50K <= r < 60K */
	BNA_LOAD_T_HIGH_1	= 3, /* 40K <= r < 50K */
	BNA_LOAD_T_LOW_1	= 4, /* 30K <= r < 40K */
	BNA_LOAD_T_LOW_2	= 5, /* 20K <= r < 30K */
	BNA_LOAD_T_LOW_3	= 6, /* 10K <= r < 20K */
	BNA_LOAD_T_LOW_4	= 7, /* r < 10K */
	BNA_LOAD_T_MAX		= 8
};
/* Small- vs large-packet bias for DIM */
enum bna_dim_bias_types {
	BNA_BIAS_T_SMALL	= 0, /* small pkts > (large pkts * 2) */
	BNA_BIAS_T_LARGE	= 1, /* Not BNA_BIAS_T_SMALL */
	BNA_BIAS_T_MAX		= 2
};
#define BNA_MAX_NAME_SIZE	64
/* Numeric id + printable name pair */
struct bna_ident {
	int			id;
	char			name[BNA_MAX_NAME_SIZE];
};
/* A MAC address entry that can be linked on CAM pending/active lists */
struct bna_mac {
	/* This should be the first one */
	struct list_head	qe;
	u8			addr[ETH_ALEN];
	struct bna_mcam_handle *handle;
};
/* One allocated memory chunk: kernel VA + DMA address */
struct bna_mem_descr {
	u32		len;
	void		*kva;
	struct bna_dma_addr dma;
};
/* Request/record for "num" chunks of "len" bytes each */
struct bna_mem_info {
	enum bna_mem_type mem_type;
	u32		len;
	u32		num;
	u32		align_sz; /* 0/1 = no alignment */
	struct bna_mem_descr *mdl;
	void		*cookie; /* For bnad to unmap dma later */
};
/* A single interrupt vector number */
struct bna_intr_descr {
	int			vector;
};
/* Request/record for "num" interrupt vectors of the given type */
struct bna_intr_info {
	enum bna_intr_type intr_type;
	int			num;
	struct bna_intr_descr *idl;
};
/* A resource is either memory or interrupts */
union bna_res_u {
	struct bna_mem_info mem_info;
	struct bna_intr_info intr_info;
};
/* Tagged resource request/record */
struct bna_res_info {
	enum bna_res_type res_type;
	union bna_res_u		res_u;
};
/* HW QPT */
/* Queue page table: h/w and kernel views plus geometry */
struct bna_qpt {
	struct bna_dma_addr hw_qpt_ptr;
	void		*kv_qpt_ptr;
	u32		page_count;
	u32		page_size;
};
/* Adapter attributes returned by f/w in response to the attr request */
struct bna_attr {
	bool			fw_query_complete;
	int			num_txq;
	int			num_rxp;
	int			num_ucmac;
	int			num_mcmac;
	int			max_rit_size;
};
/* IOCEth */
/* IOC-Ethernet object: wraps the IOC and caches adapter attributes */
struct bna_ioceth {
	bfa_fsm_t		fsm;
	struct bfa_ioc ioc;
	struct bna_attr attr;
	struct bfa_msgq_cmd_entry msgq_cmd;
	struct bfi_enet_attr_req attr_req;
	/* Callback + arg for bna_ioceth_disable() completion */
	void (*stop_cbfn)(struct bnad *bnad);
	struct bnad *stop_cbarg;
	struct bna *bna;
};
/* Enet */
/* Pause configuration */
struct bna_pause_config {
	enum bna_status tx_pause;
	enum bna_status rx_pause;
};
/* Enet object: link-level configuration (pause, MTU) and child control */
struct bna_enet {
	bfa_fsm_t		fsm;
	enum bna_enet_flags flags;
	enum bna_enet_type type;
	struct bna_pause_config pause_config;
	int			mtu;
	/* Callback for bna_enet_disable(), enet_stop() */
	void (*stop_cbfn)(void *);
	void		*stop_cbarg;
	/* Callback for bna_enet_pause_config() */
	void (*pause_cbfn)(struct bnad *);
	/* Callback for bna_enet_mtu_set() */
	void (*mtu_cbfn)(struct bnad *);
	/* Wait counter for stopping Tx/Rx children */
	struct bfa_wc		chld_stop_wc;
	struct bfa_msgq_cmd_entry msgq_cmd;
	struct bfi_enet_set_pause_req pause_req;
	struct bna *bna;
};
/* Ethport */
/* Ethport object: tracks physical link / admin state and Rx readiness */
struct bna_ethport {
	bfa_fsm_t		fsm;
	enum bna_ethport_flags flags;
	enum bna_link_status link_status;
	int			rx_started_count;
	void (*stop_cbfn)(struct bna_enet *);
	void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);
	void (*link_cbfn)(struct bnad *, enum bna_link_status);
	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_enable_req admin_req;
		struct bfi_enet_diag_lb_req lpbk_req;
	} bfi_enet_cmd;
	struct bna *bna;
};
/* Interrupt Block */
/* Doorbell structure */
/* Mapped doorbell register plus the pre-built ack word written to it */
struct bna_ib_dbell {
	void __iomem   *doorbell_addr;
	u32		doorbell_ack;
};
/* IB structure */
/* Interrupt block: h/w index segment, doorbell and coalescing settings */
struct bna_ib {
	struct bna_dma_addr ib_seg_host_addr;
	void		*ib_seg_host_addr_kva;
	struct bna_ib_dbell door_bell;
	enum bna_intr_type	intr_type;
	int			intr_vector;
	u8			coalescing_timeo;	/* Unit is 5usec. */
	int			interpkt_count;
	int			interpkt_timeo;
};
/* Tx object */
/* Tx datapath control structure */
#define BNA_Q_NAME_SIZE		16
/* Per-TxQ control block shared between hot path and control path */
struct bna_tcb {
	/* Fast path */
	void			**sw_qpt;
	void			*sw_q;
	void			*unmap_q;
	u32		producer_index;
	u32		consumer_index;
	/* Written by h/w (IB index segment); hence volatile */
	volatile u32	*hw_consumer_index;
	u32		q_depth;
	void __iomem   *q_dbell;
	struct bna_ib_dbell *i_dbell;
	/* Control path */
	struct bna_txq *txq;
	struct bnad *bnad;
	void			*priv; /* BNAD's cookie */
	enum bna_intr_type intr_type;
	int			intr_vector;
	u8			priority; /* Current priority */
	unsigned long		flags; /* Used by bnad as required */
	int			id;
	char			name[BNA_Q_NAME_SIZE];
};
/* TxQ QPT and configuration */
struct bna_txq {
	/* This should be the first one */
	struct list_head	qe;
	u8			priority;
	struct bna_qpt qpt;
	struct bna_tcb *tcb;
	struct bna_ib ib;
	struct bna_tx *tx;
	int			hw_id;
	/* Per-queue s/w statistics */
	u64		tx_packets;
	u64		tx_bytes;
};
/* Tx object */
/* A Tx instance: state machine, its TxQ list and BNAD event callbacks */
struct bna_tx {
	/* This should be the first one */
	struct list_head	qe;
	int			rid;
	int			hw_id;
	bfa_fsm_t		fsm;
	enum bna_tx_flags flags;
	enum bna_tx_type type;
	int			num_txq;
	struct list_head	txq_q;
	u16			txf_vlan_id;
	/* Tx event handlers */
	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
	/* callback for bna_tx_disable(), bna_tx_stop() */
	void (*stop_cbfn)(void *arg, struct bna_tx *tx);
	void			*stop_cbarg;
	/* callback for bna_tx_prio_set() */
	void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_tx_cfg_req	cfg_req;
		struct bfi_enet_req		req;
		struct bfi_enet_tx_cfg_rsp	cfg_rsp;
	} bfi_enet_cmd;
	struct bna *bna;
	void			*priv;	/* bnad's cookie */
};
/* Tx object configuration used during creation */
struct bna_tx_config {
	int			num_txq;
	int			txq_depth;
	int			coalescing_timeo;
	enum bna_tx_type tx_type;
};
/* Callback table supplied by BNAD at bna_tx_create() time */
struct bna_tx_event_cbfn {
	/* Optional */
	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
	/* Mandatory */
	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
};
/* Tx module - keeps track of free, active tx objects */
struct bna_tx_mod {
	struct bna_tx *tx;		/* BFI_MAX_TXQ entries */
	struct bna_txq *txq;		/* BFI_MAX_TXQ entries */
	struct list_head	tx_free_q;
	struct list_head	tx_active_q;
	struct list_head	txq_free_q;
	/* callback for bna_tx_mod_stop() */
	void (*stop_cbfn)(struct bna_enet *enet);
	/* Wait counter for stopping all active Tx objects */
	struct bfa_wc		tx_stop_wc;
	enum bna_tx_mod_flags flags;
	/* CEE / priority bookkeeping */
	u8			prio_map;
	int			default_prio;
	int			iscsi_over_cee;
	int			iscsi_prio;
	int			prio_reconfigured;
	u32			rid_mask;
	struct bna *bna;
};
/* Rx object */
/* Rx datapath control structure */
/* Per-RxQ control block shared between hot path and control path */
struct bna_rcb {
	/* Fast path */
	void			**sw_qpt;
	void			*sw_q;
	void			*unmap_q;
	u32		producer_index;
	u32		consumer_index;
	u32		q_depth;
	void __iomem   *q_dbell;
	/* Control path */
	struct bna_rxq *rxq;
	struct bna_ccb *ccb;
	struct bnad *bnad;
	void			*priv; /* BNAD's cookie */
	unsigned long		flags;
	int			id;
};
/* RxQ structure - QPT, configuration */
struct bna_rxq {
	struct list_head	qe;
	int			buffer_size;
	int			q_depth;
	u32			num_vecs;
	enum bna_status		multi_buffer;
	struct bna_qpt qpt;
	struct bna_rcb *rcb;
	struct bna_rxp *rxp;
	struct bna_rx *rx;
	int			hw_id;
	/* Per-queue s/w statistics */
	u64		rx_packets;
	u64		rx_bytes;
	u64		rx_packets_with_error;
	u64		rxbuf_alloc_failed;
};
/* RxQ pair */
/* Views of an Rx path's queues for each bna_rxp_type topology */
union bna_rxq_u {
	struct {
		struct bna_rxq *hdr;
		struct bna_rxq *data;
	} hds;
	struct {
		struct bna_rxq *small;
		struct bna_rxq *large;
	} slr;
	struct {
		struct bna_rxq *only;
		struct bna_rxq *reserved;
	} single;
};
/* Packet rate for Dynamic Interrupt Moderation */
struct bna_pkt_rate {
	u32		small_pkt_cnt;
	u32		large_pkt_cnt;
};
/* Completion control structure */
/* Per-CQ control block: completion ring plus DIM state */
struct bna_ccb {
	/* Fast path */
	void			**sw_qpt;
	void			*sw_q;
	u32		producer_index;
	/* Written by h/w (IB index segment); hence volatile */
	volatile u32	*hw_producer_index;
	u32		q_depth;
	struct bna_ib_dbell *i_dbell;
	struct bna_rcb *rcb[2];
	void			*ctrl; /* For bnad */
	struct bna_pkt_rate pkt_rate;
	u32			pkts_una;
	u32			bytes_per_intr;
	/* Control path */
	struct bna_cq *cq;
	struct bnad *bnad;
	void			*priv; /* BNAD's cookie */
	enum bna_intr_type intr_type;
	int			intr_vector;
	u8			rx_coalescing_timeo; /* For NAPI */
	int			id;
	char			name[BNA_Q_NAME_SIZE];
};
/* CQ QPT, configuration */
struct bna_cq {
	struct bna_qpt qpt;
	struct bna_ccb *ccb;
	struct bna_ib ib;
	struct bna_rx *rx;
};
/* RSS hash type, mask and Toeplitz key handed to f/w */
struct bna_rss_config {
	enum bfi_enet_rss_type	hash_type;
	u8			hash_mask;
	u32		toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN];
};
/* Header-data-split configuration */
struct bna_hds_config {
	enum bfi_enet_hds_type hdr_type;
	int			forced_offset;
};
/* Rx object configuration used during creation */
struct bna_rx_config {
	enum bna_rx_type rx_type;
	int			num_paths;
	enum bna_rxp_type rxp_type;
	int			paused;
	int			coalescing_timeo;
	/*
	 * Small/Large (or Header/Data) buffer size to be configured
	 * for SLR and HDS queue type.
	 */
	u32			frame_size;
	/* header or small queue */
	u32		q1_depth;
	u32		q1_buf_size;
	/* data or large queue */
	u32		q0_depth;
	u32		q0_buf_size;
	u32		q0_num_vecs;
	enum bna_status q0_multi_buf;
	enum bna_status rss_status;
	struct bna_rss_config rss_config;
	struct bna_hds_config hds_config;
	enum bna_status vlan_strip_status;
};
/* Rx Path structure - one per MSIX vector/CPU */
/* Binds an RxQ pair and its CQ to one interrupt vector */
struct bna_rxp {
	/* This should be the first one */
	struct list_head	qe;
	enum bna_rxp_type type;
	union	bna_rxq_u	rxq;
	struct bna_cq cq;
	struct bna_rx *rx;
	/* MSI-x vector number for configuring RSS */
	int			vector;
	int			hw_id;
};
/* RxF structure (hardware Rx Function) */
struct bna_rxf {
bfa_fsm_t fsm;
enum bna_rxf_flags flags;
struct bfa_msgq_cmd_entry msgq_cmd;
union {
struct bfi_enet_enable_req req;
struct bfi_enet_rss_cfg_req rss_req;
struct bfi_enet_rit_req rit_req;
struct bfi_enet_rx_vlan_req vlan_req;
struct bfi_enet_mcast_add_req mcast_add_req;
struct bfi_enet_mcast_del_req mcast_del_req;
struct bfi_enet_ucast_req ucast_req;
} bfi_enet_cmd;
/* callback for bna_rxf_start() */
void (*start_cbfn) (struct bna_rx *rx);
struct bna_rx *start_cbarg;
/* callback for bna_rxf_stop() */
void (*stop_cbfn) (struct bna_rx *rx);
struct bna_rx *stop_cbarg;
/* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
struct bnad *oper_state_cbarg;
/**
* callback for:
* bna_rxf_ucast_set()
* bna_rxf_{ucast/mcast}_add(),
* bna_rxf_{ucast/mcast}_del(),
* bna_rxf_mode_set()
*/
void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx);
struct bnad *cam_fltr_cbarg;
/* List of unicast addresses yet to be applied to h/w */
struct list_head ucast_pending_add_q;
struct list_head ucast_pending_del_q;
struct bna_mac *ucast_pending_mac;
int ucast_pending_set;
/* ucast addresses applied to the h/w */
struct list_head ucast_active_q;
struct bna_mac ucast_active_mac;
int ucast_active_set;
/* List of multicast addresses yet to be applied to h/w */
struct list_head mcast_pending_add_q;
struct list_head mcast_pending_del_q;
/* multicast addresses applied to the h/w */
struct list_head mcast_active_q;
struct list_head mcast_handle_q;
/* Rx modes yet to be applied to h/w */
enum bna_rxmode rxmode_pending;
enum bna_rxmode rxmode_pending_bitmask;
/* Rx modes applied to h/w */
enum bna_rxmode rxmode_active;
u8 vlan_pending_bitmask;
enum bna_status vlan_filter_status;
u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32];
bool vlan_strip_pending;
enum bna_status vlan_strip_status;
enum bna_rss_flags rss_pending;
enum bna_status rss_status;
struct bna_rss_config rss_cfg;
u8 *rit;
int rit_size;
struct bna_rx *rx;
};
/* Rx object - top-level receive entity: a set of RxPs plus one RxF */
struct bna_rx {
	/* This should be the first one */
	struct list_head qe;
	int rid;
	int hw_id;

	bfa_fsm_t fsm;

	enum bna_rx_type type;

	int num_paths;		/* number of entries on rxp_q */
	struct list_head rxp_q;

	struct bna_hds_config hds_cfg;

	struct bna_rxf rxf;

	enum bna_rx_flags rx_flags;

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_rx_cfg_req cfg_req;
		struct bfi_enet_req req;
		struct bfi_enet_rx_cfg_rsp cfg_rsp;
	} bfi_enet_cmd;

	/* Rx event handlers */
	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
	void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);

	/* callback for bna_rx_disable(), bna_rx_stop() */
	void (*stop_cbfn)(void *arg, struct bna_rx *rx);
	void *stop_cbarg;

	struct bna *bna;
	void *priv; /* bnad's cookie */
};

/* Event callbacks supplied by bnad at Rx creation time */
struct bna_rx_event_cbfn {
	/* Optional */
	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
	void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
	/* Mandatory */
	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
};

/* Rx module - keeps track of free, active rx objects */
struct bna_rx_mod {
	struct bna *bna;		/* back pointer to parent */
	struct bna_rx *rx;		/* BFI_MAX_RXQ entries */
	struct bna_rxp *rxp;		/* BFI_MAX_RXQ entries */
	struct bna_rxq *rxq;		/* BFI_MAX_RXQ entries */

	struct list_head rx_free_q;
	struct list_head rx_active_q;
	int rx_free_count;

	struct list_head rxp_free_q;
	int rxp_free_count;

	struct list_head rxq_free_q;
	int rxq_free_count;

	enum bna_rx_mod_flags flags;

	/* callback for bna_rx_mod_stop() */
	void (*stop_cbfn)(struct bna_enet *enet);

	struct bfa_wc rx_stop_wc;	/* waits for all Rx objects to stop */
	u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
	u32 rid_mask;
};
/* CAM - unicast address module */
struct bna_ucam_mod {
	struct bna_mac *ucmac;		/* num_ucmac * 2 entries */
	struct list_head free_q;
	struct list_head del_q;

	struct bna *bna;
};

/* Firmware handle for one multicast CAM entry, reference counted */
struct bna_mcam_handle {
	/* This should be the first one */
	struct list_head qe;
	int handle;
	int refcnt;
};

/* Multicast address module */
struct bna_mcam_mod {
	struct bna_mac *mcmac;		/* num_mcmac * 2 entries */
	struct bna_mcam_handle *mchandle;	/* num_mcmac entries */
	struct list_head free_q;
	struct list_head del_q;
	struct list_head free_handle_q;

	struct bna *bna;
};

/* Statistics - DMA area shared with firmware plus a host-side copy */
struct bna_stats {
	struct bna_dma_addr hw_stats_dma;
	struct bfi_enet_stats *hw_stats_kva;
	struct bfi_enet_stats hw_stats;
};

/* Serializes stats get/clear mailbox requests to firmware */
struct bna_stats_mod {
	bool ioc_ready;
	bool stats_get_busy;
	bool stats_clr_busy;
	struct bfa_msgq_cmd_entry stats_get_cmd;
	struct bfa_msgq_cmd_entry stats_clr_cmd;
	struct bfi_enet_stats_req stats_get;
	struct bfi_enet_stats_req stats_clr;
};
/* BNA - top-level hardware abstraction object, one per adapter port */
struct bna {
	struct bna_ident ident;
	struct bfa_pcidev pcidev;

	struct bna_reg regs;
	struct bna_bit_defn bits;

	struct bna_stats stats;

	struct bna_ioceth ioceth;
	struct bfa_cee cee;
	struct bfa_flash flash;
	struct bfa_msgq msgq;

	struct bna_ethport ethport;
	struct bna_enet enet;
	struct bna_stats_mod stats_mod;

	struct bna_tx_mod tx_mod;
	struct bna_rx_mod rx_mod;
	struct bna_ucam_mod ucam_mod;
	struct bna_mcam_mod mcam_mod;

	enum bna_mod_flags mod_flags;

	int default_mode_rid;	/* rid owning default mode, or invalid */
	int promisc_rid;	/* rid owning promiscuous mode, or invalid */

	struct bnad *bnad;	/* back pointer to the Linux driver object */
};
#endif	/* __BNA_TYPES_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,431 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __BNAD_H__
#define __BNAD_H__
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/ipv6.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
/* Fix for IA64 */
#include <asm/checksum.h>
#include <net/ip6_checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include "bna.h"
/* Default queue depths and per-object queue counts */
#define BNAD_TXQ_DEPTH		2048
#define BNAD_RXQ_DEPTH		2048

#define BNAD_MAX_TX		1
#define BNAD_MAX_TXQ_PER_TX	8	/* 8 priority queues */
#define BNAD_TXQ_NUM		1

#define BNAD_MAX_RX		1
#define BNAD_MAX_RXP_PER_RX	16
#define BNAD_MAX_RXQ_PER_RXP	2

/*
 * Control structure pointed to by ccb->ctrl, which determines the
 * NAPI / LRO behavior of a CCB.  There is a 1:1 correspondence
 * between ccb & ctrl.
 */
struct bnad_rx_ctrl {
	struct bna_ccb *ccb;
	struct bnad *bnad;
	unsigned long flags;
	struct napi_struct napi;
	u64 rx_intr_ctr;	/* interrupt / poll counters for debugging */
	u64 rx_poll_ctr;
	u64 rx_schedule;
	u64 rx_keep_poll;
	u64 rx_complete;
};

#define BNAD_RXMODE_PROMISC_DEFAULT	BNA_RXMODE_PROMISC

/*
 * GLOBAL #defines (CONSTANTS)
 */
#define BNAD_NAME			"bna"
#define BNAD_NAME_LEN			64

#define BNAD_VERSION			"3.2.23.0"

#define BNAD_MAILBOX_MSIX_INDEX		0
#define BNAD_MAILBOX_MSIX_VECTORS	1
#define BNAD_INTX_TX_IB_BITMASK		0x1
#define BNAD_INTX_RX_IB_BITMASK		0x2

#define BNAD_STATS_TIMER_FREQ		1000	/* in msecs */
#define BNAD_DIM_TIMER_FREQ		1000	/* in msecs */

#define BNAD_IOCETH_TIMEOUT		10000	/* in msecs */

#define BNAD_MIN_Q_DEPTH		512
#define BNAD_MAX_RXQ_DEPTH		16384
#define BNAD_MAX_TXQ_DEPTH		2048

#define BNAD_JUMBO_MTU			9000

#define BNAD_NETIF_WAKE_THRESHOLD	8

#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3

/* Bit positions for tcb->flags */
#define BNAD_TXQ_FREE_SENT		0
#define BNAD_TXQ_TX_STARTED		1

/* Bit positions for rcb->flags */
#define BNAD_RXQ_STARTED		0
#define BNAD_RXQ_POST_OK		1

/* Resource limits */
#define BNAD_NUM_TXQ	(bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP	(bnad->num_rx * bnad->num_rxp_per_rx)

/* On-wire frame size for a given MTU (L2 header + VLAN tag + FCS) */
#define BNAD_FRAME_SIZE(_mtu) \
	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
/*
 * DATA STRUCTURES
 */

/* enums */
enum bnad_intr_source {
	BNAD_INTR_TX		= 1,
	BNAD_INTR_RX		= 2
};

enum bnad_link_state {
	BNAD_LS_DOWN		= 0,
	BNAD_LS_UP		= 1
};

/* Completion context for a single synchronous ioctl-style command */
struct bnad_iocmd_comp {
	struct bnad		*bnad;
	struct completion	comp;
	int			comp_status;
};

/* Completions and their statuses for each synchronous config operation */
struct bnad_completion {
	struct completion	ioc_comp;
	struct completion	ucast_comp;
	struct completion	mcast_comp;
	struct completion	tx_comp;
	struct completion	rx_comp;
	struct completion	stats_comp;
	struct completion	enet_comp;
	struct completion	mtu_comp;

	u8			ioc_comp_status;
	u8			ucast_comp_status;
	u8			mcast_comp_status;
	u8			tx_comp_status;
	u8			rx_comp_status;
	u8			stats_comp_status;
	u8			port_comp_status;
	u8			mtu_comp_status;
};

/* Tx Rx Control Stats - software counters maintained by the driver */
struct bnad_drv_stats {
	u64		netif_queue_stop;
	u64		netif_queue_wakeup;
	u64		netif_queue_stopped;
	u64		tso4;
	u64		tso6;
	u64		tso_err;
	u64		tcpcsum_offload;
	u64		udpcsum_offload;
	u64		csum_help;
	u64		tx_skb_too_short;
	u64		tx_skb_stopping;
	u64		tx_skb_max_vectors;
	u64		tx_skb_mss_too_long;
	u64		tx_skb_tso_too_short;
	u64		tx_skb_tso_prepare;
	u64		tx_skb_non_tso_too_long;
	u64		tx_skb_tcp_hdr;
	u64		tx_skb_udp_hdr;
	u64		tx_skb_csum_err;
	u64		tx_skb_headlen_too_long;
	u64		tx_skb_headlen_zero;
	u64		tx_skb_frag_zero;
	u64		tx_skb_len_mismatch;

	u64		hw_stats_updates;
	u64		netif_rx_dropped;

	u64		link_toggle;
	u64		cee_toggle;

	u64		rxp_info_alloc_failed;
	u64		mbox_intr_disabled;
	u64		mbox_intr_enabled;
	u64		tx_unmap_q_alloc_failed;
	u64		rx_unmap_q_alloc_failed;
	u64		rxbuf_alloc_failed;
};

/* Complete driver stats - software counters plus hardware counters */
struct bnad_stats {
	struct bnad_drv_stats drv_stats;
	struct bna_stats *bna_stats;
};
/* Tx / Rx Resources */
struct bnad_tx_res_info {
	struct bna_res_info res_info[BNA_TX_RES_T_MAX];
};

struct bnad_rx_res_info {
	struct bna_res_info res_info[BNA_RX_RES_T_MAX];
};

/* Per-Tx-object driver state */
struct bnad_tx_info {
	struct bna_tx *tx; /* 1:1 between tx_info & tx */
	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
	u32 tx_id;
	struct delayed_work tx_cleanup_work;
} ____cacheline_aligned;

/* Per-Rx-object driver state */
struct bnad_rx_info {
	struct bna_rx *rx; /* 1:1 between rx_info & rx */

	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
	u32 rx_id;
	struct work_struct rx_cleanup_work;
} ____cacheline_aligned;

/* DMA unmap bookkeeping for one Tx fragment */
struct bnad_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

/* DMA unmap bookkeeping for one Tx work item (skb + its fragments) */
struct bnad_tx_unmap {
	struct sk_buff		*skb;
	u32			nvecs;
	struct bnad_tx_vector	vectors[BFI_TX_MAX_VECTORS_PER_WI];
};

struct bnad_rx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	u32			len;
};

/* DMA unmap bookkeeping for one posted Rx buffer (page or skb based) */
struct bnad_rx_unmap {
	struct page		*page;
	struct sk_buff		*skb;
	struct bnad_rx_vector	vector;
	u32			page_offset;
};

enum bnad_rxbuf_type {
	BNAD_RXBUF_NONE		= 0,
	BNAD_RXBUF_SK_BUFF	= 1,
	BNAD_RXBUF_PAGE		= 2,
	BNAD_RXBUF_MULTI_BUFF	= 3
};

#define BNAD_RXBUF_IS_SK_BUFF(_type)	((_type) == BNAD_RXBUF_SK_BUFF)
#define BNAD_RXBUF_IS_MULTI_BUFF(_type)	((_type) == BNAD_RXBUF_MULTI_BUFF)

/* Unmap queue: one bnad_rx_unmap per RxQ entry (flexible trailing array) */
struct bnad_rx_unmap_q {
	int			reuse_pi;
	int			alloc_order;
	u32			map_size;
	enum bnad_rxbuf_type	type;
	struct bnad_rx_unmap	unmap[0] ____cacheline_aligned;
};

#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
	((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)

/* Bit mask values for bnad->cfg_flags */
#define	BNAD_CF_DIM_ENABLED		0x01	/* DIM */
#define	BNAD_CF_PROMISC			0x02
#define BNAD_CF_ALLMULTI		0x04
#define	BNAD_CF_DEFAULT			0x08
#define	BNAD_CF_MSIX			0x10	/* If in MSIx mode */

/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
/* Values indicated bit positions */
#define BNAD_RF_CEE_RUNNING		0
#define BNAD_RF_MTU_SET			1
#define BNAD_RF_MBOX_IRQ_DISABLED	2
#define BNAD_RF_NETDEV_REGISTERED	3
#define BNAD_RF_DIM_TIMER_RUNNING	4
#define BNAD_RF_STATS_TIMER_RUNNING	5
#define BNAD_RF_TX_PRIO_SET		6
/* Per-port Linux driver object; owns the netdev and the BNA instance */
struct bnad {
	struct net_device	*netdev;
	u32			id;
	struct list_head	list_entry;

	/* Data path */
	struct bnad_tx_info tx_info[BNAD_MAX_TX];
	struct bnad_rx_info rx_info[BNAD_MAX_RX];

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/*
	 * These q numbers are global only because
	 * they are used to calculate MSIx vectors.
	 * Actually the exact # of queues are per Tx/Rx
	 * object.
	 */
	u32		num_tx;
	u32		num_rx;
	u32		num_txq_per_tx;
	u32		num_rxp_per_rx;

	u32		txq_depth;
	u32		rxq_depth;

	u8		tx_coalescing_timeo;
	u8		rx_coalescing_timeo;

	struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
	struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;

	void __iomem	*bar0;		/* BAR0 address */

	struct bna bna;

	u32		cfg_flags;	/* BNAD_CF_* bits */
	unsigned long	run_flags;	/* BNAD_RF_* bit positions */

	struct pci_dev		*pcidev;
	u64		mmio_start;
	u64		mmio_len;

	u32		msix_num;
	struct msix_entry	*msix_table;

	struct mutex	conf_mutex;	/* serializes all configuration */
	spinlock_t	bna_lock ____cacheline_aligned;

	/* Timers */
	struct timer_list	ioc_timer;
	struct timer_list	dim_timer;
	struct timer_list	stats_timer;

	/* Control path resources, memory & irq */
	struct bna_res_info res_info[BNA_RES_T_MAX];
	struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];

	struct bnad_completion bnad_completions;

	/* Burnt in MAC address */
	mac_t			perm_addr;

	struct workqueue_struct *work_q;

	/* Statistics */
	struct bnad_stats stats;

	struct bnad_diag *diag;

	char			adapter_name[BNAD_NAME_LEN];
	char			port_name[BNAD_NAME_LEN];
	char			mbox_irq_name[BNAD_NAME_LEN];
	char			wq_name[BNAD_NAME_LEN];

	/* debugfs specific data */
	char	*regdata;	/* register read snapshot buffer */
	u32	reglen;		/* valid bytes in regdata */
	struct dentry *bnad_dentry_files[5];
	struct dentry *port_debugfs_root;
};

/* Aggregate attributes returned by the drvinfo debugfs file */
struct bnad_drvinfo {
	struct bfa_ioc_attr  ioc_attr;
	struct bfa_cee_attr  cee_attr;
	struct bfa_flash_attr flash_attr;
	u32	cee_status;
	u32	flash_status;
};
/*
 * EXTERN VARIABLES
 */
extern const struct firmware *bfi_fw;

/*
 * EXTERN PROTOTYPES
 */
u32 *cna_get_firmware_buf(struct pci_dev *pdev);

/* Netdev entry point prototypes */
void bnad_set_rx_mode(struct net_device *netdev);
struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
int bnad_enable_default_bcast(struct bnad *bnad);
void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
void bnad_set_ethtool_ops(struct net_device *netdev);
void bnad_cb_completion(void *arg, enum bfa_status status);

/* Configuration & setup */
void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
void bnad_rx_coalescing_timeo_set(struct bnad *bnad);

int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);

/* Timer start/stop protos */
void bnad_dim_timer_start(struct bnad *bnad);

/* Statistics */
void bnad_netdev_qstats_fill(struct bnad *bnad,
		struct rtnl_link_stats64 *stats);
void bnad_netdev_hwstats_fill(struct bnad *bnad,
		struct rtnl_link_stats64 *stats);

/* Debugfs */
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);

/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr)				\
	(((_bnad)->stats.drv_stats._ctr)++)

#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)

/*
 * Re-arm the coalescing timer and ack the IB doorbell for a CCB.
 * "unsafe": caller must hold the appropriate lock / be in safe context.
 */
#define bnad_enable_rx_irq_unsafe(_ccb)			\
{							\
	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
			(_ccb)->rx_coalescing_timeo);		\
		bna_ib_ack((_ccb)->i_dbell, 0);			\
	}							\
}

#endif /* __BNAD_H__ */

View file

@ -0,0 +1,588 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include "bnad.h"
/*
* BNA debufs interface
*
* To access the interface, debugfs file system should be mounted
* if not already mounted using:
* mount -t debugfs none /sys/kernel/debug
*
* BNA Hierarchy:
* - bna/pci_dev:<pci_name>
* where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
*
* Debugging service available per pci_dev:
* fwtrc: To collect current firmware trace.
* fwsave: To collect last saved fw trace as a result of firmware crash.
* regwr: To write one word to chip register
* regrd: To read one or more words from chip register.
*/
/* Per-open() state for the debugfs files below */
struct bnad_debug_info {
	char	*debug_buffer;	/* snapshot returned by read(); NULL for reg files */
	void	*i_private;	/* owning struct bnad (reg files only) */
	int	buffer_len;	/* valid bytes in debug_buffer */
};
/*
 * open() handler for the "fwtrc" file: snapshot the current firmware
 * trace into a private buffer that read() will serve from.
 */
static int
bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
{
	struct bnad *bnad = inode->i_private;
	struct bnad_debug_info *fw_debug;
	unsigned long flags;
	int rc;

	fw_debug = kzalloc(sizeof(*fw_debug), GFP_KERNEL);
	if (fw_debug == NULL)
		return -ENOMEM;

	fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
	fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
	if (fw_debug->debug_buffer == NULL) {
		kfree(fw_debug);
		return -ENOMEM;
	}

	/* Collect the trace under the BNA lock; buffer_len is updated in place. */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
				    fw_debug->debug_buffer,
				    &fw_debug->buffer_len);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (rc != BFA_STATUS_OK) {
		kfree(fw_debug->debug_buffer);
		kfree(fw_debug);
		pr_warn("bnad %s: Failed to collect fwtrc\n",
			pci_name(bnad->pcidev));
		return -ENOMEM;
	}

	file->private_data = fw_debug;

	return 0;
}
/*
 * open() handler for the "fwsave" file: retrieve the trace that was
 * saved at the last firmware crash, if one exists.
 */
static int
bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
{
	struct bnad *bnad = inode->i_private;
	struct bnad_debug_info *fw_debug;
	unsigned long flags;
	int rc;

	fw_debug = kzalloc(sizeof(*fw_debug), GFP_KERNEL);
	if (fw_debug == NULL)
		return -ENOMEM;

	fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
	fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
	if (fw_debug->debug_buffer == NULL) {
		kfree(fw_debug);
		return -ENOMEM;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
				     fw_debug->debug_buffer,
				     &fw_debug->buffer_len);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* ENOFSAVE just means no crash trace has been saved - not a failure. */
	if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
		kfree(fw_debug->debug_buffer);
		kfree(fw_debug);
		pr_warn("bna %s: Failed to collect fwsave\n",
			pci_name(bnad->pcidev));
		return -ENOMEM;
	}

	file->private_data = fw_debug;

	return 0;
}
static int
bnad_debugfs_open_reg(struct inode *inode, struct file *file)
{
struct bnad_debug_info *reg_debug;
reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
if (!reg_debug)
return -ENOMEM;
reg_debug->i_private = inode->i_private;
file->private_data = reg_debug;
return 0;
}
/*
 * Fill 'buffer' (a struct bnad_drvinfo of 'len' bytes) with IOC, CEE and
 * flash attributes.  Blocks waiting for the CEE/flash mailbox completions,
 * so the caller must hold bnad->conf_mutex and must not be atomic.
 * Returns BFA_STATUS_OK on success, else a BFA_STATUS_* error code.
 */
static int
bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
{
	struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
	struct bnad_iocmd_comp fcomp;
	unsigned long flags = 0;
	int ret = BFA_STATUS_FAILED;

	/* Get IOC info */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Retrieve CEE related info */
	fcomp.bnad = bnad;
	fcomp.comp_status = 0;
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
				bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	/* Completion is signalled from the mailbox callback (drops the lock first) */
	wait_for_completion(&fcomp.comp);
	drvinfo->cee_status = fcomp.comp_status;

	/* Retrieve flash partition info */
	fcomp.comp_status = 0;
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
				bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	drvinfo->flash_status = fcomp.comp_status;
out:
	return ret;
}
/*
 * open() handler for the "drvinfo" file: gather IOC/CEE/flash attributes
 * into a buffer served by read().
 */
static int
bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
{
	struct bnad *bnad = inode->i_private;
	struct bnad_debug_info *drv_info;
	int rc;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (drv_info == NULL)
		return -ENOMEM;

	drv_info->buffer_len = sizeof(struct bnad_drvinfo);
	drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
	if (drv_info->debug_buffer == NULL) {
		kfree(drv_info);
		return -ENOMEM;
	}

	/* Attribute collection sleeps on mailbox completions. */
	mutex_lock(&bnad->conf_mutex);
	rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
				    drv_info->buffer_len);
	mutex_unlock(&bnad->conf_mutex);

	if (rc != BFA_STATUS_OK) {
		kfree(drv_info->debug_buffer);
		kfree(drv_info);
		pr_warn("bna %s: Failed to collect drvinfo\n",
			pci_name(bnad->pcidev));
		return -ENOMEM;
	}

	file->private_data = drv_info;

	return 0;
}
/* Changes the current file position, bounded by the snapshot length. */
static loff_t
bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
{
	struct bnad_debug_info *dbg = file->private_data;

	return dbg ? fixed_size_llseek(file, offset, orig, dbg->buffer_len)
		   : -EINVAL;
}
/* read() handler serving the snapshot captured at open() time. */
static ssize_t
bnad_debugfs_read(struct file *file, char __user *buf,
		  size_t nbytes, loff_t *pos)
{
	struct bnad_debug_info *dbg = file->private_data;

	if (dbg == NULL || dbg->debug_buffer == NULL)
		return 0;

	return simple_read_from_buffer(buf, nbytes, pos, dbg->debug_buffer,
				       dbg->buffer_len);
}
/* Size of the mapped register space: 256KB on CT ASICs, 128KB on CB */
#define BFA_REG_CT_ADDRSZ	(0x40000)
#define BFA_REG_CB_ADDRSZ	(0x20000)
#define BFA_REG_ADDRSZ(__ioc)	\
	((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?	\
	 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
#define BFA_REG_ADDRMSK(__ioc)	(BFA_REG_ADDRSZ(__ioc) - 1)

/*
 * Function to check if the register offset passed is valid, i.e. that a
 * read/write of 'len' 32-bit words starting at 'offset' stays inside the
 * region selected by the area bits.  Returns BFA_STATUS_OK/EINVAL.
 */
static int
bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
{
	u8 area;

	/* check bits [17:15] - the area select field of the offset */
	area = (offset >> 15) & 0x7;
	if (area == 0) {
		/* PCIe core register */
		if ((offset + (len<<2)) > 0x8000)	/* 8k dwords or 32KB */
			return BFA_STATUS_EINVAL;
	} else if (area == 0x1) {
		/* CB 32 KB memory page */
		if ((offset + (len<<2)) > 0x10000)	/* 16k dwords or 64KB */
			return BFA_STATUS_EINVAL;
	} else {
		/* CB register space 64KB */
		if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
			return BFA_STATUS_EINVAL;
	}
	return BFA_STATUS_OK;
}
/*
 * read() handler for "regrd": serve the register snapshot produced by the
 * most recent write to the file, freeing it once fully consumed.
 */
static ssize_t
bnad_debugfs_read_regrd(struct file *file, char __user *buf,
			size_t nbytes, loff_t *pos)
{
	struct bnad_debug_info *dbg = file->private_data;
	struct bnad *bnad = (struct bnad *)dbg->i_private;
	ssize_t rc;

	/* Nothing to serve until a "regrd" command has populated regdata. */
	if (bnad->regdata == NULL)
		return 0;

	rc = simple_read_from_buffer(buf, nbytes, pos, bnad->regdata,
				     bnad->reglen);

	/* Reader has consumed the whole snapshot - release it. */
	if (*pos + nbytes >= bnad->reglen) {
		kfree(bnad->regdata);
		bnad->regdata = NULL;
		bnad->reglen = 0;
	}

	return rc;
}
/*
 * write() handler for "regrd": parse an "addr:len" command (hex), read
 * 'len' 32-bit registers starting at 'addr' into bnad->regdata, to be
 * served back by the next read() on the same file.
 */
static ssize_t
bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
			 size_t nbytes, loff_t *ppos)
{
	struct bnad_debug_info *regrd_debug = file->private_data;
	struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
	struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
	int addr, len, rc, i;
	u32 *regbuf;
	void __iomem *rb, *reg_addr;
	unsigned long flags;
	void *kern_buf;

	/*
	 * Allocate one extra zeroed byte so the user data is always
	 * NUL-terminated before it reaches sscanf() - the user buffer
	 * itself need not contain a terminator.
	 */
	kern_buf = kzalloc(nbytes + 1, GFP_KERNEL);
	if (!kern_buf)
		return -ENOMEM;

	if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
		kfree(kern_buf);
		return -EFAULT;	/* copy fault, not an allocation failure */
	}

	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
	kfree(kern_buf);
	/* Reject malformed input and lengths that would overflow len << 2 */
	if (rc < 2 || len == 0 || len > UINT_MAX / sizeof(u32)) {
		pr_warn("bna %s: Failed to read user buffer\n",
			pci_name(bnad->pcidev));
		return -EINVAL;
	}

	/* Drop any previous snapshot before building a new one */
	kfree(bnad->regdata);
	bnad->regdata = NULL;
	bnad->reglen = 0;

	bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
	if (!bnad->regdata)
		return -ENOMEM;
	bnad->reglen = len << 2;

	rb = bfa_ioc_bar0(ioc);
	addr &= BFA_REG_ADDRMSK(ioc);

	/* offset and len sanity check */
	rc = bna_reg_offset_check(ioc, addr, len);
	if (rc) {
		pr_warn("bna %s: Failed reg offset check\n",
			pci_name(bnad->pcidev));
		kfree(bnad->regdata);
		bnad->regdata = NULL;
		bnad->reglen = 0;
		return -EINVAL;
	}

	reg_addr = rb + addr;
	regbuf = (u32 *)bnad->regdata;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < len; i++) {
		*regbuf = readl(reg_addr);
		regbuf++;
		reg_addr += sizeof(u32);
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return nbytes;
}
/*
 * write() handler for "regwr": parse an "addr:val" command (hex) and
 * write 'val' to the chip register at 'addr'.
 */
static ssize_t
bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
			 size_t nbytes, loff_t *ppos)
{
	struct bnad_debug_info *debug = file->private_data;
	struct bnad *bnad = (struct bnad *)debug->i_private;
	struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
	int addr, val, rc;
	void __iomem *reg_addr;
	unsigned long flags;
	void *kern_buf;

	/*
	 * One extra zeroed byte guarantees NUL termination for sscanf();
	 * the user data is not required to carry its own terminator.
	 */
	kern_buf = kzalloc(nbytes + 1, GFP_KERNEL);
	if (!kern_buf)
		return -ENOMEM;

	if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
		kfree(kern_buf);
		return -EFAULT;	/* copy fault, not an allocation failure */
	}

	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
	kfree(kern_buf);
	if (rc < 2) {
		pr_warn("bna %s: Failed to read user buffer\n",
			pci_name(bnad->pcidev));
		return -EINVAL;
	}

	addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */

	/* offset and len sanity check */
	rc = bna_reg_offset_check(ioc, addr, 1);
	if (rc) {
		pr_warn("bna %s: Failed reg offset check\n",
			pci_name(bnad->pcidev));
		return -EINVAL;
	}

	reg_addr = (bfa_ioc_bar0(ioc)) + addr;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	writel(val, reg_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return nbytes;
}
/* release() handler for the reg files: free the per-open tracking struct. */
static int
bnad_debugfs_release(struct inode *inode, struct file *file)
{
	struct bnad_debug_info *dbg = file->private_data;

	if (dbg) {
		file->private_data = NULL;
		kfree(dbg);
	}

	return 0;
}
/*
 * release() handler for buffer-backed files (fwtrc/fwsave/drvinfo):
 * free the snapshot buffer and the per-open tracking struct.
 */
static int
bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
{
	struct bnad_debug_info *dbg = file->private_data;

	if (dbg) {
		kfree(dbg->debug_buffer);
		file->private_data = NULL;
		kfree(dbg);
	}

	return 0;
}
/* File operations for each debugfs entry exposed per port */
static const struct file_operations bnad_debugfs_op_fwtrc = {
	.owner		=	THIS_MODULE,
	.open		=	bnad_debugfs_open_fwtrc,
	.llseek		=	bnad_debugfs_lseek,
	.read		=	bnad_debugfs_read,
	.release	=	bnad_debugfs_buffer_release,
};

static const struct file_operations bnad_debugfs_op_fwsave = {
	.owner		=	THIS_MODULE,
	.open		=	bnad_debugfs_open_fwsave,
	.llseek		=	bnad_debugfs_lseek,
	.read		=	bnad_debugfs_read,
	.release	=	bnad_debugfs_buffer_release,
};

static const struct file_operations bnad_debugfs_op_regrd = {
	.owner		=	THIS_MODULE,
	.open		=	bnad_debugfs_open_reg,
	.llseek		=	bnad_debugfs_lseek,
	.read		=	bnad_debugfs_read_regrd,
	.write		=	bnad_debugfs_write_regrd,
	.release	=	bnad_debugfs_release,
};

static const struct file_operations bnad_debugfs_op_regwr = {
	.owner		=	THIS_MODULE,
	.open		=	bnad_debugfs_open_reg,
	.llseek		=	bnad_debugfs_lseek,
	.write		=	bnad_debugfs_write_regwr,
	.release	=	bnad_debugfs_release,
};

static const struct file_operations bnad_debugfs_op_drvinfo = {
	.owner		=	THIS_MODULE,
	.open		=	bnad_debugfs_open_drvinfo,
	.llseek		=	bnad_debugfs_lseek,
	.read		=	bnad_debugfs_read,
	.release	=	bnad_debugfs_buffer_release,
};

/* Name/mode/fops tuple for one debugfs file */
struct bnad_debugfs_entry {
	const char *name;
	umode_t	mode;
	const struct file_operations *fops;
};

/* Table of the files created under each port's debugfs directory.
 * NOTE(review): bnad->bnad_dentry_files is declared with size 5 -
 * must stay in sync with the number of entries here.
 */
static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
	{ "fwtrc",  S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
	{ "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
	{ "regrd",  S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
	{ "regwr",  S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
	{ "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
};

/* Shared "bna" root directory and the number of ports using it */
static struct dentry *bna_debugfs_root;
static atomic_t bna_debugfs_port_count;
/*
 * Initialize debugfs interface for BNA.
 * Creates (once) the shared "bna" root directory, then a
 * "pci_dev:<name>" directory for this port containing the files from
 * bnad_debugfs_files[].  Failures are logged and the interface is left
 * partially built; the driver continues without debugfs.
 * NOTE(review): this kernel's debugfs_create_* return NULL on failure,
 * hence the !ptr checks.
 */
void
bnad_debugfs_init(struct bnad *bnad)
{
	const struct bnad_debugfs_entry *file;
	char name[64];
	int i;

	/* Setup the BNA debugfs root directory*/
	if (!bna_debugfs_root) {
		bna_debugfs_root = debugfs_create_dir("bna", NULL);
		atomic_set(&bna_debugfs_port_count, 0);
		if (!bna_debugfs_root) {
			pr_warn("BNA: debugfs root dir creation failed\n");
			return;
		}
	}

	/* Setup the pci_dev debugfs directory for the port */
	snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
	if (!bnad->port_debugfs_root) {
		bnad->port_debugfs_root =
			debugfs_create_dir(name, bna_debugfs_root);
		if (!bnad->port_debugfs_root) {
			pr_warn("bna pci_dev %s: root dir creation failed\n",
				pci_name(bnad->pcidev));
			return;
		}

		atomic_inc(&bna_debugfs_port_count);

		for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
			file = &bnad_debugfs_files[i];
			bnad->bnad_dentry_files[i] =
					debugfs_create_file(file->name,
							file->mode,
							bnad->port_debugfs_root,
							bnad,
							file->fops);
			if (!bnad->bnad_dentry_files[i]) {
				pr_warn(
					"BNA pci_dev:%s: create %s entry failed\n",
					pci_name(bnad->pcidev), file->name);
				return;
			}
		}
	}
}
/* Uninitialize debugfs interface for BNA */
void
bnad_debugfs_uninit(struct bnad *bnad)
{
	int idx;

	/* Drop this port's individual debugfs files. */
	for (idx = 0; idx < ARRAY_SIZE(bnad_debugfs_files); idx++) {
		if (bnad->bnad_dentry_files[idx] == NULL)
			continue;
		debugfs_remove(bnad->bnad_dentry_files[idx]);
		bnad->bnad_dentry_files[idx] = NULL;
	}

	/* Remove the pci_dev debugfs directory for the port */
	if (bnad->port_debugfs_root) {
		debugfs_remove(bnad->port_debugfs_root);
		bnad->port_debugfs_root = NULL;
		atomic_dec(&bna_debugfs_port_count);
	}

	/* Last port gone - tear down the shared "bna" root as well. */
	if (atomic_read(&bna_debugfs_port_count) == 0) {
		debugfs_remove(bna_debugfs_root);
		bna_debugfs_root = NULL;
	}
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,106 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#ifndef __CNA_H__
#define __CNA_H__
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#define bfa_sm_fault(__event) do { \
pr_err("SM Assertion failure: %s: %d: event = %d\n", \
__FILE__, __LINE__, __event); \
} while (0)
extern char bfa_version[];
#define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin"
#define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
#pragma pack()
#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
/*
* bfa_q_qe_init - to initialize a queue element
*/
#define bfa_q_qe_init(_qe) { \
bfa_q_next(_qe) = (struct list_head *) NULL; \
bfa_q_prev(_qe) = (struct list_head *) NULL; \
}
/*
* bfa_q_deq - dequeue an element from head of the queue
*/
#define bfa_q_deq(_q, _qe) { \
if (!list_empty(_q)) { \
(*((struct list_head **) (_qe))) = bfa_q_next(_q); \
bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
(struct list_head *) (_q); \
bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
bfa_q_qe_init(*((struct list_head **) _qe)); \
} else { \
*((struct list_head **)(_qe)) = NULL; \
} \
}
/*
 * bfa_q_deq_tail - dequeue an element from tail of the queue
 *
 * Mirror of bfa_q_deq(), operating on the prev links: removes the last
 * element of _q into *(_qe), or stores NULL when the queue is empty.
 * The removed element's links are reset via bfa_q_qe_init().
 * NOTE: _q and _qe are evaluated multiple times — no side effects in
 * the arguments.
 */
#define bfa_q_deq_tail(_q, _qe) {					\
	if (!list_empty(_q)) {						\
		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
				(struct list_head *) (_q);		\
		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
		bfa_q_qe_init(*((struct list_head **) _qe));		\
	} else {							\
		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
	}								\
}
/*
 * bfa_q_enq_head - enqueue an element at the head of queue
 *
 * Before linking, asserts (log-only, via pr_err) that _qe is not
 * already on a list, i.e. both of its pointers are NULL as left by
 * bfa_q_qe_init()/bfa_q_deq().
 *
 * Fix: the original condition was misparenthesized as
 *   !(next == NULL) && (prev == NULL)
 * which fired only for the half-linked case (next set, prev clear).
 * The pr_err argument itself prints the conjunction
 * (next == NULL) && (prev == NULL), showing the intended assertion is
 * the negation of that conjunction, as written below.
 */
#define bfa_q_enq_head(_q, _qe) {					\
	if (!((bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)))	\
		pr_err("Assertion failure: %s:%d: %d",			\
			__FILE__, __LINE__,				\
			(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
	bfa_q_next(_qe) = bfa_q_next(_q);				\
	bfa_q_prev(_qe) = (struct list_head *) (_q);			\
	bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe);	\
	bfa_q_next(_q) = (struct list_head *) (_qe);			\
}
#endif /* __CNA_H__ */

View file

@ -0,0 +1,89 @@
/*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include <linux/firmware.h>
#include "bnad.h"
#include "bfi.h"
#include "cna.h"
/* Handle of the most recently loaded firmware blob; the cached image
 * pointers below point into its data buffer, so it must stay held while
 * they are in use (release is not visible in this file). */
const struct firmware *bfi_fw;
/* Lazily-filled firmware image caches (and sizes in 32-bit words) for
 * the CT and CT2 ASIC generations; populated by cna_read_firmware(). */
static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna;
static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size;
/*
 * cna_read_firmware - load the firmware file @fw_name for @pdev and
 * publish the image pointer and its size (in 32-bit words) through
 * @bfi_image / @bfi_image_size.
 *
 * On success the firmware handle is stashed in the global bfi_fw so the
 * underlying buffer remains valid, and the image pointer is returned.
 * On failure a message is logged and NULL is returned; the out
 * parameters are left untouched.
 */
static u32 *
cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
			u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		pr_alert("Can't locate firmware %s\n", fw_name);
		return NULL;
	}

	bfi_fw = fw;
	*bfi_image_size = fw->size / sizeof(u32);
	*bfi_image = (u32 *)fw->data;

	return *bfi_image;
}
/*
 * cna_get_firmware_buf - return the cached firmware image for the ASIC
 * behind @pdev, loading it on first use.
 *
 * Returns NULL for unrecognized devices, or when the lazy load failed
 * (the cached pointer is then still NULL).
 */
u32 *
cna_get_firmware_buf(struct pci_dev *pdev)
{
	/* CT2 ASICs are matched by exact PCI device ID. */
	if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
		if (!bfi_image_ct2_cna_size)
			cna_read_firmware(pdev, &bfi_image_ct2_cna,
					  &bfi_image_ct2_cna_size,
					  CNA_FW_FILE_CT2);
		return bfi_image_ct2_cna;
	}

	/* First-generation CT ASICs. */
	if (bfa_asic_id_ct(pdev->device)) {
		if (!bfi_image_ct_cna_size)
			cna_read_firmware(pdev, &bfi_image_ct_cna,
					  &bfi_image_ct_cna_size,
					  CNA_FW_FILE_CT);
		return bfi_image_ct_cna;
	}

	return NULL;
}
/*
 * bfa_cb_image_get_chunk - pointer into the cached firmware image for
 * @asic_gen at 32-bit-word offset @off; NULL for an unknown generation.
 */
u32 *
bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
	if (asic_gen == BFI_ASIC_GEN_CT)
		return bfi_image_ct_cna + off;
	if (asic_gen == BFI_ASIC_GEN_CT2)
		return bfi_image_ct2_cna + off;
	return NULL;
}
/*
 * bfa_cb_image_get_size - size (in 32-bit words) of the cached firmware
 * image for @asic_gen; 0 for an unknown generation or if nothing was
 * loaded yet.
 */
u32
bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
{
	if (asic_gen == BFI_ASIC_GEN_CT)
		return bfi_image_ct_cna_size;
	if (asic_gen == BFI_ASIC_GEN_CT2)
		return bfi_image_ct2_cna_size;
	return 0;
}