Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,8 @@
# Emulex OneConnect RoCE (ocrdma) driver; layered on top of the be2net
# ethernet driver, hence the NET_VENDOR_EMULEX/BE2NET selects.
config INFINIBAND_OCRDMA
	tristate "Emulex One Connect HCA support"
	depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
	select NET_VENDOR_EMULEX
	select BE2NET
	---help---
	  This driver provides low-level InfiniBand over Ethernet
	  support for Emulex One Connect host channel adapters (HCAs).

View file

@ -0,0 +1,5 @@
# ocrdma needs private be2net headers for the RoCE/NIC glue layer.
ccflags-y := -Idrivers/net/ethernet/emulex/benet
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o

View file

@ -0,0 +1,543 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#ifndef __OCRDMA_H__
#define __OCRDMA_H__
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <be_roce.h>
#include "ocrdma_sli.h"
/* Driver identification strings. */
#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"
/* PCI device ids of the Skyhawk physical and virtual functions. */
#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define OCRDMA_MAX_AH 512
/* Bit for a given uverbs command in ibdev->uverbs_cmd_mask. */
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
/* Join two 32-bit halves into a 64-bit value. */
#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
/* Device limits and capabilities reported by firmware at init time. */
struct ocrdma_dev_attr {
	u8 fw_ver[32];
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u16 max_srq;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_rdma_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_mw;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;	/* outbound RDMA-read depth per QP */
	u16 max_ird_per_qp;	/* inbound RDMA-read depth per QP */
	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;
	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};
/* A DMA-coherent buffer: kernel virtual address, bus address, byte size. */
struct ocrdma_dma_mem {
	void *va;
	dma_addr_t pa;
	u32 size;
};
/* One page of a physical buffer list (PBL) handed to the hardware. */
struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};
/* Generic descriptor for a hardware ring (EQ/CQ/MQ). */
struct ocrdma_queue_info {
	void *va;		/* kernel virtual address of the ring */
	dma_addr_t dma;		/* bus address of the ring */
	u32 size;		/* total ring size in bytes */
	u16 len;		/* number of entries */
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;
	bool created;		/* true once the queue exists in firmware */
};
/* Event queue plus its interrupt-vector bookkeeping. */
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;		/* MSI-X vector serving this EQ */
	int cq_cnt;		/* number of CQs bound to this EQ */
	struct ocrdma_dev *dev;
	char irq_name[32];
};
/* Mailbox queue pair: command submission queue + its completion queue. */
struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;
};
/* Context of the single outstanding mailbox command. */
struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;	/* issuer sleeps here until cmd_done */
	u32 tag;			/* tag matching request to completion */
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
	bool fw_error_state;	/* set when firmware is deemed unusable */
};
/* Hardware view of a memory region: access flags + page-list layout. */
struct ocrdma_hw_mr {
	u32 lkey;
	u8 fr_mr;		/* fast-register MR flag */
	u8 remote_atomic;
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;		/* first-byte offset into the first page */
	u64 va;
};
/* Memory region: IB core object, optional user pages, hardware state. */
struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;	/* NULL for kernel/DMA MRs */
	struct ocrdma_hw_mr hwmr;
};
/* One debugfs statistics file: its type tag and owning device. */
struct ocrdma_stats {
	u8 type;
	struct ocrdma_dev *dev;
};
/* DMA buffer and scratch text buffer used by the stats mailbox query. */
struct stats_mem {
	struct ocrdma_mqe mqe;
	void *va;
	dma_addr_t pa;
	u32 size;
	char *debugfs_mem;	/* formatted text returned to debugfs readers */
};
/* PHY capabilities as reported by firmware. */
struct phy_info {
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	u16 phy_type;
	u16 interface_type;
};
/* Per-adapter driver state; one instance per RoCE-capable function. */
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock; /* provides synchronised access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;	/* lookup tables keyed by hw id */
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq *eq_tbl;
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provides synchronization to sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	/* Pool of hardware address-vector entries shared by all AHs. */
	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provide synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;		/* mailbox queues */
	struct mqe_ctx mqe_ctx;		/* serializes mailbox commands */

	struct be_dev_info nic_info;	/* handle into the be2net NIC driver */
	struct phy_info phy;
	char model_number[32];
	u32 hba_port_num;

	struct list_head entry;		/* link in the global device list */
	struct rcu_head rcu;
	int id;
	u64 *stag_arr;
	u8 sl;			/* service level */
	bool pfc_state;
	atomic_t update_sl;	/* set when sl must be re-read from firmware */
	u16 pvid;		/* port default VLAN id */
	u32 asic_id;		/* cached PCI config read; see ocrdma_get_asic_type() */

	ulong last_stats_time;
	struct mutex stats_lock; /* provide synch for debugfs operations */
	struct stats_mem stats_mem;
	struct ocrdma_stats rsrc_stats;
	struct ocrdma_stats rx_stats;
	struct ocrdma_stats wqe_stats;
	struct ocrdma_stats tx_stats;
	struct ocrdma_stats db_err_stats;
	struct ocrdma_stats tx_qp_err_stats;
	struct ocrdma_stats rx_qp_err_stats;
	struct ocrdma_stats tx_dbg_stats;
	struct ocrdma_stats rx_dbg_stats;
	struct dentry *dir;	/* debugfs directory */
};
/* Completion queue: hardware ring plus software polling state. */
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;	/* CQE ring */
	u32 phase;		/* expected valid-phase of the next CQE */
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool deferred_arm, deferred_sol;
	bool first_arm;

	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
						   * to cq polling
						   */
	/* syncronizes cq completion handler invoked from multiple context */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;

	struct ocrdma_ucontext *ucontext;	/* NULL for kernel CQs */
	dma_addr_t pa;
	u32 len;
	u32 cqe_cnt;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
/* Protection domain, optionally with a doorbell-page-push (DPP) page. */
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_ucontext *uctx;	/* NULL for kernel PDs */
	u32 id;
	int num_dpp_qp;		/* DPP QPs still creatable on this PD */
	u32 dpp_page;
	bool dpp_enabled;
};
/* Address handle: points at one entry of the device's AV pool. */
struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;
	u16 sgid_index;
	u32 id;			/* hardware AH id */
};
/* One hardware work queue (SQ or RQ) of a QP/SRQ. */
struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;
	u32 head, tail;
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;
	dma_addr_t pa;
};
/* Shared receive queue. */
struct ocrdma_srq {
	struct ib_srq ibsrq;
	u8 __iomem *db;		/* doorbell register for posting RQEs */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;	/* wr_id of each posted RQE, by index */
	u32 *idx_bit_fields;	/* bitmap of free RQE slots */
	u32 bit_fields_len;
	/* provide synchronization to multiple context(s) posting rqe */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	u32 id;
};
/* Queue pair: send/receive hardware queues plus software wr_id tracking. */
struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;
	struct ocrdma_qp_hwq_info sq;
	/* Per-SQ-entry bookkeeping needed to complete the WQE later. */
	struct {
		uint64_t wrid;
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t  signaled;
		uint8_t  rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;

	/* provide synchronization to multiple context(s) posting wqe, rqe */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;		/* NULL unless attached to an SRQ */
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/*  QP state */
	int cap_flags;
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
	bool signaled;		/* all WRs generate completions */
};
/* Per-process user context. */
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;	/* mmap-able regions handed to user */
	struct mutex mm_list_lock; /* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;	/* implicit PD owned by this context */
	int pd_in_use;

	/* Page shared with userspace mapping dlid -> hardware AH id. */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};
/* One region a user context is allowed to mmap (keyed by addr + len). */
struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};
/* IB core object -> driver object converters (plain container_of). */
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}
static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}
static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}
static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}
static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}
static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}
static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}
static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
/*
 * A CQE is new when its valid bit matches the CQ's current phase.
 * NOTE(review): cq->phase appears to track the raw masked bit value,
 * not 0/1 — confirm against the CQ setup/poll code in ocrdma_verbs.c.
 */
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;
	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase);
}
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
return (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_QTYPE) ? 0 : 1;
}
/* Returns 1 when the CQE carries an invalidate indication, else 0. */
static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return !!(le32_to_cpu(cqe->flags_status_srcqpn) &
		  OCRDMA_CQE_INVALIDATE);
}
/* Returns 1 when the CQE carries immediate data, else 0. */
static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return !!(le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM);
}
/* Returns 1 for an RDMA-write-with-immediate completion, else 0. */
static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return !!(le32_to_cpu(cqe->flags_status_srcqpn) &
		  OCRDMA_CQE_WRITE_IMM);
}
/*
 * Fill @mac_addr with the destination MAC for @ah_attr: the derived
 * multicast MAC when the DGID is a multicast address, otherwise the
 * unicast dmac already resolved into the AH attributes.  Always 0.
 */
static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
				      struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
	struct in6_addr in6;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, mac_addr);
	else
		memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
	return 0;
}
/* Human-readable adapter name derived from the PCI device id. */
static inline char *hca_name(struct ocrdma_dev *dev)
{
	unsigned short devid = dev->nic_info.pdev->device;

	if (devid == OC_SKH_DEVICE_PF || devid == OC_SKH_DEVICE_VF)
		return OC_NAME_SH;
	return OC_NAME_UNKNOWN;
}
/* Map a hardware EQ id to its slot in dev->eq_tbl, or -EINVAL. */
static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
					    int eqid)
{
	int i = 0;

	while (i < dev->eq_cnt) {
		if (dev->eq_tbl[i].q.id == eqid)
			return i;
		i++;
	}
	return -EINVAL;
}
/*
 * Return the ASIC generation number.  For device family 0xF the id is
 * read (once, then cached in dev->asic_id) from PCI config space.
 */
static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
{
	if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
		pci_read_config_dword(
			dev->nic_info.pdev,
			OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
	}

	return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
				OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}
/* Priority-flow-control state for the given priority. */
static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
{
	return pfc[prio];
}
/* Application-priority state for the given priority. */
static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
{
	return app_prio[prio];
}
/* True when a state word has both the ENABLED and SYNC flags set. */
static inline u8 ocrdma_is_enabled_and_synced(u32 state)
{	/* May also be used to interpret TC-state, QCN-state
	 * Appl-state and Logical-link-state in future.
	 */
	u32 wanted = OCRDMA_STATE_FLAG_ENABLED | OCRDMA_STATE_FLAG_SYNC;

	return (state & wanted) == wanted;
}
#endif

View file

@ -0,0 +1,134 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
/* ABI versions negotiated with userspace (libocrdma) and be2net. */
#define OCRDMA_ABI_VERSION 2
#define OCRDMA_BE_ROCE_ABI_VERSION 1
/* user kernel communication data structures. */
/* Response to GET_CONTEXT.  Part of the user ABI — layout is fixed. */
struct ocrdma_alloc_ucontext_resp {
	u32 dev_id;
	u32 wqe_size;
	u32 max_inline_data;
	u32 dpp_wqe_size;
	u64 ah_tbl_page;	/* page userspace mmaps for the AH id table */
	u32 ah_tbl_len;
	u32 rqe_size;
	u8 fw_ver[32];
	/* for future use/new features in progress */
	u64 rsvd1;
	u64 rsvd2;
};
/* ALLOC_PD request payload (currently reserved only). */
struct ocrdma_alloc_pd_ureq {
	u64 rsvd1;
};
/* ALLOC_PD response: PD id and, when enabled, its DPP page address. */
struct ocrdma_alloc_pd_uresp {
	u32 id;
	u32 dpp_enabled;
	u32 dpp_page_addr_hi;
	u32 dpp_page_addr_lo;
	u64 rsvd1;
};
/* CREATE_CQ request: whether userspace wants a DPP CQ. */
struct ocrdma_create_cq_ureq {
	u32 dpp_cq;
	u32 rsvd; /* pad */
};
#define MAX_CQ_PAGES 8
/* CREATE_CQ response: ring pages and doorbell page to be mmapped. */
struct ocrdma_create_cq_uresp {
	u32 cq_id;
	u32 page_size;
	u32 num_pages;
	u32 max_hw_cqe;
	u64 page_addr[MAX_CQ_PAGES];
	u64 db_page_addr;
	u32 db_page_size;
	u32 phase_change;
	/* for future use/new features in progress */
	u64 rsvd1;
	u64 rsvd2;
};
#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8

/* CREATE_QP request: optional DPP CQ binding. */
struct ocrdma_create_qp_ureq {
	u8 enable_dpp_cq;
	u8 rsvd;
	u16 dpp_cq_id;
	u32 rsvd1;	/* pad */
};
/* CREATE_QP response.  __packed: layout must match userspace exactly. */
struct ocrdma_create_qp_uresp {
	u16 qp_id;
	u16 sq_dbid;
	u16 rq_dbid;
	u16 resv0;	/* pad */
	u32 sq_page_size;
	u32 rq_page_size;
	u32 num_sq_pages;
	u32 num_rq_pages;
	u64 sq_page_addr[MAX_QP_PAGES];
	u64 rq_page_addr[MAX_QP_PAGES];
	u64 db_page_addr;
	u32 db_page_size;
	u32 dpp_credit;
	u32 dpp_offset;
	u32 num_wqe_allocated;
	u32 num_rqe_allocated;
	u32 db_sq_offset;
	u32 db_rq_offset;
	u32 db_shift;
	u64 rsvd[11];
} __packed;
/* CREATE_SRQ response: RQ ring pages and doorbell information. */
struct ocrdma_create_srq_uresp {
	u16 rq_dbid;
	u16 resv0;	/* pad */
	u32 resv1;

	u32 rq_page_size;
	u32 num_rq_pages;

	u64 rq_page_addr[MAX_QP_PAGES];
	u64 db_page_addr;

	u32 db_page_size;
	u32 num_rqe_allocated;
	u32 db_rq_offset;
	u32 db_shift;

	u64 rsvd2;
	u64 rsvd3;
};
#endif /* __OCRDMA_ABI_H__ */

View file

@ -0,0 +1,198 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#include <net/neighbour.h>
#include <net/netevent.h>
#include <rdma/ib_addr.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
/* Shift placing the 3-bit PCP (priority) field inside a VLAN tag. */
#define OCRDMA_VID_PCP_SHIFT	0xD

/*
 * Build the on-wire address vector for @ah: an ethernet header
 * (802.1Q tagged when a valid VLAN id exists, from the AH attributes
 * or the port default) followed by the GRH, copied into the hardware
 * AV entry.  Returns 0 on success or the dmac-resolution error.
 */
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
			struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
{
	int status = 0;
	u16 vlan_tag; bool vlan_enabled = false;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	/* VLAN */
	vlan_tag = attr->vlan_id;
	if (!vlan_tag || (vlan_tag > 0xFFF))
		vlan_tag = dev->pvid;	/* fall back to port default VLAN */
	if (vlan_tag && (vlan_tag < 0x1000)) {
		eth.eth_type = cpu_to_be16(0x8100);	/* 802.1Q TPID */
		eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
		/* encode the service level in the tag's PCP bits */
		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		vlan_enabled = true;
	} else {
		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}
	/* MAC */
	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
	if (status)
		return status;
	ah->sgid_index = attr->grh.sgid_index;
	memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));

	/* top nibble 6 + traffic class byte + flow label */
	grh.tclass_flow = cpu_to_be32((6 << 28) |
			(attr->grh.traffic_class << 24) |
			attr->grh.flow_label);
	/* 0x1b is next header value in GRH */
	grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
			(0x1b << 8) | attr->grh.hop_limit);
	/* Eth HDR */
	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	if (vlan_enabled)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	ah->av->valid = cpu_to_le32(ah->av->valid);
	return status;
}
/*
 * Create an address handle on @ibpd.  Requires GRH attributes (RoCE).
 * Allocates a hardware AV entry, resolves the destination MAC for
 * user-space AHs when not already provided, and for user PDs writes
 * the AH id into the shared dlid-indexed table so userspace can find
 * it.  GFP_ATOMIC: create_ah may be called from atomic context.
 */
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	union ib_gid sgid;
	u8 zmac[ETH_ALEN];

	if (!(attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	/* refresh the cached service level if flagged for update */
	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
	if (status) {
		pr_err("%s(): Failed to query sgid, status = %d\n",
		      __func__, status);
		goto av_conf_err;
	}

	/* resolve dmac for user AHs only when it was left all-zero */
	memset(&zmac, 0, ETH_ALEN);
	if (pd->uctx &&
	    memcmp(attr->dmac, &zmac, ETH_ALEN)) {
		status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
					attr->dmac, &attr->vlan_id);
		if (status) {
			pr_err("%s(): Failed to resolve dmac from gid."
				"status = %d\n", __func__, status);
			goto av_conf_err;
		}
	}

	status = set_av_attr(dev, ah, attr, &sgid, pd->id);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
		*ahid_addr = ah->id;
	}

	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}
/* Release the hardware AV entry and free the address handle. */
int ocrdma_destroy_ah(struct ib_ah *ibah)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);

	ocrdma_free_av(get_ocrdma_dev(ibah->device), ah);
	kfree(ah);
	return 0;
}
/*
 * Report the attributes of an address handle by decoding the hardware
 * AV entry (ethernet header + GRH) built by set_av_attr().
 *
 * Fix: the flow-label mask was written as 0x00ffffffff — wider than
 * 32 bits and therefore a no-op — so the version nibble and traffic
 * class that set_av_attr() packs into the top byte of tclass_flow
 * leaked into attr->grh.flow_label.  Mask the top byte off instead.
 */
int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;

	attr->ah_flags |= IB_AH_GRH;
	if (ah->av->valid & OCRDMA_AV_VALID) {
		/* VLAN-tagged frame: the GRH follows the 802.1Q header */
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
				sizeof(struct ocrdma_eth_vlan));
		/* service level lives in the 3-bit PCP field of the tag */
		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
				sizeof(struct ocrdma_eth_basic));
		attr->sl = 0;
	}
	memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));
	attr->grh.sgid_index = ah->sgid_index;
	attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;
	attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;
	attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffff;
	return 0;
}
/* AH attributes cannot be changed after creation on this hardware. */
int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	/* modify_ah is unsupported */
	return -ENOSYS;
}
/* MAD handler stub: accept every MAD without generating a response. */
int ocrdma_process_mad(struct ib_device *ibdev,
		       int process_mad_flags,
		       u8 port_num,
		       struct ib_wc *in_wc,
		       struct ib_grh *in_grh,
		       struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return IB_MAD_RESULT_SUCCESS;
}

View file

@ -0,0 +1,42 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#ifndef __OCRDMA_AH_H__
#define __OCRDMA_AH_H__

/* Address-handle and MAD entry points implemented in ocrdma_ah.c. */
struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);

int ocrdma_process_mad(struct ib_device *,
		       int process_mad_flags,
		       u8 port_num,
		       struct ib_wc *in_wc,
		       struct ib_grh *in_grh,
		       struct ib_mad *in_mad, struct ib_mad *out_mad);
#endif				/* __OCRDMA_AH_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,140 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) CNA Adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#ifndef __OCRDMA_HW_H__
#define __OCRDMA_HW_H__
#include "ocrdma_sli.h"
/*
 * Convert a buffer of 32-bit words from CPU to little-endian order in
 * place.  @len is in bytes.  No-op on little-endian hosts.
 *
 * Consistency fix: use sizeof(u32) for the word count like the
 * sibling helpers (ocrdma_le32_to_cpu etc.) instead of a bare "4".
 */
static inline void ocrdma_cpu_to_le32(void *dst, u32 len)
{
#ifdef __BIG_ENDIAN
	int i = 0;
	u32 *src_ptr = dst;
	u32 *dst_ptr = dst;
	for (; i < (len / sizeof(u32)); i++)
		*(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
#endif
}
/*
 * Convert a buffer of 32-bit words from little-endian to CPU order in
 * place.  @len is in bytes.  No-op on little-endian hosts.
 */
static inline void ocrdma_le32_to_cpu(void *dst, u32 len)
{
#ifdef __BIG_ENDIAN
	u32 idx;
	u32 *words = dst;

	for (idx = 0; idx < len / sizeof(u32); idx++)
		words[idx] = le32_to_cpu(words[idx]);
#endif
}
/*
 * Copy @len bytes of 32-bit words from @src to @dst, converting from
 * CPU to little-endian order on big-endian hosts (plain memcpy
 * otherwise).
 */
static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len)
{
#ifdef __BIG_ENDIAN
	u32 idx;
	u32 *out = dst;
	u32 *in = src;

	for (idx = 0; idx < len / sizeof(u32); idx++)
		out[idx] = cpu_to_le32p(in + idx);
#else
	memcpy(dst, src, len);
#endif
}
/*
 * Copy @len bytes of 32-bit words from @src to @dst, converting from
 * little-endian to CPU order on big-endian hosts (plain memcpy
 * otherwise).
 */
static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
{
#ifdef __BIG_ENDIAN
	u32 idx;
	u32 *out = dst;
	u32 *in = src;

	for (idx = 0; idx < len / sizeof(u32); idx++)
		out[idx] = le32_to_cpu(in[idx]);
#else
	memcpy(dst, src, len);
#endif
}
/* Bus address of the doorbell page belonging to protection domain @pdid. */
static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid)
{
	u32 offset = pdid * dev->nic_info.db_page_size;

	return dev->nic_info.unmapped_db + offset;
}
/* Device bring-up / teardown. */
int ocrdma_init_hw(struct ocrdma_dev *);
void ocrdma_cleanup_hw(struct ocrdma_dev *);

enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps);
void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped);

/* verbs specific mailbox commands */
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *,
			struct ocrdma_mbx_query_config *config);

int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);

int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
			  u32 pd_id, int addr_check);
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);

int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
		  u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
			 int entries, int dpp_cq, u16 pd_id);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);

int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt);
int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
			 struct ib_qp_attr *attrs, int attr_mask);
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
			struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);

int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
			  struct ib_srq_init_attr *,
			  struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);

/* Address-vector pool management. */
int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);

/* QP state machine and error-flush helpers. */
int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
			   enum ib_qp_state *old_ib_state);
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
void ocrdma_flush_qp(struct ocrdma_qp *);
int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);

/* Statistics and link/service-level queries. */
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);

View file

@ -0,0 +1,676 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#include <linux/module.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"
#include "ocrdma_abi.h"
MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* All registered ocrdma devices; walked under RCU by address notifiers. */
static LIST_HEAD(ocrdma_dev_list);
static DEFINE_SPINLOCK(ocrdma_devlist_lock);
static DEFINE_IDR(ocrdma_dev_id);

/* All-zero GID used to recognize free slots in the SGID table. */
static union ib_gid ocrdma_zero_sgid;
/*
 * Derive the 8-byte node GUID from the port's 6-byte MAC address:
 * flip the locally-administered bit of the first octet and insert
 * 0xff,0xfe in the middle (modified EUI-64 construction).
 */
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
	const u8 *mac = &dev->nic_info.mac_addr[0];

	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}
/*
 * Insert @new_sgid into the first free slot of the device GID table.
 * Returns true when the table changed, false when the GID was already
 * present or the table is full.
 */
static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
{
	int i;
	unsigned long flags;

	/* (re)build the all-zero GID used as the free-slot marker */
	memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));

	spin_lock_irqsave(&dev->sgid_lock, flags);
	/* first free slot or matching entry wins; table is scanned in order */
	for (i = 0; i < OCRDMA_MAX_SGID; i++) {
		if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
			    sizeof(union ib_gid))) {
			/* found free entry */
			memcpy(&dev->sgid_tbl[i], new_sgid,
			       sizeof(union ib_gid));
			spin_unlock_irqrestore(&dev->sgid_lock, flags);
			return true;
		} else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
				   sizeof(union ib_gid))) {
			/* entry already present, no addition is required. */
			spin_unlock_irqrestore(&dev->sgid_lock, flags);
			return false;
		}
	}
	spin_unlock_irqrestore(&dev->sgid_lock, flags);
	return false;
}
/*
 * Clear @sgid from the device GID table.  Entry 0 (the default GID)
 * is never removed.  Returns true when a matching entry was cleared.
 *
 * Fix: @found is used and returned as a bool; declare it as one
 * instead of mixing "int found = false".
 */
static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
{
	bool found = false;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->sgid_lock, flags);
	/* first is default sgid, which cannot be deleted. */
	for (i = 1; i < OCRDMA_MAX_SGID; i++) {
		if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
			/* found matching entry */
			memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->sgid_lock, flags);
	return found;
}
/*
 * Common handler for IPv4/IPv6 address notifier events: find the
 * ocrdma device backing @netdev (unwrapping VLAN devices first),
 * add or delete the corresponding GID, and raise IB_EVENT_GID_CHANGE
 * when the table actually changed.
 */
static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
			     union ib_gid *gid)
{
	struct ib_event gid_event;
	struct ocrdma_dev *dev;
	bool found = false;
	bool updated = false;
	bool is_vlan = false;

	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
	if (is_vlan)
		netdev = rdma_vlan_dev_real_dev(netdev);

	/* device list is walked under RCU; entries are freed via rcu head */
	rcu_read_lock();
	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
		if (dev->nic_info.netdev == netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	if (!found)
		return NOTIFY_DONE;

	mutex_lock(&dev->dev_lock);
	switch (event) {
	case NETDEV_UP:
		updated = ocrdma_add_sgid(dev, gid);
		break;
	case NETDEV_DOWN:
		updated = ocrdma_del_sgid(dev, gid);
		break;
	default:
		break;
	}
	if (updated) {
		/* GID table updated, notify the consumers about it */
		gid_event.device = &dev->ibdev;
		gid_event.element.port_num = 1;
		gid_event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&gid_event);
	}
	mutex_unlock(&dev->dev_lock);
	return NOTIFY_OK;
}
/* IPv4 address notifier: map the address to a v4-mapped GID. */
static int ocrdma_inetaddr_event(struct notifier_block *notifier,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	union ib_gid gid;
	struct net_device *netdev = ifa->ifa_dev->dev;

	/* RoCE GIDs for IPv4 use the ::ffff:a.b.c.d mapped form */
	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
	return ocrdma_addr_event(event, netdev, &gid);
}

static struct notifier_block ocrdma_inetaddr_notifier = {
	.notifier_call = ocrdma_inetaddr_event
};
#if IS_ENABLED(CONFIG_IPV6)

/* IPv6 address notifier: the address itself is the GID. */
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
				  unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	union ib_gid *gid = (union ib_gid *)&ifa->addr;
	struct net_device *netdev = ifa->idev->dev;

	return ocrdma_addr_event(event, netdev, gid);
}

static struct notifier_block ocrdma_inet6addr_notifier = {
	.notifier_call = ocrdma_inet6addr_event
};

#endif /* IPV6 and VLAN */
/* ib_device get_link_layer callback: ocrdma ports are always RoCE,
 * i.e. Ethernet link layer, regardless of @port_num.
 */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
/* Populate the ib_device structure (identity, uverbs command mask and
 * verbs callbacks) and register it with the IB core.
 *
 * Returns 0 on success or the error from ib_register_device().
 */
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
	strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
	ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
	memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
	       sizeof(OCRDMA_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
	/* Userspace verbs supported on every ASIC generation. */
	dev->ibdev.uverbs_cmd_mask =
	    OCRDMA_UVERBS(GET_CONTEXT) |
	    OCRDMA_UVERBS(QUERY_DEVICE) |
	    OCRDMA_UVERBS(QUERY_PORT) |
	    OCRDMA_UVERBS(ALLOC_PD) |
	    OCRDMA_UVERBS(DEALLOC_PD) |
	    OCRDMA_UVERBS(REG_MR) |
	    OCRDMA_UVERBS(DEREG_MR) |
	    OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
	    OCRDMA_UVERBS(CREATE_CQ) |
	    OCRDMA_UVERBS(RESIZE_CQ) |
	    OCRDMA_UVERBS(DESTROY_CQ) |
	    OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
	    OCRDMA_UVERBS(CREATE_QP) |
	    OCRDMA_UVERBS(MODIFY_QP) |
	    OCRDMA_UVERBS(QUERY_QP) |
	    OCRDMA_UVERBS(DESTROY_QP) |
	    OCRDMA_UVERBS(POLL_CQ) |
	    OCRDMA_UVERBS(POST_SEND) |
	    OCRDMA_UVERBS(POST_RECV);
	dev->ibdev.uverbs_cmd_mask |=
	    OCRDMA_UVERBS(CREATE_AH) |
	    OCRDMA_UVERBS(MODIFY_AH) |
	    OCRDMA_UVERBS(QUERY_AH) |
	    OCRDMA_UVERBS(DESTROY_AH);

	dev->ibdev.node_type = RDMA_NODE_IB_CA;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;

	/* mandatory verbs. */
	dev->ibdev.query_device = ocrdma_query_device;
	dev->ibdev.query_port = ocrdma_query_port;
	dev->ibdev.modify_port = ocrdma_modify_port;
	dev->ibdev.query_gid = ocrdma_query_gid;
	dev->ibdev.get_link_layer = ocrdma_link_layer;
	dev->ibdev.alloc_pd = ocrdma_alloc_pd;
	dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;

	dev->ibdev.create_cq = ocrdma_create_cq;
	dev->ibdev.destroy_cq = ocrdma_destroy_cq;
	dev->ibdev.resize_cq = ocrdma_resize_cq;

	dev->ibdev.create_qp = ocrdma_create_qp;
	dev->ibdev.modify_qp = ocrdma_modify_qp;
	dev->ibdev.query_qp = ocrdma_query_qp;
	dev->ibdev.destroy_qp = ocrdma_destroy_qp;

	dev->ibdev.query_pkey = ocrdma_query_pkey;
	dev->ibdev.create_ah = ocrdma_create_ah;
	dev->ibdev.destroy_ah = ocrdma_destroy_ah;
	dev->ibdev.query_ah = ocrdma_query_ah;
	dev->ibdev.modify_ah = ocrdma_modify_ah;

	dev->ibdev.poll_cq = ocrdma_poll_cq;
	dev->ibdev.post_send = ocrdma_post_send;
	dev->ibdev.post_recv = ocrdma_post_recv;
	dev->ibdev.req_notify_cq = ocrdma_arm_cq;

	dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
	dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
	dev->ibdev.dereg_mr = ocrdma_dereg_mr;
	dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;

	dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
	dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
	dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;

	/* mandatory to support user space verbs consumer. */
	dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
	dev->ibdev.mmap = ocrdma_mmap;
	dev->ibdev.dma_device = &dev->nic_info.pdev->dev;

	dev->ibdev.process_mad = ocrdma_process_mad;

	/* SRQ support is only available on the Skyhawk-R generation. */
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		dev->ibdev.uverbs_cmd_mask |=
		    OCRDMA_UVERBS(CREATE_SRQ) |
		    OCRDMA_UVERBS(MODIFY_SRQ) |
		    OCRDMA_UVERBS(QUERY_SRQ) |
		    OCRDMA_UVERBS(DESTROY_SRQ) |
		    OCRDMA_UVERBS(POST_SRQ_RECV);

		dev->ibdev.create_srq = ocrdma_create_srq;
		dev->ibdev.modify_srq = ocrdma_modify_srq;
		dev->ibdev.query_srq = ocrdma_query_srq;
		dev->ibdev.destroy_srq = ocrdma_destroy_srq;
		dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
	}
	return ib_register_device(&dev->ibdev, NULL);
}
/* Allocate the per-device lookup tables (SGID, CQ, QP, STAG) and
 * initialize the locks protecting them.
 *
 * Uses kcalloc() instead of the previous open-coded
 * kzalloc(count * size) so the multiplication is checked for integer
 * overflow by the allocator.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partial
 * allocations are released later by ocrdma_free_resources()).
 */
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
{
	mutex_init(&dev->dev_lock);

	dev->sgid_tbl = kcalloc(OCRDMA_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		goto alloc_err;
	spin_lock_init(&dev->sgid_lock);

	dev->cq_tbl = kcalloc(OCRDMA_MAX_CQ, sizeof(struct ocrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		goto alloc_err;

	/* The QP table is only needed when the HW exposes QPs at all. */
	if (dev->attr.max_qp) {
		dev->qp_tbl = kcalloc(OCRDMA_MAX_QP,
				      sizeof(struct ocrdma_qp *), GFP_KERNEL);
		if (!dev->qp_tbl)
			goto alloc_err;
	}

	dev->stag_arr = kcalloc(OCRDMA_MAX_STAG, sizeof(u64), GFP_KERNEL);
	if (!dev->stag_arr)
		goto alloc_err;

	spin_lock_init(&dev->av_tbl.lock);
	spin_lock_init(&dev->flush_q_lock);
	return 0;
alloc_err:
	pr_err("%s(%d) error.\n", __func__, dev->id);
	return -ENOMEM;
}
/* Release the tables allocated by ocrdma_alloc_resources().
 * kfree(NULL) is a no-op, so partially-allocated state is fine.
 */
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
	kfree(dev->stag_arr);
	kfree(dev->qp_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->sgid_tbl);
}
/* OCRDMA sysfs interface */
/* sysfs "hw_rev" attribute.
 * NOTE(review): this reports the PCI vendor ID, not a hardware
 * revision — kept as-is since userspace may depend on the value.
 */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct ocrdma_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
}
/* sysfs "fw_ver" attribute: firmware version string reported by the
 * adapter at init time.
 */
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct ocrdma_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->attr.fw_ver);
}
/* sysfs "hca_type" attribute: adapter model number string. */
static ssize_t show_hca_type(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct ocrdma_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->model_number);
}
/* Read-only sysfs attributes exported on the IB device. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *ocrdma_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type
};
/* Remove every sysfs attribute created by ocrdma_add(). */
static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(ocrdma_attributes); idx++)
		device_remove_file(&dev->ibdev.dev, ocrdma_attributes[idx]);
}
/* Install the default GID at index 0: link-local subnet prefix plus
 * the adapter's EUI-64 GUID in the lower 8 bytes.
 */
static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
{
	/* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
	union ib_gid *sgid = &dev->sgid_tbl[0];

	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	ocrdma_get_guid(dev, &sgid->raw[8]);
}
/* Seed the SGID table with an IPv4-mapped GID for every IPv4 address
 * already configured on @net. Called once at device registration.
 */
static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
				  struct net_device *net)
{
	struct in_device *in_dev;
	union ib_gid gid;

	in_dev = in_dev_get(net);
	if (in_dev) {
		/* for_ifa()/endfor_ifa() iterate the device's address list;
		 * 'ifa' is declared by the macro itself.
		 */
		for_ifa(in_dev) {
			ipv6_addr_set_v4mapped(ifa->ifa_address,
					       (struct in6_addr *)&gid);
			ocrdma_add_sgid(dev, &gid);
		}
		endfor_ifa(in_dev);
		in_dev_put(in_dev);
	}
}
/* Seed the SGID table with a GID for every IPv6 address already
 * configured on @net; a no-op kernel without IPv6 support.
 */
static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev,
				  struct net_device *net)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev;
	union ib_gid *pgid;
	struct inet6_ifaddr *ifp;

	in6_dev = in6_dev_get(net);
	if (in6_dev) {
		/* The address list is protected by the inet6_dev lock. */
		read_lock_bh(&in6_dev->lock);
		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
			/* An IPv6 address already has GID layout. */
			pgid = (union ib_gid *)&ifp->addr;
			ocrdma_add_sgid(dev, pgid);
		}
		read_unlock_bh(&in6_dev->lock);
		in6_dev_put(in6_dev);
	}
#endif
}
/* Walk all netdevs in init_net and, for each one backed by this
 * adapter (directly or via a VLAN upper device), populate the SGID
 * table with the default GID plus all configured IPv4/IPv6 addresses.
 */
static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
{
	struct net_device *net_dev;

	for_each_netdev(&init_net, net_dev) {
		/* Resolve the underlying real device once; the previous
		 * code evaluated rdma_vlan_dev_real_dev() twice per netdev.
		 */
		struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev);

		if (!real_dev)
			real_dev = net_dev;

		if (real_dev == dev->nic_info.netdev) {
			ocrdma_add_default_sgid(dev);
			ocrdma_init_ipv4_gids(dev, net_dev);
			ocrdma_init_ipv6_gids(dev, net_dev);
		}
	}
}
/* be2net ->add() callback: allocate and bring up one RoCE device.
 *
 * Error paths unwind in reverse order of setup via the labels at the
 * bottom; on failure NULL is returned and everything allocated here is
 * released.
 */
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
	int status = 0, i;
	struct ocrdma_dev *dev;

	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}
	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
	if (!dev->mbx_cmd)
		goto idr_err;

	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
	dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
	if (dev->id < 0)
		goto idr_err;

	status = ocrdma_init_hw(dev);
	if (status)
		goto init_err;

	status = ocrdma_alloc_resources(dev);
	if (status)
		goto alloc_err;

	ocrdma_init_service_level(dev);
	ocrdma_init_gid_table(dev);
	status = ocrdma_register_device(dev);
	if (status)
		goto alloc_err;

	for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
			goto sysfs_err;
	/* Publish the device on the RCU-protected global list used by the
	 * address notifiers.
	 */
	spin_lock(&ocrdma_devlist_lock);
	list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
	spin_unlock(&ocrdma_devlist_lock);
	/* Init stats */
	ocrdma_add_port_stats(dev);

	pr_info("%s %s: %s \"%s\" port %d\n",
		dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
		port_speed_string(dev), dev->model_number,
		dev->hba_port_num);
	pr_info("%s ocrdma%d driver loaded successfully\n",
		dev_name(&dev->nic_info.pdev->dev), dev->id);
	return dev;

sysfs_err:
	ocrdma_remove_sysfiles(dev);
alloc_err:
	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);
init_err:
	idr_remove(&ocrdma_dev_id, dev->id);
idr_err:
	kfree(dev->mbx_cmd);	/* kfree(NULL) safe if mbx_cmd alloc failed */
	ib_dealloc_device(&dev->ibdev);
	pr_err("%s() leaving. ret=%d\n", __func__, status);
	return NULL;
}
/* RCU callback: final teardown once no reader can still reach the
 * device through ocrdma_dev_list (see ocrdma_remove()).
 */
static void ocrdma_remove_free(struct rcu_head *rcu)
{
	struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);

	idr_remove(&ocrdma_dev_id, dev->id);
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
}
/* be2net ->remove() callback: tear the device down in the reverse
 * order of ocrdma_add(); the memory itself is freed after a grace
 * period via call_rcu() because the address notifiers traverse the
 * device list under RCU.
 */
static void ocrdma_remove(struct ocrdma_dev *dev)
{
	/* first unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ocrdma_rem_port_stats(dev);
	ocrdma_remove_sysfiles(dev);

	ib_unregister_device(&dev->ibdev);

	spin_lock(&ocrdma_devlist_lock);
	list_del_rcu(&dev->entry);
	spin_unlock(&ocrdma_devlist_lock);

	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);

	call_rcu(&dev->rcu, ocrdma_remove_free);
}
static int ocrdma_open(struct ocrdma_dev *dev)
{
struct ib_event port_event;
port_event.event = IB_EVENT_PORT_ACTIVE;
port_event.element.port_num = 1;
port_event.device = &dev->ibdev;
ib_dispatch_event(&port_event);
return 0;
}
/* Link-down handler: move every non-GSI QP to the ERROR state, raise
 * a fatal event per QP, then report the port itself as down.
 *
 * Always returns 0.
 */
static int ocrdma_close(struct ocrdma_dev *dev)
{
	int i;
	struct ocrdma_qp *qp, **cur_qp;
	struct ib_event err_event;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;

	attrs.qp_state = IB_QPS_ERR;
	mutex_lock(&dev->dev_lock);
	if (dev->qp_tbl) {
		cur_qp = dev->qp_tbl;
		for (i = 0; i < OCRDMA_MAX_QP; i++) {
			qp = cur_qp[i];
			/* The GSI QP must stay usable across link flaps. */
			if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
				/* change the QP state to ERROR */
				_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);

				err_event.event = IB_EVENT_QP_FATAL;
				err_event.element.qp = &qp->ibqp;
				err_event.device = &dev->ibdev;
				ib_dispatch_event(&err_event);
			}
		}
	}
	mutex_unlock(&dev->dev_lock);

	err_event.event = IB_EVENT_PORT_ERR;
	err_event.element.port_num = 1;
	err_event.device = &dev->ibdev;
	ib_dispatch_event(&err_event);
	return 0;
}
/* Shutdown: flush all QPs (close) and then remove the device. */
static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
	ocrdma_close(dev);
	ocrdma_remove(dev);
}
/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the
 * event to the stack.
 */
/* Dispatch a be2net state-change notification to the matching
 * open/close/shutdown handler; unknown events are ignored.
 */
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{
	if (event == BE_DEV_UP)
		ocrdma_open(dev);
	else if (event == BE_DEV_DOWN)
		ocrdma_close(dev);
	else if (event == BE_DEV_SHUTDOWN)
		ocrdma_shutdown(dev);
}
/* Callbacks handed to the be2net NIC driver, which owns the PCI
 * function and drives device add/remove and link-state changes.
 */
static struct ocrdma_driver ocrdma_drv = {
	.name = "ocrdma_driver",
	.add = ocrdma_add,
	.remove = ocrdma_remove,
	.state_change_handler = ocrdma_event_handler,
	.be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
};
/* Unhook the IPv6 address notifier; compiles to a no-op stub when the
 * kernel lacks IPv6 support.
 */
static void ocrdma_unregister_inet6addr_notifier(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
#endif
}
/* Unhook the IPv4 address notifier. */
static void ocrdma_unregister_inetaddr_notifier(void)
{
	unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier);
}
/* Module init: set up debugfs, register the IPv4/IPv6 address
 * notifiers, then register with the be2net NIC driver.
 *
 * Fix: the debugfs directory created by ocrdma_init_debugfs() used to
 * leak on every failure path; all paths now unwind through
 * ocrdma_rem_debugfs().
 */
static int __init ocrdma_init_module(void)
{
	int status;

	ocrdma_init_debugfs();

	status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
	if (status)
		goto err_notifier;

#if IS_ENABLED(CONFIG_IPV6)
	status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
	if (status)
		goto err_notifier6;
#endif

	status = be_roce_register_driver(&ocrdma_drv);
	if (status)
		goto err_be_reg;

	return 0;

err_be_reg:
#if IS_ENABLED(CONFIG_IPV6)
	ocrdma_unregister_inet6addr_notifier();
err_notifier6:
#endif
	ocrdma_unregister_inetaddr_notifier();
err_notifier:
	ocrdma_rem_debugfs();
	return status;
}
/* Module exit: tear everything down in the reverse order of init. */
static void __exit ocrdma_exit_module(void)
{
	be_roce_unregister_driver(&ocrdma_drv);
	ocrdma_unregister_inet6addr_notifier();
	ocrdma_unregister_inetaddr_notifier();
	ocrdma_rem_debugfs();
}

module_init(ocrdma_init_module);
module_exit(ocrdma_exit_module);

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,616 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#include <rdma/ib_addr.h>
#include "ocrdma_stats.h"
static struct dentry *ocrdma_dbgfs_dir;
/* Format one "name: count" line at @pcur inside the stats buffer that
 * begins at @start (bounded by OCRDMA_MAX_DBGFS_MEM).
 *
 * Returns the number of bytes appended, or 0 when the line would not
 * fit (the line is then silently dropped and an error is logged).
 */
static int ocrdma_add_stat(char *start, char *pcur,
			   char *name, u64 count)
{
	char buff[128] = {0};
	int cpy_len = 0;

	snprintf(buff, 128, "%s: %llu\n", name, count);
	cpy_len = strlen(buff);

	if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
		pr_err("%s: No space in stats buff\n", __func__);
		return 0;
	}

	memcpy(pcur, buff, cpy_len);
	return cpy_len;
}
/* Allocate the DMA buffer shared with firmware for the stats mailbox
 * command, plus the kernel-side scratch buffer used to format debugfs
 * output.
 *
 * Returns true on success. On partial failure the caller is expected
 * to invoke ocrdma_release_stats_mem(), which tolerates NULL members.
 */
static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	/* Alloc mbox command mem*/
	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
			  sizeof(struct ocrdma_rdma_stats_resp));

	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
				     &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		pr_err("%s: stats mbox allocation failed\n", __func__);
		return false;
	}

	memset(mem->va, 0, mem->size);

	/* Alloc debugfs mem */
	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
	if (!mem->debugfs_mem) {
		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
		return false;
	}

	return true;
}
/* Free the buffers allocated by ocrdma_alloc_stats_mem(); safe to call
 * when either (or both) allocation never happened.
 */
static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	if (mem->va)
		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
				  mem->va, mem->pa);
	kfree(mem->debugfs_mem);
}
/* Format the firmware's active and threshold resource counters into
 * the debugfs scratch buffer. Assumes ocrdma_update_stats() has just
 * refreshed dev->stats_mem.va. Returns the formatted buffer.
 */
static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
			(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	/* Currently-active resource counts. */
	pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "active_mw",
				(u64)rsrc_stats->mw);

	/* Print the threshold stats */
	rsrc_stats = &rdma_stats->th_rsrc_stats;

	pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
				(u64)rsrc_stats->mw);
	return stats;
}
/* Format the receive-path counters into the debugfs scratch buffer.
 * 64-bit counters are split into lo/hi halves by firmware and joined
 * with convert_to_64bit(). Returns the formatted buffer.
 */
static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat
		(stats, pcur, "roce_frame_bytes",
		 convert_to_64bit(rx_stats->roce_frame_bytes_lo,
		 rx_stats->roce_frame_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
				(u64)rx_stats->roce_frame_icrc_drops);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
				(u64)rx_stats->roce_frame_payload_len_drops);
	pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
				(u64)rx_stats->ud_drops);
	pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
				(u64)rx_stats->qp1_drops);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
				(u64)rx_stats->psn_error_request_packets);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
				(u64)rx_stats->psn_error_resp_packets);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
				(u64)rx_stats->rnr_nak_timeouts);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
				(u64)rx_stats->rnr_nak_receives);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
				(u64)rx_stats->roce_frame_rxmt_drops);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
				(u64)rx_stats->nak_count_psn_sequence_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
				(u64)rx_stats->rc_drop_count_lookup_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
				(u64)rx_stats->rq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
				(u64)rx_stats->srq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
				convert_to_64bit(rx_stats->roce_frames_lo,
						 rx_stats->roce_frames_hi));
	return stats;
}
/* Format the transmit-path counters into the debugfs scratch buffer.
 * Returns the formatted buffer.
 */
static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
				convert_to_64bit(tx_stats->send_pkts_lo,
						 tx_stats->send_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
				convert_to_64bit(tx_stats->write_pkts_lo,
						 tx_stats->write_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
				convert_to_64bit(tx_stats->read_pkts_lo,
						 tx_stats->read_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
				convert_to_64bit(tx_stats->read_rsp_pkts_lo,
						 tx_stats->read_rsp_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
				convert_to_64bit(tx_stats->ack_pkts_lo,
						 tx_stats->ack_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
				convert_to_64bit(tx_stats->send_bytes_lo,
						 tx_stats->send_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
				convert_to_64bit(tx_stats->write_bytes_lo,
						 tx_stats->write_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
				convert_to_64bit(tx_stats->read_req_bytes_lo,
						 tx_stats->read_req_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
				convert_to_64bit(tx_stats->read_rsp_bytes_lo,
						 tx_stats->read_rsp_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
				(u64)tx_stats->ack_timeouts);

	return stats;
}
/* Format the work-queue-entry counters into the debugfs scratch
 * buffer. Returns the formatted buffer.
 */
static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_wqe_stats	*wqe_stats = &rdma_stats->wqe_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
		convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
				 wqe_stats->large_send_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
		convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
				 wqe_stats->large_write_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
				convert_to_64bit(wqe_stats->read_wqes_lo,
						 wqe_stats->read_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
				convert_to_64bit(wqe_stats->frmr_wqes_lo,
						 wqe_stats->frmr_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
				convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
						 wqe_stats->mw_bind_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
		convert_to_64bit(wqe_stats->invalidate_wqes_lo,
				 wqe_stats->invalidate_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
				(u64)wqe_stats->dpp_wqe_drops);
	return stats;
}
/* Format the doorbell error counters into the debugfs scratch buffer.
 * Returns the formatted buffer.
 */
static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
				(u64)db_err_stats->sq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
				(u64)db_err_stats->cq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
				(u64)db_err_stats->rq_srq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
				(u64)db_err_stats->cq_overflow_errors);
	return stats;
}
/* Format the receive-side QP error counters into the debugfs scratch
 * buffer. Returns the formatted buffer.
 */
static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
		 &rdma_stats->rx_qp_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
			(u64)rx_qp_err_stats->nak_invalid_requst_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
			(u64)rx_qp_err_stats->nak_remote_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
			(u64)rx_qp_err_stats->nak_count_remote_access_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)rx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)rx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)rx_qp_err_stats->local_qp_operation_errors);
	return stats;
}
/* Format the transmit-side QP error counters into the debugfs scratch
 * buffer. Returns the formatted buffer.
 */
static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
		&rdma_stats->tx_qp_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)tx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)tx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)tx_qp_err_stats->local_qp_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->retry_count_exceeded_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
	return stats;
}
/* Dump the 100 TX debug DWORDs into the debugfs scratch buffer.
 *
 * Fix: each snprintf() used a hard-coded size of 80 that never
 * accounted for the space remaining in the 4 KB buffer, and snprintf's
 * return value (the would-be length) could advance the cursor past the
 * written data. scnprintf() bounded by the remaining space returns the
 * number of bytes actually written and can never run past the end.
 */
static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	char *end = pstats + OCRDMA_MAX_DBGFS_MEM;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_dbg_stats *tx_dbg_stats =
		&rdma_stats->tx_dbg_stats;

	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));

	for (i = 0; i < 100; i++)
		pstats += scnprintf(pstats, end - pstats, "DW[%d] = 0x%x\n",
				    i, tx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}
/* Dump the 200 RX debug DWORDs into the debugfs scratch buffer.
 *
 * Fix: 200 lines of up to 21 bytes ("DW[199] = 0xffffffff\n") can
 * exceed OCRDMA_MAX_DBGFS_MEM (4096); the old per-call snprintf size
 * of 80 provided no cumulative bound, so the cursor could be advanced
 * past the buffer and subsequent lines written out of bounds.
 * scnprintf() bounded by the remaining space returns the bytes
 * actually written and simply truncates once the buffer is full.
 */
static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	char *end = pstats + OCRDMA_MAX_DBGFS_MEM;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_dbg_stats *rx_dbg_stats =
		&rdma_stats->rx_dbg_stats;

	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));

	for (i = 0; i < 200; i++)
		pstats += scnprintf(pstats, end - pstats, "DW[%d] = 0x%x\n",
				    i, rx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}
/* Refresh the firmware stats in dev->stats_mem.va via the mailbox,
 * rate-limited to at most once per second. Caller must hold
 * dev->stats_lock.
 */
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
	ulong now = jiffies, secs;
	int status = 0;

	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
	if (secs) {
		/* update */
		status = ocrdma_mbx_rdma_stats(dev, false);
		if (status)
			pr_err("%s: stats mbox failed with status = %d\n",
			       __func__, status);
		dev->last_stats_time = jiffies;
	}
}
/* debugfs read handler shared by all stats files. The file's
 * ocrdma_stats cookie (set at creation time) selects which formatter
 * fills the scratch buffer; the result is copied to userspace in a
 * single read (no partial reads are supported).
 *
 * Returns the number of bytes copied, 0 on a repeated read, or a
 * negative errno.
 */
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
					size_t usr_buf_len, loff_t *ppos)
{
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;
	ssize_t status = 0;
	char *data = NULL;

	/* No partial reads */
	if (*ppos != 0)
		return 0;

	/* stats_lock serializes both the mailbox refresh and the shared
	 * scratch buffer against concurrent readers.
	 */
	mutex_lock(&dev->stats_lock);

	ocrdma_update_stats(dev);

	switch (pstats->type) {
	case OCRDMA_RSRC_STATS:
		data = ocrdma_resource_stats(dev);
		break;
	case OCRDMA_RXSTATS:
		data = ocrdma_rx_stats(dev);
		break;
	case OCRDMA_WQESTATS:
		data = ocrdma_wqe_stats(dev);
		break;
	case OCRDMA_TXSTATS:
		data = ocrdma_tx_stats(dev);
		break;
	case OCRDMA_DB_ERRSTATS:
		data = ocrdma_db_errstats(dev);
		break;
	case OCRDMA_RXQP_ERRSTATS:
		data = ocrdma_rxqp_errstats(dev);
		break;
	case OCRDMA_TXQP_ERRSTATS:
		data = ocrdma_txqp_errstats(dev);
		break;
	case OCRDMA_TX_DBG_STATS:
		data = ocrdma_tx_dbg_stats(dev);
		break;
	case OCRDMA_RX_DBG_STATS:
		data = ocrdma_rx_dbg_stats(dev);
		break;
	default:
		status = -EFAULT;
		goto exit;
	}

	if (usr_buf_len < strlen(data)) {
		status = -ENOSPC;
		goto exit;
	}

	status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
					 strlen(data));
exit:
	mutex_unlock(&dev->stats_lock);
	return status;
}
/* File operations shared by every per-port stats debugfs file;
 * simple_open() stashes the ocrdma_stats cookie in private_data.
 */
static const struct file_operations ocrdma_dbg_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ocrdma_dbgfs_ops_read,
};
/* Create the per-port debugfs directory and one file per stats group,
 * then allocate the mailbox/scratch memory backing them. Quietly does
 * nothing when the driver-level debugfs root is unavailable; on any
 * failure everything created here is rolled back and dev->dir stays
 * NULL (which ocrdma_rem_port_stats() treats as "nothing to do").
 */
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
{
	if (!ocrdma_dbgfs_dir)
		return;

	/* Create post stats base dir */
	dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
	if (!dev->dir)
		goto err;

	dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
	dev->rsrc_stats.dev = dev;
	if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
				 &dev->rsrc_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_stats.type = OCRDMA_RXSTATS;
	dev->rx_stats.dev = dev;
	if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
				 &dev->rx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->wqe_stats.type = OCRDMA_WQESTATS;
	dev->wqe_stats.dev = dev;
	if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
				 &dev->wqe_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_stats.type = OCRDMA_TXSTATS;
	dev->tx_stats.dev = dev;
	if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
				 &dev->tx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
	dev->db_err_stats.dev = dev;
	if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
				 &dev->db_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
	dev->tx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
	dev->rx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
	dev->tx_dbg_stats.dev = dev;
	if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->tx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
	dev->rx_dbg_stats.dev = dev;
	if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	/* Now create dma_mem for stats mbx command */
	if (!ocrdma_alloc_stats_mem(dev))
		goto err;

	mutex_init(&dev->stats_lock);

	return;
err:
	/* release handles the never-allocated case (zeroed dev). */
	ocrdma_release_stats_mem(dev);
	debugfs_remove_recursive(dev->dir);
	dev->dir = NULL;
}
void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
if (!dev->dir)
return;
mutex_destroy(&dev->stats_lock);
ocrdma_release_stats_mem(dev);
debugfs_remove(dev->dir);
}
/* Create the driver-level debugfs root. A NULL result (e.g. debugfs
 * not compiled in) is tolerated; per-port setup then becomes a no-op.
 */
void ocrdma_init_debugfs(void)
{
	/* Create base dir in debugfs root dir */
	ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
}
/* Remove the driver-level debugfs root and anything still under it;
 * debugfs_remove_recursive(NULL) is a no-op.
 */
void ocrdma_rem_debugfs(void)
{
	debugfs_remove_recursive(ocrdma_dbgfs_dir);
}

View file

@ -0,0 +1,54 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#ifndef __OCRDMA_STATS_H__
#define __OCRDMA_STATS_H__

#include <linux/debugfs.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"

/* Size of the buffer used to format stats text for a debugfs read.
 * NOTE(review): presumably an upper bound on one formatted report —
 * confirm against the formatting code in ocrdma_stats.c. */
#define OCRDMA_MAX_DBGFS_MEM 4096

/*
 * Tag identifying which statistics group a debugfs file exposes.  Each
 * file created in ocrdma_add_port_stats() stores one of these in its
 * private data (e.g. dev->rx_stats.type = OCRDMA_RXSTATS) and the common
 * ocrdma_dbg_ops dispatch on it at read time.
 *
 * Values are assigned implicitly (0..8); do not reorder existing entries.
 */
enum OCRDMA_STATS_TYPE {
	OCRDMA_RSRC_STATS,	/* "resource_stats" file */
	OCRDMA_RXSTATS,		/* "rx_stats" file */
	OCRDMA_WQESTATS,	/* "wqe_stats" file */
	OCRDMA_TXSTATS,		/* "tx_stats" file */
	OCRDMA_DB_ERRSTATS,	/* "db_err_stats" file */
	OCRDMA_RXQP_ERRSTATS,	/* "rx_qp_err_stats" file */
	OCRDMA_TXQP_ERRSTATS,	/* "tx_qp_err_stats" file */
	OCRDMA_TX_DBG_STATS,	/* "tx_dbg_stats" file */
	OCRDMA_RX_DBG_STATS	/* "rx_dbg_stats" file */
};

/* Driver-wide debugfs root ("ocrdma" dir): module load/unload time. */
void ocrdma_rem_debugfs(void);
void ocrdma_init_debugfs(void);

/* Per-device stats dir, files, mailbox memory and lock: device add/remove. */
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);

#endif	/* __OCRDMA_STATS_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,99 @@
/*******************************************************************
* This file is part of the Emulex RoCE Device Driver for *
* RoCE (RDMA over Converged Ethernet) adapters. *
* Copyright (C) 2008-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*
* Contact Information:
* linux-drivers@emulex.com
*
* Emulex
* 3333 Susan Street
* Costa Mesa, CA 92626
*******************************************************************/
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__

/*
 * Prototypes for the RDMA verbs entry points implemented in
 * ocrdma_verbs.c and wired into the device's ib_device ops.
 *
 * NOTE(review): this header declares struct ib_qp, struct ocrdma_dev,
 * etc. without including rdma/ib_verbs.h or "ocrdma.h" — it relies on
 * the including .c file pulling those in first; verify include order
 * at each use site.
 */

/* Work-request posting and completion processing. */
int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
		     struct ib_send_wr **bad_wr);
int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
		     struct ib_recv_wr **bad_wr);

int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);

/* Device, port, GID and P_Key queries. */
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
		       struct ib_port_modify *props);
void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_gid(struct ib_device *, u8 port,
		     int index, union ib_gid *gid);
int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);

/* User context lifecycle and doorbell/queue page mmap. */
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
					  struct ib_udata *);
int ocrdma_dealloc_ucontext(struct ib_ucontext *);

int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);

/* Protection domains. */
struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
			      struct ib_ucontext *, struct ib_udata *);
int ocrdma_dealloc_pd(struct ib_pd *pd);

/* Completion queues. */
struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
			       struct ib_ucontext *, struct ib_udata *);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *);

/* Queue pairs.  _ocrdma_modify_qp is the udata-less internal variant. */
struct ib_qp *ocrdma_create_qp(struct ib_pd *,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *);
int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
		      int attr_mask);
int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata);
int ocrdma_query_qp(struct ib_qp *,
		    struct ib_qp_attr *qp_attr,
		    int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *);
void ocrdma_del_flush_qp(struct ocrdma_qp *qp);

/* Shared receive queues. */
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
				 struct ib_udata *);
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
		      enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
int ocrdma_destroy_srq(struct ib_srq *);
int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
			 struct ib_recv_wr **bad_recv_wr);

/* Memory regions: DMA, kernel phys-buf, userspace, and fast-reg MRs. */
int ocrdma_dereg_mr(struct ib_mr *);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
				   struct ib_phys_buf *buffer_list,
				   int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
				 u64 virt, int acc, struct ib_udata *);
struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
							  *ibdev,
							  int page_list_len);
void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);

#endif	/* __OCRDMA_VERBS_H__ */