Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,18 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
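# (IPV6 || IPV6=n) keeps iw_cxgb4 from being built-in while IPv6 is a module:
# the driver may then only be =m or IPv6 must be disabled entirely.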
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for Chelsio T4 and T5 based
1GbE and 10GbE adapters, and the T5 40GbE adapter.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.html>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called iw_cxgb4.


@@ -0,0 +1,5 @@
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o

3 file diffs suppressed because they are too large


@@ -0,0 +1,237 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>
#include "iw_cxgb4.h"
static void print_tpte(struct c4iw_dev *dev, u32 stag)
{
int ret;
struct fw_ri_tpte tpte;
ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
(__be32 *)&tpte);
if (ret) {
dev_err(&dev->rdev.lldi.pdev->dev,
"%s cxgb4_read_tpte err %d\n", __func__, ret);
return;
}
PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
stag & 0xffffff00,
G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
__be64 *p = (void *)err_cqe;
dev_err(&dev->rdev.lldi.pdev->dev,
"AE qpid %d opcode %d status 0x%x "
"type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
PDBG("%016llx %016llx %016llx %016llx\n",
be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
be64_to_cpu(p[3]));
/*
* Ingress WRITE and READ_RESP errors provide
* the offending stag, so parse and log it.
*/
if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
print_tpte(dev, CQE_WRID_STAG(err_cqe));
}
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
struct c4iw_qp *qhp,
struct t4_cqe *err_cqe,
enum ib_event_type ib_event)
{
struct ib_event event;
struct c4iw_qp_attributes attrs;
unsigned long flag;
dump_err_cqe(dev, err_cqe);
if (qhp->attr.state == C4IW_QP_STATE_RTS) {
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 0);
}
event.event = ib_event;
event.device = chp->ibcq.device;
if (ib_event == IB_EVENT_CQ_ERR)
event.element.cq = &chp->ibcq;
else
event.element.qp = &qhp->ibqp;
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
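/*
 * Dispatch an async error CQE from the LLD: look up the QP, pick its send
 * or receive CQ, and post the matching IB event (moving the QP to
 * TERMINATE first if it is still in RTS).
 */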
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
struct c4iw_cq *chp;
struct c4iw_qp *qhp;
u32 cqid;
spin_lock_irq(&dev->lock);
qhp = get_qhp(dev, CQE_QPID(err_cqe));
if (!qhp) {
printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
"status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
CQE_WRID_LOW(err_cqe));
spin_unlock_irq(&dev->lock);
goto out;
}
if (SQ_TYPE(err_cqe))
cqid = qhp->attr.scq;
else
cqid = qhp->attr.rcq;
chp = get_chp(dev, cqid);
if (!chp) {
printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
"status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
cqid, CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
CQE_WRID_LOW(err_cqe));
spin_unlock_irq(&dev->lock);
goto out;
}
c4iw_qp_add_ref(&qhp->ibqp);
atomic_inc(&chp->refcnt);
spin_unlock_irq(&dev->lock);
/* Bad incoming write */
if (RQ_TYPE(err_cqe) &&
(CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
goto done;
}
switch (CQE_STATUS(err_cqe)) {
/* Completion Events */
case T4_ERR_SUCCESS:
printk(KERN_ERR MOD "AE with status 0!\n");
break;
case T4_ERR_STAG:
case T4_ERR_PDID:
case T4_ERR_QPID:
case T4_ERR_ACCESS:
case T4_ERR_WRAP:
case T4_ERR_BOUND:
case T4_ERR_INVALIDATE_SHARED_MR:
case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
break;
/* Device Fatal Errors */
case T4_ERR_ECC:
case T4_ERR_ECC_PSTAG:
case T4_ERR_INTERNAL_ERR:
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
break;
/* QP Fatal Errors */
case T4_ERR_OUT_OF_RQE:
case T4_ERR_PBL_ADDR_BOUND:
case T4_ERR_CRC:
case T4_ERR_MARKER:
case T4_ERR_PDU_LEN_ERR:
case T4_ERR_DDP_VERSION:
case T4_ERR_RDMA_VERSION:
case T4_ERR_OPCODE:
case T4_ERR_DDP_QUEUE_NUM:
case T4_ERR_MSN:
case T4_ERR_TBIT:
case T4_ERR_MO:
case T4_ERR_MSN_GAP:
case T4_ERR_MSN_RANGE:
case T4_ERR_RQE_ADDR_BOUND:
case T4_ERR_IRD_OVERFLOW:
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
break;
default:
printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
CQE_STATUS(err_cqe), qhp->wq.sq.qid);
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
break;
}
done:
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
c4iw_qp_rem_ref(&qhp->ibqp);
out:
return;
}
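/*
 * Upcall for a CQ interrupt: clear the CQ-armed flag and run the
 * completion handler for the given CQ id.
 */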
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
struct c4iw_cq *chp;
unsigned long flag;
chp = get_chp(dev, qid);
if (chp) {
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
} else
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
return 0;
}


@@ -0,0 +1,112 @@
/*
* Copyright (c) 2011 Chelsio Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/random.h>
#include "iw_cxgb4.h"
#define RANDOM_SKIP 16
/*
* Trivial bitmap-based allocator. If the random flag is set, the
* allocator is designed to:
* - pseudo-randomize the id returned such that it is not trivially predictable.
* - avoid reuse of recently used id (at the expense of predictability)
*/
u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
{
unsigned long flags;
u32 obj;
spin_lock_irqsave(&alloc->lock, flags);
obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
if (obj >= alloc->max)
obj = find_first_zero_bit(alloc->table, alloc->max);
if (obj < alloc->max) {
if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
alloc->last += prandom_u32() % RANDOM_SKIP;
else
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
alloc->last = 0;
set_bit(obj, alloc->table);
obj += alloc->start;
} else
obj = -1;
spin_unlock_irqrestore(&alloc->lock, flags);
return obj;
}
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
{
unsigned long flags;
obj -= alloc->start;
BUG_ON((int)obj < 0);
spin_lock_irqsave(&alloc->lock, flags);
clear_bit(obj, alloc->table);
spin_unlock_irqrestore(&alloc->lock, flags);
}
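/*
 * Set up an id table covering [start, start + num). The low 'reserved' ids
 * are pre-marked as in use unless C4IW_ID_TABLE_F_EMPTY is set, and
 * C4IW_ID_TABLE_F_RANDOM randomizes the starting search position.
 */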
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
u32 reserved, u32 flags)
{
int i;
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
alloc->last = prandom_u32() % RANDOM_SKIP;
else
alloc->last = 0;
alloc->max = num;
spin_lock_init(&alloc->lock);
alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
bitmap_zero(alloc->table, num);
if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
for (i = 0; i < reserved; ++i)
set_bit(i, alloc->table);
return 0;
}
void c4iw_id_table_free(struct c4iw_id_table *alloc)
{
kfree(alloc->table);
}

File diff suppressed because it is too large


@@ -0,0 +1,955 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include "iw_cxgb4.h"
int use_dsgl = 0;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128
static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
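/*
 * Write one naturally aligned chunk of adapter memory with a single ULP_TX
 * DSGL work request; 'addr' is in 32-byte units and 'data' is an
 * already-mapped DMA address. Optionally wait for the firmware reply.
 */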
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
u32 len, dma_addr_t data, int wait)
{
struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
struct c4iw_wr_wait wr_wait;
addr &= 0x7FFFFFF;
if (wait)
c4iw_init_wr_wait(&wr_wait);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
memset(req, 0, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
(wait ? FW_WR_COMPL(1) : 0));
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
sgl = (struct ulptx_sgl *)(req + 1);
sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
ULPTX_NSGE(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
ret = c4iw_ofld_send(rdev, skb);
if (ret)
return ret;
if (wait)
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
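/*
 * Write adapter memory using immediate (inline) data, at most
 * C4IW_MAX_INLINE_SIZE bytes per work request. A NULL 'data' pointer
 * zeroes the region instead of copying.
 */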
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
void *data)
{
struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_idata *sc;
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
__be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
else
cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
for (i = 0; i < num_wqe; i++) {
copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
len;
wr_len = roundup(sizeof *req + sizeof *sc +
roundup(copy_len, T4_ULPTX_MIN_IO), 16);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
memset(req, 0, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
FW_WR_COMPL(1));
req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cmd;
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
sc = (struct ulptx_idata *)(req + 1);
sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
to_dp = (u8 *)(sc + 1);
from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
if (data)
memcpy(to_dp, from_dp, copy_len);
else
memset(to_dp, 0, copy_len);
if (copy_len % T4_ULPTX_MIN_IO)
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
ret = c4iw_ofld_send(rdev, skb);
if (ret)
return ret;
len -= C4IW_MAX_INLINE_SIZE;
}
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
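/*
 * DMA-map 'data' once and push it to adapter memory in chunks of up to
 * T4_ULPTX_MAX_DMA bytes; any remaining tail no larger than the inline
 * threshold is written via the inline path.
 */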
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
u32 remain = len;
u32 dmalen;
int ret = 0;
dma_addr_t daddr;
dma_addr_t save;
daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
return -1;
save = daddr;
while (remain > inline_threshold) {
if (remain < T4_ULPTX_MAX_DMA) {
if (remain & ~T4_ULPTX_MIN_IO)
dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
else
dmalen = remain;
} else
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
!remain);
if (ret)
goto out;
addr += dmalen >> 5;
data += dmalen;
daddr += dmalen;
}
if (remain)
ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
return ret;
}
/*
* Write len bytes of data into addr (a 32B-aligned adapter address).
* If data is NULL, clear len bytes of memory to zero.
*/
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
void *data)
{
if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
if (len > inline_threshold) {
if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
printk_ratelimited(KERN_WARNING
"%s: dma map"
" failure (non fatal)\n",
pci_name(rdev->lldi.pdev));
return _c4iw_write_mem_inline(rdev, addr, len,
data);
} else
return 0;
} else
return _c4iw_write_mem_inline(rdev, addr, len, data);
} else
return _c4iw_write_mem_inline(rdev, addr, len, data);
}
/*
* Build and write a TPT entry.
* IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
* pbl_size and pbl_addr
* OUT: stag index
*/
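/*
 * An STag is (TPT index << 8) | 8-bit key; each TPT entry occupies 32 bytes
 * of adapter memory, which is why the stats track stag usage in units of 32.
 */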
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
int bind_enabled, u32 zbva, u64 to,
u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
int err;
struct fw_ri_tpte tpt;
u32 stag_idx;
static atomic_t key;
if (c4iw_fatal_error(rdev))
return -EIO;
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
if (!stag_idx) {
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
return -ENOMEM;
}
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur += 32;
if (rdev->stats.stag.cur > rdev->stats.stag.max)
rdev->stats.stag.max = rdev->stats.stag.cur;
mutex_unlock(&rdev->stats.lock);
*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
}
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
__func__, stag_state, type, pdid, stag_idx);
/* write TPT entry */
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
else {
tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
V_FW_RI_TPTE_STAGSTATE(stag_state) |
V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
V_FW_RI_TPTE_PS(page_size));
tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
tpt.va_hi = cpu_to_be32((u32)(to >> 32));
tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
tpt.len_hi = cpu_to_be32((u32)(len >> 32));
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
sizeof(tpt), &tpt);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
}
return err;
}
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
u32 pbl_addr, u32 pbl_size)
{
int err;
PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
__func__, pbl_addr, rdev->lldi.vr->pbl.start,
pbl_size);
err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
u32 pbl_addr)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
pbl_size, pbl_addr);
}
static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
0UL, 0, 0, 0, 0);
}
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
0);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
u32 pbl_size, u32 pbl_addr)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
0UL, 0, 0, pbl_size, pbl_addr);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
u32 mmid;
mhp->attr.state = 1;
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
struct c4iw_mr *mhp, int shift)
{
u32 stag = T4_STAG_UNSET;
int ret;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len, shift - 12,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
if (ret)
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
return ret;
}
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
struct c4iw_mr *mhp, int shift, int npages)
{
u32 stag;
int ret;
if (npages > mhp->attr.pbl_size)
return -ENOMEM;
stag = mhp->attr.stag;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len, shift - 12,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
if (ret)
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
return ret;
}
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
npages << 3);
if (!mhp->attr.pbl_addr)
return -ENOMEM;
mhp->attr.pbl_size = npages;
return 0;
}
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
int num_phys_buf, u64 *iova_start,
u64 *total_size, int *npages,
int *shift, __be64 **page_list)
{
u64 mask;
int i, j, n;
mask = 0;
*total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
return -EINVAL;
if (i != 0 && i != num_phys_buf - 1 &&
(buffer_list[i].size & ~PAGE_MASK))
return -EINVAL;
*total_size += buffer_list[i].size;
if (i > 0)
mask |= buffer_list[i].addr;
else
mask |= buffer_list[i].addr & PAGE_MASK;
if (i != num_phys_buf - 1)
mask |= buffer_list[i].addr + buffer_list[i].size;
else
mask |= (buffer_list[i].addr + buffer_list[i].size +
PAGE_SIZE - 1) & PAGE_MASK;
}
if (*total_size > 0xFFFFFFFFULL)
return -ENOMEM;
/* Find largest page shift we can use to cover buffers */
for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
if ((1ULL << *shift) & mask)
break;
buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
buffer_list[0].addr &= ~0ull << *shift;
*npages = 0;
for (i = 0; i < num_phys_buf; ++i)
*npages += (buffer_list[i].size +
(1ULL << *shift) - 1) >> *shift;
if (!*npages)
return -EINVAL;
*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
if (!*page_list)
return -ENOMEM;
n = 0;
for (i = 0; i < num_phys_buf; ++i)
for (j = 0;
j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
++j)
(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
((u64) j << *shift));
PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
__func__, (unsigned long long)*iova_start,
(unsigned long long)mask, *shift, (unsigned long long)*total_size,
*npages);
return 0;
}
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
struct ib_pd *pd, struct ib_phys_buf *buffer_list,
int num_phys_buf, int acc, u64 *iova_start)
{
struct c4iw_mr mh, *mhp;
struct c4iw_pd *php;
struct c4iw_dev *rhp;
__be64 *page_list = NULL;
int shift = 0;
u64 total_size;
int npages;
int ret;
PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
/* There can be no memory windows */
if (atomic_read(&mr->usecnt))
return -EINVAL;
mhp = to_c4iw_mr(mr);
rhp = mhp->rhp;
php = to_c4iw_pd(mr->pd);
/* make sure we are on the same adapter */
if (rhp != php->rhp)
return -EINVAL;
memcpy(&mh, mhp, sizeof *mhp);
if (mr_rereg_mask & IB_MR_REREG_PD)
php = to_c4iw_pd(pd);
if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
mh.attr.perms = c4iw_ib_to_tpt_access(acc);
mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
IB_ACCESS_MW_BIND;
}
if (mr_rereg_mask & IB_MR_REREG_TRANS) {
ret = build_phys_page_list(buffer_list, num_phys_buf,
iova_start,
&total_size, &npages,
&shift, &page_list);
if (ret)
return ret;
}
ret = reregister_mem(rhp, php, &mh, shift, npages);
kfree(page_list);
if (ret)
return ret;
if (mr_rereg_mask & IB_MR_REREG_PD)
mhp->attr.pdid = php->pdid;
if (mr_rereg_mask & IB_MR_REREG_ACCESS)
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
if (mr_rereg_mask & IB_MR_REREG_TRANS) {
mhp->attr.zbva = 0;
mhp->attr.va_fbo = *iova_start;
mhp->attr.page_size = shift - 12;
mhp->attr.len = (u32) total_size;
mhp->attr.pbl_size = npages;
}
return 0;
}
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
struct ib_phys_buf *buffer_list,
int num_phys_buf, int acc, u64 *iova_start)
{
__be64 *page_list;
int shift;
u64 total_size;
int npages;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
int ret;
PDBG("%s ib_pd %p\n", __func__, pd);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
mhp->rhp = rhp;
/* First check that we have enough alignment */
if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
ret = -EINVAL;
goto err;
}
if (num_phys_buf > 1 &&
((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
ret = -EINVAL;
goto err;
}
ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
&total_size, &npages, &shift,
&page_list);
if (ret)
goto err;
ret = alloc_pbl(mhp, npages);
if (ret) {
kfree(page_list);
goto err;
}
ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
npages);
kfree(page_list);
if (ret)
goto err_pbl;
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.va_fbo = *iova_start;
mhp->attr.page_size = shift - 12;
mhp->attr.len = (u32) total_size;
mhp->attr.pbl_size = npages;
ret = register_mem(rhp, php, mhp, shift);
if (ret)
goto err_pbl;
return &mhp->ibmr;
err_pbl:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
err:
kfree(mhp);
return ERR_PTR(ret);
}
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
int ret;
u32 stag = T4_STAG_UNSET;
PDBG("%s ib_pd %p\n", __func__, pd);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
mhp->attr.zbva = 0;
mhp->attr.va_fbo = 0;
mhp->attr.page_size = 0;
mhp->attr.len = ~0UL;
mhp->attr.pbl_size = 0;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
if (ret)
goto err1;
ret = finish_mem_reg(mhp, stag);
if (ret)
goto err2;
return &mhp->ibmr;
err2:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
err1:
kfree(mhp);
return ERR_PTR(ret);
}
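/*
 * Register a user memory region: pin the pages with ib_umem_get(), stream
 * the page list into the adapter's PBL one PAGE_SIZE batch at a time, then
 * write the TPT entry via register_mem().
 */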
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
__be64 *pages;
int shift, n, len;
int i, k, entry;
int err = 0;
struct scatterlist *sg;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
PDBG("%s ib_pd %p\n", __func__, pd);
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
if ((length + start) < start)
return ERR_PTR(-EINVAL);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
mhp->rhp = rhp;
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
kfree(mhp);
return ERR_PTR(err);
}
shift = ffs(mhp->umem->page_size) - 1;
n = mhp->umem->nmap;
err = alloc_pbl(mhp, n);
if (err)
goto err;
pages = (__be64 *) __get_free_page(GFP_KERNEL);
if (!pages) {
err = -ENOMEM;
goto err_pbl;
}
i = n = 0;
for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
len = sg_dma_len(sg) >> shift;
for (k = 0; k < len; ++k) {
pages[i++] = cpu_to_be64(sg_dma_address(sg) +
mhp->umem->page_size * k);
if (i == PAGE_SIZE / sizeof *pages) {
err = write_pbl(&mhp->rhp->rdev,
pages,
mhp->attr.pbl_addr + (n << 3), i);
if (err)
goto pbl_done;
n += i;
i = 0;
}
}
}
if (i)
err = write_pbl(&mhp->rhp->rdev, pages,
mhp->attr.pbl_addr + (n << 3), i);
pbl_done:
free_page((unsigned long) pages);
if (err)
goto err_pbl;
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
mhp->attr.len = length;
err = register_mem(rhp, php, mhp, shift);
if (err)
goto err_pbl;
return &mhp->ibmr;
err_pbl:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
err:
ib_umem_release(mhp->umem);
kfree(mhp);
return ERR_PTR(err);
}
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mw *mhp;
u32 mmid;
u32 stag = 0;
int ret;
if (type != IB_MW_TYPE_1)
return ERR_PTR(-EINVAL);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
ret = allocate_window(&rhp->rdev, &stag, php->pdid);
if (ret) {
kfree(mhp);
return ERR_PTR(ret);
}
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_MW;
mhp->attr.stag = stag;
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
deallocate_window(&rhp->rdev, mhp->attr.stag);
kfree(mhp);
return ERR_PTR(-ENOMEM);
}
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
}
int c4iw_dealloc_mw(struct ib_mw *mw)
{
struct c4iw_dev *rhp;
struct c4iw_mw *mhp;
u32 mmid;
mhp = to_c4iw_mw(mw);
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
remove_handle(rhp, &rhp->mmidr, mmid);
deallocate_window(&rhp->rdev, mhp->attr.stag);
kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
return 0;
}
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
u32 mmid;
u32 stag = 0;
int ret = 0;
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp) {
ret = -ENOMEM;
goto err;
}
mhp->rhp = rhp;
ret = alloc_pbl(mhp, pbl_depth);
if (ret)
goto err1;
mhp->attr.pbl_size = pbl_depth;
ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret)
goto err2;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_NSMR;
mhp->attr.stag = stag;
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
ret = -ENOMEM;
goto err3;
}
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmr);
err3:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
err2:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
err1:
kfree(mhp);
err:
return ERR_PTR(ret);
}
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
int page_list_len)
{
struct c4iw_fr_page_list *c4pl;
struct c4iw_dev *dev = to_c4iw_dev(device);
dma_addr_t dma_addr;
int pll_len = roundup(page_list_len * sizeof(u64), 32);
c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
if (!c4pl)
return ERR_PTR(-ENOMEM);
c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
pll_len, &dma_addr,
GFP_KERNEL);
if (!c4pl->ibpl.page_list) {
kfree(c4pl);
return ERR_PTR(-ENOMEM);
}
dma_unmap_addr_set(c4pl, mapping, dma_addr);
c4pl->dma_addr = dma_addr;
c4pl->dev = dev;
c4pl->pll_len = pll_len;
PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
__func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
&c4pl->dma_addr);
return &c4pl->ibpl;
}
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
__func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
&c4pl->dma_addr);
dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
c4pl->pll_len,
c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
kfree(c4pl);
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
struct c4iw_dev *rhp;
struct c4iw_mr *mhp;
u32 mmid;
PDBG("%s ib_mr %p\n", __func__, ib_mr);
/* There can be no memory windows */
if (atomic_read(&ib_mr->usecnt))
return -EINVAL;
mhp = to_c4iw_mr(ib_mr);
rhp = mhp->rhp;
mmid = mhp->attr.stag >> 8;
remove_handle(rhp, &rhp->mmidr, mmid);
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
if (mhp->attr.pbl_size)
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
if (mhp->kva)
kfree((void *) (unsigned long) mhp->kva);
if (mhp->umem)
ib_umem_release(mhp->umem);
PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
kfree(mhp);
return 0;
}


@@ -0,0 +1,588 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "iw_cxgb4.h"
static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
return ERR_PTR(-ENOSYS);
}
static int c4iw_ah_destroy(struct ib_ah *ah)
{
return -ENOSYS;
}
static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
return -ENOSYS;
}
static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
return -ENOSYS;
}
static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
u8 port_num, struct ib_wc *in_wc,
struct ib_grh *in_grh, struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
return -ENOSYS;
}
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
struct c4iw_dev *rhp = to_c4iw_dev(context->device);
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
struct c4iw_mm_entry *mm, *tmp;
PDBG("%s context %p\n", __func__, context);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
return 0;
}
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
static int warned;
struct c4iw_alloc_ucontext_resp uresp;
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
PDBG("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
ret = -ENOMEM;
goto err;
}
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++)
pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
} else {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
goto err_free;
}
uresp.status_page_size = PAGE_SIZE;
spin_lock(&context->mmap_lock);
uresp.status_page_key = context->key;
context->key += PAGE_SIZE;
spin_unlock(&context->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp,
sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
goto err_mm;
mm->key = uresp.status_page_key;
mm->addr = virt_to_phys(rhp->rdev.status_page);
mm->len = PAGE_SIZE;
insert_mmap(context, mm);
}
return &context->ibucontext;
err_mm:
kfree(mm);
err_free:
kfree(context);
err:
return ERR_PTR(ret);
}
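/*
 * mmap dispatch: the page offset is a key stashed on the ucontext mmap list.
 * Addresses inside BAR0 map uncached (MA_SYNC register), addresses inside
 * BAR2 map the user doorbell/OCQP region (write-combined where supported),
 * and anything else is contiguous WQ/CQ DMA memory.
 */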
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
int len = vma->vm_end - vma->vm_start;
u32 key = vma->vm_pgoff << PAGE_SHIFT;
struct c4iw_rdev *rdev;
int ret = 0;
struct c4iw_mm_entry *mm;
struct c4iw_ucontext *ucontext;
u64 addr;
PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
key, len);
if (vma->vm_start & (PAGE_SIZE-1))
return -EINVAL;
rdev = &(to_c4iw_dev(context->device)->rdev);
ucontext = to_c4iw_ucontext(context);
mm = remove_mmap(ucontext, key, len);
if (!mm)
return -EINVAL;
addr = mm->addr;
kfree(mm);
if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
(addr < (pci_resource_start(rdev->lldi.pdev, 0) +
pci_resource_len(rdev->lldi.pdev, 0)))) {
/*
* MA_SYNC register...
*/
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
(addr < (pci_resource_start(rdev->lldi.pdev, 2) +
pci_resource_len(rdev->lldi.pdev, 2)))) {
/*
* Map user DB or OCQP memory...
*/
if (addr >= rdev->oc_mw_pa)
vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
else {
if (is_t5(rdev->lldi.adapter_type))
vma->vm_page_prot =
t4_pgprot_wc(vma->vm_page_prot);
else
vma->vm_page_prot =
pgprot_noncached(vma->vm_page_prot);
}
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
} else {
/*
* Map WQ or CQ contig dma memory...
*/
ret = remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
}
return ret;
}
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
php = to_c4iw_pd(pd);
rhp = php->rhp;
PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
mutex_unlock(&rhp->rdev.stats.lock);
kfree(php);
return 0;
}
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct c4iw_pd *php;
u32 pdid;
struct c4iw_dev *rhp;
PDBG("%s ibdev %p\n", __func__, ibdev);
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
return ERR_PTR(-EINVAL);
php = kzalloc(sizeof(*php), GFP_KERNEL);
if (!php) {
c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
return ERR_PTR(-ENOMEM);
}
php->pdid = pdid;
php->rhp = rhp;
if (context) {
if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
c4iw_deallocate_pd(&php->ibpd);
return ERR_PTR(-EFAULT);
}
}
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur++;
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
return &php->ibpd;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
PDBG("%s ibdev %p\n", __func__, ibdev);
*pkey = 0;
return 0;
}
static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
struct c4iw_dev *dev;
PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
__func__, ibdev, port, index, gid);
dev = to_c4iw_dev(ibdev);
BUG_ON(port == 0);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
return 0;
}
static int c4iw_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
struct c4iw_dev *dev;
PDBG("%s ibdev %p\n", __func__, ibdev);
dev = to_c4iw_dev(ibdev);
memset(props, 0, sizeof *props);
memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
props->page_size_cap = T4_PAGESIZE_MASK;
props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
props->max_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
c4iw_max_read_depth);
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->max_cq = dev->rdev.lldi.vr->qp.size;
props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
props->max_mr = c4iw_num_stags(&dev->rdev);
props->max_pd = T4_MAX_NUM_PD;
props->local_ca_ack_delay = 0;
props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);
return 0;
}
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct c4iw_dev *dev;
struct net_device *netdev;
struct in_device *inetdev;
PDBG("%s ibdev %p\n", __func__, ibdev);
dev = to_c4iw_dev(ibdev);
netdev = dev->rdev.lldi.ports[port-1];
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
if (netdev->mtu >= 4096)
props->active_mtu = IB_MTU_4096;
else if (netdev->mtu >= 2048)
props->active_mtu = IB_MTU_2048;
else if (netdev->mtu >= 1024)
props->active_mtu = IB_MTU_1024;
else if (netdev->mtu >= 512)
props->active_mtu = IB_MTU_512;
else
props->active_mtu = IB_MTU_256;
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
else {
inetdev = in_dev_get(netdev);
if (inetdev) {
if (inetdev->ifa_list)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_INIT;
in_dev_put(inetdev);
} else
props->state = IB_PORT_INIT;
}
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP |
IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
props->active_width = 2;
props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
return 0;
}
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
PDBG("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
PDBG("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%u.%u.%u.%u\n",
FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
PDBG("%s dev 0x%p\n", __func__, dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
static ssize_t show_board(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
PDBG("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
static int c4iw_get_mib(struct ib_device *ibdev,
union rdma_protocol_stats *stats)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
memset(stats, 0, sizeof *stats);
stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;
return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *c4iw_class_attributes[] = {
&dev_attr_hw_rev,
&dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
};
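/*
 * Register the ib_device with the RDMA core: advertise device capabilities,
 * wire up the verbs entry points and the iw_cm_verbs table, then create the
 * sysfs attribute files.
 */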
int c4iw_register_device(struct c4iw_dev *dev)
{
int ret;
int i;
PDBG("%s c4iw_dev %p\n", __func__, dev);
BUG_ON(!dev->rdev.lldi.ports[0]);
strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
dev->ibdev.local_dma_lkey = 0;
dev->ibdev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
dev->ibdev.query_device = c4iw_query_device;
dev->ibdev.query_port = c4iw_query_port;
dev->ibdev.query_pkey = c4iw_query_pkey;
dev->ibdev.query_gid = c4iw_query_gid;
dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
dev->ibdev.mmap = c4iw_mmap;
dev->ibdev.alloc_pd = c4iw_allocate_pd;
dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
dev->ibdev.create_ah = c4iw_ah_create;
dev->ibdev.destroy_ah = c4iw_ah_destroy;
dev->ibdev.create_qp = c4iw_create_qp;
dev->ibdev.modify_qp = c4iw_ib_modify_qp;
dev->ibdev.query_qp = c4iw_ib_query_qp;
dev->ibdev.destroy_qp = c4iw_destroy_qp;
dev->ibdev.create_cq = c4iw_create_cq;
dev->ibdev.destroy_cq = c4iw_destroy_cq;
dev->ibdev.resize_cq = c4iw_resize_cq;
dev->ibdev.poll_cq = c4iw_poll_cq;
dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
dev->ibdev.dereg_mr = c4iw_dereg_mr;
dev->ibdev.alloc_mw = c4iw_alloc_mw;
dev->ibdev.bind_mw = c4iw_bind_mw;
dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
dev->ibdev.attach_mcast = c4iw_multicast_attach;
dev->ibdev.detach_mcast = c4iw_multicast_detach;
dev->ibdev.process_mad = c4iw_process_mad;
dev->ibdev.req_notify_cq = c4iw_arm_cq;
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
dev->ibdev.get_protocol_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
return -ENOMEM;
dev->ibdev.iwcm->connect = c4iw_connect;
dev->ibdev.iwcm->accept = c4iw_accept_cr;
dev->ibdev.iwcm->reject = c4iw_reject_cr;
dev->ibdev.iwcm->create_listen = c4iw_create_listen;
dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
ret = device_create_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
if (ret)
goto bail2;
}
return 0;
bail2:
ib_unregister_device(&dev->ibdev);
bail1:
kfree(dev->ibdev.iwcm);
return ret;
}
void c4iw_unregister_device(struct c4iw_dev *dev)
{
int i;
PDBG("%s c4iw_dev %p\n", __func__, dev);
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
device_remove_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
}

File diff suppressed because it is too large


@@ -0,0 +1,453 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
u32 i;
if (c4iw_id_table_alloc(&rdev->resource.qid_table,
rdev->lldi.vr->qp.start,
rdev->lldi.vr->qp.size,
rdev->lldi.vr->qp.size, 0))
return -ENOMEM;
for (i = rdev->lldi.vr->qp.start;
i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
if (!(i & rdev->qpmask))
c4iw_id_free(&rdev->resource.qid_table, i);
return 0;
}
/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
int err = 0;
err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
C4IW_ID_TABLE_F_RANDOM);
if (err)
goto tpt_err;
err = c4iw_init_qid_table(rdev);
if (err)
goto qid_err;
err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
nr_pdid, 1, 0);
if (err)
goto pdid_err;
return 0;
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
return -ENOMEM;
}
/*
* returns 0 if no resource available
*/
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
u32 entry;
entry = c4iw_id_alloc(id_table);
if (entry == (u32)(-1))
return 0;
return entry;
}
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
PDBG("%s entry 0x%x\n", __func__, entry);
c4iw_id_free(id_table, entry);
}
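/*
 * QIDs are allocated in groups of (qpmask + 1) that share one doorbell/GTS
 * page; ids from the group that are not handed back to the caller are cached
 * on the per-ucontext cqid/qpid lists for later reuse.
 */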
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
struct c4iw_qid_list *entry;
u32 qid;
int i;
mutex_lock(&uctx->lock);
if (!list_empty(&uctx->cqids)) {
entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
entry);
list_del(&entry->entry);
qid = entry->qid;
kfree(entry);
} else {
qid = c4iw_get_resource(&rdev->resource.qid_table);
if (!qid)
goto out;
mutex_lock(&rdev->stats.lock);
rdev->stats.qid.cur += rdev->qpmask + 1;
mutex_unlock(&rdev->stats.lock);
for (i = qid+1; i & rdev->qpmask; i++) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
list_add_tail(&entry->entry, &uctx->cqids);
}
/*
* now put the same ids on the qp list since they all
* map to the same db/gts page.
*/
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
goto out;
entry->qid = qid;
list_add_tail(&entry->entry, &uctx->qpids);
for (i = qid+1; i & rdev->qpmask; i++) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
list_add_tail(&entry->entry, &uctx->qpids);
}
}
out:
mutex_unlock(&uctx->lock);
PDBG("%s qid 0x%x\n", __func__, qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
mutex_unlock(&rdev->stats.lock);
return qid;
}
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
struct c4iw_dev_ucontext *uctx)
{
struct c4iw_qid_list *entry;
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
PDBG("%s qid 0x%x\n", __func__, qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids);
mutex_unlock(&uctx->lock);
}
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
struct c4iw_qid_list *entry;
u32 qid;
int i;
mutex_lock(&uctx->lock);
if (!list_empty(&uctx->qpids)) {
entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
entry);
list_del(&entry->entry);
qid = entry->qid;
kfree(entry);
} else {
qid = c4iw_get_resource(&rdev->resource.qid_table);
if (!qid) {
mutex_lock(&rdev->stats.lock);
rdev->stats.qid.fail++;
mutex_unlock(&rdev->stats.lock);
goto out;
}
mutex_lock(&rdev->stats.lock);
rdev->stats.qid.cur += rdev->qpmask + 1;
mutex_unlock(&rdev->stats.lock);
for (i = qid+1; i & rdev->qpmask; i++) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
list_add_tail(&entry->entry, &uctx->qpids);
}
/*
* now put the same ids on the cq list since they all
* map to the same db/gts page.
*/
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
goto out;
entry->qid = qid;
list_add_tail(&entry->entry, &uctx->cqids);
for (i = qid; i & rdev->qpmask; i++) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
goto out;
entry->qid = i;
list_add_tail(&entry->entry, &uctx->cqids);
}
}
out:
mutex_unlock(&uctx->lock);
PDBG("%s qid 0x%x\n", __func__, qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
mutex_unlock(&rdev->stats.lock);
return qid;
}
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
struct c4iw_dev_ucontext *uctx)
{
struct c4iw_qid_list *entry;
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
PDBG("%s qid 0x%x\n", __func__, qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
mutex_unlock(&uctx->lock);
}
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
c4iw_id_table_free(&rscp->tpt_table);
c4iw_id_table_free(&rscp->qid_table);
c4iw_id_table_free(&rscp->pdid_table);
}
/*
* PBL Memory Manager. Uses Linux generic allocator.
*/
#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
rdev->stats.pbl.max = rdev->stats.pbl.cur;
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
unsigned pbl_start, pbl_chunk, pbl_top;
rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
if (!rdev->pbl_pool)
return -ENOMEM;
pbl_start = rdev->lldi.vr->pbl.start;
pbl_chunk = rdev->lldi.vr->pbl.size;
pbl_top = pbl_start + pbl_chunk;
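/*
* Seed the pool with the whole PBL range, halving the chunk size
* whenever gen_pool_add() fails and giving up with a warning once the
* chunk is 256KB (1024 minimum-sized blocks) or smaller.
*/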
while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
PDBG("%s failed to add PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
printk(KERN_WARNING MOD
"Failed to add all PBL chunks (%x/%x)\n",
pbl_start,
pbl_top - pbl_start);
return 0;
}
pbl_chunk >>= 1;
} else {
PDBG("%s added PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk);
pbl_start += pbl_chunk;
}
}
return 0;
}
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
gen_pool_destroy(rdev->pbl_pool);
}
/*
* RQT Memory Manager. Uses Linux generic allocator.
*/
#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
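/* RQT allocations are sized in 64-byte entries, hence "size << 6" below */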
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
if (!addr)
pr_warn_ratelimited(MOD "%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
rdev->stats.rqt.max = rdev->stats.rqt.cur;
} else
rdev->stats.rqt.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
unsigned rqt_start, rqt_chunk, rqt_top;
rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
if (!rdev->rqt_pool)
return -ENOMEM;
rqt_start = rdev->lldi.vr->rq.start;
rqt_chunk = rdev->lldi.vr->rq.size;
rqt_top = rqt_start + rqt_chunk;
while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
PDBG("%s failed to add RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
printk(KERN_WARNING MOD
"Failed to add all RQT chunks (%x/%x)\n",
rqt_start, rqt_top - rqt_start);
return 0;
}
rqt_chunk >>= 1;
} else {
PDBG("%s added RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk);
rqt_start += rqt_chunk;
}
}
return 0;
}
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
gen_pool_destroy(rdev->rqt_pool);
}
/*
* On-Chip QP Memory.
*/
#define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
if (addr) {
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
mutex_unlock(&rdev->stats.lock);
}
return (u32)addr;
}
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
unsigned start, chunk, top;
rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
if (!rdev->ocqp_pool)
return -ENOMEM;
start = rdev->lldi.vr->ocq.start;
chunk = rdev->lldi.vr->ocq.size;
top = start + chunk;
while (start < top) {
chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
PDBG("%s failed to add OCQP chunk (%x/%x)\n",
__func__, start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) {
printk(KERN_WARNING MOD
"Failed to add all OCQP chunks (%x/%x)\n",
start, top - start);
return 0;
}
chunk >>= 1;
} else {
PDBG("%s added OCQP chunk (%x/%x)\n",
__func__, start, chunk);
start += chunk;
}
}
return 0;
}
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
gen_pool_destroy(rdev->ocqp_pool);
}

View file

@@ -0,0 +1,684 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_H__
#define __T4_H__
#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
__be16 rsvd2;
__be16 qid;
__be16 cidx;
__be16 pidx;
u8 qp_err; /* flit 1 - sw owns */
u8 db_off;
u8 pad;
u16 host_wq_pidx;
u16 host_cidx;
u16 host_pidx;
};
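/*
* A t4_status_page occupies the extra slot at queue[size] of each SQ,
* RQ and CQ; qp_err, db_off and the host_* indices are accessed
* through that slot (see t4_wq_in_error() and t4_cq_in_error()).
*/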
#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
sizeof(struct fw_ri_rdma_write_wr) - \
sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
sizeof(struct fw_ri_rdma_write_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
static inline int t4_max_fr_depth(int use_dsgl)
{
return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
}
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
union t4_wr {
struct fw_ri_res_wr res;
struct fw_ri_wr ri;
struct fw_ri_rdma_write_wr write;
struct fw_ri_send_wr send;
struct fw_ri_rdma_read_wr read;
struct fw_ri_bind_mw_wr bind;
struct fw_ri_fr_nsmr_wr fr;
struct fw_ri_inv_lstag_wr inv;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
union t4_recv_wr {
struct fw_ri_recv_wr recv;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
wqe->send.opcode = (u8)opcode;
wqe->send.flags = flags;
wqe->send.wrid = wrid;
wqe->send.r1[0] = 0;
wqe->send.r1[1] = 0;
wqe->send.r1[2] = 0;
wqe->send.len16 = len16;
}
/* CQE/AE status codes */
#define T4_ERR_SUCCESS 0x0
#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
/* STAG is off limits, being 0, */
/* or STAG_key mismatch */
#define T4_ERR_PDID 0x2 /* PDID mismatch */
#define T4_ERR_QPID 0x3 /* QPID mismatch */
#define T4_ERR_ACCESS 0x4 /* Invalid access right */
#define T4_ERR_WRAP 0x5 /* Wrap error */
#define T4_ERR_BOUND 0x6 /* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
/* MR with a memory window bound to it */
#define T4_ERR_ECC 0x9 /* ECC error detected */
#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
/* reading PSTAG for a MW */
/* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
/* software error */
#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
#define T4_ERR_CRC 0x10 /* CRC error */
#define T4_ERR_MARKER 0x11 /* Marker error */
#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
#define T4_ERR_MSN 0x18 /* MSN error */
#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
/* or READ_REQ */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
/* software error */
#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
/* mismatch) */
/*
* CQE defs
*/
struct t4_cqe {
__be32 header;
__be32 len;
union {
struct {
__be32 stag;
__be32 msn;
} rcqe;
struct {
u32 nada1;
u16 nada2;
u16 cidx;
} scqe;
struct {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
} u;
__be64 reserved;
__be64 bits_type_ts;
};
/* macros for flit 0 of the cqe */
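/* header layout: [31:12] QPID, [11] SWCQE, [9:5] STATUS, [4] TYPE, [3:0] OPCODE */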
#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
#define CQE_SEND_OPCODE(x)( \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))
/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
/* macros for flit 3 of the cqe */
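/* bits_type_ts layout: [63] GENBIT, [62] OVFBIT, [61:60] IQTYPE, [59:0] TS */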
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)
#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
struct t4_swsqe {
u64 wr_id;
struct t4_cqe cqe;
int read_len;
int opcode;
int complete;
int signaled;
u16 idx;
int flushed;
struct timespec host_ts;
u64 sge_ts;
};
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
return pgprot_writecombine(prot);
#else
return pgprot_noncached(prot);
#endif
}
enum {
T4_SQ_ONCHIP = (1<<0),
};
struct t4_sq {
union t4_wr *queue;
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
unsigned long phys_addr;
struct t4_swsqe *sw_sq;
struct t4_swsqe *oldest_read;
u64 __iomem *udb;
size_t memsize;
u32 qid;
u16 in_use;
u16 size;
u16 cidx;
u16 pidx;
u16 wq_pidx;
u16 wq_pidx_inc;
u16 flags;
short flush_cidx;
};
struct t4_swrqe {
u64 wr_id;
struct timespec host_ts;
u64 sge_ts;
};
struct t4_rq {
union t4_recv_wr *queue;
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_swrqe *sw_rq;
u64 __iomem *udb;
size_t memsize;
u32 qid;
u32 msn;
u32 rqt_hwaddr;
u16 rqt_size;
u16 in_use;
u16 size;
u16 cidx;
u16 pidx;
u16 wq_pidx;
u16 wq_pidx_inc;
};
struct t4_wq {
struct t4_sq sq;
struct t4_rq rq;
void __iomem *db;
void __iomem *gts;
struct c4iw_rdev *rdev;
int flushed;
};
static inline int t4_rqes_posted(struct t4_wq *wq)
{
return wq->rq.in_use;
}
static inline int t4_rq_empty(struct t4_wq *wq)
{
return wq->rq.in_use == 0;
}
static inline int t4_rq_full(struct t4_wq *wq)
{
return wq->rq.in_use == (wq->rq.size - 1);
}
static inline u32 t4_rq_avail(struct t4_wq *wq)
{
return wq->rq.size - 1 - wq->rq.in_use;
}
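/*
* pidx counts whole RQEs while wq_pidx counts 64-byte hardware slots,
* so each produce advances wq_pidx by the WR length (len16 units of
* 16 bytes) rounded up to T4_EQ_ENTRY_SIZE.
*/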
static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
wq->rq.in_use++;
if (++wq->rq.pidx == wq->rq.size)
wq->rq.pidx = 0;
wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline void t4_rq_consume(struct t4_wq *wq)
{
wq->rq.in_use--;
wq->rq.msn++;
if (++wq->rq.cidx == wq->rq.size)
wq->rq.cidx = 0;
}
static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}
static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
return wq->rq.size * T4_RQ_NUM_SLOTS;
}
static inline int t4_sq_onchip(struct t4_sq *sq)
{
return sq->flags & T4_SQ_ONCHIP;
}
static inline int t4_sq_empty(struct t4_wq *wq)
{
return wq->sq.in_use == 0;
}
static inline int t4_sq_full(struct t4_wq *wq)
{
return wq->sq.in_use == (wq->sq.size - 1);
}
static inline u32 t4_sq_avail(struct t4_wq *wq)
{
return wq->sq.size - 1 - wq->sq.in_use;
}
static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
wq->sq.in_use++;
if (++wq->sq.pidx == wq->sq.size)
wq->sq.pidx = 0;
wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}
static inline void t4_sq_consume(struct t4_wq *wq)
{
BUG_ON(wq->sq.in_use < 1);
if (wq->sq.cidx == wq->sq.flush_cidx)
wq->sq.flush_cidx = -1;
wq->sq.in_use--;
if (++wq->sq.cidx == wq->sq.size)
wq->sq.cidx = 0;
}
static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}
static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
return wq->sq.size * T4_SQ_NUM_SLOTS;
}
/* This function copies a 64-byte coalesced work request to the
* memory-mapped BAR2 space. For coalesced WRs, the SGE fetches data
* from the FIFO instead of from host memory.
*/
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
int count = 8;
while (count) {
writeq(*src, dst);
src++;
dst++;
count--;
}
}
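/*
* Ring the SQ doorbell. On T5 a single new WR (inc == 1) is pushed
* through the write-combined BAR2 area with pio_copy() so the SGE can
* fetch it from the FIFO; otherwise only the producer-index increment
* is written. T4 always uses the kernel doorbell register.
*/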
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
union t4_wr *wqe)
{
/* Flush host queue memory writes. */
wmb();
if (t5) {
if (inc == 1 && wqe) {
PDBG("%s: WC wq->sq.pidx = %d\n",
__func__, wq->sq.pidx);
pio_copy(wq->sq.udb + 7, (void *)wqe);
} else {
PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx);
writel(PIDX_T5(inc), wq->sq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
union t4_recv_wr *wqe)
{
/* Flush host queue memory writes. */
wmb();
if (t5) {
if (inc == 1 && wqe) {
PDBG("%s: WC wq->rq.pidx = %d\n",
__func__, wq->rq.pidx);
pio_copy(wq->rq.udb + 7, (void *)wqe);
} else {
PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx);
writel(PIDX_T5(inc), wq->rq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
{
return wq->rq.queue[wq->rq.size].status.qp_err;
}
static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
wq->rq.queue[wq->rq.size].status.qp_err = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
{
wq->rq.queue[wq->rq.size].status.db_off = 1;
}
static inline void t4_enable_wq_db(struct t4_wq *wq)
{
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
return !wq->rq.queue[wq->rq.size].status.db_off;
}
enum t4_cq_flags {
CQ_ARMED = 1,
};
struct t4_cq {
struct t4_cqe *queue;
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_cqe *sw_queue;
void __iomem *gts;
struct c4iw_rdev *rdev;
u64 ugts;
size_t memsize;
__be64 bits_type_ts;
u32 cqid;
int vector;
u16 size; /* including status page */
u16 cidx;
u16 sw_pidx;
u16 sw_cidx;
u16 sw_in_use;
u16 cidx_inc;
u8 gen;
u8 error;
unsigned long flags;
};
static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
return test_and_clear_bit(CQ_ARMED, &cq->flags);
}
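/*
* Arming the CQ also returns any accumulated cidx credits, handed back
* in chunks of at most CIDXINC_MASK per GTS write.
*/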
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
u32 val;
set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
INGRESSQID(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc -= CIDXINC_MASK;
}
val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
INGRESSQID(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
}
static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
if (cq->sw_in_use == cq->size) {
PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
cq->error = 1;
BUG_ON(1);
}
if (++cq->sw_pidx == cq->size)
cq->sw_pidx = 0;
}
static inline void t4_swcq_consume(struct t4_cq *cq)
{
BUG_ON(cq->sw_in_use < 1);
cq->sw_in_use--;
if (++cq->sw_cidx == cq->size)
cq->sw_cidx = 0;
}
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
u32 val;
val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
INGRESSQID(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
}
if (++cq->cidx == cq->size) {
cq->cidx = 0;
cq->gen ^= 1;
}
}
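/*
* cq->gen is flipped each time the consumer index wraps in
* t4_hwcq_consume() above, so a CQE is treated as valid only while its
* GENBIT matches cq->gen.
*/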
static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
return (CQE_GENBIT(cqe) == cq->gen);
}
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;
u16 prev_cidx;
if (cq->cidx == 0)
prev_cidx = cq->size - 1;
else
prev_cidx = cq->cidx - 1;
if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
ret = -EOVERFLOW;
cq->error = 1;
printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
BUG_ON(1);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
/* Ensure CQE is flushed to memory */
rmb();
*cqe = &cq->queue[cq->cidx];
ret = 0;
} else
ret = -ENODATA;
return ret;
}
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
if (cq->sw_in_use == cq->size) {
PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
cq->error = 1;
BUG_ON(1);
return NULL;
}
if (cq->sw_in_use)
return &cq->sw_queue[cq->sw_cidx];
return NULL;
}
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret = 0;
if (cq->error)
ret = -ENODATA;
else if (cq->sw_in_use)
*cqe = &cq->sw_queue[cq->sw_cidx];
else
ret = t4_next_hw_cqe(cq, cqe);
return ret;
}
static inline int t4_cq_in_error(struct t4_cq *cq)
{
return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
struct t4_dev_status_page {
u8 db_off;
};
#endif

View file

@@ -0,0 +1,853 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _T4FW_RI_API_H_
#define _T4FW_RI_API_H_
#include "t4fw_api.h"
enum fw_ri_wr_opcode {
FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
FW_RI_READ_REQ = 0x1,
FW_RI_READ_RESP = 0x2,
FW_RI_SEND = 0x3,
FW_RI_SEND_WITH_INV = 0x4,
FW_RI_SEND_WITH_SE = 0x5,
FW_RI_SEND_WITH_SE_INV = 0x6,
FW_RI_TERMINATE = 0x7,
FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
FW_RI_BIND_MW = 0x9,
FW_RI_FAST_REGISTER = 0xa,
FW_RI_LOCAL_INV = 0xb,
FW_RI_QP_MODIFY = 0xc,
FW_RI_BYPASS = 0xd,
FW_RI_RECEIVE = 0xe,
FW_RI_SGE_EC_CR_RETURN = 0xf
};
enum fw_ri_wr_flags {
FW_RI_COMPLETION_FLAG = 0x01,
FW_RI_NOTIFICATION_FLAG = 0x02,
FW_RI_SOLICITED_EVENT_FLAG = 0x04,
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
FW_RI_RDMA_READ_INVALIDATE = 0x20
};
enum fw_ri_mpa_attrs {
FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
FW_RI_MPA_CRC_ENABLE = 0x04,
FW_RI_MPA_IETF_ENABLE = 0x08
};
enum fw_ri_qp_caps {
FW_RI_QP_RDMA_READ_ENABLE = 0x01,
FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
FW_RI_QP_BIND_ENABLE = 0x04,
FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
FW_RI_QP_STAG0_ENABLE = 0x10
};
enum fw_ri_addr_type {
FW_RI_ZERO_BASED_TO = 0x00,
FW_RI_VA_BASED_TO = 0x01
};
enum fw_ri_mem_perms {
FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
FW_RI_MEM_ACCESS_REM_READ = 0x02,
FW_RI_MEM_ACCESS_REM = 0x03,
FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
FW_RI_MEM_ACCESS_LOCAL = 0x0C
};
enum fw_ri_stag_type {
FW_RI_STAG_NSMR = 0x00,
FW_RI_STAG_SMR = 0x01,
FW_RI_STAG_MW = 0x02,
FW_RI_STAG_MW_RELAXED = 0x03
};
enum fw_ri_data_op {
FW_RI_DATA_IMMD = 0x81,
FW_RI_DATA_DSGL = 0x82,
FW_RI_DATA_ISGL = 0x83
};
enum fw_ri_sgl_depth {
FW_RI_SGL_DEPTH_MAX_SQ = 16,
FW_RI_SGL_DEPTH_MAX_RQ = 4
};
struct fw_ri_dsge_pair {
__be32 len[2];
__be64 addr[2];
};
struct fw_ri_dsgl {
__u8 op;
__u8 r1;
__be16 nsge;
__be32 len0;
__be64 addr0;
#ifndef C99_NOT_SUPPORTED
struct fw_ri_dsge_pair sge[0];
#endif
};
struct fw_ri_sge {
__be32 stag;
__be32 len;
__be64 to;
};
struct fw_ri_isgl {
__u8 op;
__u8 r1;
__be16 nsge;
__be32 r2;
#ifndef C99_NOT_SUPPORTED
struct fw_ri_sge sge[0];
#endif
};
struct fw_ri_immd {
__u8 op;
__u8 r1;
__be16 r2;
__be32 immdlen;
#ifndef C99_NOT_SUPPORTED
__u8 data[0];
#endif
};
struct fw_ri_tpte {
__be32 valid_to_pdid;
__be32 locread_to_qpid;
__be32 nosnoop_pbladdr;
__be32 len_lo;
__be32 va_hi;
__be32 va_lo_fbo;
__be32 dca_mwbcnt_pstag;
__be32 len_hi;
};
#define S_FW_RI_TPTE_VALID 31
#define M_FW_RI_TPTE_VALID 0x1
#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
#define G_FW_RI_TPTE_VALID(x) \
(((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
#define S_FW_RI_TPTE_STAGKEY 23
#define M_FW_RI_TPTE_STAGKEY 0xff
#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
#define G_FW_RI_TPTE_STAGKEY(x) \
(((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
#define S_FW_RI_TPTE_STAGSTATE 22
#define M_FW_RI_TPTE_STAGSTATE 0x1
#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
#define G_FW_RI_TPTE_STAGSTATE(x) \
(((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
#define S_FW_RI_TPTE_STAGTYPE 20
#define M_FW_RI_TPTE_STAGTYPE 0x3
#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
#define G_FW_RI_TPTE_STAGTYPE(x) \
(((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
#define S_FW_RI_TPTE_PDID 0
#define M_FW_RI_TPTE_PDID 0xfffff
#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
#define G_FW_RI_TPTE_PDID(x) \
(((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
#define S_FW_RI_TPTE_PERM 28
#define M_FW_RI_TPTE_PERM 0xf
#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
#define G_FW_RI_TPTE_PERM(x) \
(((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
#define S_FW_RI_TPTE_REMINVDIS 27
#define M_FW_RI_TPTE_REMINVDIS 0x1
#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
#define G_FW_RI_TPTE_REMINVDIS(x) \
(((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
#define S_FW_RI_TPTE_ADDRTYPE 26
#define M_FW_RI_TPTE_ADDRTYPE 1
#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
#define G_FW_RI_TPTE_ADDRTYPE(x) \
(((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
#define S_FW_RI_TPTE_MWBINDEN 25
#define M_FW_RI_TPTE_MWBINDEN 0x1
#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
#define G_FW_RI_TPTE_MWBINDEN(x) \
(((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
#define S_FW_RI_TPTE_PS 20
#define M_FW_RI_TPTE_PS 0x1f
#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
#define G_FW_RI_TPTE_PS(x) \
(((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
#define S_FW_RI_TPTE_QPID 0
#define M_FW_RI_TPTE_QPID 0xfffff
#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
#define G_FW_RI_TPTE_QPID(x) \
(((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
#define S_FW_RI_TPTE_NOSNOOP 30
#define M_FW_RI_TPTE_NOSNOOP 0x1
#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
#define G_FW_RI_TPTE_NOSNOOP(x) \
(((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
#define S_FW_RI_TPTE_PBLADDR 0
#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
#define G_FW_RI_TPTE_PBLADDR(x) \
(((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
#define S_FW_RI_TPTE_DCA 24
#define M_FW_RI_TPTE_DCA 0x1f
#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
#define G_FW_RI_TPTE_DCA(x) \
(((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
#define V_FW_RI_TPTE_MWBCNT_PSTAG(x) \
((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
(((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
};
enum fw_ri_res_op {
FW_RI_RES_OP_WRITE,
FW_RI_RES_OP_RESET,
};
struct fw_ri_res {
union fw_ri_restype {
struct fw_ri_res_sqrq {
__u8 restype;
__u8 op;
__be16 r3;
__be32 eqid;
__be32 r4[2];
__be32 fetchszm_to_iqid;
__be32 dcaen_to_eqsize;
__be64 eqaddr;
} sqrq;
struct fw_ri_res_cq {
__u8 restype;
__u8 op;
__be16 r3;
__be32 iqid;
__be32 r4[2];
__be32 iqandst_to_iqandstindex;
__be16 iqdroprss_to_iqesize;
__be16 iqsize;
__be64 iqaddr;
__be32 iqns_iqro;
__be32 r6_lo;
__be64 r7;
} cq;
} u;
};
struct fw_ri_res_wr {
__be32 op_nres;
__be32 len16_pkd;
__u64 cookie;
#ifndef C99_NOT_SUPPORTED
struct fw_ri_res res[0];
#endif
};
#define S_FW_RI_RES_WR_NRES 0
#define M_FW_RI_RES_WR_NRES 0xff
#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
#define G_FW_RI_RES_WR_NRES(x) \
(((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
#define S_FW_RI_RES_WR_FETCHSZM 26
#define M_FW_RI_RES_WR_FETCHSZM 0x1
#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
#define G_FW_RI_RES_WR_FETCHSZM(x) \
(((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
#define S_FW_RI_RES_WR_STATUSPGNS 25
#define M_FW_RI_RES_WR_STATUSPGNS 0x1
#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
#define G_FW_RI_RES_WR_STATUSPGNS(x) \
(((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
#define S_FW_RI_RES_WR_STATUSPGRO 24
#define M_FW_RI_RES_WR_STATUSPGRO 0x1
#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
#define G_FW_RI_RES_WR_STATUSPGRO(x) \
(((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
#define S_FW_RI_RES_WR_FETCHNS 23
#define M_FW_RI_RES_WR_FETCHNS 0x1
#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
#define G_FW_RI_RES_WR_FETCHNS(x) \
(((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
#define S_FW_RI_RES_WR_FETCHRO 22
#define M_FW_RI_RES_WR_FETCHRO 0x1
#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
#define G_FW_RI_RES_WR_FETCHRO(x) \
(((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
#define S_FW_RI_RES_WR_HOSTFCMODE 20
#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
(((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
#define S_FW_RI_RES_WR_CPRIO 19
#define M_FW_RI_RES_WR_CPRIO 0x1
#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
#define G_FW_RI_RES_WR_CPRIO(x) \
(((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
#define S_FW_RI_RES_WR_ONCHIP 18
#define M_FW_RI_RES_WR_ONCHIP 0x1
#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
#define G_FW_RI_RES_WR_ONCHIP(x) \
(((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
#define S_FW_RI_RES_WR_PCIECHN 16
#define M_FW_RI_RES_WR_PCIECHN 0x3
#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
#define G_FW_RI_RES_WR_PCIECHN(x) \
(((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
#define S_FW_RI_RES_WR_IQID 0
#define M_FW_RI_RES_WR_IQID 0xffff
#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
#define G_FW_RI_RES_WR_IQID(x) \
(((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
#define S_FW_RI_RES_WR_DCAEN 31
#define M_FW_RI_RES_WR_DCAEN 0x1
#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
#define G_FW_RI_RES_WR_DCAEN(x) \
(((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
#define S_FW_RI_RES_WR_DCACPU 26
#define M_FW_RI_RES_WR_DCACPU 0x1f
#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
#define G_FW_RI_RES_WR_DCACPU(x) \
(((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
#define S_FW_RI_RES_WR_FBMIN 23
#define M_FW_RI_RES_WR_FBMIN 0x7
#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
#define G_FW_RI_RES_WR_FBMIN(x) \
(((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
#define S_FW_RI_RES_WR_FBMAX 20
#define M_FW_RI_RES_WR_FBMAX 0x7
#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
#define G_FW_RI_RES_WR_FBMAX(x) \
(((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
(((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
#define S_FW_RI_RES_WR_CIDXFTHRESH 16
#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
(((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
#define S_FW_RI_RES_WR_EQSIZE 0
#define M_FW_RI_RES_WR_EQSIZE 0xffff
#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
#define G_FW_RI_RES_WR_EQSIZE(x) \
(((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
#define S_FW_RI_RES_WR_IQANDST 15
#define M_FW_RI_RES_WR_IQANDST 0x1
#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
#define G_FW_RI_RES_WR_IQANDST(x) \
(((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
#define S_FW_RI_RES_WR_IQANUS 14
#define M_FW_RI_RES_WR_IQANUS 0x1
#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
#define G_FW_RI_RES_WR_IQANUS(x) \
(((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
#define S_FW_RI_RES_WR_IQANUD 12
#define M_FW_RI_RES_WR_IQANUD 0x3
#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
#define G_FW_RI_RES_WR_IQANUD(x) \
(((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
#define S_FW_RI_RES_WR_IQANDSTINDEX 0
#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
(((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
#define S_FW_RI_RES_WR_IQDROPRSS 15
#define M_FW_RI_RES_WR_IQDROPRSS 0x1
#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
#define G_FW_RI_RES_WR_IQDROPRSS(x) \
(((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
#define S_FW_RI_RES_WR_IQGTSMODE 14
#define M_FW_RI_RES_WR_IQGTSMODE 0x1
#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
#define G_FW_RI_RES_WR_IQGTSMODE(x) \
(((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
#define S_FW_RI_RES_WR_IQPCIECH 12
#define M_FW_RI_RES_WR_IQPCIECH 0x3
#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
#define G_FW_RI_RES_WR_IQPCIECH(x) \
(((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
#define S_FW_RI_RES_WR_IQDCAEN 11
#define M_FW_RI_RES_WR_IQDCAEN 0x1
#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
#define G_FW_RI_RES_WR_IQDCAEN(x) \
(((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
#define S_FW_RI_RES_WR_IQDCACPU 6
#define M_FW_RI_RES_WR_IQDCACPU 0x1f
#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
#define G_FW_RI_RES_WR_IQDCACPU(x) \
(((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
(((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
#define S_FW_RI_RES_WR_IQO 3
#define M_FW_RI_RES_WR_IQO 0x1
#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
#define G_FW_RI_RES_WR_IQO(x) \
(((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
#define S_FW_RI_RES_WR_IQCPRIO 2
#define M_FW_RI_RES_WR_IQCPRIO 0x1
#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
#define G_FW_RI_RES_WR_IQCPRIO(x) \
(((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
#define S_FW_RI_RES_WR_IQESIZE 0
#define M_FW_RI_RES_WR_IQESIZE 0x3
#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
#define G_FW_RI_RES_WR_IQESIZE(x) \
(((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
#define S_FW_RI_RES_WR_IQNS 31
#define M_FW_RI_RES_WR_IQNS 0x1
#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
#define G_FW_RI_RES_WR_IQNS(x) \
(((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
#define S_FW_RI_RES_WR_IQRO 30
#define M_FW_RI_RES_WR_IQRO 0x1
#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
#define G_FW_RI_RES_WR_IQRO(x) \
(((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
struct fw_ri_rdma_write_wr {
__u8 opcode;
__u8 flags;
__u16 wrid;
__u8 r1[3];
__u8 len16;
__be64 r2;
__be32 plen;
__be32 stag_sink;
__be64 to_sink;
#ifndef C99_NOT_SUPPORTED
union {
struct fw_ri_immd immd_src[0];
struct fw_ri_isgl isgl_src[0];
} u;
#endif
};
struct fw_ri_send_wr {
__u8 opcode;
__u8 flags;
__u16 wrid;
__u8 r1[3];
__u8 len16;
__be32 sendop_pkd;
__be32 stag_inv;
__be32 plen;
__be32 r3;
__be64 r4;
#ifndef C99_NOT_SUPPORTED
union {
struct fw_ri_immd immd_src[0];
struct fw_ri_isgl isgl_src[0];
} u;
#endif
};
#define S_FW_RI_SEND_WR_SENDOP 0
#define M_FW_RI_SEND_WR_SENDOP 0xf
#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
#define G_FW_RI_SEND_WR_SENDOP(x) \
(((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
struct fw_ri_rdma_read_wr {
__u8 opcode;
__u8 flags;
__u16 wrid;
__u8 r1[3];
__u8 len16;
__be64 r2;
__be32 stag_sink;
__be32 to_sink_hi;
__be32 to_sink_lo;
__be32 plen;
__be32 stag_src;
__be32 to_src_hi;
__be32 to_src_lo;
__be32 r5;
};
struct fw_ri_recv_wr {
__u8 opcode;
__u8 r1;
__u16 wrid;
__u8 r2[3];
__u8 len16;
struct fw_ri_isgl isgl;
};
struct fw_ri_bind_mw_wr {
__u8 opcode;
__u8 flags;
__u16 wrid;
__u8 r1[3];
__u8 len16;
__u8 qpbinde_to_dcacpu;
__u8 pgsz_shift;
__u8 addr_type;
__u8 mem_perms;
__be32 stag_mr;
__be32 stag_mw;
__be32 r3;
__be64 len_mw;
__be64 va_fbo;
__be64 r4;
};
#define S_FW_RI_BIND_MW_WR_QPBINDE 6
#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
(((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
#define S_FW_RI_BIND_MW_WR_NS 5
#define M_FW_RI_BIND_MW_WR_NS 0x1
#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
#define G_FW_RI_BIND_MW_WR_NS(x) \
(((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
#define S_FW_RI_BIND_MW_WR_DCACPU 0
#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
(((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
struct fw_ri_fr_nsmr_wr {
__u8 opcode;
__u8 flags;
__u16 wrid;
__u8 r1[3];
__u8 len16;
__u8 qpbinde_to_dcacpu;
__u8 pgsz_shift;
__u8 addr_type;
__u8 mem_perms;
__be32 stag;
__be32 len_hi;
__be32 len_lo;
__be32 va_hi;
__be32 va_lo_fbo;
};
#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
(((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
#define S_FW_RI_FR_NSMR_WR_NS 5
#define M_FW_RI_FR_NSMR_WR_NS 0x1
#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
#define G_FW_RI_FR_NSMR_WR_NS(x) \
(((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
#define S_FW_RI_FR_NSMR_WR_DCACPU 0
#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
(((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
struct fw_ri_inv_lstag_wr {
__u8 opcode;
__u8 flags;
__u16 wrid;
__u8 r1[3];
__u8 len16;
__be32 r2;
__be32 stag_inv;
};
enum fw_ri_type {
FW_RI_TYPE_INIT,
FW_RI_TYPE_FINI,
FW_RI_TYPE_TERMINATE
};
enum fw_ri_init_p2ptype {
FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
};
struct fw_ri_wr {
__be32 op_compl;
__be32 flowid_len16;
__u64 cookie;
union fw_ri {
struct fw_ri_init {
__u8 type;
__u8 mpareqbit_p2ptype;
__u8 r4[2];
__u8 mpa_attrs;
__u8 qp_caps;
__be16 nrqe;
__be32 pdid;
__be32 qpid;
__be32 sq_eqid;
__be32 rq_eqid;
__be32 scqid;
__be32 rcqid;
__be32 ord_max;
__be32 ird_max;
__be32 iss;
__be32 irs;
__be32 hwrqsize;
__be32 hwrqaddr;
__be64 r5;
union fw_ri_init_p2p {
struct fw_ri_rdma_write_wr write;
struct fw_ri_rdma_read_wr read;
struct fw_ri_send_wr send;
} u;
} init;
struct fw_ri_fini {
__u8 type;
__u8 r3[7];
__be64 r4;
} fini;
struct fw_ri_terminate {
__u8 type;
__u8 r3[3];
__be32 immdlen;
__u8 termmsg[40];
} terminate;
} u;
};
#define S_FW_RI_WR_MPAREQBIT 7
#define M_FW_RI_WR_MPAREQBIT 0x1
#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
#define G_FW_RI_WR_MPAREQBIT(x) \
(((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
#define S_FW_RI_WR_P2PTYPE 0
#define M_FW_RI_WR_P2PTYPE 0xf
#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
struct tcp_options {
__be16 mss;
__u8 wsf;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8:4;
__u8 unknown:1;
__u8:1;
__u8 sack:1;
__u8 tstamp:1;
#else
__u8 tstamp:1;
__u8 sack:1;
__u8:1;
__u8 unknown:1;
__u8:4;
#endif
};
struct cpl_pass_accept_req {
union opcode_tid ot;
__be16 rsvd;
__be16 len;
__be32 hdr_len;
__be16 vlan;
__be16 l2info;
__be32 tos_stid;
struct tcp_options tcpopt;
};
/* cpl_pass_accept_req.hdr_len fields */
#define S_SYN_RX_CHAN 0
#define M_SYN_RX_CHAN 0xF
#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
#define S_TCP_HDR_LEN 10
#define M_TCP_HDR_LEN 0x3F
#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
#define S_IP_HDR_LEN 16
#define M_IP_HDR_LEN 0x3FF
#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
#define S_ETH_HDR_LEN 26
#define M_ETH_HDR_LEN 0x1F
#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
/* cpl_pass_accept_req.l2info fields */
#define S_SYN_MAC_IDX 0
#define M_SYN_MAC_IDX 0x1FF
#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
#define S_SYN_XACT_MATCH 9
#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
#define S_SYN_INTF 12
#define M_SYN_INTF 0xF
#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
struct ulptx_idata {
__be32 cmd_more;
__be32 len;
};
#define S_ULPTX_NSGE 0
#define M_ULPTX_NSGE 0xFFFF
#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
#define S_RX_DACK_MODE 29
#define M_RX_DACK_MODE 0x3
#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
#define S_RX_DACK_CHANGE 31
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
enum { /* TCP congestion control algorithms */
CONG_ALG_RENO,
CONG_ALG_TAHOE,
CONG_ALG_NEWRENO,
CONG_ALG_HIGHSPEED
};
#define S_CONG_CNTRL 14
#define M_CONG_CNTRL 0x3
#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
#define CONG_CNTRL_VALID (1 << 18)
#endif /* _T4FW_RI_API_H_ */

View file

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __C4IW_USER_H__
#define __C4IW_USER_H__
#define C4IW_UVERBS_ABI_VERSION 2
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
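/*
* The explicit __u32 "reserved" fields below provide that padding,
* keeping the struct sizes identical on 32-bit and 64-bit builds.
*/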
struct c4iw_create_cq_resp {
__u64 key;
__u64 gts_key;
__u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
__u32 reserved; /* explicit padding (optional for i386) */
};
enum {
C4IW_QPF_ONCHIP = (1<<0)
};
struct c4iw_create_qp_resp {
__u64 ma_sync_key;
__u64 sq_key;
__u64 rq_key;
__u64 sq_db_gts_key;
__u64 rq_db_gts_key;
__u64 sq_memsize;
__u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
__u32 rq_size;
__u32 qid_mask;
__u32 flags;
};
struct c4iw_alloc_ucontext_resp {
__u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
#endif