mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-10-30 15:48:52 +01:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
46
drivers/net/ethernet/mellanox/mlx4/Kconfig
Normal file
46
drivers/net/ethernet/mellanox/mlx4/Kconfig
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
#
|
||||
# Mellanox driver configuration
|
||||
#
|
||||
|
||||
config MLX4_EN
|
||||
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
|
||||
depends on PCI
|
||||
select MLX4_CORE
|
||||
select PTP_1588_CLOCK
|
||||
---help---
|
||||
This driver supports Mellanox Technologies ConnectX Ethernet
|
||||
devices.
|
||||
|
||||
config MLX4_EN_DCB
|
||||
bool "Data Center Bridging (DCB) Support"
|
||||
default y
|
||||
depends on MLX4_EN && DCB
|
||||
---help---
|
||||
Say Y here if you want to use Data Center Bridging (DCB) in the
|
||||
driver.
|
||||
If set to N, will not be able to configure QoS and ratelimit attributes.
|
||||
This flag is depended on the kernel's DCB support.
|
||||
|
||||
If unsure, set to Y
|
||||
|
||||
config MLX4_EN_VXLAN
|
||||
bool "VXLAN offloads Support"
|
||||
default y
|
||||
depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
|
||||
---help---
|
||||
Say Y here if you want to use VXLAN offloads in the driver.
|
||||
|
||||
config MLX4_CORE
|
||||
tristate
|
||||
depends on PCI
|
||||
default n
|
||||
|
||||
config MLX4_DEBUG
|
||||
bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
|
||||
depends on MLX4_CORE
|
||||
default y
|
||||
---help---
|
||||
This option causes debugging code to be compiled into the
|
||||
mlx4_core driver. The output can be turned on via the
|
||||
debug_level module parameter (which can also be set after
|
||||
the driver is loaded through sysfs).
|
||||
10
drivers/net/ethernet/mellanox/mlx4/Makefile
Normal file
10
drivers/net/ethernet/mellanox/mlx4/Makefile
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
|
||||
|
||||
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
|
||||
mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
|
||||
|
||||
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
|
||||
|
||||
mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
|
||||
en_resources.o en_netdev.o en_selftest.o en_clock.o
|
||||
mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
|
||||
419
drivers/net/ethernet/mellanox/mlx4/alloc.c
Normal file
419
drivers/net/ethernet/mellanox/mlx4/alloc.c
Normal file
|
|
@ -0,0 +1,419 @@
|
|||
/*
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
|
||||
{
|
||||
u32 obj;
|
||||
|
||||
spin_lock(&bitmap->lock);
|
||||
|
||||
obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
|
||||
if (obj >= bitmap->max) {
|
||||
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
|
||||
& bitmap->mask;
|
||||
obj = find_first_zero_bit(bitmap->table, bitmap->max);
|
||||
}
|
||||
|
||||
if (obj < bitmap->max) {
|
||||
set_bit(obj, bitmap->table);
|
||||
bitmap->last = (obj + 1);
|
||||
if (bitmap->last == bitmap->max)
|
||||
bitmap->last = 0;
|
||||
obj |= bitmap->top;
|
||||
} else
|
||||
obj = -1;
|
||||
|
||||
if (obj != -1)
|
||||
--bitmap->avail;
|
||||
|
||||
spin_unlock(&bitmap->lock);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
|
||||
{
|
||||
mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
|
||||
}
|
||||
|
||||
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
|
||||
{
|
||||
u32 obj;
|
||||
|
||||
if (likely(cnt == 1 && align == 1))
|
||||
return mlx4_bitmap_alloc(bitmap);
|
||||
|
||||
spin_lock(&bitmap->lock);
|
||||
|
||||
obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
|
||||
bitmap->last, cnt, align - 1);
|
||||
if (obj >= bitmap->max) {
|
||||
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
|
||||
& bitmap->mask;
|
||||
obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
|
||||
0, cnt, align - 1);
|
||||
}
|
||||
|
||||
if (obj < bitmap->max) {
|
||||
bitmap_set(bitmap->table, obj, cnt);
|
||||
if (obj == bitmap->last) {
|
||||
bitmap->last = (obj + cnt);
|
||||
if (bitmap->last >= bitmap->max)
|
||||
bitmap->last = 0;
|
||||
}
|
||||
obj |= bitmap->top;
|
||||
} else
|
||||
obj = -1;
|
||||
|
||||
if (obj != -1)
|
||||
bitmap->avail -= cnt;
|
||||
|
||||
spin_unlock(&bitmap->lock);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
|
||||
{
|
||||
return bitmap->avail;
|
||||
}
|
||||
|
||||
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
|
||||
int use_rr)
|
||||
{
|
||||
obj &= bitmap->max + bitmap->reserved_top - 1;
|
||||
|
||||
spin_lock(&bitmap->lock);
|
||||
if (!use_rr) {
|
||||
bitmap->last = min(bitmap->last, obj);
|
||||
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
|
||||
& bitmap->mask;
|
||||
}
|
||||
bitmap_clear(bitmap->table, obj, cnt);
|
||||
bitmap->avail += cnt;
|
||||
spin_unlock(&bitmap->lock);
|
||||
}
|
||||
|
||||
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
|
||||
u32 reserved_bot, u32 reserved_top)
|
||||
{
|
||||
/* num must be a power of 2 */
|
||||
if (num != roundup_pow_of_two(num))
|
||||
return -EINVAL;
|
||||
|
||||
bitmap->last = 0;
|
||||
bitmap->top = 0;
|
||||
bitmap->max = num - reserved_top;
|
||||
bitmap->mask = mask;
|
||||
bitmap->reserved_top = reserved_top;
|
||||
bitmap->avail = num - reserved_top - reserved_bot;
|
||||
spin_lock_init(&bitmap->lock);
|
||||
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
|
||||
sizeof (long), GFP_KERNEL);
|
||||
if (!bitmap->table)
|
||||
return -ENOMEM;
|
||||
|
||||
bitmap_set(bitmap->table, 0, reserved_bot);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
|
||||
{
|
||||
kfree(bitmap->table);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handling for queue buffers -- we allocate a bunch of memory and
|
||||
* register it in a memory region at HCA virtual address 0. If the
|
||||
* requested size is > max_direct, we split the allocation into
|
||||
* multiple pages, so we don't require too much contiguous memory.
|
||||
*/
|
||||
|
||||
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
|
||||
struct mlx4_buf *buf, gfp_t gfp)
|
||||
{
|
||||
dma_addr_t t;
|
||||
|
||||
if (size <= max_direct) {
|
||||
buf->nbufs = 1;
|
||||
buf->npages = 1;
|
||||
buf->page_shift = get_order(size) + PAGE_SHIFT;
|
||||
buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
|
||||
size, &t, gfp);
|
||||
if (!buf->direct.buf)
|
||||
return -ENOMEM;
|
||||
|
||||
buf->direct.map = t;
|
||||
|
||||
while (t & ((1 << buf->page_shift) - 1)) {
|
||||
--buf->page_shift;
|
||||
buf->npages *= 2;
|
||||
}
|
||||
|
||||
memset(buf->direct.buf, 0, size);
|
||||
} else {
|
||||
int i;
|
||||
|
||||
buf->direct.buf = NULL;
|
||||
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
buf->npages = buf->nbufs;
|
||||
buf->page_shift = PAGE_SHIFT;
|
||||
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
|
||||
gfp);
|
||||
if (!buf->page_list)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i) {
|
||||
buf->page_list[i].buf =
|
||||
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
|
||||
&t, gfp);
|
||||
if (!buf->page_list[i].buf)
|
||||
goto err_free;
|
||||
|
||||
buf->page_list[i].map = t;
|
||||
|
||||
memset(buf->page_list[i].buf, 0, PAGE_SIZE);
|
||||
}
|
||||
|
||||
if (BITS_PER_LONG == 64) {
|
||||
struct page **pages;
|
||||
pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
|
||||
if (!pages)
|
||||
goto err_free;
|
||||
for (i = 0; i < buf->nbufs; ++i)
|
||||
pages[i] = virt_to_page(buf->page_list[i].buf);
|
||||
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
|
||||
kfree(pages);
|
||||
if (!buf->direct.buf)
|
||||
goto err_free;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
mlx4_buf_free(dev, size, buf);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
|
||||
|
||||
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (buf->nbufs == 1)
|
||||
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
|
||||
buf->direct.map);
|
||||
else {
|
||||
if (BITS_PER_LONG == 64 && buf->direct.buf)
|
||||
vunmap(buf->direct.buf);
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i)
|
||||
if (buf->page_list[i].buf)
|
||||
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
|
||||
buf->page_list[i].buf,
|
||||
buf->page_list[i].map);
|
||||
kfree(buf->page_list);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_buf_free);
|
||||
|
||||
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct mlx4_db_pgdir *pgdir;
|
||||
|
||||
pgdir = kzalloc(sizeof *pgdir, gfp);
|
||||
if (!pgdir)
|
||||
return NULL;
|
||||
|
||||
bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
|
||||
pgdir->bits[0] = pgdir->order0;
|
||||
pgdir->bits[1] = pgdir->order1;
|
||||
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
|
||||
&pgdir->db_dma, gfp);
|
||||
if (!pgdir->db_page) {
|
||||
kfree(pgdir);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return pgdir;
|
||||
}
|
||||
|
||||
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
|
||||
struct mlx4_db *db, int order)
|
||||
{
|
||||
int o;
|
||||
int i;
|
||||
|
||||
for (o = order; o <= 1; ++o) {
|
||||
i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
|
||||
if (i < MLX4_DB_PER_PAGE >> o)
|
||||
goto found;
|
||||
}
|
||||
|
||||
return -ENOMEM;
|
||||
|
||||
found:
|
||||
clear_bit(i, pgdir->bits[o]);
|
||||
|
||||
i <<= o;
|
||||
|
||||
if (o > order)
|
||||
set_bit(i ^ 1, pgdir->bits[order]);
|
||||
|
||||
db->u.pgdir = pgdir;
|
||||
db->index = i;
|
||||
db->db = pgdir->db_page + db->index;
|
||||
db->dma = pgdir->db_dma + db->index * 4;
|
||||
db->order = order;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_db_pgdir *pgdir;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&priv->pgdir_mutex);
|
||||
|
||||
list_for_each_entry(pgdir, &priv->pgdir_list, list)
|
||||
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
|
||||
goto out;
|
||||
|
||||
pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
|
||||
if (!pgdir) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_add(&pgdir->list, &priv->pgdir_list);
|
||||
|
||||
/* This should never fail -- we just allocated an empty page: */
|
||||
WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
|
||||
|
||||
out:
|
||||
mutex_unlock(&priv->pgdir_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
|
||||
|
||||
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
int o;
|
||||
int i;
|
||||
|
||||
mutex_lock(&priv->pgdir_mutex);
|
||||
|
||||
o = db->order;
|
||||
i = db->index;
|
||||
|
||||
if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
|
||||
clear_bit(i ^ 1, db->u.pgdir->order0);
|
||||
++o;
|
||||
}
|
||||
i >>= o;
|
||||
set_bit(i, db->u.pgdir->bits[o]);
|
||||
|
||||
if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
|
||||
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
|
||||
db->u.pgdir->db_page, db->u.pgdir->db_dma);
|
||||
list_del(&db->u.pgdir->list);
|
||||
kfree(db->u.pgdir);
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->pgdir_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_db_free);
|
||||
|
||||
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
|
||||
int size, int max_direct)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*wqres->db.db = 0;
|
||||
|
||||
err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_db;
|
||||
|
||||
err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
|
||||
&wqres->mtt);
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_mtt;
|
||||
|
||||
return 0;
|
||||
|
||||
err_mtt:
|
||||
mlx4_mtt_cleanup(dev, &wqres->mtt);
|
||||
err_buf:
|
||||
mlx4_buf_free(dev, size, &wqres->buf);
|
||||
err_db:
|
||||
mlx4_db_free(dev, &wqres->db);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
|
||||
|
||||
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
|
||||
int size)
|
||||
{
|
||||
mlx4_mtt_cleanup(dev, &wqres->mtt);
|
||||
mlx4_buf_free(dev, size, &wqres->buf);
|
||||
mlx4_db_free(dev, &wqres->db);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
|
||||
171
drivers/net/ethernet/mellanox/mlx4/catas.c
Normal file
171
drivers/net/ethernet/mellanox/mlx4/catas.c
Normal file
|
|
@ -0,0 +1,171 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
enum {
|
||||
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
|
||||
};
|
||||
|
||||
static DEFINE_SPINLOCK(catas_lock);
|
||||
|
||||
static LIST_HEAD(catas_list);
|
||||
static struct work_struct catas_work;
|
||||
|
||||
static int internal_err_reset = 1;
|
||||
module_param(internal_err_reset, int, 0644);
|
||||
MODULE_PARM_DESC(internal_err_reset,
|
||||
"Reset device on internal errors if non-zero"
|
||||
" (default 1, in SRIOV mode default is 0)");
|
||||
|
||||
static void dump_err_buf(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
int i;
|
||||
|
||||
mlx4_err(dev, "Internal error detected:\n");
|
||||
for (i = 0; i < priv->fw.catas_size; ++i)
|
||||
mlx4_err(dev, " buf[%02x]: %08x\n",
|
||||
i, swab32(readl(priv->catas_err.map + i)));
|
||||
}
|
||||
|
||||
static void poll_catas(unsigned long dev_ptr)
|
||||
{
|
||||
struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
if (readl(priv->catas_err.map)) {
|
||||
/* If the device is off-line, we cannot try to recover it */
|
||||
if (pci_channel_offline(dev->pdev))
|
||||
mod_timer(&priv->catas_err.timer,
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
|
||||
else {
|
||||
dump_err_buf(dev);
|
||||
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
|
||||
|
||||
if (internal_err_reset) {
|
||||
spin_lock(&catas_lock);
|
||||
list_add(&priv->catas_err.list, &catas_list);
|
||||
spin_unlock(&catas_lock);
|
||||
|
||||
queue_work(mlx4_wq, &catas_work);
|
||||
}
|
||||
}
|
||||
} else
|
||||
mod_timer(&priv->catas_err.timer,
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
|
||||
}
|
||||
|
||||
static void catas_reset(struct work_struct *work)
|
||||
{
|
||||
struct mlx4_priv *priv, *tmppriv;
|
||||
struct mlx4_dev *dev;
|
||||
|
||||
LIST_HEAD(tlist);
|
||||
int ret;
|
||||
|
||||
spin_lock_irq(&catas_lock);
|
||||
list_splice_init(&catas_list, &tlist);
|
||||
spin_unlock_irq(&catas_lock);
|
||||
|
||||
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
|
||||
struct pci_dev *pdev = priv->dev.pdev;
|
||||
|
||||
/* If the device is off-line, we cannot reset it */
|
||||
if (pci_channel_offline(pdev))
|
||||
continue;
|
||||
|
||||
ret = mlx4_restart_one(priv->dev.pdev);
|
||||
/* 'priv' now is not valid */
|
||||
if (ret)
|
||||
pr_err("mlx4 %s: Reset failed (%d)\n",
|
||||
pci_name(pdev), ret);
|
||||
else {
|
||||
dev = pci_get_drvdata(pdev);
|
||||
mlx4_dbg(dev, "Reset succeeded\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void mlx4_start_catas_poll(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
phys_addr_t addr;
|
||||
|
||||
/*If we are in SRIOV the default of the module param must be 0*/
|
||||
if (mlx4_is_mfunc(dev))
|
||||
internal_err_reset = 0;
|
||||
|
||||
INIT_LIST_HEAD(&priv->catas_err.list);
|
||||
init_timer(&priv->catas_err.timer);
|
||||
priv->catas_err.map = NULL;
|
||||
|
||||
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
|
||||
priv->fw.catas_offset;
|
||||
|
||||
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
|
||||
if (!priv->catas_err.map) {
|
||||
mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
|
||||
(unsigned long long) addr);
|
||||
return;
|
||||
}
|
||||
|
||||
priv->catas_err.timer.data = (unsigned long) dev;
|
||||
priv->catas_err.timer.function = poll_catas;
|
||||
priv->catas_err.timer.expires =
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
|
||||
add_timer(&priv->catas_err.timer);
|
||||
}
|
||||
|
||||
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
del_timer_sync(&priv->catas_err.timer);
|
||||
|
||||
if (priv->catas_err.map)
|
||||
iounmap(priv->catas_err.map);
|
||||
|
||||
spin_lock_irq(&catas_lock);
|
||||
list_del(&priv->catas_err.list);
|
||||
spin_unlock_irq(&catas_lock);
|
||||
}
|
||||
|
||||
void __init mlx4_catas_init(void)
|
||||
{
|
||||
INIT_WORK(&catas_work, catas_reset);
|
||||
}
|
||||
2651
drivers/net/ethernet/mellanox/mlx4/cmd.c
Normal file
2651
drivers/net/ethernet/mellanox/mlx4/cmd.c
Normal file
File diff suppressed because it is too large
Load diff
359
drivers/net/ethernet/mellanox/mlx4/cq.c
Normal file
359
drivers/net/ethernet/mellanox/mlx4/cq.c
Normal file
|
|
@ -0,0 +1,359 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <linux/mlx4/cmd.h>
|
||||
#include <linux/mlx4/cq.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
|
||||
#define MLX4_CQ_STATUS_OK ( 0 << 28)
|
||||
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
|
||||
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
|
||||
#define MLX4_CQ_FLAG_CC ( 1 << 18)
|
||||
#define MLX4_CQ_FLAG_OI ( 1 << 17)
|
||||
#define MLX4_CQ_STATE_ARMED ( 9 << 8)
|
||||
#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
|
||||
#define MLX4_EQ_STATE_FIRED (10 << 8)
|
||||
|
||||
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
|
||||
{
|
||||
struct mlx4_cq *cq;
|
||||
|
||||
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
|
||||
cqn & (dev->caps.num_cqs - 1));
|
||||
if (!cq) {
|
||||
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
|
||||
return;
|
||||
}
|
||||
|
||||
++cq->arm_sn;
|
||||
|
||||
cq->comp(cq);
|
||||
}
|
||||
|
||||
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
|
||||
{
|
||||
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
|
||||
struct mlx4_cq *cq;
|
||||
|
||||
spin_lock(&cq_table->lock);
|
||||
|
||||
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
|
||||
if (cq)
|
||||
atomic_inc(&cq->refcount);
|
||||
|
||||
spin_unlock(&cq_table->lock);
|
||||
|
||||
if (!cq) {
|
||||
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
|
||||
return;
|
||||
}
|
||||
|
||||
cq->event(cq, event_type);
|
||||
|
||||
if (atomic_dec_and_test(&cq->refcount))
|
||||
complete(&cq->free);
|
||||
}
|
||||
|
||||
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
int cq_num)
|
||||
{
|
||||
return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
|
||||
MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
int cq_num, u32 opmod)
|
||||
{
|
||||
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
int cq_num)
|
||||
{
|
||||
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
|
||||
cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
u16 count, u16 period)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_cq_context *cq_context;
|
||||
int err;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
cq_context->cq_max_count = cpu_to_be16(count);
|
||||
cq_context->cq_period = cpu_to_be16(period);
|
||||
|
||||
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_cq_modify);
|
||||
|
||||
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
int entries, struct mlx4_mtt *mtt)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_cq_context *cq_context;
|
||||
u64 mtt_addr;
|
||||
int err;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
|
||||
cq_context->log_page_size = mtt->page_shift - 12;
|
||||
mtt_addr = mlx4_mtt_addr(dev, mtt);
|
||||
cq_context->mtt_base_addr_h = mtt_addr >> 32;
|
||||
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
|
||||
|
||||
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
|
||||
|
||||
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_cq_table *cq_table = &priv->cq_table;
|
||||
int err;
|
||||
|
||||
*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
|
||||
if (*cqn == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_put;
|
||||
return 0;
|
||||
|
||||
err_put:
|
||||
mlx4_table_put(dev, &cq_table->table, *cqn);
|
||||
|
||||
err_out:
|
||||
mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
|
||||
{
|
||||
u64 out_param;
|
||||
int err;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
|
||||
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
if (err)
|
||||
return err;
|
||||
else {
|
||||
*cqn = get_param_l(&out_param);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return __mlx4_cq_alloc_icm(dev, cqn);
|
||||
}
|
||||
|
||||
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_cq_table *cq_table = &priv->cq_table;
|
||||
|
||||
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
|
||||
mlx4_table_put(dev, &cq_table->table, cqn);
|
||||
mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
|
||||
}
|
||||
|
||||
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
|
||||
{
|
||||
u64 in_param = 0;
|
||||
int err;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
set_param_l(&in_param, cqn);
|
||||
err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
|
||||
MLX4_CMD_FREE_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
if (err)
|
||||
mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
|
||||
} else
|
||||
__mlx4_cq_free_icm(dev, cqn);
|
||||
}
|
||||
|
||||
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
|
||||
struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
|
||||
struct mlx4_cq *cq, unsigned vector, int collapsed,
|
||||
int timestamp_en)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_cq_table *cq_table = &priv->cq_table;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_cq_context *cq_context;
|
||||
u64 mtt_addr;
|
||||
int err;
|
||||
|
||||
if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
|
||||
return -EINVAL;
|
||||
|
||||
cq->vector = vector;
|
||||
|
||||
err = mlx4_cq_alloc_icm(dev, &cq->cqn);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
|
||||
spin_unlock_irq(&cq_table->lock);
|
||||
if (err)
|
||||
goto err_icm;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox)) {
|
||||
err = PTR_ERR(mailbox);
|
||||
goto err_radix;
|
||||
}
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
cq_context->flags = cpu_to_be32(!!collapsed << 18);
|
||||
if (timestamp_en)
|
||||
cq_context->flags |= cpu_to_be32(1 << 19);
|
||||
|
||||
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
|
||||
cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
|
||||
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
|
||||
|
||||
mtt_addr = mlx4_mtt_addr(dev, mtt);
|
||||
cq_context->mtt_base_addr_h = mtt_addr >> 32;
|
||||
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
|
||||
cq_context->db_rec_addr = cpu_to_be64(db_rec);
|
||||
|
||||
err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
if (err)
|
||||
goto err_radix;
|
||||
|
||||
cq->cons_index = 0;
|
||||
cq->arm_sn = 1;
|
||||
cq->uar = uar;
|
||||
atomic_set(&cq->refcount, 1);
|
||||
init_completion(&cq->free);
|
||||
|
||||
cq->irq = priv->eq_table.eq[cq->vector].irq;
|
||||
return 0;
|
||||
|
||||
err_radix:
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, cq->cqn);
|
||||
spin_unlock_irq(&cq_table->lock);
|
||||
|
||||
err_icm:
|
||||
mlx4_cq_free_icm(dev, cq->cqn);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
|
||||
|
||||
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_cq_table *cq_table = &priv->cq_table;
|
||||
int err;
|
||||
|
||||
err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
|
||||
if (err)
|
||||
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
|
||||
|
||||
synchronize_irq(priv->eq_table.eq[cq->vector].irq);
|
||||
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, cq->cqn);
|
||||
spin_unlock_irq(&cq_table->lock);
|
||||
|
||||
if (atomic_dec_and_test(&cq->refcount))
|
||||
complete(&cq->free);
|
||||
wait_for_completion(&cq->free);
|
||||
|
||||
mlx4_cq_free_icm(dev, cq->cqn);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_cq_free);
|
||||
|
||||
int mlx4_init_cq_table(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
|
||||
int err;
|
||||
|
||||
spin_lock_init(&cq_table->lock);
|
||||
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
|
||||
if (mlx4_is_slave(dev))
|
||||
return 0;
|
||||
|
||||
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
|
||||
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
|
||||
{
|
||||
if (mlx4_is_slave(dev))
|
||||
return;
|
||||
/* Nothing to do to clean up radix_tree */
|
||||
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
|
||||
}
|
||||
334
drivers/net/ethernet/mellanox/mlx4/en_clock.c
Normal file
334
drivers/net/ethernet/mellanox/mlx4/en_clock.c
Normal file
|
|
@ -0,0 +1,334 @@
|
|||
/*
|
||||
* Copyright (c) 2012 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/mlx4/device.h>
|
||||
|
||||
#include "mlx4_en.h"
|
||||
|
||||
/* Apply a new hardware timestamping configuration (SIOCSHWTSTAMP path).
 *
 * @dev:       net device being reconfigured
 * @tx_type:   requested HWTSTAMP_TX_* mode
 * @rx_filter: requested HWTSTAMP_FILTER_* mode
 *
 * Changing the timestamp config requires rebuilding all RX/TX rings, so
 * the port is stopped (if up), resources are reallocated and the port
 * is restarted.  Returns 0 on success or a negative errno.
 */
int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	/* Nothing to do if the requested config is already active */
	if (priv->hwtstamp_config.tx_type == tx_type &&
	    priv->hwtstamp_config.rx_filter == rx_filter)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	en_warn(priv, "Changing Time Stamp configuration\n");

	priv->hwtstamp_config.tx_type = tx_type;
	priv->hwtstamp_config.rx_filter = rx_filter;

	/* RX timestamping and HW VLAN stripping are mutually exclusive */
	if (rx_filter != HWTSTAMP_FILTER_NONE)
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	else
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	netdev_features_change(dev);
	return err;
}
|
||||
|
||||
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
|
||||
*/
|
||||
static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
|
||||
{
|
||||
struct mlx4_en_dev *mdev =
|
||||
container_of(tc, struct mlx4_en_dev, cycles);
|
||||
struct mlx4_dev *dev = mdev->dev;
|
||||
|
||||
return mlx4_read_clock(dev) & tc->mask;
|
||||
}
|
||||
|
||||
/* Assemble the 48-bit CQE timestamp from its split big-endian hi/lo
 * fields.  When the low 16 bits read as zero the counter may have
 * wrapped between the moments the HW latched the two halves; "+ !lo"
 * bumps the high part by one to compensate for that wraparound case.
 */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
{
	u64 hi, lo;
	struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;

	lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
	hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;

	return hi | lo;
}
|
||||
|
||||
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
|
||||
struct skb_shared_hwtstamps *hwts,
|
||||
u64 timestamp)
|
||||
{
|
||||
unsigned long flags;
|
||||
u64 nsec;
|
||||
|
||||
read_lock_irqsave(&mdev->clock_lock, flags);
|
||||
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
|
||||
read_unlock_irqrestore(&mdev->clock_lock, flags);
|
||||
|
||||
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
|
||||
hwts->hwtstamp = ns_to_ktime(nsec);
|
||||
}
|
||||
|
||||
/**
|
||||
* mlx4_en_remove_timestamp - disable PTP device
|
||||
* @mdev: board private structure
|
||||
*
|
||||
* Stop the PTP support.
|
||||
**/
|
||||
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
|
||||
{
|
||||
if (mdev->ptp_clock) {
|
||||
ptp_clock_unregister(mdev->ptp_clock);
|
||||
mdev->ptp_clock = NULL;
|
||||
mlx4_info(mdev, "removed PHC\n");
|
||||
}
|
||||
}
|
||||
|
||||
/* Periodic watchdog: make sure the timecounter is read at least once
 * per overflow_period so deltas of the 48-bit cycle counter never
 * become ambiguous due to wraparound.
 */
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      mdev->overflow_period);
	unsigned long flags;

	if (timeout) {
		write_lock_irqsave(&mdev->clock_lock, flags);
		/* timecounter_read() folds elapsed cycles into the ns base */
		timecounter_read(&mdev->clock);
		write_unlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}
|
||||
|
||||
/**
 * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired frequency change in parts per billion
 *
 * Adjust the frequency of the PHC cycle counter by the indicated delta from
 * the base frequency.
 **/
static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff, mult;
	int neg_adj = 0;
	unsigned long flags;
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);

	/* Work with the magnitude; remember the sign for the final update */
	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}
	mult = mdev->nominal_c_mult;
	/* diff = nominal_mult * |delta| / 1e9 (scaled ppb correction) */
	adj = mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_lock_irqsave(&mdev->clock_lock, flags);
	/* Account time elapsed at the old rate before switching multipliers */
	timecounter_read(&mdev->clock);
	mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
|
||||
|
||||
/**
|
||||
* mlx4_en_phc_adjtime - Shift the time of the hardware clock
|
||||
* @ptp: ptp clock structure
|
||||
* @delta: Desired change in nanoseconds
|
||||
*
|
||||
* Adjust the timer by resetting the timecounter structure.
|
||||
**/
|
||||
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
|
||||
ptp_clock_info);
|
||||
unsigned long flags;
|
||||
s64 now;
|
||||
|
||||
write_lock_irqsave(&mdev->clock_lock, flags);
|
||||
now = timecounter_read(&mdev->clock);
|
||||
now += delta;
|
||||
timecounter_init(&mdev->clock, &mdev->cycles, now);
|
||||
write_unlock_irqrestore(&mdev->clock_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * mlx4_en_phc_gettime - Reads the current time from the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the timecounter and return the correct value in ns after converting
 * it into a struct timespec.
 **/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	u32 remainder;
	u64 ns;

	/* write lock, not read: timecounter_read() mutates clock state
	 * (it advances cycle_last and the accumulated nsec base)
	 */
	write_lock_irqsave(&mdev->clock_lock, flags);
	ns = timecounter_read(&mdev->clock);
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
|
||||
|
||||
/**
|
||||
* mlx4_en_phc_settime - Set the current time on the hardware clock
|
||||
* @ptp: ptp clock structure
|
||||
* @ts: timespec containing the new time for the cycle counter
|
||||
*
|
||||
* Reset the timecounter to use a new base value instead of the kernel
|
||||
* wall timer value.
|
||||
**/
|
||||
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
|
||||
const struct timespec *ts)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
|
||||
ptp_clock_info);
|
||||
u64 ns = timespec_to_ns(ts);
|
||||
unsigned long flags;
|
||||
|
||||
/* reset the timecounter */
|
||||
write_lock_irqsave(&mdev->clock_lock, flags);
|
||||
timecounter_init(&mdev->clock, &mdev->cycles, ns);
|
||||
write_unlock_irqrestore(&mdev->clock_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * mlx4_en_phc_enable - enable or disable an ancillary feature
 * @ptp: ptp clock structure
 * @request: Desired resource to enable or disable
 * @on: Caller passes one to enable or zero to disable
 *
 * Enable (or disable) ancillary features of the PHC subsystem.
 * Currently, no ancillary features are supported, so every request is
 * rejected with -EOPNOTSUPP.
 **/
static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
			      struct ptp_clock_request __always_unused *request,
			      int __always_unused on)
{
	return -EOPNOTSUPP;
}
|
||||
|
||||
/* PHC capability/operations template; copied into mdev->ptp_clock_info
 * (and given a per-device name) before registration.  max_adj is the
 * largest frequency offset in ppb that adjfreq may be asked to apply;
 * no ancillary features (alarms, external timestamps, periodic outputs,
 * pins, PPS) are exposed.
 */
static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx4_en_phc_adjfreq,
	.adjtime	= mlx4_en_phc_adjtime,
	.gettime	= mlx4_en_phc_gettime,
	.settime	= mlx4_en_phc_settime,
	.enable		= mlx4_en_phc_enable,
};
|
||||
|
||||
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
|
||||
{
|
||||
struct mlx4_dev *dev = mdev->dev;
|
||||
unsigned long flags;
|
||||
u64 ns;
|
||||
|
||||
rwlock_init(&mdev->clock_lock);
|
||||
|
||||
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
|
||||
mdev->cycles.read = mlx4_en_read_clock;
|
||||
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
|
||||
/* Using shift to make calculation more accurate. Since current HW
|
||||
* clock frequency is 427 MHz, and cycles are given using a 48 bits
|
||||
* register, the biggest shift when calculating using u64, is 14
|
||||
* (max_cycles * multiplier < 2^64)
|
||||
*/
|
||||
mdev->cycles.shift = 14;
|
||||
mdev->cycles.mult =
|
||||
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
|
||||
mdev->nominal_c_mult = mdev->cycles.mult;
|
||||
|
||||
write_lock_irqsave(&mdev->clock_lock, flags);
|
||||
timecounter_init(&mdev->clock, &mdev->cycles,
|
||||
ktime_to_ns(ktime_get_real()));
|
||||
write_unlock_irqrestore(&mdev->clock_lock, flags);
|
||||
|
||||
/* Calculate period in seconds to call the overflow watchdog - to make
|
||||
* sure counter is checked at least once every wrap around.
|
||||
*/
|
||||
ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
|
||||
do_div(ns, NSEC_PER_SEC / 2 / HZ);
|
||||
mdev->overflow_period = ns;
|
||||
|
||||
/* Configure the PHC */
|
||||
mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
|
||||
snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
|
||||
|
||||
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
|
||||
&mdev->pdev->dev);
|
||||
if (IS_ERR(mdev->ptp_clock)) {
|
||||
mdev->ptp_clock = NULL;
|
||||
mlx4_err(mdev, "ptp_clock_register failed\n");
|
||||
} else {
|
||||
mlx4_info(mdev, "registered PHC clock\n");
|
||||
}
|
||||
|
||||
}
|
||||
232
drivers/net/ethernet/mellanox/mlx4/en_cq.c
Normal file
232
drivers/net/ethernet/mellanox/mlx4/en_cq.c
Normal file
|
|
@ -0,0 +1,232 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/mlx4/cq.h>
|
||||
#include <linux/mlx4/qp.h>
|
||||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#include "mlx4_en.h"
|
||||
|
||||
/* CQ async-event callback: events are ignored for EN CQs. */
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
}
|
||||
|
||||
|
||||
/* Allocate a software CQ object and its HW buffer.
 *
 * @priv:    per-port private data
 * @pcq:     out: allocated CQ on success, NULL on failure
 * @entries: number of CQEs
 * @ring:    index of the ring this CQ serves
 * @mode:    RX or TX (stored in cq->is_tx)
 * @node:    preferred NUMA node for allocations
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
		      struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode,
		      int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	int err;

	/* Prefer node-local allocation; fall back to any node */
	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
	if (!cq) {
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq) {
			en_err(priv, "Failed to allocate CQ structure\n");
			return -ENOMEM;
		}
	}

	cq->size = entries;
	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;

	cq->ring = ring;
	cq->is_tx = mode;

	/* Allocate HW buffers on provided NUMA node.
	 * dev->numa_node is used in mtt range allocation flow.
	 * (the device's node is temporarily overridden and then restored)
	 */
	set_dev_node(&mdev->dev->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
				 cq->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_cq;

	err = mlx4_en_map_buffer(&cq->wqres.buf);
	if (err)
		goto err_res;

	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
	*pcq = cq;

	return 0;

err_res:
	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
	kfree(cq);
	*pcq = NULL;
	return err;
}
|
||||
|
||||
/* Activate a previously created CQ: pick/assign its completion EQ
 * vector, allocate the HW CQ context, hook up NAPI and enable it.
 *
 * RX CQs get a dedicated MSI-X vector when a completion-vector pool
 * exists (falling back to the legacy shared vectors on failure); TX CQs
 * reuse the vector of the RX CQ with the same (wrapped) index.
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	char name[25];
	int timestamp_en = 0;
	struct cpu_rmap *rmap =
#ifdef CONFIG_RFS_ACCEL
		priv->dev->rx_cpu_rmap;
#else
		NULL;
#endif

	cq->dev = mdev->pndev[priv->port];
	/* Doorbell record: consumer index word, then the arm word */
	cq->mcq.set_ci_db = cq->wqres.db.db;
	cq->mcq.arm_db = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (mdev->dev->caps.comp_pool) {
			if (!cq->vector) {
				sprintf(name, "%s-%d", priv->dev->name,
					cq->ring);
				/* Set IRQ for specific name (per ring) */
				if (mlx4_assign_eq(mdev->dev, name, rmap,
						   &cq->vector)) {
					cq->vector = (cq->ring + 1 + priv->port)
					    % mdev->dev->caps.num_comp_vectors;
					mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
						  name);
				}

			}
		} else {
			/* No pool: spread rings over the legacy vectors */
			cq->vector = (cq->ring + 1 + priv->port) %
				mdev->dev->caps.num_comp_vectors;
		}

		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	/* RX CQ size must match what the ring actually managed to fill */
	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	/* Enable CQE timestamping if requested for this direction */
	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		return err;

	cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];

		/* Best effort: affinity hint failure is only warned about */
		err = irq_set_affinity_hint(cq->mcq.irq,
					    ring->affinity_mask);
		if (err)
			mlx4_warn(mdev, "Failed setting affinity hint\n");

		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
		napi_hash_add(&cq->napi);
	}

	napi_enable(&cq->napi);

	return 0;
}
|
||||
|
||||
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
struct mlx4_en_cq *cq = *pcq;
|
||||
|
||||
mlx4_en_unmap_buffer(&cq->wqres.buf);
|
||||
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
|
||||
if (priv->mdev->dev->caps.comp_pool && cq->vector) {
|
||||
mlx4_release_eq(priv->mdev->dev, cq->vector);
|
||||
}
|
||||
cq->vector = 0;
|
||||
cq->buf_size = 0;
|
||||
cq->buf = NULL;
|
||||
kfree(cq);
|
||||
*pcq = NULL;
|
||||
}
|
||||
|
||||
/* Quiesce a CQ and free its HW context.  For RX CQs, remove it from the
 * busy-poll NAPI hash and wait an RCU grace period so no concurrent
 * poller still references it before clearing the IRQ affinity hint.
 * Counterpart of mlx4_en_activate_cq().
 */
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	napi_disable(&cq->napi);
	if (!cq->is_tx) {
		napi_hash_del(&cq->napi);
		synchronize_rcu();
		irq_set_affinity_hint(cq->mcq.irq, NULL);
	}
	netif_napi_del(&cq->napi);

	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
|
||||
|
||||
/* Set rx cq moderation parameters */
|
||||
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
|
||||
{
|
||||
return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
|
||||
cq->moder_cnt, cq->moder_time);
|
||||
}
|
||||
|
||||
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
|
||||
{
|
||||
mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
|
||||
&priv->mdev->uar_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
263
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
Normal file
263
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
Normal file
|
|
@ -0,0 +1,263 @@
|
|||
/*
|
||||
* Copyright (c) 2011 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/dcbnl.h>
|
||||
#include <linux/math64.h>
|
||||
|
||||
#include "mlx4_en.h"
|
||||
|
||||
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
|
||||
struct ieee_ets *ets)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct ieee_ets *my_ets = &priv->ets;
|
||||
|
||||
/* No IEEE PFC settings available */
|
||||
if (!my_ets)
|
||||
return -EINVAL;
|
||||
|
||||
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
|
||||
ets->cbs = my_ets->cbs;
|
||||
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
|
||||
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
|
||||
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Validate a user-supplied IEEE ETS configuration: every UP->TC mapping
 * must be in range, only strict-priority and ETS TSAs are supported,
 * and if any TC uses ETS the ETS bandwidth shares must sum to exactly
 * 100 (MLX4_EN_BW_MAX).  Returns 0 if acceptable, negative errno if not.
 */
static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
{
	int i;
	int total_ets_bw = 0;
	int has_ets_tc = 0;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
			en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
			       i, ets->prio_tc[i]);
			return -EINVAL;
		}

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			has_ets_tc = 1;
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			/* NOTE(review): kernel convention prefers -EOPNOTSUPP
			 * over -ENOTSUPP here; kept as-is to preserve behavior.
			 */
			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
			       i, ets->tc_tsa[i]);
			return -ENOTSUPP;
		}
	}

	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
		       total_ets_bw);
		return -EINVAL;
	}

	return 0;
}
|
||||
|
||||
/* Program the port scheduler from an ETS config plus per-TC rate
 * limits.  A NULL @ets or @ratelimit means "reuse the cached values"
 * (priv->ets / priv->maxrate).  Strict TCs are assigned ascending
 * priority groups; ETS TCs all share the MLX4_EN_TC_ETS group with
 * their BW share (0 is mapped to MLX4_EN_BW_MIN so the TC stays alive).
 */
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
		struct ieee_ets *ets, u16 *ratelimit)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int num_strict = 0;
	int i;
	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };

	ets = ets ?: &priv->ets;
	ratelimit = ratelimit ?: priv->maxrate;

	/* higher TC means higher priority => lower pg */
	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			pg[i] = num_strict++;
			tc_tx_bw[i] = MLX4_EN_BW_MAX;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			pg[i] = MLX4_EN_TC_ETS;
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
			break;
		}
	}

	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
				       ratelimit);
}
|
||||
|
||||
/* Apply an IEEE ETS configuration: validate it, program the UP->TC map,
 * then the port scheduler, and only cache the config in priv->ets once
 * the hardware accepted every step.
 */
static int
mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_ets_validate(priv, ets);
	if (err)
		return err;

	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
	if (err)
		return err;

	err = mlx4_en_config_port_scheduler(priv, ets, NULL);
	if (err)
		return err;

	memcpy(&priv->ets, ets, sizeof(priv->ets));

	return 0;
}
|
||||
|
||||
static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
|
||||
struct ieee_pfc *pfc)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
|
||||
pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
|
||||
pfc->pfc_en = priv->prof->tx_ppp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Apply IEEE PFC settings.  Enabling any per-priority pause disables
 * global pause (the two are mutually exclusive on this HW), then the
 * combined pause configuration is pushed to the port.
 */
static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
		struct ieee_pfc *pfc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_port_profile *prof = priv->prof;
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
	       pfc->pfc_cap,
	       pfc->pfc_en,
	       pfc->mbc,
	       pfc->delay);

	/* global pause off whenever any per-priority pause bit is set */
	prof->rx_pause = !pfc->pfc_en;
	prof->tx_pause = !pfc->pfc_en;
	prof->rx_ppp = pfc->pfc_en;
	prof->tx_ppp = pfc->pfc_en;

	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause,
				    prof->tx_ppp,
				    prof->rx_pause,
				    prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");

	return err;
}
|
||||
|
||||
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
|
||||
{
|
||||
return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
|
||||
}
|
||||
|
||||
/* Accept a DCBX mode change only if it exactly matches what we support:
 * host-managed IEEE, no LLD management, no CEE.  Returns 0 on success,
 * 1 (per dcbnl convention) on rejection.
 */
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	bool supported = !(mode & DCB_CAP_DCBX_LLD_MANAGED) &&
			 !(mode & DCB_CAP_DCBX_VER_CEE) &&
			 (mode & DCB_CAP_DCBX_VER_IEEE) &&
			 (mode & DCB_CAP_DCBX_HOST);

	return supported ? 0 : 1;
}
|
||||
|
||||
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */

/* Report the per-TC max rates, converting the cached HW units back to
 * Kbps.  A stored value of 0 means unlimited.
 */
static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
				   struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		maxrate->tc_maxrate[i] =
			priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;

	return 0;
}
|
||||
|
||||
/* Set per-TC max rates.  Rates are converted from Kbps to HW units into
 * a scratch array first, so priv->maxrate is only updated after the
 * hardware accepts the new configuration.
 */
static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
		struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 tmp[IEEE_8021QAZ_MAX_TCS];
	int i, err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* Convert from Kbps into HW units, rounding result up.
		 * Setting to 0, means unlimited BW.
		 */
		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
				 MLX4_RATELIMIT_UNITS_IN_KB);
	}

	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
	if (err)
		return err;

	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));

	return 0;
}
|
||||
|
||||
/* Full DCB operations table: ETS + max-rate + PFC (used when the
 * device supports the port scheduler).
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets	= mlx4_en_dcbnl_ieee_getets,
	.ieee_setets	= mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
|
||||
|
||||
/* Reduced DCB operations table: PFC only (no ETS scheduler support). */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx	= mlx4_en_dcbnl_getdcbx,
	.setdcbx	= mlx4_en_dcbnl_setdcbx,
};
|
||||
1349
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
Normal file
1349
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
Normal file
File diff suppressed because it is too large
Load diff
368
drivers/net/ethernet/mellanox/mlx4/en_main.c
Normal file
368
drivers/net/ethernet/mellanox/mlx4/en_main.c
Normal file
|
|
@ -0,0 +1,368 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <linux/mlx4/driver.h>
|
||||
#include <linux/mlx4/device.h>
|
||||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#include "mlx4_en.h"
|
||||
|
||||
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");

/* Banner printed once at module init */
static const char mlx4_en_version[] =
	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/* Declare a read-only (0444) unsigned module parameter with a default */
#define MLX4_EN_PARM_INT(X, def_val, desc) \
	static unsigned int X = def_val;\
	module_param(X , uint, 0444); \
	MODULE_PARM_DESC(X, desc);


/*
 * Device scope module parameters
 */

/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
		 "Enable RSS for incoming UDP traffic or disabled (0)");

/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
			   " Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
			   " Per priority bit mask");

MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
		 "Threshold for using inline data (range: 17-104, default: 104)");

/* Upper bounds for the pfctx/pfcrx priority bit masks */
#define MAX_PFC_TX     0xff
#define MAX_PFC_RX     0xff
|
||||
|
||||
/* Driver-wide printk helper used by the en_err/en_warn/en_dbg macros.
 * Prefixes messages with the netdev name once the device is registered,
 * or with the PCI device name and port number before registration
 * (when priv->dev->name is not yet meaningful).
 */
void en_print(const char *level, const struct mlx4_en_priv *priv,
	      const char *format, ...)
{
	va_list args;
	struct va_format vaf;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;
	if (priv->registered)
		printk("%s%s: %s: %pV",
		       level, DRV_NAME, priv->dev->name, &vaf);
	else
		printk("%s%s: %s: Port %d: %pV",
		       level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
		       priv->port, &vaf);
	va_end(args);
}
|
||||
|
||||
/* Recompute the RX-filter and HW-loopback flag bits for @dev based on the
 * requested @features, the multi-function (SRIOV) state of the device and
 * whether a loopback selftest is in progress.  Both flags are cleared
 * first, then re-set as needed.
 */
void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
			 MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);

	/* Drop the packet if SRIOV is not enabled
	 * and not performing the selftest or flb disabled
	 */
	if (mlx4_is_mfunc(priv->mdev->dev) &&
	    !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
		priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;

	/* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
	 * is requested
	 */
	if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
		priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
}
|
||||
|
||||
/* Build the device-wide profile (mdev->profile) from the module parameters,
 * including identical per-port defaults for every possible port.
 * Always returns 0 in this implementation; the int return lets the caller
 * treat it as a validation step.
 */
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	params->udp_rss = udp_rss;
	/* On low-memory systems use the minimal number of TX rings per user
	 * priority instead of scaling with the number of online CPUs.
	 */
	params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
		MLX4_EN_MIN_TX_RING_P_UP :
		min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);

	if (params->udp_rss && !(mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
		params->udp_rss = 0;
	}
	/* prof[] is indexed by 1-based port number */
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = 1;
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = 1;
		params->prof[i].tx_ppp = pfctx;
		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
		params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
			MLX4_EN_NUM_UP;
		params->prof[i].rss_rings = 0;
		params->prof[i].inline_thold = inline_thold;
	}

	return 0;
}
|
||||
|
||||
/* mlx4_interface .get_dev callback: look up the net_device registered for
 * @port on this HCA.  @ctx is the mlx4_en_dev returned by mlx4_en_add();
 * the result may be NULL if the port has no netdev.
 */
static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
{
	struct mlx4_en_dev *mdev = ctx;
	void *ndev = mdev->pndev[port];

	return ndev;
}
|
||||
|
||||
/* mlx4_interface .event callback: dispatch asynchronous device events.
 * Runs in the core's event context, so real work is deferred to the
 * per-port workqueue rather than done inline.
 */
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
			  enum mlx4_dev_event event, unsigned long port)
{
	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
	struct mlx4_en_priv *priv;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		if (!mdev->pndev[port])
			return;
		priv = netdev_priv(mdev->pndev[port]);
		/* To prevent races, we poll the link state in a separate
		   task rather than changing it here */
		/* NOTE(review): link_state stores the event code itself; the
		 * linkstate task presumably re-reads the real state — confirm */
		priv->link_state = event;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		mlx4_err(mdev, "Internal error detected, restarting device\n");
		break;

	case MLX4_DEV_EVENT_SLAVE_INIT:
	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		/* Slave lifecycle events need no action in the EN driver */
		break;
	default:
		/* Only warn for ports this driver actually owns */
		if (port < 1 || port > dev->caps.num_ports ||
		    !mdev->pndev[port])
			return;
		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
			  (int) port);
	}
}
|
||||
|
||||
/* mlx4_interface .remove callback: tear down everything created by
 * mlx4_en_add() — per-port netdevs first, then the shared workqueue,
 * MR, UAR mapping and PD, in reverse order of creation.
 */
static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
{
	struct mlx4_en_dev *mdev = endev_ptr;
	int i;

	/* Mark the device as going down so queued work can bail out */
	mutex_lock(&mdev->state_lock);
	mdev->device_up = false;
	mutex_unlock(&mdev->state_lock);

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		if (mdev->pndev[i])
			mlx4_en_destroy_netdev(mdev->pndev[i]);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Drain outstanding work before destroying the workqueue */
	flush_workqueue(mdev->workqueue);
	destroy_workqueue(mdev->workqueue);
	(void) mlx4_mr_free(dev, &mdev->mr);
	iounmap(mdev->uar_map);
	mlx4_uar_free(dev, &mdev->priv_uar);
	mlx4_pd_free(dev, mdev->priv_pdn);
	kfree(mdev);
}
|
||||
|
||||
/* mlx4_interface .add callback: called by the mlx4 core once per HCA.
 * Allocates the device-wide PD, UAR and MR, builds the profile from the
 * module parameters, creates the private workqueue and one netdev per
 * Ethernet port.  Returns the new mlx4_en_dev context on success or NULL
 * on failure; errors unwind via the goto ladder in reverse acquisition
 * order.
 */
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;
	int i;
	int err;

	printk_once(KERN_INFO "%s", mlx4_en_version);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev) {
		err = -ENOMEM;
		goto err_free_res;
	}

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &(dev->pdev->dev);
	mdev->pdev = dev->pdev;
	mdev->device_up = false;

	/* LSO capability is advertised in bit 15 of the capability flags */
	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");

	/* One MR spanning the whole address range (0..~0ull), shared by all
	 * rings of all ports */
	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
			 0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_map;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	err = mlx4_en_get_profile(mdev);
	if (err) {
		mlx4_err(mdev, "Bad module parameters, aborting\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	/* Set default number of RX rings*/
	mlx4_en_set_num_rx_rings(mdev);

	/* Create our own workqueue for reset/multicast tasks
	 * Note: we cannot use the shared workqueue because of deadlocks caused
	 * by the rtnl lock */
	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
	if (!mdev->workqueue) {
		err = -ENOMEM;
		goto err_mr;
	}

	/* At this stage all non-port specific tasks are complete:
	 * mark the card state as up */
	mutex_init(&mdev->state_lock);
	mdev->device_up = true;

	/* Setup ports */

	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		/* A failed port is disabled (NULL) but does not abort the add */
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}

	return mdev;

err_mr:
	(void) mlx4_mr_free(dev, &mdev->mr);
err_map:
	if (mdev->uar_map)
		iounmap(mdev->uar_map);
err_uar:
	mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
	mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
	kfree(mdev);
err_free_res:
	return NULL;
}
|
||||
|
||||
/* Callbacks registered with the mlx4 core for the Ethernet protocol */
static struct mlx4_interface mlx4_en_interface = {
	.add		= mlx4_en_add,
	.remove		= mlx4_en_remove,
	.event		= mlx4_en_event,
	.get_dev	= mlx4_en_get_netdev,
	.protocol	= MLX4_PROT_ETH,
};
|
||||
|
||||
/* Sanity-check the module parameters at load time.  Out-of-range values
 * are logged and silently reset to their defaults rather than failing
 * module load.
 */
static void mlx4_en_verify_params(void)
{
	/* pfctx/pfcrx are 8-bit per-priority masks */
	if (pfctx > MAX_PFC_TX) {
		pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
			pfctx, MAX_PFC_TX);
		pfctx = 0;
	}

	if (pfcrx > MAX_PFC_RX) {
		pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
			pfcrx, MAX_PFC_RX);
		pfcrx = 0;
	}

	/* inline_thold must fit between the minimum packet length and the
	 * maximum inline size supported by the TX WQE */
	if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
		pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
			inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
		inline_thold = MAX_INLINE;
	}
}
|
||||
|
||||
/* Module entry point: sanitize the module parameters, then register the
 * Ethernet interface with the mlx4 core (which triggers mlx4_en_add()
 * for every present HCA).
 */
static int __init mlx4_en_init(void)
{
	int err;

	mlx4_en_verify_params();
	err = mlx4_register_interface(&mlx4_en_interface);

	return err;
}
|
||||
|
||||
/* Module exit point: unregister from the mlx4 core, which invokes
 * mlx4_en_remove() for every device this driver is attached to.
 */
static void __exit mlx4_en_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_en_interface);
}
|
||||
|
||||
/* Register module load/unload hooks */
module_init(mlx4_en_init);
module_exit(mlx4_en_cleanup);
|
||||
|
||||
2663
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
Normal file
2663
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
Normal file
File diff suppressed because it is too large
Load diff
228
drivers/net/ethernet/mellanox/mlx4/en_port.c
Normal file
228
drivers/net/ethernet/mellanox/mlx4/en_port.c
Normal file
|
|
@ -0,0 +1,228 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/if_vlan.h>
|
||||
|
||||
#include <linux/mlx4/device.h>
|
||||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#include "en_port.h"
|
||||
#include "mlx4_en.h"
|
||||
|
||||
|
||||
/* Program the hardware VLAN filter for priv->port from the netdev's
 * active_vlans bitmap.  The 4096 VLAN IDs are packed into 128 32-bit
 * words; note the mailbox array is filled from the highest index down,
 * so entry[VLAN_FLTR_SIZE - 1] holds VIDs 0-31.
 * Returns 0 on success or a negative errno from the command interface.
 */
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_vlan_fltr_mbox *filter;
	int i;
	int j;
	int index = 0;
	u32 entry;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	filter = mailbox->buf;
	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
		entry = 0;
		for (j = 0; j < 32; j++)
			if (test_bit(index++, priv->active_vlans))
				entry |= 1 << j;
		filter->entry[i] = cpu_to_be32(entry);
	}
	err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
|
||||
|
||||
/* Issue the QUERY_PORT firmware command for @port and decode link state,
 * link speed and transceiver type into priv->port_state.
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
{
	struct mlx4_en_query_port_context *qport_context;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct mlx4_en_port_state *state = &priv->port_state;
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;
	qport_context = mailbox->buf;

	/* This command is always accessed from Ethtool context
	 * already synchronized, no need in locking */
	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
	/* Map the firmware speed encoding to Mb/s; -1 means unknown */
	switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
	case MLX4_EN_1G_SPEED:
		state->link_speed = 1000;
		break;
	case MLX4_EN_10G_SPEED_XAUI:
	case MLX4_EN_10G_SPEED_XFI:
		state->link_speed = 10000;
		break;
	case MLX4_EN_40G_SPEED:
		state->link_speed = 40000;
		break;
	default:
		state->link_speed = -1;
		break;
	}
	state->transciver = qport_context->transceiver;

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
|
||||
|
||||
/* Fetch hardware port statistics via the DUMP_ETH_STATS firmware command
 * and merge them with the software per-ring counters into priv->stats,
 * priv->port_stats and priv->pkstats.  @reset (encoded in bits 15:8 of
 * the input modifier) asks firmware to clear its counters after reading.
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct net_device_stats *stats = &priv->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;
	int err;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	/* RX byte/packet/checksum totals come from the software ring counters */
	stats->rx_packets = 0;
	stats->rx_bytes = 0;
	priv->port_stats.rx_chksum_good = 0;
	priv->port_stats.rx_chksum_none = 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		stats->rx_packets += priv->rx_ring[i]->packets;
		stats->rx_bytes += priv->rx_ring[i]->bytes;
		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
	}
	/* Same for TX: accumulate the software per-ring counters */
	stats->tx_packets = 0;
	stats->tx_bytes = 0;
	priv->port_stats.tx_chksum_offload = 0;
	priv->port_stats.queue_stopped = 0;
	priv->port_stats.wake_queue = 0;
	priv->port_stats.tso_packets = 0;
	priv->port_stats.xmit_more = 0;

	for (i = 0; i < priv->tx_ring_num; i++) {
		const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];

		stats->tx_packets += ring->packets;
		stats->tx_bytes += ring->bytes;
		priv->port_stats.tx_chksum_offload += ring->tx_csum;
		priv->port_stats.queue_stopped += ring->queue_stopped;
		priv->port_stats.wake_queue += ring->wake_queue;
		priv->port_stats.tso_packets += ring->tso_packets;
		priv->port_stats.xmit_more += ring->xmit_more;
	}

	/* Error and multicast counters are taken from the firmware mailbox */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RdropLength) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT);
	stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
	stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
	stats->collisions = 0;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;

	/* Per-priority packet statistics for ethtool */
	priv->pkstats.broadcast =
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
				be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
	priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
|
||||
|
||||
560
drivers/net/ethernet/mellanox/mlx4/en_port.h
Normal file
560
drivers/net/ethernet/mellanox/mlx4/en_port.h
Normal file
|
|
@ -0,0 +1,560 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _MLX4_EN_PORT_H_
|
||||
#define _MLX4_EN_PORT_H_
|
||||
|
||||
|
||||
/* SET_PORT general-context valid bits and promiscuous-mode bit positions */
#define SET_PORT_GEN_ALL_VALID	0x7
#define SET_PORT_PROMISC_SHIFT	31
#define SET_PORT_MC_PROMISC_SHIFT	30

#define MLX4_EN_NUM_TC		8

/* VLAN filter mailbox: 128 32-bit words = one bit for each of the 4096
 * possible VLAN IDs (see mlx4_SET_VLAN_FLTR for the packing order) */
#define VLAN_FLTR_SIZE	128
struct mlx4_set_vlan_fltr_mbox {
	__be32 entry[VLAN_FLTR_SIZE];
};
|
||||
|
||||
|
||||
/* Multicast filtering modes (firmware command argument values) */
enum {
	MLX4_MCAST_CONFIG       = 0,
	MLX4_MCAST_DISABLE      = 1,
	MLX4_MCAST_ENABLE       = 2,
};

/* Link-speed encodings in the QUERY_PORT link_speed field
 * (masked with MLX4_EN_SPEED_MASK before comparison) */
enum {
	MLX4_EN_1G_SPEED	= 0x02,
	MLX4_EN_10G_SPEED_XFI	= 0x01,
	MLX4_EN_10G_SPEED_XAUI	= 0x00,
	MLX4_EN_40G_SPEED	= 0x40,
	MLX4_EN_OTHER_SPEED	= 0x0f,
};
|
||||
|
||||
/* Wire layout of the QUERY_PORT command output mailbox.
 * Field order and sizes must match the firmware exactly — do not reorder.
 */
struct mlx4_en_query_port_context {
	u8 link_up;
#define MLX4_EN_LINK_UP_MASK	0x80
	u8 reserved;
	__be16 mtu;
	u8 reserved2;
	u8 link_speed;
#define MLX4_EN_SPEED_MASK	0x43
	u16 reserved3[5];
	__be64 mac;
	u8 transceiver;
};
|
||||
|
||||
|
||||
/* Wire layout of the DUMP_ETH_STATS command output mailbox.
 * One counter per priority (0-7) plus a "no VLAN" bucket for RX, and an
 * additional loopback bucket for TX.  Field order and sizes define the
 * firmware ABI — do not reorder or resize anything in this struct.
 */
struct mlx4_en_stat_out_mbox {
	/* Received frames with a length of 64 octets */
	__be64 R64_prio_0;
	__be64 R64_prio_1;
	__be64 R64_prio_2;
	__be64 R64_prio_3;
	__be64 R64_prio_4;
	__be64 R64_prio_5;
	__be64 R64_prio_6;
	__be64 R64_prio_7;
	__be64 R64_novlan;
	/* Received frames with a length of 127 octets */
	__be64 R127_prio_0;
	__be64 R127_prio_1;
	__be64 R127_prio_2;
	__be64 R127_prio_3;
	__be64 R127_prio_4;
	__be64 R127_prio_5;
	__be64 R127_prio_6;
	__be64 R127_prio_7;
	__be64 R127_novlan;
	/* Received frames with a length of 255 octets */
	__be64 R255_prio_0;
	__be64 R255_prio_1;
	__be64 R255_prio_2;
	__be64 R255_prio_3;
	__be64 R255_prio_4;
	__be64 R255_prio_5;
	__be64 R255_prio_6;
	__be64 R255_prio_7;
	__be64 R255_novlan;
	/* Received frames with a length of 511 octets */
	__be64 R511_prio_0;
	__be64 R511_prio_1;
	__be64 R511_prio_2;
	__be64 R511_prio_3;
	__be64 R511_prio_4;
	__be64 R511_prio_5;
	__be64 R511_prio_6;
	__be64 R511_prio_7;
	__be64 R511_novlan;
	/* Received frames with a length of 1023 octets */
	__be64 R1023_prio_0;
	__be64 R1023_prio_1;
	__be64 R1023_prio_2;
	__be64 R1023_prio_3;
	__be64 R1023_prio_4;
	__be64 R1023_prio_5;
	__be64 R1023_prio_6;
	__be64 R1023_prio_7;
	__be64 R1023_novlan;
	/* Received frames with a length of 1518 octets */
	__be64 R1518_prio_0;
	__be64 R1518_prio_1;
	__be64 R1518_prio_2;
	__be64 R1518_prio_3;
	__be64 R1518_prio_4;
	__be64 R1518_prio_5;
	__be64 R1518_prio_6;
	__be64 R1518_prio_7;
	__be64 R1518_novlan;
	/* Received frames with a length of 1522 octets */
	__be64 R1522_prio_0;
	__be64 R1522_prio_1;
	__be64 R1522_prio_2;
	__be64 R1522_prio_3;
	__be64 R1522_prio_4;
	__be64 R1522_prio_5;
	__be64 R1522_prio_6;
	__be64 R1522_prio_7;
	__be64 R1522_novlan;
	/* Received frames with a length of 1548 octets */
	__be64 R1548_prio_0;
	__be64 R1548_prio_1;
	__be64 R1548_prio_2;
	__be64 R1548_prio_3;
	__be64 R1548_prio_4;
	__be64 R1548_prio_5;
	__be64 R1548_prio_6;
	__be64 R1548_prio_7;
	__be64 R1548_novlan;
	/* Received frames with a length of 1548 < octets < MTU */
	__be64 R2MTU_prio_0;
	__be64 R2MTU_prio_1;
	__be64 R2MTU_prio_2;
	__be64 R2MTU_prio_3;
	__be64 R2MTU_prio_4;
	__be64 R2MTU_prio_5;
	__be64 R2MTU_prio_6;
	__be64 R2MTU_prio_7;
	__be64 R2MTU_novlan;
	/* Received frames with a length of MTU< octets and good CRC */
	__be64 RGIANT_prio_0;
	__be64 RGIANT_prio_1;
	__be64 RGIANT_prio_2;
	__be64 RGIANT_prio_3;
	__be64 RGIANT_prio_4;
	__be64 RGIANT_prio_5;
	__be64 RGIANT_prio_6;
	__be64 RGIANT_prio_7;
	__be64 RGIANT_novlan;
	/* Received broadcast frames with good CRC */
	__be64 RBCAST_prio_0;
	__be64 RBCAST_prio_1;
	__be64 RBCAST_prio_2;
	__be64 RBCAST_prio_3;
	__be64 RBCAST_prio_4;
	__be64 RBCAST_prio_5;
	__be64 RBCAST_prio_6;
	__be64 RBCAST_prio_7;
	__be64 RBCAST_novlan;
	/* Received multicast frames with good CRC */
	__be64 MCAST_prio_0;
	__be64 MCAST_prio_1;
	__be64 MCAST_prio_2;
	__be64 MCAST_prio_3;
	__be64 MCAST_prio_4;
	__be64 MCAST_prio_5;
	__be64 MCAST_prio_6;
	__be64 MCAST_prio_7;
	__be64 MCAST_novlan;
	/* Received unicast not short or GIANT frames with good CRC */
	__be64 RTOTG_prio_0;
	__be64 RTOTG_prio_1;
	__be64 RTOTG_prio_2;
	__be64 RTOTG_prio_3;
	__be64 RTOTG_prio_4;
	__be64 RTOTG_prio_5;
	__be64 RTOTG_prio_6;
	__be64 RTOTG_prio_7;
	__be64 RTOTG_novlan;

	/* Count of total octets of received frames, includes framing characters */
	__be64 RTTLOCT_prio_0;
	/* Count of total octets of received frames, not including framing
	   characters */
	__be64 RTTLOCT_NOFRM_prio_0;
	/* Count of Total number of octets received
	   (only for frames without errors) */
	__be64 ROCT_prio_0;

	__be64 RTTLOCT_prio_1;
	__be64 RTTLOCT_NOFRM_prio_1;
	__be64 ROCT_prio_1;

	__be64 RTTLOCT_prio_2;
	__be64 RTTLOCT_NOFRM_prio_2;
	__be64 ROCT_prio_2;

	__be64 RTTLOCT_prio_3;
	__be64 RTTLOCT_NOFRM_prio_3;
	__be64 ROCT_prio_3;

	__be64 RTTLOCT_prio_4;
	__be64 RTTLOCT_NOFRM_prio_4;
	__be64 ROCT_prio_4;

	__be64 RTTLOCT_prio_5;
	__be64 RTTLOCT_NOFRM_prio_5;
	__be64 ROCT_prio_5;

	__be64 RTTLOCT_prio_6;
	__be64 RTTLOCT_NOFRM_prio_6;
	__be64 ROCT_prio_6;

	__be64 RTTLOCT_prio_7;
	__be64 RTTLOCT_NOFRM_prio_7;
	__be64 ROCT_prio_7;

	__be64 RTTLOCT_novlan;
	__be64 RTTLOCT_NOFRM_novlan;
	__be64 ROCT_novlan;

	/* Count of Total received frames including bad frames */
	__be64 RTOT_prio_0;
	/* Count of Total number of received frames with 802.1Q encapsulation */
	__be64 R1Q_prio_0;
	__be64 reserved1;

	__be64 RTOT_prio_1;
	__be64 R1Q_prio_1;
	__be64 reserved2;

	__be64 RTOT_prio_2;
	__be64 R1Q_prio_2;
	__be64 reserved3;

	__be64 RTOT_prio_3;
	__be64 R1Q_prio_3;
	__be64 reserved4;

	__be64 RTOT_prio_4;
	__be64 R1Q_prio_4;
	__be64 reserved5;

	__be64 RTOT_prio_5;
	__be64 R1Q_prio_5;
	__be64 reserved6;

	__be64 RTOT_prio_6;
	__be64 R1Q_prio_6;
	__be64 reserved7;

	__be64 RTOT_prio_7;
	__be64 R1Q_prio_7;
	__be64 reserved8;

	__be64 RTOT_novlan;
	__be64 R1Q_novlan;
	__be64 reserved9;

	/* Total number of Successfully Received Control Frames */
	__be64 RCNTL;
	__be64 reserved10;
	__be64 reserved11;
	__be64 reserved12;
	/* Count of received frames with a length/type field value between 46
	   (42 for VLANtagged frames) and 1500 (also 1500 for VLAN-tagged frames),
	   inclusive */
	__be64 RInRangeLengthErr;
	/* Count of received frames with length/type field between 1501 and 1535
	   decimal, inclusive */
	__be64 ROutRangeLengthErr;
	/* Count of received frames that are longer than max allowed size for
	   802.3 frames (1518/1522) */
	__be64 RFrmTooLong;
	/* Count frames received with PCS error */
	__be64 PCS;

	/* Transmit frames with a length of 64 octets */
	__be64 T64_prio_0;
	__be64 T64_prio_1;
	__be64 T64_prio_2;
	__be64 T64_prio_3;
	__be64 T64_prio_4;
	__be64 T64_prio_5;
	__be64 T64_prio_6;
	__be64 T64_prio_7;
	__be64 T64_novlan;
	__be64 T64_loopbk;
	/* Transmit frames with a length of 65 to 127 octets. */
	__be64 T127_prio_0;
	__be64 T127_prio_1;
	__be64 T127_prio_2;
	__be64 T127_prio_3;
	__be64 T127_prio_4;
	__be64 T127_prio_5;
	__be64 T127_prio_6;
	__be64 T127_prio_7;
	__be64 T127_novlan;
	__be64 T127_loopbk;
	/* Transmit frames with a length of 128 to 255 octets */
	__be64 T255_prio_0;
	__be64 T255_prio_1;
	__be64 T255_prio_2;
	__be64 T255_prio_3;
	__be64 T255_prio_4;
	__be64 T255_prio_5;
	__be64 T255_prio_6;
	__be64 T255_prio_7;
	__be64 T255_novlan;
	__be64 T255_loopbk;
	/* Transmit frames with a length of 256 to 511 octets */
	__be64 T511_prio_0;
	__be64 T511_prio_1;
	__be64 T511_prio_2;
	__be64 T511_prio_3;
	__be64 T511_prio_4;
	__be64 T511_prio_5;
	__be64 T511_prio_6;
	__be64 T511_prio_7;
	__be64 T511_novlan;
	__be64 T511_loopbk;
	/* Transmit frames with a length of 512 to 1023 octets */
	__be64 T1023_prio_0;
	__be64 T1023_prio_1;
	__be64 T1023_prio_2;
	__be64 T1023_prio_3;
	__be64 T1023_prio_4;
	__be64 T1023_prio_5;
	__be64 T1023_prio_6;
	__be64 T1023_prio_7;
	__be64 T1023_novlan;
	__be64 T1023_loopbk;
	/* Transmit frames with a length of 1024 to 1518 octets */
	__be64 T1518_prio_0;
	__be64 T1518_prio_1;
	__be64 T1518_prio_2;
	__be64 T1518_prio_3;
	__be64 T1518_prio_4;
	__be64 T1518_prio_5;
	__be64 T1518_prio_6;
	__be64 T1518_prio_7;
	__be64 T1518_novlan;
	__be64 T1518_loopbk;
	/* Counts transmit frames with a length of 1519 to 1522 bytes */
	__be64 T1522_prio_0;
	__be64 T1522_prio_1;
	__be64 T1522_prio_2;
	__be64 T1522_prio_3;
	__be64 T1522_prio_4;
	__be64 T1522_prio_5;
	__be64 T1522_prio_6;
	__be64 T1522_prio_7;
	__be64 T1522_novlan;
	__be64 T1522_loopbk;
	/* Transmit frames with a length of 1523 to 1548 octets */
	__be64 T1548_prio_0;
	__be64 T1548_prio_1;
	__be64 T1548_prio_2;
	__be64 T1548_prio_3;
	__be64 T1548_prio_4;
	__be64 T1548_prio_5;
	__be64 T1548_prio_6;
	__be64 T1548_prio_7;
	__be64 T1548_novlan;
	__be64 T1548_loopbk;
	/* Counts transmit frames with a length of 1549 to MTU bytes */
	__be64 T2MTU_prio_0;
	__be64 T2MTU_prio_1;
	__be64 T2MTU_prio_2;
	__be64 T2MTU_prio_3;
	__be64 T2MTU_prio_4;
	__be64 T2MTU_prio_5;
	__be64 T2MTU_prio_6;
	__be64 T2MTU_prio_7;
	__be64 T2MTU_novlan;
	__be64 T2MTU_loopbk;
	/* Transmit frames with a length greater than MTU octets and a good CRC. */
	__be64 TGIANT_prio_0;
	__be64 TGIANT_prio_1;
	__be64 TGIANT_prio_2;
	__be64 TGIANT_prio_3;
	__be64 TGIANT_prio_4;
	__be64 TGIANT_prio_5;
	__be64 TGIANT_prio_6;
	__be64 TGIANT_prio_7;
	__be64 TGIANT_novlan;
	__be64 TGIANT_loopbk;
	/* Transmit broadcast frames with a good CRC */
	__be64 TBCAST_prio_0;
	__be64 TBCAST_prio_1;
	__be64 TBCAST_prio_2;
	__be64 TBCAST_prio_3;
	__be64 TBCAST_prio_4;
	__be64 TBCAST_prio_5;
	__be64 TBCAST_prio_6;
	__be64 TBCAST_prio_7;
	__be64 TBCAST_novlan;
	__be64 TBCAST_loopbk;
	/* Transmit multicast frames with a good CRC */
	__be64 TMCAST_prio_0;
	__be64 TMCAST_prio_1;
	__be64 TMCAST_prio_2;
	__be64 TMCAST_prio_3;
	__be64 TMCAST_prio_4;
	__be64 TMCAST_prio_5;
	__be64 TMCAST_prio_6;
	__be64 TMCAST_prio_7;
	__be64 TMCAST_novlan;
	__be64 TMCAST_loopbk;
	/* Transmit good frames that are neither broadcast nor multicast */
	__be64 TTOTG_prio_0;
	__be64 TTOTG_prio_1;
	__be64 TTOTG_prio_2;
	__be64 TTOTG_prio_3;
	__be64 TTOTG_prio_4;
	__be64 TTOTG_prio_5;
	__be64 TTOTG_prio_6;
	__be64 TTOTG_prio_7;
	__be64 TTOTG_novlan;
	__be64 TTOTG_loopbk;

	/* total octets of transmitted frames, including framing characters */
	__be64 TTTLOCT_prio_0;
	/* total octets of transmitted frames, not including framing characters */
	__be64 TTTLOCT_NOFRM_prio_0;
	/* ifOutOctets */
	__be64 TOCT_prio_0;

	__be64 TTTLOCT_prio_1;
	__be64 TTTLOCT_NOFRM_prio_1;
	__be64 TOCT_prio_1;

	__be64 TTTLOCT_prio_2;
	__be64 TTTLOCT_NOFRM_prio_2;
	__be64 TOCT_prio_2;

	__be64 TTTLOCT_prio_3;
	__be64 TTTLOCT_NOFRM_prio_3;
	__be64 TOCT_prio_3;

	__be64 TTTLOCT_prio_4;
	__be64 TTTLOCT_NOFRM_prio_4;
	__be64 TOCT_prio_4;

	__be64 TTTLOCT_prio_5;
	__be64 TTTLOCT_NOFRM_prio_5;
	__be64 TOCT_prio_5;

	__be64 TTTLOCT_prio_6;
	__be64 TTTLOCT_NOFRM_prio_6;
	__be64 TOCT_prio_6;

	__be64 TTTLOCT_prio_7;
	__be64 TTTLOCT_NOFRM_prio_7;
	__be64 TOCT_prio_7;

	__be64 TTTLOCT_novlan;
	__be64 TTTLOCT_NOFRM_novlan;
	__be64 TOCT_novlan;

	__be64 TTTLOCT_loopbk;
	__be64 TTTLOCT_NOFRM_loopbk;
	__be64 TOCT_loopbk;

	/* Total frames transmitted with a good CRC that are not aborted */
	__be64 TTOT_prio_0;
	/* Total number of frames transmitted with 802.1Q encapsulation */
	__be64 T1Q_prio_0;
	__be64 reserved13;

	__be64 TTOT_prio_1;
	__be64 T1Q_prio_1;
	__be64 reserved14;

	__be64 TTOT_prio_2;
	__be64 T1Q_prio_2;
	__be64 reserved15;

	__be64 TTOT_prio_3;
	__be64 T1Q_prio_3;
	__be64 reserved16;

	__be64 TTOT_prio_4;
	__be64 T1Q_prio_4;
	__be64 reserved17;

	__be64 TTOT_prio_5;
	__be64 T1Q_prio_5;
	__be64 reserved18;

	__be64 TTOT_prio_6;
	__be64 T1Q_prio_6;
	__be64 reserved19;

	__be64 TTOT_prio_7;
	__be64 T1Q_prio_7;
	__be64 reserved20;

	__be64 TTOT_novlan;
	__be64 T1Q_novlan;
	__be64 reserved21;

	__be64 TTOT_loopbk;
	__be64 T1Q_loopbk;
	__be64 reserved22;

	/* Received frames with a length greater than MTU octets and a bad CRC */
	__be32 RJBBR;
	/* Received frames with a bad CRC that are not runts, jabbers,
	   or alignment errors */
	__be32 RCRC;
	/* Received frames with SFD with a length of less than 64 octets and a
	   bad CRC */
	__be32 RRUNT;
	/* Received frames with a length less than 64 octets and a good CRC */
	__be32 RSHORT;
	/* Total Number of Received Packets Dropped */
	__be32 RDROP;
	/* Drop due to overflow  */
	__be32 RdropOvflw;
	/* Drop due to overflow */
	__be32 RdropLength;
	/* Total of good frames. Does not include frames received with
	   frame-too-long, FCS, or length errors */
	__be32 RTOTFRMS;
	/* Total dropped Xmited packets */
	__be32 TDROP;
};
|
||||
|
||||
|
||||
#endif
|
||||
115
drivers/net/ethernet/mellanox/mlx4/en_resources.c
Normal file
115
drivers/net/ethernet/mellanox/mlx4/en_resources.c
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/mlx4/qp.h>
|
||||
|
||||
#include "mlx4_en.h"
|
||||
|
||||
/*
 * Fill a QP context structure for an EN (Ethernet) send or receive QP.
 *
 * @priv:      per-netdev driver state (provides mdev, port, doorbell)
 * @size:      number of WQEs in the queue (power of two)
 * @stride:    WQE stride in bytes (power of two)
 * @is_tx:     nonzero for a send QP, zero for a receive QP
 * @rss:       nonzero when the QP is part of an RSS indirection set
 * @qpn:       QP number to program into the context
 * @cqn:       completion queue number used for both send and receive
 * @user_prio: 802.1p priority to encode in sched_queue, or negative for none
 * @context:   output context, zeroed here before being filled
 *
 * The context layout and the magic constants below follow the ConnectX
 * firmware interface; values are converted to big endian as required.
 */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
                             int is_tx, int rss, int qpn, int cqn,
                             int user_prio, struct mlx4_qp_context *context)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        memset(context, 0, sizeof *context);
        /* 7 << 16 selects the QP state/service flags expected by FW;
         * the RSS flag is only set for RSS member QPs. */
        context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
        context->pd = cpu_to_be32(mdev->priv_pdn);
        context->mtu_msgmax = 0xff;     /* max MTU / message size encoding */
        /* RQ sizing is only meaningful for a plain RX QP (not TX, not RSS) */
        if (!is_tx && !rss)
                context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
        if (is_tx)
                context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
        else
                /* RX QPs still need a minimal SQ stride encoding */
                context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
        context->usr_page = cpu_to_be32(mdev->priv_uar.index);
        context->local_qpn = cpu_to_be32(qpn);
        context->pri_path.ackto = 1 & 0x07;
        /* sched_queue: 0x83 base | port in bits 7:6; user prio goes in 5:3 */
        context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
        if (user_prio >= 0) {
                context->pri_path.sched_queue |= user_prio << 3;
                context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
        }
        context->pri_path.counter_index = 0xff;  /* no counter attached */
        context->cqn_send = cpu_to_be32(cqn);
        context->cqn_recv = cpu_to_be32(cqn);
        context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
        /* bit 30 in param3 disables VLAN stripping when CTAG RX offload
         * is not enabled on the netdev */
        if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
                context->param3 |= cpu_to_be32(1 << 30);

        if (!is_tx && !rss &&
            (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
                en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
                context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
        }
}
|
||||
|
||||
|
||||
int mlx4_en_map_buffer(struct mlx4_buf *buf)
|
||||
{
|
||||
struct page **pages;
|
||||
int i;
|
||||
|
||||
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
|
||||
return 0;
|
||||
|
||||
pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i)
|
||||
pages[i] = virt_to_page(buf->page_list[i].buf);
|
||||
|
||||
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
|
||||
kfree(pages);
|
||||
if (!buf->direct.buf)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
|
||||
{
|
||||
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
|
||||
return;
|
||||
|
||||
vunmap(buf->direct.buf);
|
||||
}
|
||||
|
||||
/*
 * QP asynchronous event callback for EN QPs.  Events are intentionally
 * ignored; this stub exists only so the QP can be registered with an
 * event handler.
 */
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
        /* intentionally empty */
}
|
||||
|
||||
1191
drivers/net/ethernet/mellanox/mlx4/en_rx.c
Normal file
1191
drivers/net/ethernet/mellanox/mlx4/en_rx.c
Normal file
File diff suppressed because it is too large
Load diff
178
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
Normal file
178
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/mlx4/driver.h>
|
||||
|
||||
#include "mlx4_en.h"
|
||||
|
||||
|
||||
/*
 * Ask the firmware to run its internal hardware health check.
 * Returns 0 when the device reports healthy, nonzero otherwise
 * (the raw mlx4_cmd() status is passed through to the caller).
 */
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
        return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
|
||||
|
||||
/*
 * Build and transmit one self-addressed test frame for the loopback
 * selftest.  The payload is a deterministic byte ramp so the receive
 * path can validate it.
 *
 * Returns the status of mlx4_en_xmit() (0 / NETDEV_TX_OK on success).
 * NOTE(review): on a non-zero xmit status the skb ownership follows the
 * usual ndo_start_xmit contract — confirm no leak path here.
 */
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
        struct sk_buff *skb;
        struct ethhdr *ethh;
        unsigned char *packet;
        unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
        unsigned int i;
        int err;


        /* build the pkt before xmit */
        skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, NET_IP_ALIGN);

        /* Ethernet header: destination = our own MAC, zero source */
        ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
        packet = (unsigned char *)skb_put(skb, packet_size);
        memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
        memset(ethh->h_source, 0, ETH_ALEN);
        ethh->h_proto = htons(ETH_P_ARP);
        skb_set_mac_header(skb, 0);
        for (i = 0; i < packet_size; ++i)       /* fill our packet */
                packet[i] = (unsigned char)(i & 0xff);

        /* xmit the pkt */
        err = mlx4_en_xmit(skb, priv->dev);
        return err;
}
|
||||
|
||||
/*
 * Full loopback selftest: switch the port into loopback mode, transmit
 * one test frame, and poll until the RX path flags it as received
 * (priv->loopback_ok is set by the receive handler).
 *
 * Returns 0 when the frame made it back, 1 on failure.  Loopback mode
 * is always restored before returning.
 */
static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
        u32 loopback_ok = 0;
        int i;


        priv->loopback_ok = 0;
        priv->validate_loopback = 1;    /* tells the RX path to check payload */

        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

        /* xmit */
        if (mlx4_en_test_loopback_xmit(priv)) {
                en_err(priv, "Transmitting loopback packet failed\n");
                goto mlx4_en_test_loopback_exit;
        }

        /* polling for result */
        for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
                msleep(MLX4_EN_LOOPBACK_TIMEOUT);
                if (priv->loopback_ok) {
                        loopback_ok = 1;
                        break;
                }
        }
        if (!loopback_ok)
                en_err(priv, "Loopback packet didn't arrive\n");

mlx4_en_test_loopback_exit:

        /* restore normal (non-loopback) operation */
        priv->validate_loopback = 0;
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
        return !loopback_ok;
}
|
||||
|
||||
|
||||
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
|
||||
{
|
||||
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
|
||||
return -ENOMEM;
|
||||
if (priv->port_state.link_state == 1)
|
||||
return 0;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
|
||||
{
|
||||
|
||||
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
|
||||
return -ENOMEM;
|
||||
|
||||
/* The device supports 1G, 10G and 40G speeds */
|
||||
if (priv->port_state.link_speed != 1000 &&
|
||||
priv->port_state.link_speed != 10000 &&
|
||||
priv->port_state.link_speed != 40000)
|
||||
return priv->port_state.link_speed;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
 * ethtool self-test entry point (.self_test).
 *
 * buf[] layout: [0] interrupts, [1] link, [2] speed, [3] registers,
 * [4] loopback.  The register and loopback tests only run in offline
 * mode (ETH_TEST_FL_OFFLINE) and only when the device supports unicast
 * loopback.  Any nonzero result sets ETH_TEST_FL_FAILED in *flags.
 */
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i, carrier_ok;

        memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);

        if (*flags & ETH_TEST_FL_OFFLINE) {
                /* disable the interface */
                carrier_ok = netif_carrier_ok(dev);

                netif_carrier_off(dev);
                /* Wait until all tx queues are empty.
                 * there should not be any additional incoming traffic
                 * since we turned the carrier off */
                msleep(200);

                if (priv->mdev->dev->caps.flags &
                                        MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
                        buf[3] = mlx4_en_test_registers(priv);
                        if (priv->port_up)
                                buf[4] = mlx4_en_test_loopback(priv);
                }

                /* restore the carrier state we saw on entry */
                if (carrier_ok)
                        netif_carrier_on(dev);

        }
        /* online tests always run */
        buf[0] = mlx4_test_interrupts(mdev->dev);
        buf[1] = mlx4_en_test_link(priv);
        buf[2] = mlx4_en_test_speed(priv);

        for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
                if (buf[i])
                        *flags |= ETH_TEST_FL_FAILED;
        }
}
|
||||
1009
drivers/net/ethernet/mellanox/mlx4/en_tx.c
Normal file
1009
drivers/net/ethernet/mellanox/mlx4/en_tx.c
Normal file
File diff suppressed because it is too large
Load diff
1409
drivers/net/ethernet/mellanox/mlx4/eq.c
Normal file
1409
drivers/net/ethernet/mellanox/mlx4/eq.c
Normal file
File diff suppressed because it is too large
Load diff
2146
drivers/net/ethernet/mellanox/mlx4/fw.c
Normal file
2146
drivers/net/ethernet/mellanox/mlx4/fw.c
Normal file
File diff suppressed because it is too large
Load diff
230
drivers/net/ethernet/mellanox/mlx4/fw.h
Normal file
230
drivers/net/ethernet/mellanox/mlx4/fw.h
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
* Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef MLX4_FW_H
|
||||
#define MLX4_FW_H
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
|
||||
/* Parameters for the MOD_STAT_CFG firmware command. */
struct mlx4_mod_stat_cfg {
        u8 log_pg_sz;   /* log2 of the page size to configure */
        u8 log_pg_sz_m; /* modify-mask bit for log_pg_sz */
};
|
||||
|
||||
/*
 * Device capabilities as reported by the QUERY_DEV_CAP firmware command.
 * Per-port arrays are 1-based (index 0 unused), hence MLX4_MAX_PORTS + 1.
 */
struct mlx4_dev_cap {
        /* Queue and completion resources */
        int max_srq_sz;
        int max_qp_sz;
        int reserved_qps;
        int max_qps;
        int reserved_srqs;
        int max_srqs;
        int max_cq_sz;
        int reserved_cqs;
        int max_cqs;
        int max_mpts;
        int reserved_eqs;
        int max_eqs;
        /* Memory translation resources */
        int reserved_mtts;
        int max_mrw_sz;
        int reserved_mrws;
        int max_mtt_seg;
        /* RDMA limits */
        int max_requester_per_qp;
        int max_responder_per_qp;
        int max_rdma_global;
        int local_ca_ack_delay;
        int num_ports;
        u32 max_msg_sz;
        /* Per-port capabilities (1-based indexing) */
        int ib_mtu[MLX4_MAX_PORTS + 1];
        int max_port_width[MLX4_MAX_PORTS + 1];
        int max_vl[MLX4_MAX_PORTS + 1];
        int max_gids[MLX4_MAX_PORTS + 1];
        int max_pkeys[MLX4_MAX_PORTS + 1];
        u64 def_mac[MLX4_MAX_PORTS + 1];
        u16 eth_mtu[MLX4_MAX_PORTS + 1];
        int trans_type[MLX4_MAX_PORTS + 1];
        int vendor_oui[MLX4_MAX_PORTS + 1];
        u16 wavelength[MLX4_MAX_PORTS + 1];
        u64 trans_code[MLX4_MAX_PORTS + 1];
        u16 stat_rate_support;
        /* Flow steering */
        int fs_log_max_ucast_qp_range_size;
        int fs_max_num_qp_per_entry;
        /* Capability flag bitmasks (MLX4_DEV_CAP_FLAG*) */
        u64 flags;
        u64 flags2;
        /* UAR / blue-flame */
        int reserved_uars;
        int uar_size;
        int min_page_sz;
        int bf_reg_size;
        int bf_regs_per_page;
        /* Work queue geometry */
        int max_sq_sg;
        int max_sq_desc_sz;
        int max_rq_sg;
        int max_rq_desc_sz;
        /* Multicast groups */
        int max_qp_per_mcg;
        int reserved_mgms;
        int max_mcgs;
        /* Protection domains / XRC domains */
        int reserved_pds;
        int max_pds;
        int reserved_xrcds;
        int max_xrcds;
        /* ICM context entry sizes (bytes) */
        int qpc_entry_sz;
        int rdmarc_entry_sz;
        int altc_entry_sz;
        int aux_entry_sz;
        int srq_entry_sz;
        int cqc_entry_sz;
        int eqc_entry_sz;
        int dmpt_entry_sz;
        int cmpt_entry_sz;
        int mtt_entry_sz;
        int resize_srq;
        u32 bmme_flags;
        u32 reserved_lkey;
        u64 max_icm_sz;
        int max_gso_sz;
        int max_rss_tbl_sz;
        /* Per-port type/steering hints */
        u8  supported_port_types[MLX4_MAX_PORTS + 1];
        u8  suggested_type[MLX4_MAX_PORTS + 1];
        u8  default_sense[MLX4_MAX_PORTS + 1];
        u8  log_max_macs[MLX4_MAX_PORTS + 1];
        u8  log_max_vlans[MLX4_MAX_PORTS + 1];
        u32 max_counters;
};
|
||||
|
||||
/*
 * Per-function (VF/PF) capabilities and quotas as reported by the
 * QUERY_FUNC_CAP firmware command.
 */
struct mlx4_func_cap {
        u8  num_ports;
        u8  flags;
        u32 pf_context_behaviour;
        /* Resource quotas granted to this function */
        int qp_quota;
        int cq_quota;
        int srq_quota;
        int mpt_quota;
        int mtt_quota;
        int max_eq;
        int reserved_eq;
        int mcg_quota;
        /* Special QP (QP0/QP1) numbers for para-virtualized operation */
        u32 qp0_qkey;
        u32 qp0_tunnel_qpn;
        u32 qp0_proxy_qpn;
        u32 qp1_tunnel_qpn;
        u32 qp1_proxy_qpn;
        u8  physical_port;
        u8  port_flags;
        u8  flags1;
        u64 phys_port_id;
};
|
||||
|
||||
/* Adapter identity returned by the QUERY_ADAPTER firmware command. */
struct mlx4_adapter {
        char board_id[MLX4_BOARD_ID_LEN]; /* PSID / board identifier string */
        u8   inta_pin;                    /* legacy INTA interrupt pin */
};
|
||||
|
||||
/*
 * Parameters passed to the INIT_HCA firmware command (and returned by
 * QUERY_HCA): ICM virtual base addresses for each context table plus
 * log2 sizes of the resource tables.
 */
struct mlx4_init_hca_param {
        /* ICM virtual addresses of the firmware context tables */
        u64 qpc_base;
        u64 rdmarc_base;
        u64 auxc_base;
        u64 altc_base;
        u64 srqc_base;
        u64 cqc_base;
        u64 eqc_base;
        u64 mc_base;
        u64 dmpt_base;
        u64 cmpt_base;
        u64 mtt_base;
        u64 global_caps;
        /* Multicast table geometry */
        u16 log_mc_entry_sz;
        u16 log_mc_hash_sz;
        u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
        /* log2 counts of each resource type */
        u8  log_num_qps;
        u8  log_num_srqs;
        u8  log_num_cqs;
        u8  log_num_eqs;
        u8  log_rd_per_qp;
        u8  log_mc_table_sz;
        u8  log_mpt_sz;
        u8  log_uar_sz;
        u8  mw_enabled;  /* Enable memory windows */
        u8  uar_page_sz; /* log pg sz in 4k chunks */
        u8  steering_mode; /* for QUERY_HCA */
        u64 dev_cap_enabled;
        u16 cqe_size; /* For use only when CQE stride feature enabled */
        u16 eqe_size; /* For use only when EQE stride feature enabled */
};
|
||||
|
||||
/* Per-port InfiniBand initialization parameters (INIT_IB/INIT_PORT). */
struct mlx4_init_ib_param {
        int port_width;
        int vl_cap;
        int mtu_cap;
        u16 gid_cap;
        u16 pkey_cap;
        /* set_* flags indicate whether the corresponding GUID is valid */
        int set_guid0;
        u64 guid0;
        int set_node_guid;
        u64 node_guid;
        int set_si_guid;
        u64 si_guid;
};
|
||||
|
||||
/* Parameters for the SET_IB firmware command (port attribute update). */
struct mlx4_set_ib_param {
        int set_si_guid;      /* whether si_guid below is to be applied */
        int reset_qkey_viol;  /* reset the QKey violation counter */
        u64 si_guid;
        u32 cap_mask;         /* port capability mask to program */
};
|
||||
|
||||
/* Capability query commands */
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
                        struct mlx4_func_cap *func_cap);
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
                                struct mlx4_cmd_mailbox *outbox,
                                struct mlx4_cmd_info *cmd);
/* Firmware area mapping and startup */
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
/* HCA lifecycle */
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
/* ICM memory management commands */
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
/* Miscellaneous */
int mlx4_NOP(struct mlx4_dev *dev);
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
void mlx4_opreq_action(struct work_struct *work);

#endif /* MLX4_FW_H */
|
||||
461
drivers/net/ethernet/mellanox/mlx4/icm.c
Normal file
461
drivers/net/ethernet/mellanox/mlx4/icm.c
Normal file
|
|
@ -0,0 +1,461 @@
|
|||
/*
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
#include "fw.h"
|
||||
|
||||
/*
|
||||
* We allocate in as big chunks as we can, up to a maximum of 256 KB
|
||||
* per chunk.
|
||||
*/
|
||||
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18, /* 256 KB max per physical allocation */
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18  /* 256 KB per ICM table chunk */
};
|
||||
|
||||
/*
 * Free a streaming-DMA ICM chunk: unmap its scatterlist (if it was
 * mapped) and release each page allocation.
 */
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        /* nsg > 0 means pci_map_sg() succeeded for this chunk */
        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}
|
||||
|
||||
/*
 * Free a coherent-DMA ICM chunk: each scatterlist entry was allocated
 * with dma_alloc_coherent(), so release it the same way.
 */
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
}
|
||||
|
||||
/*
 * Free an entire ICM allocation: every chunk on its list plus the icm
 * descriptor itself.  @coherent must match the value used at alloc
 * time.  Safe to call with icm == NULL.
 */
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        /* _safe variant: each chunk is freed while walking the list */
        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}
|
||||
|
||||
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
|
||||
gfp_t gfp_mask, int node)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
page = alloc_pages_node(node, gfp_mask, order);
|
||||
if (!page) {
|
||||
page = alloc_pages(gfp_mask, order);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sg_set_page(mem, page, PAGE_SIZE << order, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Allocate 2^order pages of coherent DMA memory for one scatterlist
 * entry.  The DMA address is stored directly in the sg entry since no
 * pci_map_sg() pass happens for coherent chunks.
 */
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                   int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);    /* coherent buffers must be page aligned */
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}
|
||||
|
||||
/*
 * Allocate @npages worth of ICM memory as a list of chunks, each chunk
 * holding up to MLX4_ICM_CHUNK_LEN scatterlist entries.
 *
 * Allocation starts at the largest order (MLX4_ICM_ALLOC_SIZE) and
 * halves the order on failure, so the function degrades gracefully
 * under memory fragmentation.  Streaming (non-coherent) chunks are
 * DMA-mapped with pci_map_sg() once full (or at the end).
 *
 * Returns the new icm descriptor, or NULL on failure (everything
 * allocated so far is released via mlx4_free_icm()).
 */
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        /* Prefer a NUMA-local descriptor; fall back to any node */
        icm = kmalloc_node(sizeof(*icm),
                           gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
                           dev->numa_node);
        if (!icm) {
                icm = kmalloc(sizeof(*icm),
                              gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                if (!icm)
                        return NULL;
        }

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        /* Start a new chunk, again NUMA-local if possible */
                        chunk = kmalloc_node(sizeof(*chunk),
                                             gfp_mask & ~(__GFP_HIGHMEM |
                                                          __GFP_NOWARN),
                                             dev->numa_node);
                        if (!chunk) {
                                chunk = kmalloc(sizeof(*chunk),
                                                gfp_mask & ~(__GFP_HIGHMEM |
                                                             __GFP_NOWARN));
                                if (!chunk)
                                        goto fail;
                        }

                        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                /* Never allocate more than what is still needed */
                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask,
                                                   dev->numa_node);

                if (ret) {
                        /* Retry with a smaller order before giving up */
                        if (--cur_order < 0)
                                goto fail;
                        else
                                continue;
                }

                ++chunk->npages;

                if (coherent)
                        ++chunk->nsg;
                else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                        /* Chunk full: map the whole scatterlist for DMA */
                        chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                chunk->npages,
                                                PCI_DMA_BIDIRECTIONAL);

                        if (chunk->nsg <= 0)
                                goto fail;
                }

                if (chunk->npages == MLX4_ICM_CHUNK_LEN)
                        chunk = NULL;

                npages -= 1 << cur_order;
        }

        /* Map the final, partially filled streaming chunk */
        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}
|
||||
|
||||
/* Map an ICM allocation at the given device-virtual address. */
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

/* Unmap page_count ICM pages starting at device-virtual address virt. */
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

/* Map the auxiliary ICM area (virt == -1: firmware picks placement). */
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

/* Unmap the entire auxiliary ICM area. */
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
|
||||
|
||||
/*
 * Take a reference on the ICM table chunk that backs object @obj,
 * allocating and firmware-mapping the chunk on first use.
 *
 * Serialized by table->mutex.  Returns 0 on success, -ENOMEM on
 * allocation or MAP_ICM failure.
 */
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
                   gfp_t gfp)
{
        /* Chunk index: objects per chunk = CHUNK_SIZE / obj_size */
        u32 i = (obj & (table->num_obj - 1)) /
                (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        /* Fast path: chunk already present, just bump the refcount */
        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? gfp : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                /* Mapping failed: roll back the allocation */
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}
|
||||
|
||||
/*
 * Drop a reference on the ICM chunk backing object @obj; when the
 * refcount hits zero the chunk is unmapped from firmware and freed.
 * Pairs with mlx4_table_get().
 */
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
        u32 i;
        u64 offset;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
                mlx4_UNMAP_ICM(dev, table->virt + offset,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}
|
||||
|
||||
/*
 * Return the kernel virtual address of object @obj within a lowmem ICM
 * table, optionally also reporting its DMA address via @dma_handle.
 * Returns NULL for highmem tables or when the backing chunk is absent.
 */
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
                      dma_addr_t *dma_handle)
{
        int offset, dma_offset, i;
        u64 idx;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        /* Only lowmem tables have a CPU-addressable mapping */
        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        /* Walk the chunk's sg entries, tracking both the CPU-page offset
         * and the (possibly differently merged) DMA offset */
        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}
|
||||
|
||||
/*
 * Take references on every chunk covering objects [start, end]
 * (inclusive), stepping one chunk at a time.  On failure, all chunks
 * acquired so far are released again before returning the error.
 */
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         u32 start, u32 end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int err;
        u32 i;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i, GFP_KERNEL);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        /* Unwind: put every chunk taken before the failure point */
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}
|
||||
|
||||
/*
 * Release the references taken by mlx4_table_get_range() for objects
 * [start, end] inclusive, one chunk per step.
 */
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          u32 start, u32 end)
{
        u32 step = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        u32 obj;

        for (obj = start; obj <= end; obj += step)
                mlx4_table_put(dev, table, obj);
}
|
||||
|
||||
/*
 * Initialize an ICM table descriptor and pre-allocate + map the chunks
 * that hold the first @reserved objects (firmware-owned objects that
 * must stay resident for the device's lifetime).
 *
 * @virt:         device-virtual base address of the table in ICM space
 * @obj_size:     size of one object in bytes
 * @nobj:         total number of objects the table can hold
 * @reserved:     number of leading objects to allocate permanently
 * @use_lowmem:   allocate chunks from lowmem (CPU addressable)
 * @use_coherent: use coherent DMA memory for the chunks
 *
 * Returns 0 on success, -ENOMEM on failure (all partial allocations
 * are unwound).
 */
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, u32 nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;
        u64 size;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt = virt;
        table->num_icm = num_icm;
        table->num_obj = nobj;
        table->obj_size = obj_size;
        table->lowmem = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        size = (u64) nobj * obj_size;
        /* Allocate and map every chunk touched by the reserved range */
        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                /* The last chunk may be smaller than a full chunk */
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
                        chunk_size = PAGE_ALIGN(size -
                                        i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        /* Unwind every chunk mapped so far */
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        kfree(table->icm);

        return -ENOMEM;
}
|
||||
|
||||
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < table->num_icm; ++i)
|
||||
if (table->icm[i]) {
|
||||
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
|
||||
mlx4_free_icm(dev, table->icm[i], table->coherent);
|
||||
}
|
||||
|
||||
kfree(table->icm);
|
||||
}
|
||||
129
drivers/net/ethernet/mellanox/mlx4/icm.h
Normal file
129
drivers/net/ethernet/mellanox/mlx4/icm.h
Normal file
|
|
@ -0,0 +1,129 @@
|
|||
/*
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef MLX4_ICM_H
|
||||
#define MLX4_ICM_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#define MLX4_ICM_CHUNK_LEN \
|
||||
((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
|
||||
(sizeof (struct scatterlist)))
|
||||
|
||||
enum {
|
||||
MLX4_ICM_PAGE_SHIFT = 12,
|
||||
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
|
||||
};
|
||||
|
||||
struct mlx4_icm_chunk {
|
||||
struct list_head list;
|
||||
int npages;
|
||||
int nsg;
|
||||
struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
|
||||
};
|
||||
|
||||
struct mlx4_icm {
|
||||
struct list_head chunk_list;
|
||||
int refcount;
|
||||
};
|
||||
|
||||
struct mlx4_icm_iter {
|
||||
struct mlx4_icm *icm;
|
||||
struct mlx4_icm_chunk *chunk;
|
||||
int page_idx;
|
||||
};
|
||||
|
||||
struct mlx4_dev;
|
||||
|
||||
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
gfp_t gfp_mask, int coherent);
|
||||
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
|
||||
|
||||
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
|
||||
gfp_t gfp);
|
||||
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
|
||||
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u32 start, u32 end);
|
||||
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u32 start, u32 end);
|
||||
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u64 virt, int obj_size, u32 nobj, int reserved,
|
||||
int use_lowmem, int use_coherent);
|
||||
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
|
||||
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
|
||||
|
||||
static inline void mlx4_icm_first(struct mlx4_icm *icm,
|
||||
struct mlx4_icm_iter *iter)
|
||||
{
|
||||
iter->icm = icm;
|
||||
iter->chunk = list_empty(&icm->chunk_list) ?
|
||||
NULL : list_entry(icm->chunk_list.next,
|
||||
struct mlx4_icm_chunk, list);
|
||||
iter->page_idx = 0;
|
||||
}
|
||||
|
||||
static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
|
||||
{
|
||||
return !iter->chunk;
|
||||
}
|
||||
|
||||
static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
|
||||
{
|
||||
if (++iter->page_idx >= iter->chunk->nsg) {
|
||||
if (iter->chunk->list.next == &iter->icm->chunk_list) {
|
||||
iter->chunk = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
iter->chunk = list_entry(iter->chunk->list.next,
|
||||
struct mlx4_icm_chunk, list);
|
||||
iter->page_idx = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
|
||||
{
|
||||
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
|
||||
}
|
||||
|
||||
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
|
||||
{
|
||||
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
|
||||
}
|
||||
|
||||
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
|
||||
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
|
||||
|
||||
#endif /* MLX4_ICM_H */
|
||||
188
drivers/net/ethernet/mellanox/mlx4/intf.c
Normal file
188
drivers/net/ethernet/mellanox/mlx4/intf.c
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
/*
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
struct mlx4_device_context {
|
||||
struct list_head list;
|
||||
struct mlx4_interface *intf;
|
||||
void *context;
|
||||
};
|
||||
|
||||
static LIST_HEAD(intf_list);
|
||||
static LIST_HEAD(dev_list);
|
||||
static DEFINE_MUTEX(intf_mutex);
|
||||
|
||||
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
|
||||
{
|
||||
struct mlx4_device_context *dev_ctx;
|
||||
|
||||
dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
|
||||
if (!dev_ctx)
|
||||
return;
|
||||
|
||||
dev_ctx->intf = intf;
|
||||
dev_ctx->context = intf->add(&priv->dev);
|
||||
|
||||
if (dev_ctx->context) {
|
||||
spin_lock_irq(&priv->ctx_lock);
|
||||
list_add_tail(&dev_ctx->list, &priv->ctx_list);
|
||||
spin_unlock_irq(&priv->ctx_lock);
|
||||
} else
|
||||
kfree(dev_ctx);
|
||||
}
|
||||
|
||||
/*
 * Detach interface @intf from device @priv: find its context entry, unlink
 * it from the device's context list, invoke the interface's remove() hook,
 * and free the entry. At most one entry per interface is expected, so the
 * function returns after the first match.
 *
 * NOTE(review): the list is traversed without ctx_lock held; only the
 * list_del itself is protected. Presumably safe because callers hold
 * intf_mutex, serializing add/remove — confirm against the callers.
 */
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			/* remove() is called outside the spinlock. */
			intf->remove(&priv->dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}
|
||||
|
||||
int mlx4_register_interface(struct mlx4_interface *intf)
|
||||
{
|
||||
struct mlx4_priv *priv;
|
||||
|
||||
if (!intf->add || !intf->remove)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
list_add_tail(&intf->list, &intf_list);
|
||||
list_for_each_entry(priv, &dev_list, dev_list)
|
||||
mlx4_add_device(intf, priv);
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_register_interface);
|
||||
|
||||
void mlx4_unregister_interface(struct mlx4_interface *intf)
|
||||
{
|
||||
struct mlx4_priv *priv;
|
||||
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
list_for_each_entry(priv, &dev_list, dev_list)
|
||||
mlx4_remove_device(intf, priv);
|
||||
|
||||
list_del(&intf->list);
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
|
||||
|
||||
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
|
||||
unsigned long param)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_device_context *dev_ctx;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->ctx_lock, flags);
|
||||
|
||||
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
|
||||
if (dev_ctx->intf->event)
|
||||
dev_ctx->intf->event(dev, dev_ctx->context, type, param);
|
||||
|
||||
spin_unlock_irqrestore(&priv->ctx_lock, flags);
|
||||
}
|
||||
|
||||
int mlx4_register_device(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_interface *intf;
|
||||
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
list_add_tail(&priv->dev_list, &dev_list);
|
||||
list_for_each_entry(intf, &intf_list, list)
|
||||
mlx4_add_device(intf, priv);
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
if (!mlx4_is_slave(dev))
|
||||
mlx4_start_catas_poll(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_unregister_device(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_interface *intf;
|
||||
|
||||
if (!mlx4_is_slave(dev))
|
||||
mlx4_stop_catas_poll(dev);
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
list_for_each_entry(intf, &intf_list, list)
|
||||
mlx4_remove_device(intf, priv);
|
||||
|
||||
list_del(&priv->dev_list);
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
}
|
||||
|
||||
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_device_context *dev_ctx;
|
||||
unsigned long flags;
|
||||
void *result = NULL;
|
||||
|
||||
spin_lock_irqsave(&priv->ctx_lock, flags);
|
||||
|
||||
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
|
||||
if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
|
||||
result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
|
||||
break;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->ctx_lock, flags);
|
||||
|
||||
return result;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
|
||||
2998
drivers/net/ethernet/mellanox/mlx4/main.c
Normal file
2998
drivers/net/ethernet/mellanox/mlx4/main.c
Normal file
File diff suppressed because it is too large
Load diff
1613
drivers/net/ethernet/mellanox/mlx4/mcg.c
Normal file
1613
drivers/net/ethernet/mellanox/mlx4/mcg.c
Normal file
File diff suppressed because it is too large
Load diff
1316
drivers/net/ethernet/mellanox/mlx4/mlx4.h
Normal file
1316
drivers/net/ethernet/mellanox/mlx4/mlx4.h
Normal file
File diff suppressed because it is too large
Load diff
882
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
Normal file
882
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
Normal file
|
|
@ -0,0 +1,882 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _MLX4_EN_H_
|
||||
#define _MLX4_EN_H_
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/net_tstamp.h>
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
#include <linux/dcbnl.h>
|
||||
#endif
|
||||
#include <linux/cpu_rmap.h>
|
||||
#include <linux/ptp_clock_kernel.h>
|
||||
|
||||
#include <linux/mlx4/device.h>
|
||||
#include <linux/mlx4/qp.h>
|
||||
#include <linux/mlx4/cq.h>
|
||||
#include <linux/mlx4/srq.h>
|
||||
#include <linux/mlx4/doorbell.h>
|
||||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#include "en_port.h"
|
||||
|
||||
#define DRV_NAME "mlx4_en"
|
||||
#define DRV_VERSION "2.2-1"
|
||||
#define DRV_RELDATE "Feb 2014"
|
||||
|
||||
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
|
||||
|
||||
/*
|
||||
* Device constants
|
||||
*/
|
||||
|
||||
|
||||
#define MLX4_EN_PAGE_SHIFT 12
|
||||
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
|
||||
#define DEF_RX_RINGS 16
|
||||
#define MAX_RX_RINGS 128
|
||||
#define MIN_RX_RINGS 4
|
||||
#define TXBB_SIZE 64
|
||||
#define HEADROOM (2048 / TXBB_SIZE + 1)
|
||||
#define STAMP_STRIDE 64
|
||||
#define STAMP_DWORDS (STAMP_STRIDE / 4)
|
||||
#define STAMP_SHIFT 31
|
||||
#define STAMP_VAL 0x7fffffff
|
||||
#define STATS_DELAY (HZ / 4)
|
||||
#define SERVICE_TASK_DELAY (HZ / 4)
|
||||
#define MAX_NUM_OF_FS_RULES 256
|
||||
|
||||
#define MLX4_EN_FILTER_HASH_SHIFT 4
|
||||
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
|
||||
|
||||
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
|
||||
#define MAX_DESC_SIZE 512
|
||||
#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
|
||||
|
||||
/*
|
||||
* OS related constants and tunables
|
||||
*/
|
||||
|
||||
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
|
||||
|
||||
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
|
||||
|
||||
/* Use the maximum between 16384 and a single page */
|
||||
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
|
||||
|
||||
#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER
|
||||
|
||||
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
|
||||
* and 4K allocations) */
|
||||
enum {
|
||||
FRAG_SZ0 = 1536 - NET_IP_ALIGN,
|
||||
FRAG_SZ1 = 4096,
|
||||
FRAG_SZ2 = 4096,
|
||||
FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
|
||||
};
|
||||
#define MLX4_EN_MAX_RX_FRAGS 4
|
||||
|
||||
/* Maximum ring sizes */
|
||||
#define MLX4_EN_MAX_TX_SIZE 8192
|
||||
#define MLX4_EN_MAX_RX_SIZE 8192
|
||||
|
||||
/* Minimum ring size for our page-allocation scheme to work */
|
||||
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
|
||||
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
|
||||
|
||||
#define MLX4_EN_SMALL_PKT_SIZE 64
|
||||
#define MLX4_EN_MIN_TX_RING_P_UP 1
|
||||
#define MLX4_EN_MAX_TX_RING_P_UP 32
|
||||
#define MLX4_EN_NUM_UP 8
|
||||
#define MLX4_EN_DEF_TX_RING_SIZE 512
|
||||
#define MLX4_EN_DEF_RX_RING_SIZE 1024
|
||||
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
|
||||
MLX4_EN_NUM_UP)
|
||||
|
||||
#define MLX4_EN_DEFAULT_TX_WORK 256
|
||||
|
||||
/* Target number of packets to coalesce with interrupt moderation */
|
||||
#define MLX4_EN_RX_COAL_TARGET 44
|
||||
#define MLX4_EN_RX_COAL_TIME 0x10
|
||||
|
||||
#define MLX4_EN_TX_COAL_PKTS 16
|
||||
#define MLX4_EN_TX_COAL_TIME 0x10
|
||||
|
||||
#define MLX4_EN_RX_RATE_LOW 400000
|
||||
#define MLX4_EN_RX_COAL_TIME_LOW 0
|
||||
#define MLX4_EN_RX_RATE_HIGH 450000
|
||||
#define MLX4_EN_RX_COAL_TIME_HIGH 128
|
||||
#define MLX4_EN_RX_SIZE_THRESH 1024
|
||||
#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
|
||||
#define MLX4_EN_SAMPLE_INTERVAL 0
|
||||
#define MLX4_EN_AVG_PKT_SMALL 256
|
||||
|
||||
#define MLX4_EN_AUTO_CONF 0xffff
|
||||
|
||||
#define MLX4_EN_DEF_RX_PAUSE 1
|
||||
#define MLX4_EN_DEF_TX_PAUSE 1
|
||||
|
||||
/* Interval between successive polls in the Tx routine when polling is used
|
||||
instead of interrupts (in per-core Tx rings) - should be power of 2 */
|
||||
#define MLX4_EN_TX_POLL_MODER 16
|
||||
#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
|
||||
|
||||
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
|
||||
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
|
||||
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
|
||||
|
||||
#define MLX4_EN_MIN_MTU 46
|
||||
#define ETH_BCAST 0xffffffffffffULL
|
||||
|
||||
#define MLX4_EN_LOOPBACK_RETRIES 5
|
||||
#define MLX4_EN_LOOPBACK_TIMEOUT 100
|
||||
|
||||
#ifdef MLX4_EN_PERF_STAT
|
||||
/* Number of samples to 'average' */
|
||||
#define AVG_SIZE 128
|
||||
#define AVG_FACTOR 1024
|
||||
#define NUM_PERF_STATS NUM_PERF_COUNTERS
|
||||
|
||||
#define INC_PERF_COUNTER(cnt) (++(cnt))
|
||||
#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
|
||||
#define AVG_PERF_COUNTER(cnt, sample) \
|
||||
((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
|
||||
#define GET_PERF_COUNTER(cnt) (cnt)
|
||||
#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
|
||||
|
||||
#else
|
||||
|
||||
#define NUM_PERF_STATS 0
|
||||
#define INC_PERF_COUNTER(cnt) do {} while (0)
|
||||
#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
|
||||
#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
|
||||
#define GET_PERF_COUNTER(cnt) (0)
|
||||
#define GET_AVG_PERF_COUNTER(cnt) (0)
|
||||
#endif /* MLX4_EN_PERF_STAT */
|
||||
|
||||
/* Constants for TX flow */
|
||||
enum {
|
||||
MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
|
||||
MAX_BF = 256,
|
||||
MIN_PKT_LEN = 17,
|
||||
};
|
||||
|
||||
/*
|
||||
* Configurables
|
||||
*/
|
||||
|
||||
enum cq_type {
|
||||
RX = 0,
|
||||
TX = 1,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Useful macros
|
||||
*/
|
||||
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
|
||||
#define XNOR(x, y) (!(x) == !(y))
|
||||
|
||||
|
||||
struct mlx4_en_tx_info {
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t map0_dma;
|
||||
u32 map0_byte_count;
|
||||
u32 nr_txbb;
|
||||
u32 nr_bytes;
|
||||
u8 linear;
|
||||
u8 data_offset;
|
||||
u8 inl;
|
||||
u8 ts_requested;
|
||||
u8 nr_maps;
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
|
||||
#define MLX4_EN_BIT_DESC_OWN 0x80000000
|
||||
#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
|
||||
#define MLX4_EN_MEMTYPE_PAD 0x100
|
||||
#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
|
||||
|
||||
|
||||
struct mlx4_en_tx_desc {
|
||||
struct mlx4_wqe_ctrl_seg ctrl;
|
||||
union {
|
||||
struct mlx4_wqe_data_seg data; /* at least one data segment */
|
||||
struct mlx4_wqe_lso_seg lso;
|
||||
struct mlx4_wqe_inline_seg inl;
|
||||
};
|
||||
};
|
||||
|
||||
#define MLX4_EN_USE_SRQ 0x01000000
|
||||
|
||||
#define MLX4_EN_CX3_LOW_ID 0x1000
|
||||
#define MLX4_EN_CX3_HIGH_ID 0x1005
|
||||
|
||||
struct mlx4_en_rx_alloc {
|
||||
struct page *page;
|
||||
dma_addr_t dma;
|
||||
u32 page_offset;
|
||||
u32 page_size;
|
||||
};
|
||||
|
||||
struct mlx4_en_tx_ring {
|
||||
/* cache line used and dirtied in tx completion
|
||||
* (mlx4_en_free_tx_buf())
|
||||
*/
|
||||
u32 last_nr_txbb;
|
||||
u32 cons;
|
||||
unsigned long wake_queue;
|
||||
|
||||
/* cache line used and dirtied in mlx4_en_xmit() */
|
||||
u32 prod ____cacheline_aligned_in_smp;
|
||||
unsigned long bytes;
|
||||
unsigned long packets;
|
||||
unsigned long tx_csum;
|
||||
unsigned long tso_packets;
|
||||
unsigned long xmit_more;
|
||||
struct mlx4_bf bf;
|
||||
unsigned long queue_stopped;
|
||||
|
||||
/* Following part should be mostly read */
|
||||
cpumask_t affinity_mask;
|
||||
struct mlx4_qp qp;
|
||||
struct mlx4_hwq_resources wqres;
|
||||
u32 size; /* number of TXBBs */
|
||||
u32 size_mask;
|
||||
u16 stride;
|
||||
u16 cqn; /* index of port CQ associated with this ring */
|
||||
u32 buf_size;
|
||||
__be32 doorbell_qpn;
|
||||
__be32 mr_key;
|
||||
void *buf;
|
||||
struct mlx4_en_tx_info *tx_info;
|
||||
u8 *bounce_buf;
|
||||
struct mlx4_qp_context context;
|
||||
int qpn;
|
||||
enum mlx4_qp_state qp_state;
|
||||
u8 queue_index;
|
||||
bool bf_enabled;
|
||||
bool bf_alloced;
|
||||
struct netdev_queue *tx_queue;
|
||||
int hwtstamp_tx_type;
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
struct mlx4_en_rx_desc {
|
||||
/* actual number of entries depends on rx ring stride */
|
||||
struct mlx4_wqe_data_seg data[0];
|
||||
};
|
||||
|
||||
struct mlx4_en_rx_ring {
|
||||
struct mlx4_hwq_resources wqres;
|
||||
struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
|
||||
u32 size ; /* number of Rx descs*/
|
||||
u32 actual_size;
|
||||
u32 size_mask;
|
||||
u16 stride;
|
||||
u16 log_stride;
|
||||
u16 cqn; /* index of port CQ associated with this ring */
|
||||
u32 prod;
|
||||
u32 cons;
|
||||
u32 buf_size;
|
||||
u8 fcs_del;
|
||||
void *buf;
|
||||
void *rx_info;
|
||||
unsigned long bytes;
|
||||
unsigned long packets;
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
unsigned long yields;
|
||||
unsigned long misses;
|
||||
unsigned long cleaned;
|
||||
#endif
|
||||
unsigned long csum_ok;
|
||||
unsigned long csum_none;
|
||||
int hwtstamp_rx_filter;
|
||||
cpumask_var_t affinity_mask;
|
||||
};
|
||||
|
||||
struct mlx4_en_cq {
|
||||
struct mlx4_cq mcq;
|
||||
struct mlx4_hwq_resources wqres;
|
||||
int ring;
|
||||
struct net_device *dev;
|
||||
struct napi_struct napi;
|
||||
int size;
|
||||
int buf_size;
|
||||
unsigned vector;
|
||||
enum cq_type is_tx;
|
||||
u16 moder_time;
|
||||
u16 moder_cnt;
|
||||
struct mlx4_cqe *buf;
|
||||
#define MLX4_EN_OPCODE_ERROR 0x1e
|
||||
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
unsigned int state;
|
||||
#define MLX4_EN_CQ_STATE_IDLE 0
|
||||
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
|
||||
#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */
|
||||
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
|
||||
#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */
|
||||
#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */
|
||||
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
|
||||
#endif /* CONFIG_NET_RX_BUSY_POLL */
|
||||
struct irq_desc *irq_desc;
|
||||
};
|
||||
|
||||
struct mlx4_en_port_profile {
|
||||
u32 flags;
|
||||
u32 tx_ring_num;
|
||||
u32 rx_ring_num;
|
||||
u32 tx_ring_size;
|
||||
u32 rx_ring_size;
|
||||
u8 rx_pause;
|
||||
u8 rx_ppp;
|
||||
u8 tx_pause;
|
||||
u8 tx_ppp;
|
||||
int rss_rings;
|
||||
int inline_thold;
|
||||
};
|
||||
|
||||
struct mlx4_en_profile {
|
||||
int rss_xor;
|
||||
int udp_rss;
|
||||
u8 rss_mask;
|
||||
u32 active_ports;
|
||||
u32 small_pkt_int;
|
||||
u8 no_reset;
|
||||
u8 num_tx_rings_p_up;
|
||||
struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
|
||||
};
|
||||
|
||||
struct mlx4_en_dev {
|
||||
struct mlx4_dev *dev;
|
||||
struct pci_dev *pdev;
|
||||
struct mutex state_lock;
|
||||
struct net_device *pndev[MLX4_MAX_PORTS + 1];
|
||||
u32 port_cnt;
|
||||
bool device_up;
|
||||
struct mlx4_en_profile profile;
|
||||
u32 LSO_support;
|
||||
struct workqueue_struct *workqueue;
|
||||
struct device *dma_device;
|
||||
void __iomem *uar_map;
|
||||
struct mlx4_uar priv_uar;
|
||||
struct mlx4_mr mr;
|
||||
u32 priv_pdn;
|
||||
spinlock_t uar_lock;
|
||||
u8 mac_removed[MLX4_MAX_PORTS + 1];
|
||||
rwlock_t clock_lock;
|
||||
u32 nominal_c_mult;
|
||||
struct cyclecounter cycles;
|
||||
struct timecounter clock;
|
||||
unsigned long last_overflow_check;
|
||||
unsigned long overflow_period;
|
||||
struct ptp_clock *ptp_clock;
|
||||
struct ptp_clock_info ptp_clock_info;
|
||||
};
|
||||
|
||||
|
||||
struct mlx4_en_rss_map {
|
||||
int base_qpn;
|
||||
struct mlx4_qp qps[MAX_RX_RINGS];
|
||||
enum mlx4_qp_state state[MAX_RX_RINGS];
|
||||
struct mlx4_qp indir_qp;
|
||||
enum mlx4_qp_state indir_state;
|
||||
};
|
||||
|
||||
struct mlx4_en_port_state {
|
||||
int link_state;
|
||||
int link_speed;
|
||||
int transciver;
|
||||
};
|
||||
|
||||
struct mlx4_en_pkt_stats {
|
||||
unsigned long broadcast;
|
||||
unsigned long rx_prio[8];
|
||||
unsigned long tx_prio[8];
|
||||
#define NUM_PKT_STATS 17
|
||||
};
|
||||
|
||||
struct mlx4_en_port_stats {
|
||||
unsigned long tso_packets;
|
||||
unsigned long xmit_more;
|
||||
unsigned long queue_stopped;
|
||||
unsigned long wake_queue;
|
||||
unsigned long tx_timeout;
|
||||
unsigned long rx_alloc_failed;
|
||||
unsigned long rx_chksum_good;
|
||||
unsigned long rx_chksum_none;
|
||||
unsigned long tx_chksum_offload;
|
||||
#define NUM_PORT_STATS 9
|
||||
};
|
||||
|
||||
struct mlx4_en_perf_stats {
|
||||
u32 tx_poll;
|
||||
u64 tx_pktsz_avg;
|
||||
u32 inflight_avg;
|
||||
u16 tx_coal_avg;
|
||||
u16 rx_coal_avg;
|
||||
u32 napi_quota;
|
||||
#define NUM_PERF_COUNTERS 6
|
||||
};
|
||||
|
||||
enum mlx4_en_mclist_act {
|
||||
MCLIST_NONE,
|
||||
MCLIST_REM,
|
||||
MCLIST_ADD,
|
||||
};
|
||||
|
||||
struct mlx4_en_mc_list {
|
||||
struct list_head list;
|
||||
enum mlx4_en_mclist_act action;
|
||||
u8 addr[ETH_ALEN];
|
||||
u64 reg_id;
|
||||
u64 tunnel_reg_id;
|
||||
};
|
||||
|
||||
struct mlx4_en_frag_info {
|
||||
u16 frag_size;
|
||||
u16 frag_prefix_size;
|
||||
u16 frag_stride;
|
||||
u16 frag_align;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
/* Minimal TC BW - setting to 0 will block traffic */
|
||||
#define MLX4_EN_BW_MIN 1
|
||||
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
|
||||
|
||||
#define MLX4_EN_TC_ETS 7
|
||||
|
||||
#endif
|
||||
|
||||
struct ethtool_flow_id {
|
||||
struct list_head list;
|
||||
struct ethtool_rx_flow_spec flow_spec;
|
||||
u64 id;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_EN_FLAG_PROMISC = (1 << 0),
|
||||
MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
|
||||
/* whether we need to enable hardware loopback by putting dmac
|
||||
* in Tx WQE
|
||||
*/
|
||||
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
|
||||
/* whether we need to drop packets that hardware loopback-ed */
|
||||
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
|
||||
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
|
||||
};
|
||||
|
||||
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
|
||||
#define MLX4_EN_MAC_HASH_IDX 5
|
||||
|
||||
struct mlx4_en_priv {
|
||||
struct mlx4_en_dev *mdev;
|
||||
struct mlx4_en_port_profile *prof;
|
||||
struct net_device *dev;
|
||||
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
|
||||
struct net_device_stats stats;
|
||||
struct net_device_stats ret_stats;
|
||||
struct mlx4_en_port_state port_state;
|
||||
spinlock_t stats_lock;
|
||||
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
|
||||
/* To allow rules removal while port is going down */
|
||||
struct list_head ethtool_list;
|
||||
|
||||
unsigned long last_moder_packets[MAX_RX_RINGS];
|
||||
unsigned long last_moder_tx_packets;
|
||||
unsigned long last_moder_bytes[MAX_RX_RINGS];
|
||||
unsigned long last_moder_jiffies;
|
||||
int last_moder_time[MAX_RX_RINGS];
|
||||
u16 rx_usecs;
|
||||
u16 rx_frames;
|
||||
u16 tx_usecs;
|
||||
u16 tx_frames;
|
||||
u32 pkt_rate_low;
|
||||
u16 rx_usecs_low;
|
||||
u32 pkt_rate_high;
|
||||
u16 rx_usecs_high;
|
||||
u16 sample_interval;
|
||||
u16 adaptive_rx_coal;
|
||||
u32 msg_enable;
|
||||
u32 loopback_ok;
|
||||
u32 validate_loopback;
|
||||
|
||||
struct mlx4_hwq_resources res;
|
||||
int link_state;
|
||||
int last_link_state;
|
||||
bool port_up;
|
||||
int port;
|
||||
int registered;
|
||||
int allocated;
|
||||
int stride;
|
||||
unsigned char current_mac[ETH_ALEN + 2];
|
||||
int mac_index;
|
||||
unsigned max_mtu;
|
||||
int base_qpn;
|
||||
int cqe_factor;
|
||||
int cqe_size;
|
||||
|
||||
struct mlx4_en_rss_map rss_map;
|
||||
__be32 ctrl_flags;
|
||||
u32 flags;
|
||||
u8 num_tx_rings_p_up;
|
||||
u32 tx_work_limit;
|
||||
u32 tx_ring_num;
|
||||
u32 rx_ring_num;
|
||||
u32 rx_skb_size;
|
||||
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
|
||||
u16 num_frags;
|
||||
u16 log_rx_info;
|
||||
|
||||
struct mlx4_en_tx_ring **tx_ring;
|
||||
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
|
||||
struct mlx4_en_cq **tx_cq;
|
||||
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
|
||||
struct mlx4_qp drop_qp;
|
||||
struct work_struct rx_mode_task;
|
||||
struct work_struct watchdog_task;
|
||||
struct work_struct linkstate_task;
|
||||
struct delayed_work stats_task;
|
||||
struct delayed_work service_task;
|
||||
#ifdef CONFIG_MLX4_EN_VXLAN
|
||||
struct work_struct vxlan_add_task;
|
||||
struct work_struct vxlan_del_task;
|
||||
#endif
|
||||
struct mlx4_en_perf_stats pstats;
|
||||
struct mlx4_en_pkt_stats pkstats;
|
||||
struct mlx4_en_port_stats port_stats;
|
||||
u64 stats_bitmap;
|
||||
struct list_head mc_list;
|
||||
struct list_head curr_list;
|
||||
u64 broadcast_id;
|
||||
struct mlx4_en_stat_out_mbox hw_stats;
|
||||
int vids[128];
|
||||
bool wol;
|
||||
struct device *ddev;
|
||||
int base_tx_qpn;
|
||||
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
|
||||
struct hwtstamp_config hwtstamp_config;
|
||||
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
struct ieee_ets ets;
|
||||
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
|
||||
#endif
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
spinlock_t filters_lock;
|
||||
int last_filter_id;
|
||||
struct list_head filters;
|
||||
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
|
||||
#endif
|
||||
u64 tunnel_reg_id;
|
||||
__be16 vxlan_port;
|
||||
|
||||
u32 pflags;
|
||||
};
|
||||
|
||||
enum mlx4_en_wol {
|
||||
MLX4_EN_WOL_MAGIC = (1ULL << 61),
|
||||
MLX4_EN_WOL_ENABLED = (1ULL << 62),
|
||||
};
|
||||
|
||||
struct mlx4_mac_entry {
|
||||
struct hlist_node hlist;
|
||||
unsigned char mac[ETH_ALEN + 2];
|
||||
u64 reg_id;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
/* Return a pointer to CQE @idx within the CQ buffer @buf, where each
 * entry is @cqe_sz bytes. */
static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
{
	char *base = buf;

	return (struct mlx4_cqe *)(base + idx * cqe_sz);
}
|
||||
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
|
||||
{
|
||||
spin_lock_init(&cq->poll_lock);
|
||||
cq->state = MLX4_EN_CQ_STATE_IDLE;
|
||||
}
|
||||
|
||||
/* called from the device poll rutine to get ownership of a cq */
|
||||
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
|
||||
{
|
||||
int rc = true;
|
||||
spin_lock(&cq->poll_lock);
|
||||
if (cq->state & MLX4_CQ_LOCKED) {
|
||||
WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
|
||||
cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
|
||||
rc = false;
|
||||
} else
|
||||
/* we don't care if someone yielded */
|
||||
cq->state = MLX4_EN_CQ_STATE_NAPI;
|
||||
spin_unlock(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* returns true is someone tried to get the cq while napi had it */
|
||||
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
|
||||
{
|
||||
int rc = false;
|
||||
spin_lock(&cq->poll_lock);
|
||||
WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
|
||||
MLX4_EN_CQ_STATE_NAPI_YIELD));
|
||||
|
||||
if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
rc = true;
|
||||
cq->state = MLX4_EN_CQ_STATE_IDLE;
|
||||
spin_unlock(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* called from mlx4_en_low_latency_poll() */
|
||||
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
|
||||
{
|
||||
int rc = true;
|
||||
spin_lock_bh(&cq->poll_lock);
|
||||
if ((cq->state & MLX4_CQ_LOCKED)) {
|
||||
struct net_device *dev = cq->dev;
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
|
||||
|
||||
cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
|
||||
rc = false;
|
||||
rx_ring->yields++;
|
||||
} else
|
||||
/* preserve yield marks */
|
||||
cq->state |= MLX4_EN_CQ_STATE_POLL;
|
||||
spin_unlock_bh(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* returns true if someone tried to get the cq while it was locked */
|
||||
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
|
||||
{
|
||||
int rc = false;
|
||||
spin_lock_bh(&cq->poll_lock);
|
||||
WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));
|
||||
|
||||
if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
rc = true;
|
||||
cq->state = MLX4_EN_CQ_STATE_IDLE;
|
||||
spin_unlock_bh(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* True if a socket is polling, even if it did not get the lock.
 * The caller must already hold some form of CQ ownership (WARNs
 * otherwise).
 */
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}
|
||||
#else
|
||||
/* CONFIG_NET_RX_BUSY_POLL is off: no socket busy-polling can race
 * NAPI, so locking is a no-op, NAPI always gets ownership, and no
 * yield/busy condition can ever be observed.
 */
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	return false;
}
|
||||
#endif /* CONFIG_NET_RX_BUSY_POLL */
|
||||
|
||||
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
|
||||
|
||||
void mlx4_en_update_loopback_state(struct net_device *dev,
|
||||
netdev_features_t features);
|
||||
|
||||
void mlx4_en_destroy_netdev(struct net_device *dev);
|
||||
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
||||
struct mlx4_en_port_profile *prof);
|
||||
|
||||
int mlx4_en_start_port(struct net_device *dev);
|
||||
void mlx4_en_stop_port(struct net_device *dev, int detach);
|
||||
|
||||
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
|
||||
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
|
||||
|
||||
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
|
||||
int entries, int ring, enum cq_type mode, int node);
|
||||
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
|
||||
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
|
||||
int cq_idx);
|
||||
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
|
||||
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
|
||||
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
|
||||
|
||||
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
|
||||
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
|
||||
void *accel_priv, select_queue_fallback_t fallback);
|
||||
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
|
||||
|
||||
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_tx_ring **pring,
|
||||
int qpn, u32 size, u16 stride,
|
||||
int node, int queue_index);
|
||||
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_tx_ring **pring);
|
||||
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_tx_ring *ring,
|
||||
int cq, int user_prio);
|
||||
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_tx_ring *ring);
|
||||
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
|
||||
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
|
||||
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_rx_ring **pring,
|
||||
u32 size, u16 stride, int node);
|
||||
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_rx_ring **pring,
|
||||
u32 size, u16 stride);
|
||||
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
|
||||
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_rx_ring *ring);
|
||||
int mlx4_en_process_rx_cq(struct net_device *dev,
|
||||
struct mlx4_en_cq *cq,
|
||||
int budget);
|
||||
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
|
||||
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
|
||||
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
|
||||
int is_tx, int rss, int qpn, int cqn, int user_prio,
|
||||
struct mlx4_qp_context *context);
|
||||
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
|
||||
int mlx4_en_map_buffer(struct mlx4_buf *buf);
|
||||
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
|
||||
|
||||
void mlx4_en_calc_rx_buf(struct net_device *dev);
|
||||
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
|
||||
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
|
||||
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
|
||||
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
|
||||
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
|
||||
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
|
||||
|
||||
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
|
||||
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
|
||||
|
||||
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
|
||||
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
|
||||
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
|
||||
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
|
||||
#endif
|
||||
|
||||
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
|
||||
#endif
|
||||
|
||||
#define MLX4_EN_NUM_SELF_TEST 5
|
||||
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
|
||||
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
|
||||
|
||||
/*
|
||||
* Functions for time stamping
|
||||
*/
|
||||
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
|
||||
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
|
||||
struct skb_shared_hwtstamps *hwts,
|
||||
u64 timestamp);
|
||||
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
|
||||
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
|
||||
int mlx4_en_timestamp_config(struct net_device *dev,
|
||||
int tx_type,
|
||||
int rx_filter);
|
||||
|
||||
/* Globals
|
||||
*/
|
||||
extern const struct ethtool_ops mlx4_en_ethtool_ops;
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* printk / logging functions
|
||||
*/
|
||||
|
||||
__printf(3, 4)
|
||||
void en_print(const char *level, const struct mlx4_en_priv *priv,
|
||||
const char *format, ...);
|
||||
|
||||
#define en_dbg(mlevel, priv, format, ...) \
|
||||
do { \
|
||||
if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
|
||||
en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
#define en_warn(priv, format, ...) \
|
||||
en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
|
||||
#define en_err(priv, format, ...) \
|
||||
en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
|
||||
#define en_info(priv, format, ...) \
|
||||
en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
|
||||
|
||||
#define mlx4_err(mdev, format, ...) \
|
||||
pr_err(DRV_NAME " %s: " format, \
|
||||
dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
|
||||
#define mlx4_info(mdev, format, ...) \
|
||||
pr_info(DRV_NAME " %s: " format, \
|
||||
dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
|
||||
#define mlx4_warn(mdev, format, ...) \
|
||||
pr_warn(DRV_NAME " %s: " format, \
|
||||
dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
|
||||
|
||||
#endif
|
||||
1167
drivers/net/ethernet/mellanox/mlx4/mr.c
Normal file
1167
drivers/net/ethernet/mellanox/mlx4/mr.c
Normal file
File diff suppressed because it is too large
Load diff
286
drivers/net/ethernet/mellanox/mlx4/pd.c
Normal file
286
drivers/net/ethernet/mellanox/mlx4/pd.c
Normal file
|
|
@ -0,0 +1,286 @@
|
|||
/*
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/io-mapping.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
|
||||
enum {
	/* UAR pages held back from BlueFlame use; mlx4_bf_alloc()
	 * refuses to take a fresh UAR when fewer than this many
	 * remain in the bitmap.
	 */
	MLX4_NUM_RESERVED_UARS = 8
};
|
||||
|
||||
/* Allocate a protection-domain number from the driver's PD bitmap.
 *
 * Stores the allocated number in *pdn and returns 0, or returns
 * -ENOMEM when the bitmap is exhausted (in which case *pdn holds the
 * bitmap's failure value, as before).
 */
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 pd = mlx4_bitmap_alloc(&priv->pd_bitmap);

	*pdn = pd;
	return (pd == (u32)-1) ? -ENOMEM : 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
|
||||
|
||||
/* Return a protection-domain number to the PD bitmap. */
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
|
||||
|
||||
/* Allocate an XRC domain number directly from the local bitmap
 * (no firmware round-trip; see mlx4_xrcd_alloc() for the wrapped
 * path).  Returns 0 and fills *xrcdn, or -ENOMEM on exhaustion.
 */
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 xrcd = mlx4_bitmap_alloc(&priv->xrcd_bitmap);

	*xrcdn = xrcd;
	return (xrcd == (u32)-1) ? -ENOMEM : 0;
}
|
||||
|
||||
/* Allocate an XRC domain number.
 *
 * On a multi-function device the reservation is routed through the
 * ALLOC_RES firmware command (wrapped, so the resource tracker sees
 * it); otherwise the local bitmap is used directly.
 * Returns 0 and fills *xrcdn, or a negative errno.
 */
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param,
				   RES_XRCD, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		/* the reserved xrcdn comes back in the low dword */
		*xrcdn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_xrcd_alloc(dev, xrcdn);
}
EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
|
||||
|
||||
/* Return an XRC domain number directly to the local bitmap. */
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
|
||||
|
||||
/* Free an XRC domain number.
 *
 * Multi-function devices release it through the FREE_RES firmware
 * command; a failure is only logged since there is no way to recover.
 * Otherwise the local bitmap is used directly.
 */
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, xrcdn);
		err = mlx4_cmd(dev, in_param, RES_XRCD,
			       RES_OP_RESERVE, MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn);
	} else
		__mlx4_xrcd_free(dev, xrcdn);
}
EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
|
||||
|
||||
/* Set up the PD allocation bitmap: num_pds entries, with the low
 * NOT_MASKED_PD_BITS used as the allocation mask and the first
 * reserved_pds entries held back for firmware.
 */
int mlx4_init_pd_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
				(1 << NOT_MASKED_PD_BITS) - 1,
				dev->caps.reserved_pds, 0);
}
|
||||
|
||||
/* Tear down the PD allocation bitmap. */
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
}
|
||||
|
||||
/* Set up the XRC-domain bitmap: a fixed 2^16 domain space, with the
 * first reserved_xrcds + 1 entries held back.
 */
int mlx4_init_xrcd_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
				(1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0);
}
|
||||
|
||||
/* Tear down the XRC-domain bitmap. */
void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap);
}
|
||||
|
||||
/* Allocate a User Access Region (UAR) doorbell page.
 *
 * Picks an index from the UAR bitmap and computes the page-frame
 * number inside PCI BAR 2.  A slave function only owns a window of
 * BAR 2, so its index is reduced modulo the number of UAR pages the
 * window holds.  The page is NOT mapped here (uar->map stays NULL).
 * Returns 0, or -ENOMEM if the bitmap is exhausted.
 */
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
	int offset;

	uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
	if (uar->index == -1)
		return -ENOMEM;

	if (mlx4_is_slave(dev))
		offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
				       dev->caps.uar_page_size);
	else
		offset = uar->index;
	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
	uar->map = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
|
||||
|
||||
/* Return a UAR index to the bitmap (does not unmap uar->map). */
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
|
||||
|
||||
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_uar *uar;
|
||||
int err = 0;
|
||||
int idx;
|
||||
|
||||
if (!priv->bf_mapping)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&priv->bf_mutex);
|
||||
if (!list_empty(&priv->bf_list))
|
||||
uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
|
||||
else {
|
||||
if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
|
||||
if (!uar) {
|
||||
uar = kmalloc(sizeof(*uar), GFP_KERNEL);
|
||||
if (!uar) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
err = mlx4_uar_alloc(dev, uar);
|
||||
if (err)
|
||||
goto free_kmalloc;
|
||||
|
||||
uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
|
||||
if (!uar->map) {
|
||||
err = -ENOMEM;
|
||||
goto free_uar;
|
||||
}
|
||||
|
||||
uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
|
||||
if (!uar->bf_map) {
|
||||
err = -ENOMEM;
|
||||
goto unamp_uar;
|
||||
}
|
||||
uar->free_bf_bmap = 0;
|
||||
list_add(&uar->bf_list, &priv->bf_list);
|
||||
}
|
||||
|
||||
bf->uar = uar;
|
||||
idx = ffz(uar->free_bf_bmap);
|
||||
uar->free_bf_bmap |= 1 << idx;
|
||||
bf->uar = uar;
|
||||
bf->offset = 0;
|
||||
bf->buf_size = dev->caps.bf_reg_size / 2;
|
||||
bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
|
||||
if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
|
||||
list_del_init(&uar->bf_list);
|
||||
|
||||
goto out;
|
||||
|
||||
unamp_uar:
|
||||
bf->uar = NULL;
|
||||
iounmap(uar->map);
|
||||
|
||||
free_uar:
|
||||
mlx4_uar_free(dev, uar);
|
||||
|
||||
free_kmalloc:
|
||||
kfree(uar);
|
||||
|
||||
out:
|
||||
mutex_unlock(&priv->bf_mutex);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
|
||||
|
||||
/* Release a BlueFlame register slot.
 *
 * Clears the slot's bit in the owning UAR's bitmap.  If that was the
 * last used slot, the whole UAR page is unmapped and freed; otherwise
 * the page is put (back) on priv->bf_list so its remaining slots can
 * be reused by future allocations.
 */
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int idx;

	/* nothing to do for a bf that was never successfully allocated */
	if (!bf->uar || !bf->uar->bf_map)
		return;

	mutex_lock(&priv->bf_mutex);
	/* recover the slot index from the register address */
	idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
	bf->uar->free_bf_bmap &= ~(1 << idx);
	if (!bf->uar->free_bf_bmap) {
		if (!list_empty(&bf->uar->bf_list))
			list_del(&bf->uar->bf_list);

		io_mapping_unmap(bf->uar->bf_map);
		iounmap(bf->uar->map);
		mlx4_uar_free(dev, bf->uar);
		kfree(bf->uar);
	} else if (list_empty(&bf->uar->bf_list))
		list_add(&bf->uar->bf_list, &priv->bf_list);

	mutex_unlock(&priv->bf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_bf_free);
|
||||
|
||||
/* Set up the UAR allocation bitmap.
 *
 * Refuses to run with 128 or fewer UAR pages — the driver needs more;
 * the firmware's log2_uar_bar_megabytes setting controls this.
 */
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
	if (dev->caps.num_uars <= 128) {
		mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
			 dev->caps.num_uars);
		mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
		return -ENODEV;
	}

	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
				dev->caps.num_uars, dev->caps.num_uars - 1,
				dev->caps.reserved_uars, 0);
}
|
||||
|
||||
/* Tear down the UAR allocation bitmap. */
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
}
|
||||
1313
drivers/net/ethernet/mellanox/mlx4/port.c
Normal file
1313
drivers/net/ethernet/mellanox/mlx4/port.c
Normal file
File diff suppressed because it is too large
Load diff
266
drivers/net/ethernet/mellanox/mlx4/profile.c
Normal file
266
drivers/net/ethernet/mellanox/mlx4/profile.c
Normal file
|
|
@ -0,0 +1,266 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "fw.h"
|
||||
|
||||
/* Indices into the ICM resource profile table laid out by
 * mlx4_make_profile(); res_name[] below provides printable names.
 */
enum {
	MLX4_RES_QP,
	MLX4_RES_RDMARC,
	MLX4_RES_ALTC,
	MLX4_RES_AUXC,
	MLX4_RES_SRQ,
	MLX4_RES_CQ,
	MLX4_RES_EQ,
	MLX4_RES_DMPT,
	MLX4_RES_CMPT,
	MLX4_RES_MTT,
	MLX4_RES_MCG,
	MLX4_RES_NUM	/* number of profiled resource types */
};
|
||||
|
||||
/* Printable names for the MLX4_RES_* indices, used in the profile
 * debug output of mlx4_make_profile().
 */
static const char *res_name[] = {
	[MLX4_RES_QP]		= "QP",
	[MLX4_RES_RDMARC]	= "RDMARC",
	[MLX4_RES_ALTC]		= "ALTC",
	[MLX4_RES_AUXC]		= "AUXC",
	[MLX4_RES_SRQ]		= "SRQ",
	[MLX4_RES_CQ]		= "CQ",
	[MLX4_RES_EQ]		= "EQ",
	[MLX4_RES_DMPT]		= "DMPT",
	[MLX4_RES_CMPT]		= "CMPT",
	[MLX4_RES_MTT]		= "MTT",
	[MLX4_RES_MCG]		= "MCG",
};
|
||||
|
||||
/* Compute the layout of the device-resident context memory (ICM).
 *
 * Sizes each resource table from the requested counts and the
 * per-entry sizes reported by the device, packs the tables without
 * gaps (largest first, so power-of-two sizes stay self-aligned),
 * then records the chosen base addresses and log-counts into both
 * @init_hca (for the INIT_HCA firmware command) and the driver's
 * capability/priv fields.
 *
 * Returns the total ICM size in bytes on success.  NOTE(review):
 * failure is signalled by returning -ENOMEM cast into the u64 return
 * value — callers are expected to test it as a signed quantity;
 * confirm at the call sites.
 */
u64 mlx4_make_profile(struct mlx4_dev *dev,
		      struct mlx4_profile *request,
		      struct mlx4_dev_cap *dev_cap,
		      struct mlx4_init_hca_param *init_hca)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* scratch descriptor for one ICM table */
	struct mlx4_resource {
		u64 size;	/* total bytes for this table */
		u64 start;	/* offset within the ICM area */
		int type;	/* MLX4_RES_* (survives the sort below) */
		u32 num;	/* number of entries (rounded to pow2) */
		int log_num;	/* ilog2(num) */
	};

	u64 total_size = 0;
	struct mlx4_resource *profile;
	struct mlx4_resource tmp;
	struct sysinfo si;
	int i, j;

	profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
	if (!profile)
		return -ENOMEM;

	/*
	 * We want to scale the number of MTTs with the size of the
	 * system memory, since it makes sense to register a lot of
	 * memory on a system with a lot of memory.  As a heuristic,
	 * make sure we have enough MTTs to cover twice the system
	 * memory (with PAGE_SIZE entries).
	 *
	 * This number has to be a power of two and fit into 32 bits
	 * due to device limitations, so cap this at 2^31 as well.
	 * That limits us to 8TB of memory registration per HCA with
	 * 4KB pages, which is probably OK for the next few months.
	 */
	si_meminfo(&si);
	request->num_mtt =
		roundup_pow_of_two(max_t(unsigned, request->num_mtt,
					 min(1UL << (31 - log_mtts_per_seg),
					     si.totalram >> (log_mtts_per_seg - 1))));

	/* per-entry sizes as reported by QUERY_DEV_CAP */
	profile[MLX4_RES_QP].size     = dev_cap->qpc_entry_sz;
	profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
	profile[MLX4_RES_ALTC].size   = dev_cap->altc_entry_sz;
	profile[MLX4_RES_AUXC].size   = dev_cap->aux_entry_sz;
	profile[MLX4_RES_SRQ].size    = dev_cap->srq_entry_sz;
	profile[MLX4_RES_CQ].size     = dev_cap->cqc_entry_sz;
	profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
	profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
	profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
	profile[MLX4_RES_MTT].size    = dev_cap->mtt_entry_sz;
	profile[MLX4_RES_MCG].size    = mlx4_get_mgm_entry_size(dev);

	/* requested entry counts; ALTC/AUXC/RDMARC scale with num_qp */
	profile[MLX4_RES_QP].num      = request->num_qp;
	profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
	profile[MLX4_RES_ALTC].num    = request->num_qp;
	profile[MLX4_RES_AUXC].num    = request->num_qp;
	profile[MLX4_RES_SRQ].num     = request->num_srq;
	profile[MLX4_RES_CQ].num      = request->num_cq;
	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
					dev->phys_caps.num_phys_eqs :
					min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
	profile[MLX4_RES_DMPT].num    = request->num_mpt;
	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
	profile[MLX4_RES_MTT].num     = request->num_mtt * (1 << log_mtts_per_seg);
	profile[MLX4_RES_MCG].num     = request->num_mcg;

	/* round every table to a power-of-two entry count and at least
	 * one page of total size
	 */
	for (i = 0; i < MLX4_RES_NUM; ++i) {
		profile[i].type = i;
		profile[i].num = roundup_pow_of_two(profile[i].num);
		profile[i].log_num = ilog2(profile[i].num);
		profile[i].size *= profile[i].num;
		profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
	}

	/*
	 * Sort the resources in decreasing order of size.  Since they
	 * all have sizes that are powers of 2, we'll be able to keep
	 * resources aligned to their size and pack them without gaps
	 * using the sorted order.
	 */
	for (i = MLX4_RES_NUM; i > 0; --i)
		for (j = 1; j < i; ++j) {
			if (profile[j].size > profile[j - 1].size) {
				tmp	       = profile[j];
				profile[j]     = profile[j - 1];
				profile[j - 1] = tmp;
			}
		}

	/* assign consecutive start offsets, checking the device's ICM
	 * capacity as we go
	 */
	for (i = 0; i < MLX4_RES_NUM; ++i) {
		if (profile[i].size) {
			profile[i].start = total_size;
			total_size	+= profile[i].size;
		}

		if (total_size > dev_cap->max_icm_sz) {
			mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
				 (unsigned long long) total_size,
				 (unsigned long long) dev_cap->max_icm_sz);
			kfree(profile);
			return -ENOMEM;
		}

		if (profile[i].size)
			mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
				 i, res_name[profile[i].type],
				 profile[i].log_num,
				 (unsigned long long) profile[i].start,
				 (unsigned long long) profile[i].size);
	}

	mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
		 (int) (total_size >> 10));

	/* publish each table's placement; note the array is sorted, so
	 * dispatch on profile[i].type rather than the index
	 */
	for (i = 0; i < MLX4_RES_NUM; ++i) {
		switch (profile[i].type) {
		case MLX4_RES_QP:
			dev->caps.num_qps     = profile[i].num;
			init_hca->qpc_base    = profile[i].start;
			init_hca->log_num_qps = profile[i].log_num;
			break;
		case MLX4_RES_RDMARC:
			/* smallest shift s.t. num_qp << shift covers the table */
			for (priv->qp_table.rdmarc_shift = 0;
			     request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
			     ++priv->qp_table.rdmarc_shift)
				; /* nothing */
			dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
			priv->qp_table.rdmarc_base = (u32) profile[i].start;
			init_hca->rdmarc_base	   = profile[i].start;
			init_hca->log_rd_per_qp	   = priv->qp_table.rdmarc_shift;
			break;
		case MLX4_RES_ALTC:
			init_hca->altc_base = profile[i].start;
			break;
		case MLX4_RES_AUXC:
			init_hca->auxc_base = profile[i].start;
			break;
		case MLX4_RES_SRQ:
			dev->caps.num_srqs     = profile[i].num;
			init_hca->srqc_base    = profile[i].start;
			init_hca->log_num_srqs = profile[i].log_num;
			break;
		case MLX4_RES_CQ:
			dev->caps.num_cqs     = profile[i].num;
			init_hca->cqc_base    = profile[i].start;
			init_hca->log_num_cqs = profile[i].log_num;
			break;
		case MLX4_RES_EQ:
			/* NOTE(review): num_eqs is recomputed here (not
			 * profile[i].num) — on mfunc devices the table was
			 * sized for num_phys_eqs but the local cap uses
			 * max_eqs/MAX_MSIX; confirm this asymmetry is
			 * intentional.
			 */
			dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
								     MAX_MSIX));
			init_hca->eqc_base    = profile[i].start;
			init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
			break;
		case MLX4_RES_DMPT:
			dev->caps.num_mpts	= profile[i].num;
			priv->mr_table.mpt_base = profile[i].start;
			init_hca->dmpt_base	= profile[i].start;
			init_hca->log_mpt_sz	= profile[i].log_num;
			break;
		case MLX4_RES_CMPT:
			init_hca->cmpt_base = profile[i].start;
			break;
		case MLX4_RES_MTT:
			dev->caps.num_mtts	 = profile[i].num;
			priv->mr_table.mtt_base	 = profile[i].start;
			init_hca->mtt_base	 = profile[i].start;
			break;
		case MLX4_RES_MCG:
			init_hca->mc_base	  = profile[i].start;
			init_hca->log_mc_entry_sz =
					ilog2(mlx4_get_mgm_entry_size(dev));
			init_hca->log_mc_table_sz = profile[i].log_num;
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				dev->caps.num_mgms = profile[i].num;
			} else {
				/* half the table for hash buckets, half
				 * for auxiliary (chained) entries
				 */
				init_hca->log_mc_hash_sz =
						profile[i].log_num - 1;
				dev->caps.num_mgms = profile[i].num >> 1;
				dev->caps.num_amgms = profile[i].num >> 1;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * PDs don't take any HCA memory, but we assign them as part
	 * of the HCA profile anyway.
	 */
	dev->caps.num_pds = MLX4_NUM_PDS;

	kfree(profile);
	return total_size;
}
|
||||
633
drivers/net/ethernet/mellanox/mlx4/qp.c
Normal file
633
drivers/net/ethernet/mellanox/mlx4/qp.c
Normal file
|
|
@ -0,0 +1,633 @@
|
|||
/*
|
||||
* Copyright (c) 2004 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <linux/mlx4/cmd.h>
|
||||
#include <linux/mlx4/qp.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
|
||||
/* Dispatch an asynchronous firmware event to the QP it targets.
 *
 * The QP is looked up under qp_table->lock and its refcount is taken
 * so it cannot be freed while qp->event() runs; dropping the last
 * reference completes qp->free so a concurrent destroy path can
 * proceed (NOTE(review): confirm against the QP free path).
 */
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
		return;
	}

	/* call the owner's event handler outside the table lock */
	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
|
||||
|
||||
/* used for INIT/CLOSE port logic */
/* Classify a QP as one of the master's special QP0s.
 *
 * Sets *proxy_qp0 if qpn falls in the master's proxy-QP0 pair
 * (base_proxy_sqpn + 8 * master function number, two entries) and
 * *real_qp0 if it falls in the physical QP0 pair (base_sqpn, two
 * entries).  Returns nonzero when either matches.
 */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}
|
||||
|
||||
/* Core QP state-transition routine: maps (cur_state, new_state) to the
 * firmware command that performs the transition, builds the command
 * mailbox, and maintains the master's qp0 port-active bookkeeping.
 *
 * @native: if set, the command is issued directly rather than wrapped
 *          through the SR-IOV command path.
 * Returns 0 on success or a negative errno.
 */
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
			    struct mlx4_qp_context *context,
			    enum mlx4_qp_optpar optpar,
			    int sqd_event, struct mlx4_qp *qp, int native)
{
	/* Legal transition table: op[cur][new] is the firmware opcode, or 0
	 * if the transition is not allowed.
	 */
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	/* 2RST needs no context mailbox — just the opcode with modifier 2. */
	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		/* Leaving an active state to RST deactivates qp0 on its port. */
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* First transition out of reset programs the MTT location. */
	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	/* Mailbox layout: optional-parameter mask at offset 0, QP context
	 * at offset 8.
	 */
	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	/* Bit 31 of in_modifier requests an SQ-drained event. */
	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	/* Track qp0 port state on the master: ERR deactivates, RTR activates. */
	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
|
||||
|
||||
/* Public QP state-transition entry point; issues the transition through
 * the wrapped (SR-IOV aware) command path (native == 0).
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
|
||||
|
||||
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
|
||||
int *base)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
|
||||
*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
|
||||
if (*base == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
|
||||
{
|
||||
u64 in_param = 0;
|
||||
u64 out_param;
|
||||
int err;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
set_param_l(&in_param, cnt);
|
||||
set_param_h(&in_param, align);
|
||||
err = mlx4_cmd_imm(dev, in_param, &out_param,
|
||||
RES_QP, RES_OP_RESERVE,
|
||||
MLX4_CMD_ALLOC_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*base = get_param_l(&out_param);
|
||||
return 0;
|
||||
}
|
||||
return __mlx4_qp_reserve_range(dev, cnt, align, base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
|
||||
|
||||
/* Return a range of QP numbers to the local bitmap.
 * Reserved/special QPs are never handed back to the allocator.
 */
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	/* MLX4_USE_RR: free in round-robin fashion to delay QPN reuse. */
	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
}
|
||||
|
||||
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
|
||||
{
|
||||
u64 in_param = 0;
|
||||
int err;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
set_param_l(&in_param, base_qpn);
|
||||
set_param_h(&in_param, cnt);
|
||||
err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
|
||||
MLX4_CMD_FREE_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
if (err) {
|
||||
mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
|
||||
base_qpn, cnt);
|
||||
}
|
||||
} else
|
||||
__mlx4_qp_release_range(dev, base_qpn, cnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
|
||||
|
||||
/* Pin all per-QP ICM table entries (context, auxiliary, alternate-path,
 * RDMA-read, and cMPT) for @qpn. On any failure the entries acquired so
 * far are released via the goto unwind chain.
 *
 * Returns 0 on success or the first mlx4_table_get() error.
 */
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}
|
||||
|
||||
/* Pin the ICM entries for @qpn, going through the master's resource
 * tracker when running as a virtual function.
 */
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (!mlx4_is_mfunc(dev))
		return __mlx4_qp_alloc_icm(dev, qpn, gfp);

	set_param_l(&param, qpn);
	return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
			    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}
|
||||
|
||||
/* Release all per-QP ICM table entries for @qpn, in reverse of the
 * order they were acquired in __mlx4_qp_alloc_icm().
 */
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}
|
||||
|
||||
static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
|
||||
{
|
||||
u64 in_param = 0;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
set_param_l(&in_param, qpn);
|
||||
if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
|
||||
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED))
|
||||
mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
|
||||
} else
|
||||
__mlx4_qp_free_icm(dev, qpn);
|
||||
}
|
||||
|
||||
/* Allocate the software/firmware resources for QP number @qpn and make
 * it visible in the radix tree used by the async-event lookup.
 * The caller must have reserved @qpn beforehand; qpn 0 is invalid.
 */
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	/* Publish under the lock; the tree is keyed by the low QPN bits. */
	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree,
				qp->qpn & (dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err) {
		mlx4_qp_free_icm(dev, qpn);
		return err;
	}

	/* The caller holds the initial reference; dropped in mlx4_qp_free(). */
	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;
}

EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
|
||||
|
||||
#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
|
||||
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
|
||||
enum mlx4_update_qp_attr attr,
|
||||
struct mlx4_update_qp_params *params)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_update_qp_context *cmd;
|
||||
u64 pri_addr_path_mask = 0;
|
||||
u64 qp_mask = 0;
|
||||
int err = 0;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
|
||||
|
||||
if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
|
||||
return -EINVAL;
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_SMAC) {
|
||||
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
|
||||
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
|
||||
}
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_VSD) {
|
||||
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
|
||||
if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
|
||||
cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
|
||||
}
|
||||
|
||||
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
|
||||
cmd->qp_mask = cpu_to_be64(qp_mask);
|
||||
|
||||
err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
|
||||
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_NATIVE);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_update_qp);
|
||||
|
||||
/* Remove @qp from the radix tree so no new async-event lookups can find
 * it. Does not free any resources; callers follow with mlx4_qp_free().
 */
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);
|
||||
|
||||
/* Release the caller's reference on @qp, wait for any in-flight event
 * handlers (which hold transient references) to finish, then free the
 * QP's ICM resources. The QP must already have been removed from the
 * radix tree via mlx4_qp_remove().
 */
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
|
||||
|
||||
/* Tell the firmware where the block of special QPs starts
 * (base_qpn == 0 de-configures them, used at cleanup).
 */
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
|
||||
|
||||
/* Initialize the QP table: lay out the reserved QP regions at the top of
 * the QP number space, set up the allocation bitmap, compute the special
 * (qp0/qp1) proxy/tunnel QP numbers in multi-function mode, and program
 * the special-QP base into the firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int k;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	/* Slaves only need the lookup structures; the master owns the
	 * bitmap and the firmware configuration below.
	 */
	if (mlx4_is_slave(dev))
		return 0;

	/*
	 * We reserve 2 extra QPs per port for the special QPs. The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	dev->phys_caps.base_sqpn =
		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j, tmp;
		int last_base = dev->caps.num_qps;

		/* sort[0] (the FW region) is intentionally unused below. */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		/* Bubble-sort regions 1..N-1 by descending reserved count. */
		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
			for (j = 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
					tmp = sort[j];
					sort[j] = sort[j - 1];
					sort[j - 1] = tmp;
				}
			}
		}

		/* Carve each region off the top of the QP number space. */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}

	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */

	/* (1 << 23) - 1 masks QPNs to 23 bits; the MSB is the XRC flag. */
	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
			       (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
			       reserved_from_top);
	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
		 * since the PF does not call mlx4_slave_caps */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		/* One proxy/tunnel QP pair per port, indexed by the PF's
		 * 8-QP slot within the proxy/tunnel blocks.
		 */
		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}


	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;
	return 0;

err_mem:
	/* kfree(NULL) is a no-op, so partial allocation is handled too. */
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	return err;
}
|
||||
|
||||
/* Tear down the QP table: de-configure the special QPs in firmware and
 * destroy the allocation bitmap. Slaves own neither, so they return early.
 */
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
|
||||
|
||||
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
||||
struct mlx4_qp_context *context)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
int err;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
|
||||
MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED);
|
||||
if (!err)
|
||||
memcpy(context, mailbox->buf + 8, sizeof *context);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_query);
|
||||
|
||||
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
struct mlx4_qp_context *context,
|
||||
struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
enum mlx4_qp_state states[] = {
|
||||
MLX4_QP_STATE_RST,
|
||||
MLX4_QP_STATE_INIT,
|
||||
MLX4_QP_STATE_RTR,
|
||||
MLX4_QP_STATE_RTS
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
|
||||
context->flags &= cpu_to_be32(~(0xf << 28));
|
||||
context->flags |= cpu_to_be32(states[i + 1] << 28);
|
||||
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
|
||||
context, 0, 0, qp);
|
||||
if (err) {
|
||||
mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
|
||||
states[i + 1], err);
|
||||
return err;
|
||||
}
|
||||
|
||||
*qp_state = states[i + 1];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
|
||||
179
drivers/net/ethernet/mellanox/mlx4/reset.c
Normal file
179
drivers/net/ethernet/mellanox/mlx4/reset.c
Normal file
|
|
@ -0,0 +1,179 @@
|
|||
/*
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/jiffies.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
/* Hard-reset the HCA and restore its PCI configuration afterwards.
 * Returns 0 on success or a negative errno; on failure the device may be
 * left in an unusable state.
 */
int mlx4_reset(struct mlx4_dev *dev)
{
	void __iomem *reset;
	u32 *hca_header = NULL;
	int pcie_cap;
	u16 devctl;
	u16 linkctl;
	u16 vendor;
	unsigned long end;
	u32 sem;
	int i;
	int err = 0;

#define MLX4_RESET_BASE		0xf0000
#define MLX4_RESET_SIZE		0x400
#define MLX4_SEM_OFFSET		0x3fc
#define MLX4_RESET_OFFSET	0x10
#define MLX4_RESET_VALUE	swab32(1)

#define MLX4_SEM_TIMEOUT_JIFFIES	(10 * HZ)
#define MLX4_RESET_TIMEOUT_JIFFIES	(2 * HZ)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 */

	/* Do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
		goto out;
	}

	pcie_cap = pci_pcie_cap(dev->pdev);

	/* Save the first 256 bytes of config space, one dword at a time. */
	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
			goto out;
		}
	}

	reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
			MLX4_RESET_SIZE);
	if (!reset) {
		err = -ENOMEM;
		mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
		goto out;
	}

	/* grab HW semaphore to lock out flash updates */
	end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
	do {
		sem = readl(reset + MLX4_SEM_OFFSET);
		if (!sem)
			break;

		msleep(1);
	} while (time_before(jiffies, end));

	if (sem) {
		mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
		err = -EAGAIN;
		iounmap(reset);
		goto out;
	}

	/* actually hit reset */
	writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
	iounmap(reset);

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Poll the vendor ID until the device reappears on the bus. */
	end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
	do {
		if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
		    vendor != 0xffff)
			break;

		msleep(1);
	} while (time_before(jiffies, end));

	if (vendor == 0xffff) {
		err = -ENODEV;
		mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
		goto out;
	}

	/* Now restore the PCI headers */
	if (pcie_cap) {
		/* NOTE(review): reads a u16 out of the saved u32 array by
		 * index arithmetic — assumes little-endian layout; confirm.
		 */
		devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
					       devctl)) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
			goto out;
		}
		linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
					       linkctl)) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
			goto out;
		}
	}

	/* Restore the first 64 bytes, leaving PCI_COMMAND for last so the
	 * device is not re-enabled before its BARs are back in place.
	 */
	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
				 i);
			goto out;
		}
	}

	if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
		goto out;
	}

out:
	kfree(hca_header);

	return err;
}
|
||||
4916
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
Normal file
4916
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
Normal file
File diff suppressed because it is too large
Load diff
143
drivers/net/ethernet/mellanox/mlx4/sense.c
Normal file
143
drivers/net/ethernet/mellanox/mlx4/sense.c
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/if_ether.h>
|
||||
|
||||
#include <linux/mlx4/cmd.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
/* Ask the firmware which link type (IB/Ethernet) it senses on @port.
 * On success *type holds the sensed mlx4_port_type value (0..2).
 * Returns 0 on success, a command error, or -EINVAL for an out-of-range
 * firmware answer (in which case *type is left untouched).
 */
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
		    enum mlx4_port_type *type)
{
	u64 out_param;
	int err = 0;

	err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
			   MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err) {
		mlx4_err(dev, "Sense command failed for port: %d\n", port);
		return err;
	}

	if (out_param > 2) {
		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
		return -EINVAL;
	}

	*type = out_param;
	return 0;
}
|
||||
|
||||
void mlx4_do_sense_ports(struct mlx4_dev *dev,
|
||||
enum mlx4_port_type *stype,
|
||||
enum mlx4_port_type *defaults)
|
||||
{
|
||||
struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 1; i <= dev->caps.num_ports; i++) {
|
||||
stype[i - 1] = 0;
|
||||
if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
|
||||
dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
|
||||
err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
|
||||
if (err)
|
||||
stype[i - 1] = defaults[i - 1];
|
||||
} else
|
||||
stype[i - 1] = defaults[i - 1];
|
||||
}
|
||||
|
||||
/*
|
||||
* If sensed nothing, remain in current configuration.
|
||||
*/
|
||||
for (i = 0; i < dev->caps.num_ports; i++)
|
||||
stype[i] = stype[i] ? stype[i] : defaults[i];
|
||||
|
||||
}
|
||||
|
||||
/* Delayed-work handler: periodically sense the port types and, when the
 * sensed configuration is valid and different, apply it. Always
 * re-queues itself for the next sensing round.
 */
static void mlx4_sense_port(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
						sense_poll);
	struct mlx4_dev *dev = sense->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	enum mlx4_port_type stype[MLX4_MAX_PORTS];

	mutex_lock(&priv->port_mutex);
	/* port_type[] is 1-based; pass &[1] as the 0-based defaults array. */
	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);

	if (mlx4_check_port_params(dev, stype))
		goto sense_again;

	if (mlx4_change_port_types(dev, stype))
		mlx4_err(dev, "Failed to change port_types\n");

sense_again:
	mutex_unlock(&priv->port_mutex);
	/* Reschedule unconditionally — sensing runs until mlx4_stop_sense(). */
	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}
|
||||
|
||||
/* Kick off periodic port sensing. Only devices with the dual-port
 * dynamic-protocol capability (DPDP) support sensing.
 */
void mlx4_start_sense(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_sense *sense = &priv->sense;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
		return;

	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}
|
||||
|
||||
/* Stop periodic port sensing and wait for a running round to finish. */
void mlx4_stop_sense(struct mlx4_dev *dev)
{
	cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}
|
||||
|
||||
void mlx4_sense_init(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_sense *sense = &priv->sense;
|
||||
int port;
|
||||
|
||||
sense->dev = dev;
|
||||
for (port = 1; port <= dev->caps.num_ports; port++)
|
||||
sense->do_sense_port[port] = 1;
|
||||
|
||||
INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
|
||||
}
|
||||
313
drivers/net/ethernet/mellanox/mlx4/srq.c
Normal file
313
drivers/net/ethernet/mellanox/mlx4/srq.c
Normal file
|
|
@ -0,0 +1,313 @@
|
|||
/*
|
||||
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/mlx4/cmd.h>
|
||||
#include <linux/mlx4/srq.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
|
||||
/* Dispatch an async firmware event to the SRQ it belongs to.
 * Called from EQ interrupt context, hence the plain spin_lock.
 */
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
	struct mlx4_srq_table *table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *found;

	/* Take a reference under the table lock so the SRQ cannot be
	 * freed while we invoke its event handler.
	 */
	spin_lock(&table->lock);
	found = radix_tree_lookup(&table->tree,
				  srqn & (dev->caps.num_srqs - 1));
	if (found)
		atomic_inc(&found->refcount);
	spin_unlock(&table->lock);

	if (!found) {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	found->event(found, event_type);

	/* Drop our reference; wake the freeing path if it was the last. */
	if (atomic_dec_and_test(&found->refcount))
		complete(&found->free);
}
|
||||
|
||||
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
int srq_num)
|
||||
{
|
||||
return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
|
||||
MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
int srq_num)
|
||||
{
|
||||
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
|
||||
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
|
||||
{
|
||||
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
int srq_num)
|
||||
{
|
||||
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
|
||||
{
|
||||
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
|
||||
int err;
|
||||
|
||||
|
||||
*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
|
||||
if (*srqn == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_put;
|
||||
return 0;
|
||||
|
||||
err_put:
|
||||
mlx4_table_put(dev, &srq_table->table, *srqn);
|
||||
|
||||
err_out:
|
||||
mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
|
||||
{
|
||||
u64 out_param;
|
||||
int err;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
|
||||
RES_OP_RESERVE_AND_MAP,
|
||||
MLX4_CMD_ALLOC_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
if (!err)
|
||||
*srqn = get_param_l(&out_param);
|
||||
|
||||
return err;
|
||||
}
|
||||
return __mlx4_srq_alloc_icm(dev, srqn);
|
||||
}
|
||||
|
||||
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
|
||||
{
|
||||
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
|
||||
|
||||
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
|
||||
mlx4_table_put(dev, &srq_table->table, srqn);
|
||||
mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
|
||||
}
|
||||
|
||||
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
|
||||
{
|
||||
u64 in_param = 0;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
set_param_l(&in_param, srqn);
|
||||
if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
|
||||
MLX4_CMD_FREE_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
|
||||
mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
|
||||
return;
|
||||
}
|
||||
__mlx4_srq_free_icm(dev, srqn);
|
||||
}
|
||||
|
||||
/*
 * Create an SRQ in firmware.
 *
 * Reserves an SRQ number plus ICM backing, publishes the SRQ in the
 * radix tree (so mlx4_srq_event() can find it), builds the hardware
 * context in a command mailbox and hands it over with SW2HW_SRQ.
 *
 * Returns 0 on success or a negative errno; on failure every
 * intermediate step is rolled back.
 */
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_srq_context *ctx;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox)) {
		err = PTR_ERR(mbox);
		goto err_radix;
	}

	/* Build the hardware SRQ context in the mailbox buffer. */
	ctx = mbox->buf;
	ctx->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
					      srq->srqn);
	ctx->logstride     = srq->wqe_shift - 4;
	ctx->xrcd          = cpu_to_be16(xrcd);
	ctx->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
	ctx->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	ctx->mtt_base_addr_h = mtt_addr >> 32;
	ctx->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	ctx->pd          = cpu_to_be32(pdn);
	ctx->db_rec_addr = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mbox);
	if (err)
		goto err_radix;

	/* Initial reference; dropped by mlx4_srq_free(). */
	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
/*
 * Destroy an SRQ: reclaim it from firmware, unpublish it from the radix
 * tree, wait for all in-flight event handlers to drop their references,
 * then release the ICM backing.  The teardown order here is load-bearing.
 */
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	/* NULL mailbox: we don't need the returned SRQ context. */
	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);

	/* Remove from the tree so mlx4_srq_event() can no longer find it. */
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	/* Drop the initial reference from mlx4_srq_alloc(), then wait for
	 * any event handlers still holding references to complete. */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
|
||||
{
|
||||
return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_srq_arm);
|
||||
|
||||
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_srq_context *srq_context;
|
||||
int err;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
srq_context = mailbox->buf;
|
||||
|
||||
err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
|
||||
if (err)
|
||||
goto err_out;
|
||||
*limit_watermark = be16_to_cpu(srq_context->limit_watermark);
|
||||
|
||||
err_out:
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_srq_query);
|
||||
|
||||
int mlx4_init_srq_table(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
|
||||
int err;
|
||||
|
||||
spin_lock_init(&srq_table->lock);
|
||||
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
|
||||
if (mlx4_is_slave(dev))
|
||||
return 0;
|
||||
|
||||
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
|
||||
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
|
||||
{
|
||||
if (mlx4_is_slave(dev))
|
||||
return;
|
||||
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
|
||||
}
|
||||
|
||||
/*
 * Look up an SRQ by number.  The table lock covers only the radix tree
 * walk; the returned pointer carries no extra reference, so the caller
 * must guarantee the SRQ outlives its use.
 */
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	unsigned long flags;
	struct mlx4_srq *found;

	spin_lock_irqsave(&srq_table->lock, flags);
	found = radix_tree_lookup(&srq_table->tree,
				  srqn & (dev->caps.num_srqs - 1));
	spin_unlock_irqrestore(&srq_table->lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
Loading…
Add table
Add a link
Reference in a new issue