mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
5
drivers/infiniband/ulp/Makefile
Normal file
5
drivers/infiniband/ulp/Makefile
Normal file
|
@ -0,0 +1,5 @@
|
|||
obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/
|
||||
obj-$(CONFIG_INFINIBAND_SRP) += srp/
|
||||
obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
|
||||
obj-$(CONFIG_INFINIBAND_ISER) += iser/
|
||||
obj-$(CONFIG_INFINIBAND_ISERT) += isert/
|
49
drivers/infiniband/ulp/ipoib/Kconfig
Normal file
49
drivers/infiniband/ulp/ipoib/Kconfig
Normal file
|
@ -0,0 +1,49 @@
|
|||
config INFINIBAND_IPOIB
|
||||
tristate "IP-over-InfiniBand"
|
||||
depends on NETDEVICES && INET && (IPV6 || IPV6=n)
|
||||
---help---
|
||||
Support for the IP-over-InfiniBand protocol (IPoIB). This
|
||||
transports IP packets over InfiniBand so you can use your IB
|
||||
device as a fancy NIC.
|
||||
|
||||
See Documentation/infiniband/ipoib.txt for more information
|
||||
|
||||
config INFINIBAND_IPOIB_CM
|
||||
bool "IP-over-InfiniBand Connected Mode support"
|
||||
depends on INFINIBAND_IPOIB
|
||||
default n
|
||||
---help---
|
||||
This option enables support for IPoIB connected mode. After
|
||||
enabling this option, you need to switch to connected mode
|
||||
through /sys/class/net/ibXXX/mode to actually create
|
||||
connections, and then increase the interface MTU with
|
||||
e.g. ifconfig ib0 mtu 65520.
|
||||
|
||||
WARNING: Enabling connected mode will trigger some packet
|
||||
drops for multicast and UD mode traffic from this interface,
|
||||
unless you limit mtu for these destinations to 2044.
|
||||
|
||||
config INFINIBAND_IPOIB_DEBUG
|
||||
bool "IP-over-InfiniBand debugging" if EXPERT
|
||||
depends on INFINIBAND_IPOIB
|
||||
default y
|
||||
---help---
|
||||
This option causes debugging code to be compiled into the
|
||||
IPoIB driver. The output can be turned on via the
|
||||
debug_level and mcast_debug_level module parameters (which
|
||||
can also be set after the driver is loaded through sysfs).
|
||||
|
||||
This option also creates a directory tree under ipoib/ in
|
||||
debugfs, which contains files that expose debugging
|
||||
information about IB multicast groups used by the IPoIB
|
||||
driver.
|
||||
|
||||
config INFINIBAND_IPOIB_DEBUG_DATA
|
||||
bool "IP-over-InfiniBand data path debugging"
|
||||
depends on INFINIBAND_IPOIB_DEBUG
|
||||
---help---
|
||||
This option compiles debugging code into the data path
|
||||
of the IPoIB driver. The output can be turned on via the
|
||||
data_debug_level module parameter; however, even with output
|
||||
turned off, this debugging code will have some performance
|
||||
impact.
|
12
drivers/infiniband/ulp/ipoib/Makefile
Normal file
12
drivers/infiniband/ulp/ipoib/Makefile
Normal file
|
@ -0,0 +1,12 @@
|
|||
obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
|
||||
|
||||
ib_ipoib-y := ipoib_main.o \
|
||||
ipoib_ib.o \
|
||||
ipoib_multicast.o \
|
||||
ipoib_verbs.o \
|
||||
ipoib_vlan.o \
|
||||
ipoib_ethtool.o \
|
||||
ipoib_netlink.o
|
||||
ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o
|
||||
ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o
|
||||
|
773
drivers/infiniband/ulp/ipoib/ipoib.h
Normal file
773
drivers/infiniband/ulp/ipoib/ipoib.h
Normal file
|
@ -0,0 +1,773 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
|
||||
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _IPOIB_H
|
||||
#define _IPOIB_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/if_infiniband.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#include <net/neighbour.h>
|
||||
#include <net/sch_generic.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_pack.h>
|
||||
#include <rdma/ib_sa.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
/* constants */
|
||||
|
||||
enum ipoib_flush_level {
|
||||
IPOIB_FLUSH_LIGHT,
|
||||
IPOIB_FLUSH_NORMAL,
|
||||
IPOIB_FLUSH_HEAVY
|
||||
};
|
||||
|
||||
enum {
|
||||
IPOIB_ENCAP_LEN = 4,
|
||||
|
||||
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
|
||||
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
|
||||
|
||||
IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
|
||||
IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
|
||||
IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
|
||||
IPOIB_CM_RX_SG = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
|
||||
IPOIB_RX_RING_SIZE = 256,
|
||||
IPOIB_TX_RING_SIZE = 128,
|
||||
IPOIB_MAX_QUEUE_SIZE = 8192,
|
||||
IPOIB_MIN_QUEUE_SIZE = 2,
|
||||
IPOIB_CM_MAX_CONN_QP = 4096,
|
||||
|
||||
IPOIB_NUM_WC = 4,
|
||||
|
||||
IPOIB_MAX_PATH_REC_QUEUE = 3,
|
||||
IPOIB_MAX_MCAST_QUEUE = 3,
|
||||
|
||||
IPOIB_FLAG_OPER_UP = 0,
|
||||
IPOIB_FLAG_INITIALIZED = 1,
|
||||
IPOIB_FLAG_ADMIN_UP = 2,
|
||||
IPOIB_PKEY_ASSIGNED = 3,
|
||||
IPOIB_FLAG_SUBINTERFACE = 5,
|
||||
IPOIB_MCAST_RUN = 6,
|
||||
IPOIB_STOP_REAPER = 7,
|
||||
IPOIB_FLAG_ADMIN_CM = 9,
|
||||
IPOIB_FLAG_UMCAST = 10,
|
||||
IPOIB_STOP_NEIGH_GC = 11,
|
||||
IPOIB_NEIGH_TBL_FLUSH = 12,
|
||||
|
||||
IPOIB_MAX_BACKOFF_SECONDS = 16,
|
||||
|
||||
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
|
||||
IPOIB_MCAST_FLAG_SENDONLY = 1,
|
||||
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
|
||||
IPOIB_MCAST_FLAG_ATTACHED = 3,
|
||||
IPOIB_MCAST_JOIN_STARTED = 4,
|
||||
|
||||
MAX_SEND_CQE = 16,
|
||||
IPOIB_CM_COPYBREAK = 256,
|
||||
|
||||
IPOIB_NON_CHILD = 0,
|
||||
IPOIB_LEGACY_CHILD = 1,
|
||||
IPOIB_RTNL_CHILD = 2,
|
||||
};
|
||||
|
||||
#define IPOIB_OP_RECV (1ul << 31)
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_CM
|
||||
#define IPOIB_OP_CM (1ul << 30)
|
||||
#else
|
||||
#define IPOIB_OP_CM (0)
|
||||
#endif
|
||||
|
||||
#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
|
||||
|
||||
/* structs */
|
||||
|
||||
struct ipoib_header {
|
||||
__be16 proto;
|
||||
u16 reserved;
|
||||
};
|
||||
|
||||
struct ipoib_cb {
|
||||
struct qdisc_skb_cb qdisc_cb;
|
||||
u8 hwaddr[INFINIBAND_ALEN];
|
||||
};
|
||||
|
||||
static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
|
||||
{
|
||||
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
|
||||
return (struct ipoib_cb *)skb->cb;
|
||||
}
|
||||
|
||||
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
|
||||
struct ipoib_mcast {
|
||||
struct ib_sa_mcmember_rec mcmember;
|
||||
struct ib_sa_multicast *mc;
|
||||
struct ipoib_ah *ah;
|
||||
|
||||
struct rb_node rb_node;
|
||||
struct list_head list;
|
||||
|
||||
unsigned long created;
|
||||
unsigned long backoff;
|
||||
|
||||
unsigned long flags;
|
||||
unsigned char logcount;
|
||||
|
||||
struct list_head neigh_list;
|
||||
|
||||
struct sk_buff_head pkt_queue;
|
||||
|
||||
struct net_device *dev;
|
||||
struct completion done;
|
||||
};
|
||||
|
||||
struct ipoib_rx_buf {
|
||||
struct sk_buff *skb;
|
||||
u64 mapping[IPOIB_UD_RX_SG];
|
||||
};
|
||||
|
||||
struct ipoib_tx_buf {
|
||||
struct sk_buff *skb;
|
||||
u64 mapping[MAX_SKB_FRAGS + 1];
|
||||
};
|
||||
|
||||
struct ipoib_cm_tx_buf {
|
||||
struct sk_buff *skb;
|
||||
u64 mapping;
|
||||
};
|
||||
|
||||
struct ib_cm_id;
|
||||
|
||||
struct ipoib_cm_data {
|
||||
__be32 qpn; /* High byte MUST be ignored on receive */
|
||||
__be32 mtu;
|
||||
};
|
||||
|
||||
/*
|
||||
* Quoting 10.3.1 Queue Pair and EE Context States:
|
||||
*
|
||||
* Note, for QPs that are associated with an SRQ, the Consumer should take the
|
||||
* QP through the Error State before invoking a Destroy QP or a Modify QP to the
|
||||
* Reset State. The Consumer may invoke the Destroy QP without first performing
|
||||
* a Modify QP to the Error State and waiting for the Affiliated Asynchronous
|
||||
* Last WQE Reached Event. However, if the Consumer does not wait for the
|
||||
* Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
|
||||
* leakage may occur. Therefore, it is good programming practice to tear down a
|
||||
* QP that is associated with an SRQ by using the following process:
|
||||
*
|
||||
* - Put the QP in the Error State
|
||||
* - Wait for the Affiliated Asynchronous Last WQE Reached Event;
|
||||
* - either:
|
||||
* drain the CQ by invoking the Poll CQ verb and either wait for CQ
|
||||
* to be empty or the number of Poll CQ operations has exceeded
|
||||
* CQ capacity size;
|
||||
* - or
|
||||
* post another WR that completes on the same CQ and wait for this
|
||||
* WR to return as a WC;
|
||||
* - and then invoke a Destroy QP or Reset QP.
|
||||
*
|
||||
* We use the second option and wait for a completion on the
|
||||
* same CQ before destroying QPs attached to our SRQ.
|
||||
*/
|
||||
|
||||
enum ipoib_cm_state {
|
||||
IPOIB_CM_RX_LIVE,
|
||||
IPOIB_CM_RX_ERROR, /* Ignored by stale task */
|
||||
IPOIB_CM_RX_FLUSH /* Last WQE Reached event observed */
|
||||
};
|
||||
|
||||
struct ipoib_cm_rx {
|
||||
struct ib_cm_id *id;
|
||||
struct ib_qp *qp;
|
||||
struct ipoib_cm_rx_buf *rx_ring;
|
||||
struct list_head list;
|
||||
struct net_device *dev;
|
||||
unsigned long jiffies;
|
||||
enum ipoib_cm_state state;
|
||||
int recv_count;
|
||||
};
|
||||
|
||||
struct ipoib_cm_tx {
|
||||
struct ib_cm_id *id;
|
||||
struct ib_qp *qp;
|
||||
struct list_head list;
|
||||
struct net_device *dev;
|
||||
struct ipoib_neigh *neigh;
|
||||
struct ipoib_path *path;
|
||||
struct ipoib_cm_tx_buf *tx_ring;
|
||||
unsigned tx_head;
|
||||
unsigned tx_tail;
|
||||
unsigned long flags;
|
||||
u32 mtu;
|
||||
};
|
||||
|
||||
struct ipoib_cm_rx_buf {
|
||||
struct sk_buff *skb;
|
||||
u64 mapping[IPOIB_CM_RX_SG];
|
||||
};
|
||||
|
||||
struct ipoib_cm_dev_priv {
|
||||
struct ib_srq *srq;
|
||||
struct ipoib_cm_rx_buf *srq_ring;
|
||||
struct ib_cm_id *id;
|
||||
struct list_head passive_ids; /* state: LIVE */
|
||||
struct list_head rx_error_list; /* state: ERROR */
|
||||
struct list_head rx_flush_list; /* state: FLUSH, drain not started */
|
||||
struct list_head rx_drain_list; /* state: FLUSH, drain started */
|
||||
struct list_head rx_reap_list; /* state: FLUSH, drain done */
|
||||
struct work_struct start_task;
|
||||
struct work_struct reap_task;
|
||||
struct work_struct skb_task;
|
||||
struct work_struct rx_reap_task;
|
||||
struct delayed_work stale_task;
|
||||
struct sk_buff_head skb_queue;
|
||||
struct list_head start_list;
|
||||
struct list_head reap_list;
|
||||
struct ib_wc ibwc[IPOIB_NUM_WC];
|
||||
struct ib_sge rx_sge[IPOIB_CM_RX_SG];
|
||||
struct ib_recv_wr rx_wr;
|
||||
int nonsrq_conn_qp;
|
||||
int max_cm_mtu;
|
||||
int num_frags;
|
||||
};
|
||||
|
||||
struct ipoib_ethtool_st {
|
||||
u16 coalesce_usecs;
|
||||
u16 max_coalesced_frames;
|
||||
};
|
||||
|
||||
struct ipoib_neigh_table;
|
||||
|
||||
struct ipoib_neigh_hash {
|
||||
struct ipoib_neigh_table *ntbl;
|
||||
struct ipoib_neigh __rcu **buckets;
|
||||
struct rcu_head rcu;
|
||||
u32 mask;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
struct ipoib_neigh_table {
|
||||
struct ipoib_neigh_hash __rcu *htbl;
|
||||
atomic_t entries;
|
||||
struct completion flushed;
|
||||
struct completion deleted;
|
||||
};
|
||||
|
||||
/*
|
||||
* Device private locking: network stack tx_lock protects members used
|
||||
* in TX fast path, lock protects everything else. lock nests inside
|
||||
* of tx_lock (ie tx_lock must be acquired first if needed).
|
||||
*/
|
||||
struct ipoib_dev_priv {
|
||||
spinlock_t lock;
|
||||
|
||||
struct net_device *dev;
|
||||
|
||||
struct napi_struct napi;
|
||||
|
||||
unsigned long flags;
|
||||
|
||||
struct rw_semaphore vlan_rwsem;
|
||||
|
||||
struct rb_root path_tree;
|
||||
struct list_head path_list;
|
||||
|
||||
struct ipoib_neigh_table ntbl;
|
||||
|
||||
struct ipoib_mcast *broadcast;
|
||||
struct list_head multicast_list;
|
||||
struct rb_root multicast_tree;
|
||||
|
||||
struct delayed_work mcast_task;
|
||||
struct work_struct carrier_on_task;
|
||||
struct work_struct flush_light;
|
||||
struct work_struct flush_normal;
|
||||
struct work_struct flush_heavy;
|
||||
struct work_struct restart_task;
|
||||
struct delayed_work ah_reap_task;
|
||||
struct delayed_work neigh_reap_task;
|
||||
struct ib_device *ca;
|
||||
u8 port;
|
||||
u16 pkey;
|
||||
u16 pkey_index;
|
||||
struct ib_pd *pd;
|
||||
struct ib_mr *mr;
|
||||
struct ib_cq *recv_cq;
|
||||
struct ib_cq *send_cq;
|
||||
struct ib_qp *qp;
|
||||
u32 qkey;
|
||||
|
||||
union ib_gid local_gid;
|
||||
u16 local_lid;
|
||||
|
||||
unsigned int admin_mtu;
|
||||
unsigned int mcast_mtu;
|
||||
unsigned int max_ib_mtu;
|
||||
|
||||
struct ipoib_rx_buf *rx_ring;
|
||||
|
||||
struct ipoib_tx_buf *tx_ring;
|
||||
unsigned tx_head;
|
||||
unsigned tx_tail;
|
||||
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
|
||||
struct ib_send_wr tx_wr;
|
||||
unsigned tx_outstanding;
|
||||
struct ib_wc send_wc[MAX_SEND_CQE];
|
||||
|
||||
struct ib_recv_wr rx_wr;
|
||||
struct ib_sge rx_sge[IPOIB_UD_RX_SG];
|
||||
|
||||
struct ib_wc ibwc[IPOIB_NUM_WC];
|
||||
|
||||
struct list_head dead_ahs;
|
||||
|
||||
struct ib_event_handler event_handler;
|
||||
|
||||
struct net_device *parent;
|
||||
struct list_head child_intfs;
|
||||
struct list_head list;
|
||||
int child_type;
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_CM
|
||||
struct ipoib_cm_dev_priv cm;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
|
||||
struct list_head fs_list;
|
||||
struct dentry *mcg_dentry;
|
||||
struct dentry *path_dentry;
|
||||
#endif
|
||||
int hca_caps;
|
||||
struct ipoib_ethtool_st ethtool;
|
||||
struct timer_list poll_timer;
|
||||
};
|
||||
|
||||
struct ipoib_ah {
|
||||
struct net_device *dev;
|
||||
struct ib_ah *ah;
|
||||
struct list_head list;
|
||||
struct kref ref;
|
||||
unsigned last_send;
|
||||
};
|
||||
|
||||
struct ipoib_path {
|
||||
struct net_device *dev;
|
||||
struct ib_sa_path_rec pathrec;
|
||||
struct ipoib_ah *ah;
|
||||
struct sk_buff_head queue;
|
||||
|
||||
struct list_head neigh_list;
|
||||
|
||||
int query_id;
|
||||
struct ib_sa_query *query;
|
||||
struct completion done;
|
||||
|
||||
struct rb_node rb_node;
|
||||
struct list_head list;
|
||||
int valid;
|
||||
};
|
||||
|
||||
struct ipoib_neigh {
|
||||
struct ipoib_ah *ah;
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_CM
|
||||
struct ipoib_cm_tx *cm;
|
||||
#endif
|
||||
u8 daddr[INFINIBAND_ALEN];
|
||||
struct sk_buff_head queue;
|
||||
|
||||
struct net_device *dev;
|
||||
|
||||
struct list_head list;
|
||||
struct ipoib_neigh __rcu *hnext;
|
||||
struct rcu_head rcu;
|
||||
atomic_t refcnt;
|
||||
unsigned long alive;
|
||||
};
|
||||
|
||||
#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
|
||||
#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
|
||||
|
||||
static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
|
||||
{
|
||||
return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
|
||||
}
|
||||
|
||||
void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
|
||||
static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
|
||||
{
|
||||
if (atomic_dec_and_test(&neigh->refcnt))
|
||||
ipoib_neigh_dtor(neigh);
|
||||
}
|
||||
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
|
||||
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
|
||||
struct net_device *dev);
|
||||
void ipoib_neigh_free(struct ipoib_neigh *neigh);
|
||||
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid);
|
||||
|
||||
extern struct workqueue_struct *ipoib_workqueue;
|
||||
|
||||
/* functions */
|
||||
|
||||
int ipoib_poll(struct napi_struct *napi, int budget);
|
||||
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
|
||||
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
|
||||
|
||||
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
|
||||
struct ib_pd *pd, struct ib_ah_attr *attr);
|
||||
void ipoib_free_ah(struct kref *kref);
|
||||
static inline void ipoib_put_ah(struct ipoib_ah *ah)
|
||||
{
|
||||
kref_put(&ah->ref, ipoib_free_ah);
|
||||
}
|
||||
int ipoib_open(struct net_device *dev);
|
||||
int ipoib_add_pkey_attr(struct net_device *dev);
|
||||
int ipoib_add_umcast_attr(struct net_device *dev);
|
||||
|
||||
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
|
||||
struct ipoib_ah *address, u32 qpn);
|
||||
void ipoib_reap_ah(struct work_struct *work);
|
||||
|
||||
void ipoib_mark_paths_invalid(struct net_device *dev);
|
||||
void ipoib_flush_paths(struct net_device *dev);
|
||||
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
|
||||
|
||||
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
|
||||
void ipoib_ib_dev_flush_light(struct work_struct *work);
|
||||
void ipoib_ib_dev_flush_normal(struct work_struct *work);
|
||||
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
|
||||
void ipoib_pkey_event(struct work_struct *work);
|
||||
void ipoib_ib_dev_cleanup(struct net_device *dev);
|
||||
|
||||
int ipoib_ib_dev_open(struct net_device *dev, int flush);
|
||||
int ipoib_ib_dev_up(struct net_device *dev);
|
||||
int ipoib_ib_dev_down(struct net_device *dev, int flush);
|
||||
int ipoib_ib_dev_stop(struct net_device *dev, int flush);
|
||||
void ipoib_pkey_dev_check_presence(struct net_device *dev);
|
||||
|
||||
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
|
||||
void ipoib_dev_cleanup(struct net_device *dev);
|
||||
|
||||
void ipoib_mcast_join_task(struct work_struct *work);
|
||||
void ipoib_mcast_carrier_on_task(struct work_struct *work);
|
||||
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
|
||||
|
||||
void ipoib_mcast_restart_task(struct work_struct *work);
|
||||
int ipoib_mcast_start_thread(struct net_device *dev);
|
||||
int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
|
||||
|
||||
void ipoib_mcast_dev_down(struct net_device *dev);
|
||||
void ipoib_mcast_dev_flush(struct net_device *dev);
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
|
||||
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
|
||||
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
|
||||
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
|
||||
union ib_gid *gid,
|
||||
unsigned long *created,
|
||||
unsigned int *queuelen,
|
||||
unsigned int *complete,
|
||||
unsigned int *send_only);
|
||||
|
||||
struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev);
|
||||
int ipoib_path_iter_next(struct ipoib_path_iter *iter);
|
||||
void ipoib_path_iter_read(struct ipoib_path_iter *iter,
|
||||
struct ipoib_path *path);
|
||||
#endif
|
||||
|
||||
int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
|
||||
union ib_gid *mgid, int set_qkey);
|
||||
|
||||
int ipoib_init_qp(struct net_device *dev);
|
||||
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
|
||||
void ipoib_transport_dev_cleanup(struct net_device *dev);
|
||||
|
||||
void ipoib_event(struct ib_event_handler *handler,
|
||||
struct ib_event *record);
|
||||
|
||||
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
|
||||
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
|
||||
|
||||
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
|
||||
u16 pkey, int child_type);
|
||||
|
||||
int __init ipoib_netlink_init(void);
|
||||
void __exit ipoib_netlink_fini(void);
|
||||
|
||||
void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
|
||||
int ipoib_set_mode(struct net_device *dev, const char *buf);
|
||||
|
||||
void ipoib_setup(struct net_device *dev);
|
||||
|
||||
void ipoib_pkey_open(struct ipoib_dev_priv *priv);
|
||||
void ipoib_drain_cq(struct net_device *dev);
|
||||
|
||||
void ipoib_set_ethtool_ops(struct net_device *dev);
|
||||
int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
|
||||
|
||||
#define IPOIB_FLAGS_RC 0x80
|
||||
#define IPOIB_FLAGS_UC 0x40
|
||||
|
||||
/* We don't support UC connections at the moment */
|
||||
#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC))
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_CM
|
||||
|
||||
extern int ipoib_max_conn_qp;
|
||||
|
||||
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
return IPOIB_CM_SUPPORTED(dev->dev_addr) &&
|
||||
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
|
||||
}
|
||||
|
||||
static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
return IPOIB_CM_SUPPORTED(hwaddr) &&
|
||||
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
|
||||
}
|
||||
|
||||
static inline int ipoib_cm_up(struct ipoib_neigh *neigh)
|
||||
|
||||
{
|
||||
return test_bit(IPOIB_FLAG_OPER_UP, &neigh->cm->flags);
|
||||
}
|
||||
|
||||
static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh)
|
||||
{
|
||||
return neigh->cm;
|
||||
}
|
||||
|
||||
static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx)
|
||||
{
|
||||
neigh->cm = tx;
|
||||
}
|
||||
|
||||
static inline int ipoib_cm_has_srq(struct net_device *dev)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
return !!priv->cm.srq;
|
||||
}
|
||||
|
||||
static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
return priv->cm.max_cm_mtu;
|
||||
}
|
||||
|
||||
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
|
||||
int ipoib_cm_dev_open(struct net_device *dev);
|
||||
void ipoib_cm_dev_stop(struct net_device *dev);
|
||||
int ipoib_cm_dev_init(struct net_device *dev);
|
||||
int ipoib_cm_add_mode_attr(struct net_device *dev);
|
||||
void ipoib_cm_dev_cleanup(struct net_device *dev);
|
||||
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
|
||||
struct ipoib_neigh *neigh);
|
||||
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
|
||||
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
|
||||
unsigned int mtu);
|
||||
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
|
||||
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
|
||||
#else
|
||||
|
||||
struct ipoib_cm_tx;
|
||||
|
||||
#define ipoib_max_conn_qp 0
|
||||
|
||||
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
|
||||
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ipoib_cm_up(struct ipoib_neigh *neigh)
|
||||
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int ipoib_cm_has_srq(struct net_device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline
|
||||
int ipoib_cm_dev_open(struct net_device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
void ipoib_cm_dev_stop(struct net_device *dev)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline
|
||||
int ipoib_cm_dev_init(struct net_device *dev)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline
|
||||
void ipoib_cm_dev_cleanup(struct net_device *dev)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
|
||||
struct ipoib_neigh *neigh)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline
|
||||
int ipoib_cm_add_mode_attr(struct net_device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
|
||||
unsigned int mtu)
|
||||
{
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
|
||||
static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
|
||||
void ipoib_create_debug_files(struct net_device *dev);
|
||||
void ipoib_delete_debug_files(struct net_device *dev);
|
||||
int ipoib_register_debugfs(void);
|
||||
void ipoib_unregister_debugfs(void);
|
||||
#else
|
||||
static inline void ipoib_create_debug_files(struct net_device *dev) { }
|
||||
static inline void ipoib_delete_debug_files(struct net_device *dev) { }
|
||||
static inline int ipoib_register_debugfs(void) { return 0; }
|
||||
static inline void ipoib_unregister_debugfs(void) { }
|
||||
#endif
|
||||
|
||||
#define ipoib_printk(level, priv, format, arg...) \
|
||||
printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
|
||||
#define ipoib_warn(priv, format, arg...) \
|
||||
ipoib_printk(KERN_WARNING, priv, format , ## arg)
|
||||
|
||||
extern int ipoib_sendq_size;
|
||||
extern int ipoib_recvq_size;
|
||||
|
||||
extern struct ib_sa_client ipoib_sa_client;
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
|
||||
extern int ipoib_debug_level;
|
||||
|
||||
#define ipoib_dbg(priv, format, arg...) \
|
||||
do { \
|
||||
if (ipoib_debug_level > 0) \
|
||||
ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
|
||||
} while (0)
|
||||
#define ipoib_dbg_mcast(priv, format, arg...) \
|
||||
do { \
|
||||
if (mcast_debug_level > 0) \
|
||||
ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
|
||||
} while (0)
|
||||
#else /* CONFIG_INFINIBAND_IPOIB_DEBUG */
|
||||
#define ipoib_dbg(priv, format, arg...) \
|
||||
do { (void) (priv); } while (0)
|
||||
#define ipoib_dbg_mcast(priv, format, arg...) \
|
||||
do { (void) (priv); } while (0)
|
||||
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
|
||||
#define ipoib_dbg_data(priv, format, arg...) \
|
||||
do { \
|
||||
if (data_debug_level > 0) \
|
||||
ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
|
||||
} while (0)
|
||||
#else /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
|
||||
#define ipoib_dbg_data(priv, format, arg...) \
|
||||
do { (void) (priv); } while (0)
|
||||
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
|
||||
|
||||
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
|
||||
|
||||
extern const char ipoib_driver_version[];
|
||||
|
||||
#endif /* _IPOIB_H */
|
1615
drivers/infiniband/ulp/ipoib/ipoib_cm.c
Normal file
1615
drivers/infiniband/ulp/ipoib/ipoib_cm.c
Normal file
File diff suppressed because it is too large
Load diff
109
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
Normal file
109
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
Normal file
|
@ -0,0 +1,109 @@
|
|||
/*
|
||||
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
#include "ipoib.h"
|
||||
|
||||
static void ipoib_get_drvinfo(struct net_device *netdev,
|
||||
struct ethtool_drvinfo *drvinfo)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(netdev);
|
||||
struct ib_device_attr *attr;
|
||||
|
||||
attr = kmalloc(sizeof(*attr), GFP_KERNEL);
|
||||
if (attr && !ib_query_device(priv->ca, attr))
|
||||
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%d", (int)(attr->fw_ver >> 32),
|
||||
(int)(attr->fw_ver >> 16) & 0xffff,
|
||||
(int)attr->fw_ver & 0xffff);
|
||||
kfree(attr);
|
||||
|
||||
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
|
||||
sizeof(drvinfo->bus_info));
|
||||
|
||||
strlcpy(drvinfo->version, ipoib_driver_version,
|
||||
sizeof(drvinfo->version));
|
||||
|
||||
strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
|
||||
}
|
||||
|
||||
static int ipoib_get_coalesce(struct net_device *dev,
|
||||
struct ethtool_coalesce *coal)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
|
||||
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
|
||||
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * ethtool .set_coalesce: apply RX interrupt coalescing parameters to the
 * receive CQ and cache them so ipoib_get_coalesce() can report them back.
 *
 * Returns 0 on success, -EINVAL for out-of-range values, or the error
 * from ib_modify_cq() (except -ENOSYS, which is tolerated).
 */
static int ipoib_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	/*
	 * These values are saved in the private data and returned
	 * when ipoib_get_coalesce() is called
	 */
	if (coal->rx_coalesce_usecs > 0xffff ||
	    coal->rx_max_coalesced_frames > 0xffff)
		return -EINVAL;

	ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
			   coal->rx_coalesce_usecs);
	/* -ENOSYS means the HCA doesn't support CQ moderation; still cache */
	if (ret && ret != -ENOSYS) {
		ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
		return ret;
	}

	priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
	priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;

	return 0;
}
|
||||
|
||||
/* ethtool operations exported for IPoIB network devices */
static const struct ethtool_ops ipoib_ethtool_ops = {
	.get_drvinfo		= ipoib_get_drvinfo,
	.get_coalesce		= ipoib_get_coalesce,
	.set_coalesce		= ipoib_set_coalesce,
};
|
||||
|
||||
/* Attach the IPoIB ethtool operations to a freshly created net_device. */
void ipoib_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &ipoib_ethtool_ops;
}
|
297
drivers/infiniband/ulp/ipoib/ipoib_fs.c
Normal file
297
drivers/infiniband/ulp/ipoib/ipoib_fs.c
Normal file
|
@ -0,0 +1,297 @@
|
|||
/*
|
||||
* Copyright (c) 2004 Topspin Communications. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct file_operations;
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include "ipoib.h"
|
||||
|
||||
static struct dentry *ipoib_root;
|
||||
|
||||
static void format_gid(union ib_gid *gid, char *buf)
|
||||
{
|
||||
int i, n;
|
||||
|
||||
for (n = 0, i = 0; i < 8; ++i) {
|
||||
n += sprintf(buf + n, "%x",
|
||||
be16_to_cpu(((__be16 *) gid->raw)[i]));
|
||||
if (i < 7)
|
||||
buf[n++] = ':';
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * seq_file .start for the multicast group debugfs file: allocate an
 * iterator over the device's multicast groups and advance it to *pos.
 * Returns NULL (iterator freed) when *pos is past the end.
 */
static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
{
	struct ipoib_mcast_iter *iter;
	loff_t n = *pos;

	iter = ipoib_mcast_iter_init(file->private);
	if (!iter)
		return NULL;

	/* skip forward to the requested position */
	while (n--) {
		if (ipoib_mcast_iter_next(iter)) {
			kfree(iter);
			return NULL;
		}
	}

	return iter;
}
|
||||
|
||||
static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr,
|
||||
loff_t *pos)
|
||||
{
|
||||
struct ipoib_mcast_iter *iter = iter_ptr;
|
||||
|
||||
(*pos)++;
|
||||
|
||||
if (ipoib_mcast_iter_next(iter)) {
|
||||
kfree(iter);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return iter;
|
||||
}
|
||||
|
||||
/* seq_file .stop for the multicast group file: no cleanup required here. */
static void ipoib_mcg_seq_stop(struct seq_file *file, void *iter_ptr)
{
	/* nothing for now */
}
|
||||
|
||||
/*
 * seq_file .show for the multicast group debugfs file: print one group's
 * GID, creation time, queue length, and join/send-only state.
 */
static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
{
	struct ipoib_mcast_iter *iter = iter_ptr;
	/* sized for the longest possible textual GID */
	char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
	union ib_gid mgid;
	unsigned long created;
	unsigned int queuelen, complete, send_only;

	if (!iter)
		return 0;

	ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
			      &complete, &send_only);

	format_gid(&mgid, gid_buf);

	seq_printf(file,
		   "GID: %s\n"
		   "  created: %10ld\n"
		   "  queuelen: %9d\n"
		   "  complete: %9s\n"
		   "  send_only: %8s\n"
		   "\n",
		   gid_buf, created, queuelen,
		   complete ? "yes" : "no",
		   send_only ? "yes" : "no");

	return 0;
}
|
||||
|
||||
/* seq_file iteration callbacks for the per-device multicast group file */
static const struct seq_operations ipoib_mcg_seq_ops = {
	.start = ipoib_mcg_seq_start,
	.next  = ipoib_mcg_seq_next,
	.stop  = ipoib_mcg_seq_stop,
	.show  = ipoib_mcg_seq_show,
};
|
||||
|
||||
/*
 * Open handler for the mcg debugfs file: start a seq_file session and
 * stash the net_device (stored in i_private at creation time) so the
 * iterator callbacks can reach it.
 */
static int ipoib_mcg_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &ipoib_mcg_seq_ops);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->private = inode->i_private;

	return 0;
}
|
||||
|
||||
/* file_operations for the read-only multicast group debugfs file */
static const struct file_operations ipoib_mcg_fops = {
	.owner   = THIS_MODULE,
	.open    = ipoib_mcg_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};
|
||||
|
||||
/*
 * seq_file .start for the path debugfs file: allocate a path iterator
 * and advance it to *pos; NULL (iterator freed) when past the end.
 */
static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
{
	struct ipoib_path_iter *iter;
	loff_t n = *pos;

	iter = ipoib_path_iter_init(file->private);
	if (!iter)
		return NULL;

	/* skip forward to the requested position */
	while (n--) {
		if (ipoib_path_iter_next(iter)) {
			kfree(iter);
			return NULL;
		}
	}

	return iter;
}
|
||||
|
||||
static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr,
|
||||
loff_t *pos)
|
||||
{
|
||||
struct ipoib_path_iter *iter = iter_ptr;
|
||||
|
||||
(*pos)++;
|
||||
|
||||
if (ipoib_path_iter_next(iter)) {
|
||||
kfree(iter);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return iter;
|
||||
}
|
||||
|
||||
/* seq_file .stop for the path file: no cleanup required here. */
static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr)
{
	/* nothing for now */
}
|
||||
|
||||
/*
 * seq_file .show for the path debugfs file: print one path record's DGID
 * and, once the path is resolved (non-zero DLID), its DLID, SL and rate.
 */
static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
{
	struct ipoib_path_iter *iter = iter_ptr;
	/* sized for the longest possible textual GID */
	char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
	struct ipoib_path path;
	int rate;

	if (!iter)
		return 0;

	ipoib_path_iter_read(iter, &path);

	format_gid(&path.pathrec.dgid, gid_buf);

	seq_printf(file,
		   "GID: %s\n"
		   "  complete: %6s\n",
		   gid_buf, path.pathrec.dlid ? "yes" : "no");

	if (path.pathrec.dlid) {
		rate = ib_rate_to_mbps(path.pathrec.rate);

		seq_printf(file,
			   "  DLID:     0x%04x\n"
			   "  SL: %12d\n"
			   "  rate: %8d.%d Gb/sec\n",
			   be16_to_cpu(path.pathrec.dlid),
			   path.pathrec.sl,
			   rate / 1000, rate % 1000);
	}

	seq_putc(file, '\n');

	return 0;
}
|
||||
|
||||
/* seq_file iteration callbacks for the per-device path file */
static const struct seq_operations ipoib_path_seq_ops = {
	.start = ipoib_path_seq_start,
	.next  = ipoib_path_seq_next,
	.stop  = ipoib_path_seq_stop,
	.show  = ipoib_path_seq_show,
};
|
||||
|
||||
/*
 * Open handler for the path debugfs file: start a seq_file session and
 * stash the net_device (stored in i_private at creation time).
 */
static int ipoib_path_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &ipoib_path_seq_ops);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->private = inode->i_private;

	return 0;
}
|
||||
|
||||
/* file_operations for the read-only path debugfs file */
static const struct file_operations ipoib_path_fops = {
	.owner   = THIS_MODULE,
	.open    = ipoib_path_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};
|
||||
|
||||
/*
 * Create the per-device "<ifname>_mcg" and "<ifname>_path" files under the
 * ipoib debugfs root.  Failures are logged but not fatal.
 */
void ipoib_create_debug_files(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	/* "_path" is the longer of the two suffixes */
	char name[IFNAMSIZ + sizeof "_path"];

	snprintf(name, sizeof name, "%s_mcg", dev->name);
	priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
					       ipoib_root, dev, &ipoib_mcg_fops);
	if (!priv->mcg_dentry)
		ipoib_warn(priv, "failed to create mcg debug file\n");

	snprintf(name, sizeof name, "%s_path", dev->name);
	priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
						ipoib_root, dev, &ipoib_path_fops);
	if (!priv->path_dentry)
		ipoib_warn(priv, "failed to create path debug file\n");
}
|
||||
|
||||
/*
 * Remove the per-device debugfs files created by ipoib_create_debug_files().
 * debugfs_remove() tolerates NULL dentries, so partial creation is safe.
 */
void ipoib_delete_debug_files(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	debugfs_remove(priv->mcg_dentry);
	debugfs_remove(priv->path_dentry);
}
|
||||
|
||||
/*
 * Create the module-wide "ipoib" debugfs directory.
 * Returns 0 on success, -ENOMEM if creation failed.
 *
 * NOTE(review): debugfs_create_dir() can also return an ERR_PTR (e.g. when
 * debugfs is disabled); this only checks for NULL — confirm against the
 * debugfs API of this kernel version.
 */
int ipoib_register_debugfs(void)
{
	ipoib_root = debugfs_create_dir("ipoib", NULL);
	return ipoib_root ? 0 : -ENOMEM;
}
|
||||
|
||||
/* Tear down the module-wide "ipoib" debugfs directory. */
void ipoib_unregister_debugfs(void)
{
	debugfs_remove(ipoib_root);
}
|
1106
drivers/infiniband/ulp/ipoib/ipoib_ib.c
Normal file
1106
drivers/infiniband/ulp/ipoib/ipoib_ib.c
Normal file
File diff suppressed because it is too large
Load diff
1793
drivers/infiniband/ulp/ipoib/ipoib_main.c
Normal file
1793
drivers/infiniband/ulp/ipoib/ipoib_main.c
Normal file
File diff suppressed because it is too large
Load diff
963
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
Normal file
963
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
Normal file
|
@ -0,0 +1,963 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
|
||||
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/igmp.h>
|
||||
#include <linux/inetdevice.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <net/dst.h>
|
||||
|
||||
#include "ipoib.h"
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/* Debug tracing level for multicast code; settable at runtime via sysfs. */
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

/* Serializes starting/stopping the multicast join task. */
static DEFINE_MUTEX(mcast_mutex);

/* Snapshot of one multicast group, used by the debugfs iterator. */
struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;	/* group GID */
	unsigned long      created;	/* jiffies at group creation */
	unsigned int       queuelen;	/* packets queued awaiting join */
	unsigned int       complete;	/* join finished? */
	unsigned int       send_only;	/* send-only membership? */
};
|
||||
|
||||
/*
 * Free a multicast group entry: detach its neighbours, drop its address
 * handle, and discard any packets still queued for it (counted as
 * tx_dropped under the tx lock).
 */
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	/* remove all neigh connected to this mcast */
	ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	/* stats update must be done under the tx lock */
	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}
|
||||
|
||||
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
|
||||
int can_sleep)
|
||||
{
|
||||
struct ipoib_mcast *mcast;
|
||||
|
||||
mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
|
||||
if (!mcast)
|
||||
return NULL;
|
||||
|
||||
mcast->dev = dev;
|
||||
mcast->created = jiffies;
|
||||
mcast->backoff = 1;
|
||||
|
||||
INIT_LIST_HEAD(&mcast->list);
|
||||
INIT_LIST_HEAD(&mcast->neigh_list);
|
||||
skb_queue_head_init(&mcast->pkt_queue);
|
||||
|
||||
return mcast;
|
||||
}
|
||||
|
||||
/*
 * Look up a multicast group by raw MGID in the device's rbtree.
 * Caller must hold priv->lock.  Returns NULL if not found.
 */
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		/* tree is ordered by memcmp() of the raw 16-byte GID */
		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}
|
||||
|
||||
/*
 * Insert a multicast group into the device's rbtree, keyed by raw MGID.
 * Caller must hold priv->lock.  Returns 0 on success or -EEXIST if a
 * group with the same MGID is already present.
 */
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}
|
||||
|
||||
/*
 * Complete a successful SA multicast join: record the member record,
 * (for the broadcast group) update device MTU and Q_Key, attach the QP
 * to the group unless it is send-only, create the address handle, and
 * flush any packets that were queued while the join was in progress.
 *
 * Runs in SA client callback context.  Returns 0 on success, -EAGAIN if
 * the broadcast group vanished under us, or a negative errno.
 */
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the multicast MTU and cached Q_Key before we attach if it's
	 * the broadcast group.
	 */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			/* broadcast group torn down concurrently; retry */
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;

		/* in datagram mode the device MTU tracks the multicast MTU */
		if (!ipoib_cm_admin_enabled(dev)) {
			rtnl_lock();
			dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
			rtnl_unlock();
		}
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		/* build the address vector from the member record */
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.static_rate   = mcast->mcmember.rate,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (IS_ERR(ah)) {
			ipoib_warn(priv, "ib_address_create failed %ld\n",
				   -PTR_ERR(ah));
			/* use original error */
			return PTR_ERR(ah);
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		/* xmit path takes the tx lock itself; drop it around the call */
		netif_tx_unlock_bh(dev);

		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");

		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}
|
||||
|
||||
/*
 * SA callback for a send-only multicast join.  On success, finish the
 * join (which flushes queued packets); on failure, drop queued packets
 * and clear the BUSY flag so a later send retries the join.
 */
static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		/* rate-limit the log to the first 20 failures */
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}
|
||||
|
||||
/*
 * Kick off an asynchronous SA join for a send-only multicast group.
 * Returns 0 if the join was started, -ENODEV if the port is down,
 * -EBUSY if a join is already in flight, or the SA client's error.
 */
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	/* BUSY marks a join in progress; cleared by the completion handler */
	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID	|
					 IB_SA_MCMEMBER_REC_PORT_GID	|
					 IB_SA_MCMEMBER_REC_PKEY	|
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}
|
||||
|
||||
/*
 * Deferred work that turns the net carrier on after the broadcast group
 * join completes.  Runs on ipoib_workqueue so it can take rtnl_lock
 * without deadlocking the SA callback path.
 */
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}
|
||||
|
||||
/*
 * SA callback for a regular (non send-only) multicast join.  On success,
 * reset backoff, reschedule the join task for remaining groups, and (for
 * the broadcast group) defer carrier-on work.  On failure, log (rate
 * limited), double the backoff, clear BUSY, and reschedule a retry.
 * Always completes mcast->done so waiters can proceed.
 */
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET) {
		status = 0;
		goto out;
	}

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		status = 0;
		goto out;
	}

	/* failure path: log at debug level for transient SA errors */
	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	/* exponential backoff, capped */
	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);
out:
	complete(&mcast->done);
	return status;
}
|
||||
|
||||
/*
 * Start an asynchronous SA join for @mcast.  When @create is set, the
 * full member record (Q_Key, MTU, rate, SL, ...) is copied from the
 * broadcast group so the SM can create the group if it does not exist;
 * otherwise only the minimal "join existing group" fields are sent.
 * On submission failure, applies backoff and reschedules the join task.
 */
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY			|
			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
			IB_SA_MCMEMBER_REC_MTU			|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS	|
			IB_SA_MCMEMBER_REC_RATE_SELECTOR	|
			IB_SA_MCMEMBER_REC_RATE			|
			IB_SA_MCMEMBER_REC_SL			|
			IB_SA_MCMEMBER_REC_FLOW_LABEL		|
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		/* mirror the broadcast group's parameters */
		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.mtu_selector  = IB_SA_EQ;
		rec.mtu		  = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate	  = priv->broadcast->mcmember.rate;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	init_completion(&mcast->done);
	set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		/* no callback will run; complete done ourselves */
		complete(&mcast->done);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		/* exponential backoff before the next attempt, capped */
		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}
|
||||
|
||||
/*
 * Delayed work that drives multicast joins: refreshes local LID/GID,
 * (re)creates and joins the broadcast group first, then joins the
 * remaining groups on the multicast list one at a time — each join
 * completion requeues this task until every group is attached.
 */
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;
	struct ib_port_attr port_attr;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_port(priv->ca, priv->port, &port_attr) ||
	    port_attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
			  port_attr.state);
		return;
	}
	priv->local_lid = port_attr.lid;

	/* keep the hardware address's GID portion in sync with the port */
	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			/* retry in a second */
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		/* broadcast MGID lives at offset 4 of the L2 broadcast addr */
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	/* the broadcast group must be joined before any other group */
	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		/* list_for_each_entry leaves mcast at the head when exhausted */
		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		/* one join at a time; completion requeues this task */
		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}
|
||||
|
||||
/*
 * Start the multicast join task for @dev, unless it is already running
 * (the IPOIB_MCAST_RUN bit gates a single outstanding task).
 */
int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}
|
||||
|
||||
/*
 * Stop the multicast join task.  When @flush is set, also wait for any
 * already-running work on ipoib_workqueue to finish.
 */
int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}
|
||||
|
||||
/*
 * Leave a multicast group: cancel/free any outstanding SA join and, if
 * the QP was attached, detach it.  Detach errors are logged only; the
 * function always returns 0.
 */
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	/* BUSY set means an SA join is outstanding — release it */
	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}
|
||||
|
||||
/*
 * Transmit @skb to the multicast group addressed by the hardware
 * address @daddr (the 16-byte MGID starts at offset 4 of the 20-byte
 * IPoIB address).
 *
 * If no group entry exists yet, a send-only entry is created and the
 * packet is queued on it until the join completes; packets beyond
 * IPOIB_MAX_MCAST_QUEUE are dropped.  Once an address handle exists
 * the packet is sent immediately, caching the AH in a neighbour entry.
 * priv->lock is dropped and reacquired around ipoib_neigh_get(), so
 * the mcast pointer is deliberately NULLed on the queued path to avoid
 * double-sending if the join races with us (see comment below).
 */
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;
	void *mgid = daddr + 4;

	spin_lock_irqsave(&priv->lock, flags);

	/* Can't send before the broadcast group is joined and attached */
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		/* Join not complete yet: queue (bounded) or drop */
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		struct ipoib_neigh *neigh;

		/* ipoib_neigh_get() may sleep-ish paths; drop the lock */
		spin_unlock_irqrestore(&priv->lock, flags);
		neigh = ipoib_neigh_get(dev, daddr);
		spin_lock_irqsave(&priv->lock, flags);
		if (!neigh) {
			neigh = ipoib_neigh_alloc(daddr, dev);
			if (neigh) {
				/* cache the group AH on the neighbour */
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		if (neigh)
			ipoib_neigh_put(neigh);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
|
||||
|
||||
/*
 * Tear down every multicast group on @dev (including the broadcast
 * group) in three phases:
 *   1. under priv->lock, unlink all entries from the list and rb-tree
 *      onto a private remove_list;
 *   2. without the lock, wait for any in-flight SA joins to complete
 *      (wait_for_completion() may sleep, hence outside the spinlock);
 *   3. leave and free each entry.
 */
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Wait for outstanding joins first, separately from the leave pass */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
		if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
			wait_for_completion(&mcast->done);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}
|
||||
|
||||
/*
 * Check whether the 20-byte IPoIB multicast hardware address @addr
 * belongs to this interface's partition, by comparing it against
 * @broadcast: bytes 0-5 (reserved QPN, GID prefix, scope) and bytes
 * 7-9 (signature lower, P_Key) must match; byte 6 is ignored.
 * Returns 1 when valid, 0 otherwise.
 */
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope must match the broadcast address */
	if (memcmp(addr, broadcast, 6) != 0)
		return 0;
	/* skip byte 6, then compare signature lower + pkey (bytes 7-9) */
	return memcmp(addr + 7, broadcast + 7, 3) == 0;
}
|
||||
|
||||
/*
 * Work handler: resynchronise the driver's multicast group list with
 * the net core's address list after a dev_mc list change.
 *
 * Locking: takes local_irq_save + netif_addr_lock + priv->lock in that
 * order (the net core only exposes the mc list under the addr lock).
 * Groups are mark-and-sweeped via IPOIB_MCAST_FLAG_FOUND; removed or
 * replaced entries are collected on remove_list and torn down after
 * the locks are dropped.  Restarts the join thread if the interface
 * is still administratively up.
 */
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	/* flush == 0: we are on the workqueue ourselves, must not drain it */
	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		/* MGID occupies bytes 4..19 of the hardware address */
		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore group which is directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
|
||||
|
||||
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
|
||||
{
|
||||
struct ipoib_mcast_iter *iter;
|
||||
|
||||
iter = kmalloc(sizeof *iter, GFP_KERNEL);
|
||||
if (!iter)
|
||||
return NULL;
|
||||
|
||||
iter->dev = dev;
|
||||
memset(iter->mgid.raw, 0, 16);
|
||||
|
||||
if (ipoib_mcast_iter_next(iter)) {
|
||||
kfree(iter);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return iter;
|
||||
}
|
||||
|
||||
/*
 * Advance @iter to the first multicast group whose MGID sorts strictly
 * after iter->mgid, snapshotting that group's state (MGID, creation
 * time, queue length, AH-resolved flag, send-only flag) into the
 * iterator while priv->lock is held.  Returns 0 on success, 1 when
 * there are no further entries.
 *
 * NOTE(review): the rb-tree is walked linearly from rb_first() rather
 * than searched — O(n) per step, presumably acceptable for a debugfs
 * dump path.
 */
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}
|
||||
|
||||
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
|
||||
union ib_gid *mgid,
|
||||
unsigned long *created,
|
||||
unsigned int *queuelen,
|
||||
unsigned int *complete,
|
||||
unsigned int *send_only)
|
||||
{
|
||||
*mgid = iter->mgid;
|
||||
*created = iter->created;
|
||||
*queuelen = iter->queuelen;
|
||||
*complete = iter->complete;
|
||||
*send_only = iter->send_only;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
|
182
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
Normal file
182
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
Normal file
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright (c) 2012 Mellanox Technologies. - All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/if_arp.h> /* For ARPHRD_xxx */
|
||||
#include <linux/module.h>
|
||||
#include <net/rtnetlink.h>
|
||||
#include "ipoib.h"
|
||||
|
||||
/* Netlink attribute validation policy: all IPoIB link attributes are u16. */
static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
	[IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
	[IFLA_IPOIB_MODE] = { .type = NLA_U16 },
	[IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
};
|
||||
|
||||
/*
 * rtnl_link_ops->fill_info: emit the IPoIB link attributes (pkey,
 * connected-mode flag, user-multicast flag) into the netlink dump
 * @skb.  Returns 0 on success, -EMSGSIZE if the skb ran out of room.
 */
static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 val;

	if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
		goto nla_put_failure;

	/* mode: 1 = connected (CM), 0 = datagram */
	val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
	if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
		goto nla_put_failure;

	val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
	if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
|
||||
|
||||
/*
 * rtnl_link_ops->changelink: apply IFLA_IPOIB_MODE (datagram/connected)
 * and/or IFLA_IPOIB_UMCAST from @data.  An unrecognised mode value or
 * a failing ipoib_set_mode() aborts before the umcast update.  Returns
 * 0 on success or a negative errno.
 */
static int ipoib_changelink(struct net_device *dev,
			    struct nlattr *tb[], struct nlattr *data[])
{
	u16 mode, umcast;
	int ret = 0;

	if (data[IFLA_IPOIB_MODE]) {
		mode  = nla_get_u16(data[IFLA_IPOIB_MODE]);
		/* ipoib_set_mode() parses the same strings the sysfs knob uses */
		if (mode == IPOIB_MODE_DATAGRAM)
			ret = ipoib_set_mode(dev, "datagram\n");
		else if (mode == IPOIB_MODE_CONNECTED)
			ret = ipoib_set_mode(dev, "connected\n");
		else
			ret = -EINVAL;

		if (ret < 0)
			goto out_err;
	}

	if (data[IFLA_IPOIB_UMCAST]) {
		umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
		ipoib_set_umcast(dev, umcast);
	}

out_err:
	return ret;
}
|
||||
|
||||
/*
 * rtnl_link_ops->newlink: create an IPoIB child (P_Key) interface via
 * "ip link add ... type ipoib".  Requires IFLA_LINK to name an
 * InfiniBand parent that is not itself a child.  The P_Key defaults to
 * the parent's; 0 and 0x8000 (full-membership bit alone) are rejected
 * as invalid.  Returns 0 on success or a negative errno.
 */
static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
				struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *pdev;
	struct ipoib_dev_priv *ppriv;
	u16 child_pkey;
	int err;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!pdev || pdev->type != ARPHRD_INFINIBAND)
		return -ENODEV;

	ppriv = netdev_priv(pdev);

	/* only one level of nesting: children cannot have children */
	if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
		ipoib_warn(ppriv, "child creation disallowed for child devices\n");
		return -EINVAL;
	}

	if (!data || !data[IFLA_IPOIB_PKEY]) {
		ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
		child_pkey  = ppriv->pkey;
	} else
		child_pkey  = nla_get_u16(data[IFLA_IPOIB_PKEY]);

	if (child_pkey == 0 || child_pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	child_pkey |= 0x8000;

	err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);

	/* apply any mode/umcast attributes supplied alongside the pkey */
	if (!err && data)
		err = ipoib_changelink(dev, tb, data);
	return err;
}
|
||||
|
||||
/*
 * rtnl_link_ops->dellink: queue @dev for unregistration and unlink it
 * from the parent's child_intfs list, both under the parent's
 * vlan_rwsem so concurrent add/delete on the same parent serialise.
 */
static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
{
	struct ipoib_dev_priv *priv, *ppriv;

	priv = netdev_priv(dev);
	ppriv = netdev_priv(priv->parent);

	down_write(&ppriv->vlan_rwsem);
	unregister_netdevice_queue(dev, head);
	list_del(&priv->list);
	up_write(&ppriv->vlan_rwsem);
}
|
||||
|
||||
/*
 * rtnl_link_ops->get_size: worst-case netlink payload for
 * ipoib_fill_info() — three u16 attributes.
 */
static size_t ipoib_get_size(const struct net_device *dev)
{
	return nla_total_size(2) +	/* IFLA_IPOIB_PKEY   */
		nla_total_size(2) +	/* IFLA_IPOIB_MODE   */
		nla_total_size(2);	/* IFLA_IPOIB_UMCAST */
}
|
||||
|
||||
/* rtnetlink ops implementing "ip link ... type ipoib". */
static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
	.kind		= "ipoib",
	.maxtype	= IFLA_IPOIB_MAX,
	.policy		= ipoib_policy,
	.priv_size	= sizeof(struct ipoib_dev_priv),
	.setup		= ipoib_setup,
	.newlink	= ipoib_new_child_link,
	.changelink	= ipoib_changelink,
	.dellink	= ipoib_unregister_child_dev,
	.get_size	= ipoib_get_size,
	.fill_info	= ipoib_fill_info,
};
|
||||
|
||||
/* Register the "ipoib" rtnetlink link type at module init. */
int __init ipoib_netlink_init(void)
{
	return rtnl_link_register(&ipoib_link_ops);
}
|
||||
|
||||
/* Unregister the "ipoib" rtnetlink link type at module exit. */
void __exit ipoib_netlink_fini(void)
{
	rtnl_link_unregister(&ipoib_link_ops);
}

MODULE_ALIAS_RTNL_LINK("ipoib");
|
297
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
Normal file
297
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
Normal file
|
@ -0,0 +1,297 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "ipoib.h"
|
||||
|
||||
/*
 * Attach the interface's UD QP to the multicast group (@mgid, @mlid).
 *
 * First resolves the P_Key index and records IPOIB_PKEY_ASSIGNED
 * (cleared and -ENXIO returned if the P_Key is not in the port table).
 * If @set_qkey, the QP's Q_Key is updated to priv->qkey before the
 * attach.  Returns 0 on success or a negative errno.
 */
int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr *qp_attr = NULL;
	int ret;
	u16 pkey_index;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		ret = -ENXIO;
		goto out;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	if (set_qkey) {
		ret = -ENOMEM;
		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr)
			goto out;

		/* set correct QKey for QP */
		qp_attr->qkey = priv->qkey;
		ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
		if (ret) {
			ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
			goto out;
		}
	}

	/* attach QP to multicast group */
	ret = ib_attach_mcast(priv->qp, mgid, mlid);
	if (ret)
		ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);

out:
	/* kfree(NULL) is a no-op when set_qkey was false or alloc failed */
	kfree(qp_attr);
	return ret;
}
|
||||
|
||||
/*
 * Drive the UD QP through RESET -> INIT -> RTR -> RTS.
 *
 * Requires IPOIB_PKEY_ASSIGNED (returns -1 otherwise).  attr_mask is
 * mutated between transitions because IB_QP_PORT may only be set on
 * INIT and IB_QP_PKEY_INDEX must be dropped before RTS.  On any
 * failure the QP is forced back to RESET and the errno is returned.
 */
int ipoib_init_qp(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;
	struct ib_qp_attr qp_attr;
	int attr_mask;

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		return -1;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qkey = 0;
	qp_attr.port_num = priv->port;
	qp_attr.pkey_index = priv->pkey_index;
	attr_mask =
	    IB_QP_QKEY |
	    IB_QP_PORT |
	    IB_QP_PKEY_INDEX |
	    IB_QP_STATE;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
		goto out_fail;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	/* Can't set this in a INIT->RTR transition */
	attr_mask &= ~IB_QP_PORT;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
		goto out_fail;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	attr_mask |= IB_QP_SQ_PSN;
	attr_mask &= ~IB_QP_PKEY_INDEX;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
		goto out_fail;
	}

	return 0;

out_fail:
	/* best-effort: leave the QP in a known (RESET) state */
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	return ret;
}
|
||||
|
||||
/*
 * Allocate the verbs resources for @dev: PD, DMA MR, receive and send
 * CQs, and the UD QP; then pre-fill the cached send/receive work
 * request templates and embed the QP number into the device hardware
 * address (bytes 1-3).
 *
 * The receive CQ is sized for UD traffic plus, when connected mode
 * initialises successfully, the CM send queue and either the SRQ or
 * per-connection receive queues.  On any failure everything allocated
 * so far is unwound via the goto chain and -ENODEV is returned.
 */
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr init_attr = {
		.cap = {
			.max_send_wr  = ipoib_sendq_size,
			.max_recv_wr  = ipoib_recvq_size,
			.max_send_sge = 1,
			.max_recv_sge = IPOIB_UD_RX_SG
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_UD
	};

	int ret, size;
	int i;

	priv->pd = ib_alloc_pd(priv->ca);
	if (IS_ERR(priv->pd)) {
		printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
		return -ENODEV;
	}

	priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(priv->mr)) {
		printk(KERN_WARNING "%s: ib_get_dma_mr failed\n", ca->name);
		goto out_free_pd;
	}

	size = ipoib_recvq_size + 1;
	ret = ipoib_cm_dev_init(dev);
	if (!ret) {
		/* connected mode available: grow the recv CQ accordingly */
		size += ipoib_sendq_size;
		if (ipoib_cm_has_srq(dev))
			size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
		else
			size += ipoib_recvq_size * ipoib_max_conn_qp;
	}

	priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
	if (IS_ERR(priv->recv_cq)) {
		printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
		goto out_free_mr;
	}

	priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
				     dev, ipoib_sendq_size, 0);
	if (IS_ERR(priv->send_cq)) {
		printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
		goto out_free_recv_cq;
	}

	if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
		goto out_free_send_cq;

	init_attr.send_cq = priv->send_cq;
	init_attr.recv_cq = priv->recv_cq;

	if (priv->hca_caps & IB_DEVICE_UD_TSO)
		init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
		init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
		init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;

	if (dev->features & NETIF_F_SG)
		init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;

	priv->qp = ib_create_qp(priv->pd, &init_attr);
	if (IS_ERR(priv->qp)) {
		printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
		goto out_free_send_cq;
	}

	/* hardware address bytes 1-3 carry the 24-bit QP number */
	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
	priv->dev->dev_addr[3] = (priv->qp->qp_num      ) & 0xff;

	for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
		priv->tx_sge[i].lkey = priv->mr->lkey;

	priv->tx_wr.opcode	= IB_WR_SEND;
	priv->tx_wr.sg_list	= priv->tx_sge;
	priv->tx_wr.send_flags	= IB_SEND_SIGNALED;

	priv->rx_sge[0].lkey = priv->mr->lkey;
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		/* large MTU: header fragment + one page fragment */
		priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
		priv->rx_sge[1].length = PAGE_SIZE;
		priv->rx_sge[1].lkey = priv->mr->lkey;
		priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
	} else {
		priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
		priv->rx_wr.num_sge = 1;
	}
	priv->rx_wr.next = NULL;
	priv->rx_wr.sg_list = priv->rx_sge;

	return 0;

out_free_send_cq:
	ib_destroy_cq(priv->send_cq);

out_free_recv_cq:
	ib_destroy_cq(priv->recv_cq);

out_free_mr:
	ib_dereg_mr(priv->mr);
	ipoib_cm_dev_cleanup(dev);

out_free_pd:
	ib_dealloc_pd(priv->pd);
	return -ENODEV;
}
|
||||
|
||||
/*
 * Release the verbs resources allocated by ipoib_transport_dev_init()
 * in reverse order: QP, CQs, CM state, MR, PD.  Failures are only
 * warned about since there is nothing useful to do on teardown.
 */
void ipoib_transport_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (priv->qp) {
		if (ib_destroy_qp(priv->qp))
			ipoib_warn(priv, "ib_qp_destroy failed\n");

		priv->qp = NULL;
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	}

	if (ib_destroy_cq(priv->send_cq))
		ipoib_warn(priv, "ib_cq_destroy (send) failed\n");

	if (ib_destroy_cq(priv->recv_cq))
		ipoib_warn(priv, "ib_cq_destroy (recv) failed\n");

	ipoib_cm_dev_cleanup(dev);

	if (ib_dereg_mr(priv->mr))
		ipoib_warn(priv, "ib_dereg_mr failed\n");

	if (ib_dealloc_pd(priv->pd))
		ipoib_warn(priv, "ib_dealloc_pd failed\n");
}
|
||||
|
||||
void ipoib_event(struct ib_event_handler *handler,
|
||||
struct ib_event *record)
|
||||
{
|
||||
struct ipoib_dev_priv *priv =
|
||||
container_of(handler, struct ipoib_dev_priv, event_handler);
|
||||
|
||||
if (record->element.port_num != priv->port)
|
||||
return;
|
||||
|
||||
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
|
||||
record->device->name, record->element.port_num);
|
||||
|
||||
if (record->event == IB_EVENT_SM_CHANGE ||
|
||||
record->event == IB_EVENT_CLIENT_REREGISTER) {
|
||||
queue_work(ipoib_workqueue, &priv->flush_light);
|
||||
} else if (record->event == IB_EVENT_PORT_ERR ||
|
||||
record->event == IB_EVENT_PORT_ACTIVE ||
|
||||
record->event == IB_EVENT_LID_CHANGE) {
|
||||
queue_work(ipoib_workqueue, &priv->flush_normal);
|
||||
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
|
||||
queue_work(ipoib_workqueue, &priv->flush_heavy);
|
||||
}
|
||||
}
|
209
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
Normal file
209
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
Normal file
|
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Copyright (c) 2004 Topspin Communications. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "ipoib.h"
|
||||
|
||||
/*
 * sysfs "parent" attribute (read-only): print the parent interface's
 * name for a child (P_Key) device.
 */
static ssize_t show_parent(struct device *d, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return sprintf(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
|
||||
|
||||
/*
 * Common worker for creating a child (P_Key) interface, shared by the
 * legacy sysfs path and the rtnetlink path (@type distinguishes them).
 *
 * Inherits MTU and hardware address from the parent, patches the
 * P_Key into the broadcast address (bytes 8-9), initialises and
 * registers the netdev, and — for legacy children only — creates the
 * proprietary sysfs attributes.  Caller holds RTNL and the parent's
 * vlan_rwsem.  Returns 0 on success or a negative errno; on failure
 * everything set up so far is unwound (the netdev itself is freed by
 * the caller).
 */
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
		     u16 pkey, int type)
{
	int result;

	priv->max_ib_mtu = ppriv->max_ib_mtu;
	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);

	result = ipoib_set_dev_features(priv, ppriv->ca);
	if (result)
		goto err;

	priv->pkey = pkey;

	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
	/* broadcast address bytes 8-9 carry the P_Key */
	priv->dev->broadcast[8] = pkey >> 8;
	priv->dev->broadcast[9] = pkey & 0xff;

	result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
	if (result < 0) {
		ipoib_warn(ppriv, "failed to initialize subinterface: "
			   "device %s, port %d",
			   ppriv->ca->name, ppriv->port);
		goto err;
	}

	result = register_netdevice(priv->dev);
	if (result) {
		ipoib_warn(priv, "failed to initialize; error %i", result);
		goto register_failed;
	}

	priv->parent = ppriv->dev;

	ipoib_create_debug_files(priv->dev);

	/* RTNL childs don't need proprietary sysfs entries */
	if (type == IPOIB_LEGACY_CHILD) {
		if (ipoib_cm_add_mode_attr(priv->dev))
			goto sysfs_failed;
		if (ipoib_add_pkey_attr(priv->dev))
			goto sysfs_failed;
		if (ipoib_add_umcast_attr(priv->dev))
			goto sysfs_failed;

		if (device_create_file(&priv->dev->dev, &dev_attr_parent))
			goto sysfs_failed;
	}

	priv->child_type  = type;
	priv->dev->iflink = ppriv->dev->ifindex;
	list_add_tail(&priv->list, &ppriv->child_intfs);

	return 0;

sysfs_failed:
	result = -ENOMEM;
	ipoib_delete_debug_files(priv->dev);
	unregister_netdevice(priv->dev);

register_failed:
	ipoib_dev_cleanup(priv->dev);

err:
	return result;
}
|
||||
|
||||
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
|
||||
{
|
||||
struct ipoib_dev_priv *ppriv, *priv;
|
||||
char intf_name[IFNAMSIZ];
|
||||
struct ipoib_dev_priv *tpriv;
|
||||
int result;
|
||||
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
ppriv = netdev_priv(pdev);
|
||||
|
||||
snprintf(intf_name, sizeof intf_name, "%s.%04x",
|
||||
ppriv->dev->name, pkey);
|
||||
priv = ipoib_intf_alloc(intf_name);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!rtnl_trylock())
|
||||
return restart_syscall();
|
||||
|
||||
down_write(&ppriv->vlan_rwsem);
|
||||
|
||||
/*
|
||||
* First ensure this isn't a duplicate. We check the parent device and
|
||||
* then all of the legacy child interfaces to make sure the Pkey
|
||||
* doesn't match.
|
||||
*/
|
||||
if (ppriv->pkey == pkey) {
|
||||
result = -ENOTUNIQ;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
|
||||
if (tpriv->pkey == pkey &&
|
||||
tpriv->child_type == IPOIB_LEGACY_CHILD) {
|
||||
result = -ENOTUNIQ;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
|
||||
|
||||
out:
|
||||
up_write(&ppriv->vlan_rwsem);
|
||||
|
||||
if (result)
|
||||
free_netdev(priv->dev);
|
||||
|
||||
rtnl_unlock();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
 * Legacy sysfs entry point for deleting the child interface with the
 * given @pkey.  Requires CAP_NET_ADMIN; uses rtnl_trylock()/
 * restart_syscall() for the same deadlock-avoidance reason as
 * ipoib_vlan_add().  Only legacy children are matched; rtnetlink
 * children must be removed via "ip link del".  The netdev is freed
 * after RTNL is dropped (unregister_netdevice() requires RTNL, while
 * free_netdev() must run after the unregister completes).  Returns 0
 * on success, -ENODEV if no matching child exists.
 */
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
	struct ipoib_dev_priv *ppriv, *priv, *tpriv;
	struct net_device *dev = NULL;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ppriv = netdev_priv(pdev);

	if (!rtnl_trylock())
		return restart_syscall();

	down_write(&ppriv->vlan_rwsem);
	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
		if (priv->pkey == pkey &&
		    priv->child_type == IPOIB_LEGACY_CHILD) {
			unregister_netdevice(priv->dev);
			list_del(&priv->list);
			dev = priv->dev;
			break;
		}
	}
	up_write(&ppriv->vlan_rwsem);

	rtnl_unlock();

	if (dev) {
		free_netdev(dev);
		return 0;
	}

	return -ENODEV;
}
|
12
drivers/infiniband/ulp/iser/Kconfig
Normal file
12
drivers/infiniband/ulp/iser/Kconfig
Normal file
|
@ -0,0 +1,12 @@
|
|||
config INFINIBAND_ISER
|
||||
tristate "iSCSI Extensions for RDMA (iSER)"
|
||||
depends on SCSI && INET && INFINIBAND_ADDR_TRANS
|
||||
select SCSI_ISCSI_ATTRS
|
||||
---help---
|
||||
Support for the iSCSI Extensions for RDMA (iSER) Protocol
|
||||
over InfiniBand. This allows you to access storage devices
|
||||
that speak iSCSI over iSER over InfiniBand.
|
||||
|
||||
The iSER protocol is defined by IETF.
|
||||
See <http://www.ietf.org/rfc/rfc5046.txt>
|
||||
and <http://members.infinibandta.org/kwspub/spec/Annex_iSER.PDF>
|
4
drivers/infiniband/ulp/iser/Makefile
Normal file
4
drivers/infiniband/ulp/iser/Makefile
Normal file
|
@ -0,0 +1,4 @@
|
|||
obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o
|
||||
|
||||
ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \
|
||||
iscsi_iser.o
|
1038
drivers/infiniband/ulp/iser/iscsi_iser.c
Normal file
1038
drivers/infiniband/ulp/iser/iscsi_iser.c
Normal file
File diff suppressed because it is too large
Load diff
670
drivers/infiniband/ulp/iser/iscsi_iser.h
Normal file
670
drivers/infiniband/ulp/iser/iscsi_iser.h
Normal file
|
@ -0,0 +1,670 @@
|
|||
/*
|
||||
* iSER transport for the Open iSCSI Initiator & iSER transport internals
|
||||
*
|
||||
* Copyright (C) 2004 Dmitry Yusupov
|
||||
* Copyright (C) 2004 Alex Aizman
|
||||
* Copyright (C) 2005 Mike Christie
|
||||
* based on code maintained by open-iscsi@googlegroups.com
|
||||
*
|
||||
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
|
||||
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef __ISCSI_ISER_H__
|
||||
#define __ISCSI_ISER_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/printk.h>
|
||||
#include <scsi/libiscsi.h>
|
||||
#include <scsi/scsi_transport_iscsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/uio.h>
|
||||
|
||||
#include <linux/socket.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_fmr_pool.h>
|
||||
#include <rdma/rdma_cm.h>
|
||||
|
||||
#define DRV_NAME "iser"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DRV_VER "1.4.8"
|
||||
|
||||
#define iser_dbg(fmt, arg...) \
|
||||
do { \
|
||||
if (iser_debug_level > 2) \
|
||||
printk(KERN_DEBUG PFX "%s: " fmt,\
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define iser_warn(fmt, arg...) \
|
||||
do { \
|
||||
if (iser_debug_level > 0) \
|
||||
pr_warn(PFX "%s: " fmt, \
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define iser_info(fmt, arg...) \
|
||||
do { \
|
||||
if (iser_debug_level > 1) \
|
||||
pr_info(PFX "%s: " fmt, \
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define iser_err(fmt, arg...) \
|
||||
do { \
|
||||
printk(KERN_ERR PFX "%s: " fmt, \
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define SHIFT_4K 12
|
||||
#define SIZE_4K (1ULL << SHIFT_4K)
|
||||
#define MASK_4K (~(SIZE_4K-1))
|
||||
/* support up to 512KB in one RDMA */
|
||||
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
|
||||
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
|
||||
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
|
||||
#define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
|
||||
#else
|
||||
#define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
|
||||
#endif
|
||||
#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX
|
||||
|
||||
/* QP settings */
|
||||
/* Maximal bounds on received asynchronous PDUs */
|
||||
#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */
|
||||
|
||||
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
|
||||
* SCSI_TMFUNC(2), LOGOUT(1) */
|
||||
|
||||
#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
|
||||
|
||||
#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
|
||||
|
||||
/* the max TX (send) WR supported by the iSER QP is defined by *
|
||||
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
|
||||
* to have at max for SCSI command. The tx posting & completion handling code *
|
||||
* supports -EAGAIN scheme where tx is suspended till the QP has room for more *
|
||||
* send WR. D=8 comes from 64K/8K */
|
||||
|
||||
#define ISER_INFLIGHT_DATAOUTS 8
|
||||
|
||||
#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
|
||||
(1 + ISER_INFLIGHT_DATAOUTS) + \
|
||||
ISER_MAX_TX_MISC_PDUS + \
|
||||
ISER_MAX_RX_MISC_PDUS)
|
||||
|
||||
/* Max registration work requests per command */
|
||||
#define ISER_MAX_REG_WR_PER_CMD 5
|
||||
|
||||
/* For Signature we don't support DATAOUTs so no need to make room for them */
|
||||
#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
|
||||
(1 + ISER_MAX_REG_WR_PER_CMD) + \
|
||||
ISER_MAX_TX_MISC_PDUS + \
|
||||
ISER_MAX_RX_MISC_PDUS)
|
||||
|
||||
#define ISER_WC_BATCH_COUNT 16
|
||||
#define ISER_SIGNAL_CMD_COUNT 32
|
||||
|
||||
#define ISER_VER 0x10
|
||||
#define ISER_WSV 0x08
|
||||
#define ISER_RSV 0x04
|
||||
|
||||
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
|
||||
#define ISER_BEACON_WRID 0xfffffffffffffffeULL
|
||||
|
||||
/**
|
||||
* struct iser_hdr - iSER header
|
||||
*
|
||||
* @flags: flags support (zbva, remote_inv)
|
||||
* @rsvd: reserved
|
||||
* @write_stag: write rkey
|
||||
* @write_va: write virtual address
|
||||
 * @read_stag:	read rkey
|
||||
* @read_va: read virtual address
|
||||
*/
|
||||
struct iser_hdr {
|
||||
u8 flags;
|
||||
u8 rsvd[3];
|
||||
__be32 write_stag;
|
||||
__be64 write_va;
|
||||
__be32 read_stag;
|
||||
__be64 read_va;
|
||||
} __attribute__((packed));
|
||||
|
||||
|
||||
#define ISER_ZBVA_NOT_SUPPORTED 0x80
|
||||
#define ISER_SEND_W_INV_NOT_SUPPORTED 0x40
|
||||
|
||||
struct iser_cm_hdr {
|
||||
u8 flags;
|
||||
u8 rsvd[3];
|
||||
} __packed;
|
||||
|
||||
/* Constant PDU lengths calculations */
|
||||
#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
|
||||
|
||||
#define ISER_RECV_DATA_SEG_LEN 128
|
||||
#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
|
||||
#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
|
||||
|
||||
/* Length of an object name string */
|
||||
#define ISER_OBJECT_NAME_SIZE 64
|
||||
|
||||
enum iser_conn_state {
|
||||
ISER_CONN_INIT, /* descriptor allocd, no conn */
|
||||
ISER_CONN_PENDING, /* in the process of being established */
|
||||
ISER_CONN_UP, /* up and running */
|
||||
ISER_CONN_TERMINATING, /* in the process of being terminated */
|
||||
ISER_CONN_DOWN, /* shut down */
|
||||
ISER_CONN_STATES_NUM
|
||||
};
|
||||
|
||||
enum iser_task_status {
|
||||
ISER_TASK_STATUS_INIT = 0,
|
||||
ISER_TASK_STATUS_STARTED,
|
||||
ISER_TASK_STATUS_COMPLETED
|
||||
};
|
||||
|
||||
enum iser_data_dir {
|
||||
ISER_DIR_IN = 0, /* to initiator */
|
||||
ISER_DIR_OUT, /* from initiator */
|
||||
ISER_DIRS_NUM
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_data_buf - iSER data buffer
|
||||
*
|
||||
* @buf: pointer to the sg list
|
||||
* @size: num entries of this sg
|
||||
 * @data_len:   total buffer byte len
|
||||
* @dma_nents: returned by dma_map_sg
|
||||
* @copy_buf: allocated copy buf for SGs unaligned
|
||||
* for rdma which are copied
|
||||
* @sg_single: SG-ified clone of a non SG SC or
|
||||
* unaligned SG
|
||||
*/
|
||||
struct iser_data_buf {
|
||||
void *buf;
|
||||
unsigned int size;
|
||||
unsigned long data_len;
|
||||
unsigned int dma_nents;
|
||||
char *copy_buf;
|
||||
struct scatterlist sg_single;
|
||||
};
|
||||
|
||||
/* fwd declarations */
|
||||
struct iser_device;
|
||||
struct iscsi_iser_task;
|
||||
struct iscsi_endpoint;
|
||||
|
||||
/**
|
||||
* struct iser_mem_reg - iSER memory registration info
|
||||
*
|
||||
* @lkey: MR local key
|
||||
* @rkey: MR remote key
|
||||
* @va: MR start address (buffer va)
|
||||
* @len: MR length
|
||||
* @mem_h: pointer to registration context (FMR/Fastreg)
|
||||
 * @is_mr:     indicates whether we registered the buffer
|
||||
*/
|
||||
struct iser_mem_reg {
|
||||
u32 lkey;
|
||||
u32 rkey;
|
||||
u64 va;
|
||||
u64 len;
|
||||
void *mem_h;
|
||||
int is_mr;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_regd_buf - iSER buffer registration desc
|
||||
*
|
||||
* @reg: memory registration info
|
||||
* @virt_addr: virtual address of buffer
|
||||
* @device: reference to iser device
|
||||
* @direction: dma direction (for dma_unmap)
|
||||
* @data_size: data buffer size in bytes
|
||||
*/
|
||||
struct iser_regd_buf {
|
||||
struct iser_mem_reg reg;
|
||||
void *virt_addr;
|
||||
struct iser_device *device;
|
||||
enum dma_data_direction direction;
|
||||
unsigned int data_size;
|
||||
};
|
||||
|
||||
enum iser_desc_type {
|
||||
ISCSI_TX_CONTROL ,
|
||||
ISCSI_TX_SCSI_COMMAND,
|
||||
ISCSI_TX_DATAOUT
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_tx_desc - iSER TX descriptor (for send wr_id)
|
||||
*
|
||||
* @iser_header: iser header
|
||||
* @iscsi_header: iscsi header
|
||||
* @type: command/control/dataout
|
||||
* @dam_addr: header buffer dma_address
|
||||
* @tx_sg: sg[0] points to iser/iscsi headers
|
||||
* sg[1] optionally points to either of immediate data
|
||||
* unsolicited data-out or control
|
||||
* @num_sge: number sges used on this TX task
|
||||
*/
|
||||
struct iser_tx_desc {
|
||||
struct iser_hdr iser_header;
|
||||
struct iscsi_hdr iscsi_header;
|
||||
enum iser_desc_type type;
|
||||
u64 dma_addr;
|
||||
struct ib_sge tx_sg[2];
|
||||
int num_sge;
|
||||
};
|
||||
|
||||
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
|
||||
sizeof(u64) + sizeof(struct ib_sge)))
|
||||
/**
|
||||
* struct iser_rx_desc - iSER RX descriptor (for recv wr_id)
|
||||
*
|
||||
* @iser_header: iser header
|
||||
* @iscsi_header: iscsi header
|
||||
* @data: received data segment
|
||||
* @dma_addr: receive buffer dma address
|
||||
* @rx_sg: ib_sge of receive buffer
|
||||
* @pad: for sense data TODO: Modify to maximum sense length supported
|
||||
*/
|
||||
struct iser_rx_desc {
|
||||
struct iser_hdr iser_header;
|
||||
struct iscsi_hdr iscsi_header;
|
||||
char data[ISER_RECV_DATA_SEG_LEN];
|
||||
u64 dma_addr;
|
||||
struct ib_sge rx_sg;
|
||||
char pad[ISER_RX_PAD_SIZE];
|
||||
} __attribute__((packed));
|
||||
|
||||
#define ISER_MAX_CQ 4
|
||||
|
||||
struct iser_conn;
|
||||
struct ib_conn;
|
||||
struct iscsi_iser_task;
|
||||
|
||||
/**
|
||||
* struct iser_comp - iSER completion context
|
||||
*
|
||||
* @device: pointer to device handle
|
||||
* @cq: completion queue
|
||||
* @wcs: work completion array
|
||||
* @tasklet: Tasklet handle
|
||||
* @active_qps: Number of active QPs attached
|
||||
* to completion context
|
||||
*/
|
||||
struct iser_comp {
|
||||
struct iser_device *device;
|
||||
struct ib_cq *cq;
|
||||
struct ib_wc wcs[ISER_WC_BATCH_COUNT];
|
||||
struct tasklet_struct tasklet;
|
||||
int active_qps;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_device - iSER device handle
|
||||
*
|
||||
* @ib_device: RDMA device
|
||||
* @pd: Protection Domain for this device
|
||||
* @dev_attr: Device attributes container
|
||||
* @mr: Global DMA memory region
|
||||
* @event_handler: IB events handle routine
|
||||
* @ig_list: entry in devices list
|
||||
* @refcount: Reference counter, dominated by open iser connections
|
||||
* @comps_used: Number of completion contexts used, Min between online
|
||||
* cpus and device max completion vectors
|
||||
 * @comps:              Dynamically allocated array of completion handlers
|
||||
* Memory registration pool Function pointers (FMR or Fastreg):
|
||||
* @iser_alloc_rdma_reg_res: Allocation of memory regions pool
|
||||
* @iser_free_rdma_reg_res: Free of memory regions pool
|
||||
* @iser_reg_rdma_mem: Memory registration routine
|
||||
* @iser_unreg_rdma_mem: Memory deregistration routine
|
||||
*/
|
||||
struct iser_device {
|
||||
struct ib_device *ib_device;
|
||||
struct ib_pd *pd;
|
||||
struct ib_device_attr dev_attr;
|
||||
struct ib_mr *mr;
|
||||
struct ib_event_handler event_handler;
|
||||
struct list_head ig_list;
|
||||
int refcount;
|
||||
int comps_used;
|
||||
struct iser_comp comps[ISER_MAX_CQ];
|
||||
int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
|
||||
unsigned cmds_max);
|
||||
void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
|
||||
int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
|
||||
enum iser_data_dir cmd_dir);
|
||||
void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
|
||||
enum iser_data_dir cmd_dir);
|
||||
};
|
||||
|
||||
#define ISER_CHECK_GUARD 0xc0
|
||||
#define ISER_CHECK_REFTAG 0x0f
|
||||
#define ISER_CHECK_APPTAG 0x30
|
||||
|
||||
enum iser_reg_indicator {
|
||||
ISER_DATA_KEY_VALID = 1 << 0,
|
||||
ISER_PROT_KEY_VALID = 1 << 1,
|
||||
ISER_SIG_KEY_VALID = 1 << 2,
|
||||
ISER_FASTREG_PROTECTED = 1 << 3,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_pi_context - Protection information context
|
||||
*
|
||||
* @prot_mr: protection memory region
|
||||
* @prot_frpl: protection fastreg page list
|
||||
* @sig_mr: signature feature enabled memory region
|
||||
*/
|
||||
struct iser_pi_context {
|
||||
struct ib_mr *prot_mr;
|
||||
struct ib_fast_reg_page_list *prot_frpl;
|
||||
struct ib_mr *sig_mr;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct fast_reg_descriptor - Fast registration descriptor
|
||||
*
|
||||
* @list: entry in connection fastreg pool
|
||||
* @data_mr: data memory region
|
||||
* @data_frpl: data fastreg page list
|
||||
* @pi_ctx: protection information context
|
||||
* @reg_indicators: fast registration indicators
|
||||
*/
|
||||
struct fast_reg_descriptor {
|
||||
struct list_head list;
|
||||
struct ib_mr *data_mr;
|
||||
struct ib_fast_reg_page_list *data_frpl;
|
||||
struct iser_pi_context *pi_ctx;
|
||||
u8 reg_indicators;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ib_conn - Infiniband related objects
|
||||
*
|
||||
 * @cma_id:              rdma_cm connection manager handle
|
||||
* @qp: Connection Queue-pair
|
||||
* @post_recv_buf_count: post receive counter
|
||||
* @sig_count: send work request signal count
|
||||
* @rx_wr: receive work request for batch posts
|
||||
* @device: reference to iser device
|
||||
* @comp: iser completion context
|
||||
* @pi_support: Indicate device T10-PI support
|
||||
* @beacon: beacon send wr to signal all flush errors were drained
|
||||
* @flush_comp: completes when all connection completions consumed
|
||||
* @lock: protects fmr/fastreg pool
|
||||
* @union.fmr:
|
||||
* @pool: FMR pool for fast registrations
|
||||
* @page_vec: page vector to hold mapped commands pages
|
||||
* used for registration
|
||||
* @union.fastreg:
|
||||
* @pool: Fast registration descriptors pool for fast
|
||||
* registrations
|
||||
* @pool_size: Size of pool
|
||||
*/
|
||||
struct ib_conn {
|
||||
struct rdma_cm_id *cma_id;
|
||||
struct ib_qp *qp;
|
||||
int post_recv_buf_count;
|
||||
u8 sig_count;
|
||||
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
|
||||
struct iser_device *device;
|
||||
struct iser_comp *comp;
|
||||
bool pi_support;
|
||||
struct ib_send_wr beacon;
|
||||
struct completion flush_comp;
|
||||
spinlock_t lock;
|
||||
union {
|
||||
struct {
|
||||
struct ib_fmr_pool *pool;
|
||||
struct iser_page_vec *page_vec;
|
||||
} fmr;
|
||||
struct {
|
||||
struct list_head pool;
|
||||
int pool_size;
|
||||
} fastreg;
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_conn - iSER connection context
|
||||
*
|
||||
* @ib_conn: connection RDMA resources
|
||||
* @iscsi_conn: link to matching iscsi connection
|
||||
* @ep: transport handle
|
||||
* @state: connection logical state
|
||||
* @qp_max_recv_dtos: maximum number of data outs, corresponds
|
||||
* to max number of post recvs
|
||||
* @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
|
||||
* @min_posted_rx: (qp_max_recv_dtos >> 2)
|
||||
* @name: connection peer portal
|
||||
 * @release_work:        deferred work for release job
|
||||
 * @state_mutex:         protects iser connection state
|
||||
* @stop_completion: conn_stop completion
|
||||
* @ib_completion: RDMA cleanup completion
|
||||
* @up_completion: connection establishment completed
|
||||
* (state is ISER_CONN_UP)
|
||||
* @conn_list: entry in ig conn list
|
||||
* @login_buf: login data buffer (stores login parameters)
|
||||
* @login_req_buf: login request buffer
|
||||
* @login_req_dma: login request buffer dma address
|
||||
* @login_resp_buf: login response buffer
|
||||
* @login_resp_dma: login response buffer dma address
|
||||
* @rx_desc_head: head of rx_descs cyclic buffer
|
||||
* @rx_descs: rx buffers array (cyclic buffer)
|
||||
* @num_rx_descs: number of rx descriptors
|
||||
*/
|
||||
struct iser_conn {
|
||||
struct ib_conn ib_conn;
|
||||
struct iscsi_conn *iscsi_conn;
|
||||
struct iscsi_endpoint *ep;
|
||||
enum iser_conn_state state;
|
||||
unsigned qp_max_recv_dtos;
|
||||
unsigned qp_max_recv_dtos_mask;
|
||||
unsigned min_posted_rx;
|
||||
char name[ISER_OBJECT_NAME_SIZE];
|
||||
struct work_struct release_work;
|
||||
struct mutex state_mutex;
|
||||
struct completion stop_completion;
|
||||
struct completion ib_completion;
|
||||
struct completion up_completion;
|
||||
struct list_head conn_list;
|
||||
|
||||
char *login_buf;
|
||||
char *login_req_buf, *login_resp_buf;
|
||||
u64 login_req_dma, login_resp_dma;
|
||||
unsigned int rx_desc_head;
|
||||
struct iser_rx_desc *rx_descs;
|
||||
u32 num_rx_descs;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iscsi_iser_task - iser task context
|
||||
*
|
||||
* @desc: TX descriptor
|
||||
* @iser_conn: link to iser connection
|
||||
* @status: current task status
|
||||
* @sc: link to scsi command
|
||||
* @command_sent: indicate if command was sent
|
||||
* @dir: iser data direction
|
||||
* @rdma_regd: task rdma registration desc
|
||||
* @data: iser data buffer desc
|
||||
* @data_copy: iser data copy buffer desc (bounce buffer)
|
||||
* @prot: iser protection buffer desc
|
||||
* @prot_copy: iser protection copy buffer desc (bounce buffer)
|
||||
*/
|
||||
struct iscsi_iser_task {
|
||||
struct iser_tx_desc desc;
|
||||
struct iser_conn *iser_conn;
|
||||
enum iser_task_status status;
|
||||
struct scsi_cmnd *sc;
|
||||
int command_sent;
|
||||
int dir[ISER_DIRS_NUM];
|
||||
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];
|
||||
struct iser_data_buf data[ISER_DIRS_NUM];
|
||||
struct iser_data_buf data_copy[ISER_DIRS_NUM];
|
||||
struct iser_data_buf prot[ISER_DIRS_NUM];
|
||||
struct iser_data_buf prot_copy[ISER_DIRS_NUM];
|
||||
};
|
||||
|
||||
struct iser_page_vec {
|
||||
u64 *pages;
|
||||
int length;
|
||||
int offset;
|
||||
int data_size;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iser_global: iSER global context
|
||||
*
|
||||
* @device_list_mutex: protects device_list
|
||||
* @device_list: iser devices global list
|
||||
* @connlist_mutex: protects connlist
|
||||
* @connlist: iser connections global list
|
||||
* @desc_cache: kmem cache for tx dataout
|
||||
*/
|
||||
struct iser_global {
|
||||
struct mutex device_list_mutex;
|
||||
struct list_head device_list;
|
||||
struct mutex connlist_mutex;
|
||||
struct list_head connlist;
|
||||
struct kmem_cache *desc_cache;
|
||||
};
|
||||
|
||||
extern struct iser_global ig;
|
||||
extern int iser_debug_level;
|
||||
extern bool iser_pi_enable;
|
||||
extern int iser_pi_guard;
|
||||
|
||||
int iser_send_control(struct iscsi_conn *conn,
|
||||
struct iscsi_task *task);
|
||||
|
||||
int iser_send_command(struct iscsi_conn *conn,
|
||||
struct iscsi_task *task);
|
||||
|
||||
int iser_send_data_out(struct iscsi_conn *conn,
|
||||
struct iscsi_task *task,
|
||||
struct iscsi_data *hdr);
|
||||
|
||||
void iscsi_iser_recv(struct iscsi_conn *conn,
|
||||
struct iscsi_hdr *hdr,
|
||||
char *rx_data,
|
||||
int rx_data_len);
|
||||
|
||||
void iser_conn_init(struct iser_conn *iser_conn);
|
||||
|
||||
void iser_conn_release(struct iser_conn *iser_conn);
|
||||
|
||||
int iser_conn_terminate(struct iser_conn *iser_conn);
|
||||
|
||||
void iser_release_work(struct work_struct *work);
|
||||
|
||||
void iser_rcv_completion(struct iser_rx_desc *desc,
|
||||
unsigned long dto_xfer_len,
|
||||
struct ib_conn *ib_conn);
|
||||
|
||||
void iser_snd_completion(struct iser_tx_desc *desc,
|
||||
struct ib_conn *ib_conn);
|
||||
|
||||
void iser_task_rdma_init(struct iscsi_iser_task *task);
|
||||
|
||||
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
|
||||
|
||||
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
|
||||
|
||||
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *mem,
|
||||
struct iser_data_buf *mem_copy,
|
||||
enum iser_data_dir cmd_dir);
|
||||
|
||||
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
|
||||
enum iser_data_dir cmd_dir);
|
||||
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
|
||||
enum iser_data_dir cmd_dir);
|
||||
|
||||
int iser_connect(struct iser_conn *iser_conn,
|
||||
struct sockaddr *src_addr,
|
||||
struct sockaddr *dst_addr,
|
||||
int non_blocking);
|
||||
|
||||
int iser_reg_page_vec(struct ib_conn *ib_conn,
|
||||
struct iser_page_vec *page_vec,
|
||||
struct iser_mem_reg *mem_reg);
|
||||
|
||||
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
|
||||
enum iser_data_dir cmd_dir);
|
||||
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
|
||||
enum iser_data_dir cmd_dir);
|
||||
|
||||
int iser_post_recvl(struct iser_conn *iser_conn);
|
||||
int iser_post_recvm(struct iser_conn *iser_conn, int count);
|
||||
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
|
||||
bool signal);
|
||||
|
||||
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum iser_data_dir iser_dir,
|
||||
enum dma_data_direction dma_dir);
|
||||
|
||||
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum dma_data_direction dir);
|
||||
|
||||
int iser_initialize_task_headers(struct iscsi_task *task,
|
||||
struct iser_tx_desc *tx_desc);
|
||||
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
|
||||
struct iscsi_session *session);
|
||||
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max);
|
||||
void iser_free_fmr_pool(struct ib_conn *ib_conn);
|
||||
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
|
||||
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
|
||||
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
|
||||
enum iser_data_dir cmd_dir, sector_t *sector);
|
||||
#endif
|
732
drivers/infiniband/ulp/iser/iser_initiator.c
Normal file
732
drivers/infiniband/ulp/iser/iser_initiator.c
Normal file
|
@ -0,0 +1,732 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
#include "iscsi_iser.h"
|
||||
|
||||
/*
 * Register the user buffer for the Data-In (read) direction and
 * initialize the passive-RDMA descriptor.  The data size is stored in
 * task->data[ISER_DIR_IN].data_len; the protection (T10-PI) size is
 * stored in task->prot[ISER_DIR_IN].data_len.
 *
 * Returns 0 on success or a negative errno from DMA mapping / memory
 * registration.
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)

{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	/* If the command carries T10-PI data, map the protection buffer too. */
	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	/* Register the mapped memory via the device method (FMR or fastreg). */
	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	/* Advertise the read STag/VA in the iSER header for the target. */
	hdr->flags |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}
|
||||
|
||||
/*
 * Register the user buffer for the Data-Out (write) direction and
 * initialize the passive-RDMA descriptor.  The data size is stored in
 * task->data[ISER_DIR_OUT].data_len; the protection (T10-PI) size is
 * stored in task->prot[ISER_DIR_OUT].data_len.
 *
 * @imm_sz:   bytes of immediate data carried in the command PDU itself
 * @unsol_sz: bytes that will be sent as unsolicited data-outs
 * @edtl:     expected data transfer length of the whole command
 *
 * Returns 0 on success or a negative errno from DMA mapping / memory
 * registration.
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	/* If the command carries T10-PI data, map the protection buffer too. */
	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	/* Register the mapped memory via the device method (FMR or fastreg). */
	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

	/*
	 * Only data beyond the unsolicited portion is transferred by the
	 * target via RDMA-Read, so advertise the write STag/VA only when
	 * there is solicited data (unsol_sz < edtl).
	 */
	if (unsol_sz < edtl) {
		hdr->flags |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	/* Immediate data rides in the command PDU as a second send SGE. */
	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = regd_buf->reg.va;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = regd_buf->reg.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}
|
||||
|
||||
/*
 * Create a new TX descriptor: sync the header area for CPU access,
 * reset the iSER header (version flag only), and make sure the first
 * SGE's lkey matches the device's global MR (it can go stale after a
 * device change).
 */
static void iser_create_send_desc(struct iser_conn	*iser_conn,
				  struct iser_tx_desc	*tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	/* Header buffer is DMA-mapped; hand it to the CPU before writing. */
	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	/* Repair a stale lkey (e.g. descriptor reused across devices). */
	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}
|
||||
|
||||
/*
 * Release the login request/response buffer: unmap whichever halves
 * were successfully DMA-mapped, free the backing allocation, and zero
 * the bookkeeping fields so a repeated call is a safe no-op.
 */
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	/* Nothing allocated (or already freed) - nothing to do. */
	if (!iser_conn->login_buf)
		return;

	/* A zero dma address marks a half that was never mapped. */
	if (iser_conn->login_req_dma)
		ib_dma_unmap_single(device->ib_device,
				    iser_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	if (iser_conn->login_resp_dma)
		ib_dma_unmap_single(device->ib_device,
				    iser_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(iser_conn->login_buf);

	/* make sure we never redo any unmapping */
	iser_conn->login_req_dma = 0;
	iser_conn->login_resp_dma = 0;
	iser_conn->login_buf = NULL;
}
|
||||
|
||||
/*
 * Allocate one contiguous buffer holding both the login request
 * (first ISCSI_DEF_MAX_RECV_SEG_LEN bytes, mapped TO the device) and the
 * login response (next ISER_RX_LOGIN_SIZE bytes, mapped FROM the device),
 * and DMA-map each half.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.  On a
 * partial mapping failure the failed handle is zeroed first so that
 * iser_free_login_buf() only unmaps what actually succeeded.
 */
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	int req_err, resp_err;

	BUG_ON(device == NULL);

	iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				       ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!iser_conn->login_buf)
		goto out_err;

	/* request half at the front, response half right behind it */
	iser_conn->login_req_buf = iser_conn->login_buf;
	iser_conn->login_resp_buf = iser_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
						     iser_conn->login_req_buf,
						     ISCSI_DEF_MAX_RECV_SEG_LEN,
						     DMA_TO_DEVICE);

	iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
						      iser_conn->login_resp_buf,
						      ISER_RX_LOGIN_SIZE,
						      DMA_FROM_DEVICE);

	req_err = ib_dma_mapping_error(device->ib_device,
				       iser_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device,
					iser_conn->login_resp_dma);

	if (req_err || resp_err) {
		/* zero failed handles so the free path skips unmapping them */
		if (req_err)
			iser_conn->login_req_dma = 0;
		if (resp_err)
			iser_conn->login_resp_dma = 0;
		goto free_login_buf;
	}
	return 0;

free_login_buf:
	iser_free_login_buf(iser_conn);

out_err:
	iser_err("unable to alloc or map login buf\n");
	return -ENOMEM;
}
|
||||
|
||||
/*
 * Allocate and DMA-map everything needed to receive on this connection:
 * RDMA registration resources, the login buffer, and session->cmds_max
 * RX descriptors (each mapped FROM the device).
 *
 * Returns 0 on success, -ENOMEM on any failure; partially built state is
 * torn down in reverse order via the chained error labels.
 */
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	/* keep at least a quarter of the ring posted at all times */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		/* pre-build the receive SGE pointing at this descriptor */
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->mr->lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	/* unmap only the first i descriptors that mapped successfully */
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
|
||||
|
||||
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
|
||||
{
|
||||
int i;
|
||||
struct iser_rx_desc *rx_desc;
|
||||
struct ib_conn *ib_conn = &iser_conn->ib_conn;
|
||||
struct iser_device *device = ib_conn->device;
|
||||
|
||||
if (device->iser_free_rdma_reg_res)
|
||||
device->iser_free_rdma_reg_res(ib_conn);
|
||||
|
||||
rx_desc = iser_conn->rx_descs;
|
||||
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
|
||||
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
|
||||
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
|
||||
kfree(iser_conn->rx_descs);
|
||||
/* make sure we never redo any unmapping */
|
||||
iser_conn->rx_descs = NULL;
|
||||
|
||||
iser_free_login_buf(iser_conn);
|
||||
}
|
||||
|
||||
/*
 * Called when a login PDU is about to be sent.  If it is the final login
 * (transition to full feature phase) on a normal session, post the initial
 * batch of RX buffers; discovery sessions keep reusing the login RX buffer.
 *
 * Returns 0 on success, -ENOMEM if posting the RX batch fails.
 */
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	} else
		iser_info("Normal session, posting batch of RX %d buffers\n",
			  iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}
|
||||
|
||||
/*
 * Decide whether a send should be posted signaled: request a send
 * completion once every ISER_SIGNAL_CMD_COUNT commands.
 */
static inline bool iser_signal_comp(u8 sig_count)
{
	return sig_count % ISER_SIGNAL_CMD_COUNT == 0;
}
|
||||
|
||||
/**
 * iser_send_command - send command PDU
 *
 * Builds the tx descriptor for a SCSI command: fills in the data and
 * protection buffers from the scsi_cmnd scatterlists, prepares the
 * read/write RDMA registration, and posts the send (signaled once every
 * ISER_SIGNAL_CMD_COUNT commands).  Returns 0 or a negative errno.
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn, tx_desc);

	/* pick the data/protection buffers for the transfer direction */
	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->buf = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		/* 8 bytes of protection information per sector */
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
	return err;
}
|
||||
|
||||
/**
 * iser_send_data_out - send data out PDU
 *
 * Allocates a transient tx descriptor from the global descriptor cache,
 * points its second SGE into the already-registered OUT buffer at the
 * PDU's offset, validates the range against the task's total data length
 * and posts the send (always signaled).  Returns 0 or a negative errno.
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_regd_buf *regd_buf;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err = 0;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);

	/* GFP_ATOMIC: may run in a context that cannot sleep */
	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

	/* second SGE covers the payload slice inside the registered buffer */
	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = regd_buf->reg.va + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = regd_buf->reg.lkey;
	tx_desc->num_sge = 2;

	/* reject PDUs that would run past the task's total data length */
	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);


	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n",conn, err);
	return err;
}
|
||||
|
||||
/*
 * Send a control-type PDU (login, nop, tmf, ...).  Only the login task may
 * carry data; its payload is staged through the pre-mapped login request
 * buffer as a second SGE.  For login PDUs, also (re)posts the login RX
 * buffer and — on the last login — the initial RX batch.
 * Returns 0 or a negative errno.
 */
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		/* copy payload into the mapped login buffer under a
		 * cpu/device sync pair so the HCA sees coherent data */
		ib_dma_sync_single_for_cpu(device->ib_device,
			iser_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		memcpy(iser_conn->login_req_buf, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
			iser_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		tx_dsg->addr = iser_conn->login_req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n",conn, err);
	return err;
}
|
||||
|
||||
/**
 * iser_rcv_dto_completion - recv DTO completion
 *
 * Receive completion handler: syncs the buffer for CPU access, hands the
 * PDU to libiscsi, then returns buffer ownership to the device and — for
 * non-login buffers — replenishes the RX ring in batches of min_posted_rx.
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between login to all other PDUs */
	if ((char *)rx_desc == iser_conn->login_resp_buf) {
		rx_dma = iser_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
				   rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
			rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
				      rx_buflen, DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry on tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything */
	ib_conn->post_recv_buf_count--;

	/* the login buffer is reposted elsewhere, not via the RX ring */
	if (rx_dma == iser_conn->login_resp_dma)
		return;

	/* repost a batch once the ring has drained below the watermark */
	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
|
||||
|
||||
/*
 * Send completion handler.  Transient data-out descriptors (allocated in
 * iser_send_data_out()) are unmapped and returned to the cache here; for
 * control PDUs the owning task is recovered from the descriptor's position
 * inside the libiscsi dd_data and, for RESERVED_ITT PDUs, its extra
 * reference is dropped (NOTE(review): presumably because no response PDU
 * will complete it — confirm against libiscsi).
 */
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			 struct ib_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
					ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
		tx_desc = NULL;
	}

	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *) ((long)(void *)tx_desc -
				  sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}
|
||||
|
||||
/*
 * Reset per-task RDMA state before a command is issued: clear direction
 * flags, data/protection lengths and both registered-buffer descriptors.
 */
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)

{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}
|
||||
|
||||
/*
 * Tear down per-task RDMA state when a command completes: if bounce
 * buffers were used for unaligned SGs, copy READ data back and free them;
 * then unregister the RDMA memory and — only if the original (aligned)
 * SG was mapped directly — unmap it.  Bounce-buffer copies were already
 * unmapped inside iser_finalize_rdma_unaligned_sg().
 */
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	int is_rdma_data_aligned = 1;
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						&iser_task->data_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						&iser_task->data_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_IN],
						&iser_task->prot_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_OUT],
						&iser_task->prot_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	/* dir[] was set by iser_dma_map_task_data(); it gates unregister */
	if (iser_task->dir[ISER_DIR_IN]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}
|
800
drivers/infiniband/ulp/iser/iser_memory.c
Normal file
800
drivers/infiniband/ulp/iser/iser_memory.c
Normal file
|
@ -0,0 +1,800 @@
|
|||
/*
|
||||
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include "iscsi_iser.h"
|
||||
|
||||
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
|
||||
|
||||
/**
 * iser_start_rdma_unaligned_sg
 *
 * Set up a contiguous bounce buffer for an SG list that is not suitably
 * aligned for RDMA: allocate a buffer covering the whole transfer (pages
 * above ISER_KMALLOC_THRESHOLD, kmalloc below), copy the payload into it
 * for WRITEs, and DMA-map it as a single-entry SG in @data_copy.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					struct iser_data_buf *data_copy,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	char *mem = NULL;
	unsigned long cmd_data_len = 0;
	int dma_nents, i;

	/* total transfer length = sum of all SG element DMA lengths */
	for_each_sg(sgl, sg, data->size, i)
		cmd_data_len += ib_sg_dma_len(dev, sg);

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg the buffer which is used for RDMA */
		int i;
		char *p, *from;

		sgl = (struct scatterlist *)data->buf;
		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	/* present the bounce buffer as a one-entry SG list */
	sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
	data_copy->buf = &data_copy->sg_single;
	data_copy->size = 1;
	data_copy->copy_buf = mem;

	dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	data_copy->dma_nents = dma_nents;
	data_copy->data_len = cmd_data_len;

	return 0;
}
|
||||
|
||||
/**
 * iser_finalize_rdma_unaligned_sg
 *
 * Tear down the bounce buffer created by iser_start_rdma_unaligned_sg():
 * unmap it, copy READ data back into the caller's original (unaligned)
 * SG list, and free the buffer with the allocator that matches its size.
 */

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     struct iser_data_buf *data_copy,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	unsigned long cmd_data_len;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = data_copy->copy_buf;

		sgl = (struct scatterlist *)data->buf;
		sg_size = data->size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	/* free with the same allocator chosen at start (size threshold) */
	cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)data_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(data_copy->copy_buf);

	data_copy->copy_buf = NULL;
}
|
||||
|
||||
/* true iff addr has no bits set below the 4K boundary (per MASK_4K) */
#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
|
||||
|
||||
/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of resulting physical address array (may be less than
 * the original due to possible compaction).
 *
 * we build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other then the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code supports also the weird case
 * where --few fragments of the same page-- are present in the SG as
 * consecutive elements. Also, it handles one entry SG.
 */

static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}
|
||||
|
||||
|
||||
/**
 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
 * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
 * the number of entries which are aligned correctly. Supports the case where
 * consecutive SG elements are actually fragments of the same physcial page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				      struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	/* a single DMA entry is trivially aligned */
	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr  = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr   = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		/* contiguous elements: defer the alignment check since the
		 * two fragments behave as one (same physical page case) */
		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	/* if the loop ran off the end, the last element counts as aligned */
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}
|
||||
|
||||
static void iser_data_buf_dump(struct iser_data_buf *data,
|
||||
struct ib_device *ibdev)
|
||||
{
|
||||
struct scatterlist *sgl = (struct scatterlist *)data->buf;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for_each_sg(sgl, sg, data->dma_nents, i)
|
||||
iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
|
||||
"off:0x%x sz:0x%x dma_len:0x%x\n",
|
||||
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
|
||||
sg_page(sg), sg->offset,
|
||||
sg->length, ib_sg_dma_len(ibdev, sg));
|
||||
}
|
||||
|
||||
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
|
||||
{
|
||||
int i;
|
||||
|
||||
iser_err("page vec length %d data size %d\n",
|
||||
page_vec->length, page_vec->data_size);
|
||||
for (i = 0; i < page_vec->length; i++)
|
||||
iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
|
||||
}
|
||||
|
||||
/*
 * Fill @page_vec from @data via iser_sg_to_page_vec().  BUGs if the
 * resulting vector cannot cover the transfer (callers must only pass SG
 * lists that satisfied the alignment check).
 */
static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
					   &page_vec->offset,
					   &page_vec->data_size);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	/* sanity: vector of 4K pages must be able to span the data size */
	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}
|
||||
|
||||
/*
 * DMA-map a task's SG list for the given direction and record in
 * iser_task->dir[] that this direction is active (consumed later by
 * iser_task_rdma_finalize()).  Returns 0 or -EINVAL if mapping fails.
 */
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct ib_device *dev;
|
||||
|
||||
dev = iser_task->iser_conn->ib_conn.device->ib_device;
|
||||
ib_dma_unmap_sg(dev, data->buf, data->size, dir);
|
||||
}
|
||||
|
||||
/*
 * Fallback for SG lists that violate RDMA alignment: account the event,
 * unmap the original mapping, and replace it with a DMA-mapped contiguous
 * bounce buffer in @mem_copy (see iser_start_rdma_unaligned_sg()).
 * Returns 0 on success, -ENOMEM if the bounce buffer cannot be set up.
 */
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct ib_device *ibdev,
			      struct iser_data_buf *mem,
			      struct iser_data_buf *mem_copy,
			      enum iser_data_dir cmd_dir,
			      int aligned_len)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;

	iscsi_conn->fmr_unalign_cnt++;
	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
		  aligned_len, mem->size);

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, ibdev);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem,
				 (cmd_dir == ISER_DIR_OUT) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* allocate copy buf, if we are writing, copy the */
	/* unaligned scatterlist, dma map the copy */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}
|
||||
|
||||
/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible) obtaining rkey and va
 *
 * Falls back to a bounce buffer when the SG list is not RDMA-aligned.
 * Single-entry mappings reuse the device's global MR keys directly;
 * multi-entry mappings go through the connection's FMR pool.
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

	/* unaligned SG => swap in a bounce buffer and register that instead */
	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev, mem,
					 &iser_task->data_copy[cmd_dir],
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_mr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld]\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
					&regd_buf->reg);
		/* -EAGAIN is a transient pool condition — don't dump state */
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}
|
||||
|
||||
static inline void
|
||||
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
|
||||
struct ib_sig_domain *domain)
|
||||
{
|
||||
domain->sig_type = IB_SIG_TYPE_T10_DIF;
|
||||
domain->sig.dif.pi_interval = sc->device->sector_size;
|
||||
domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff;
|
||||
/*
|
||||
* At the moment we hard code those, but in the future
|
||||
* we will take them from sc.
|
||||
*/
|
||||
domain->sig.dif.apptag_check_mask = 0xffff;
|
||||
domain->sig.dif.app_escape = true;
|
||||
domain->sig.dif.ref_escape = true;
|
||||
if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 ||
|
||||
scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2)
|
||||
domain->sig.dif.ref_remap = true;
|
||||
};
|
||||
|
||||
/*
 * iser_set_sig_attrs - translate the SCSI protection operation of @sc
 * into T10-DIF signature attributes for the signature MR.
 *
 * INSERT/STRIP operations have protection on only one side (wire or
 * memory); PASS operations carry protection on both sides. The side
 * without protection is marked IB_SIG_TYPE_NONE.
 *
 * Returns 0 on success, -EINVAL for an unsupported protection op.
 */
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		/* protection exists only on the wire side */
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		/* protection exists only on the memory side */
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		/*
		 * At the moment we use this modparam to tell what is
		 * the memory bg_type, in the future we will take it
		 * from sc.
		 */
		sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
						 IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		/* protection is carried end-to-end: set both domains */
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		/*
		 * At the moment we use this modparam to tell what is
		 * the memory bg_type, in the future we will take it
		 * from sc.
		 */
		sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
						 IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}
|
||||
|
||||
static int
|
||||
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
|
||||
{
|
||||
switch (scsi_get_prot_type(sc)) {
|
||||
case SCSI_PROT_DIF_TYPE0:
|
||||
break;
|
||||
case SCSI_PROT_DIF_TYPE1:
|
||||
case SCSI_PROT_DIF_TYPE2:
|
||||
*mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
|
||||
break;
|
||||
case SCSI_PROT_DIF_TYPE3:
|
||||
*mask = ISER_CHECK_GUARD;
|
||||
break;
|
||||
default:
|
||||
iser_err("Unsupported protection type %d\n",
|
||||
scsi_get_prot_type(sc));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * iser_reg_sig_mr - post a signature-handover registration for the task.
 * @iser_task: the task being registered
 * @desc:      fast-reg descriptor whose pi_ctx holds the signature MR
 * @data_sge:  already-registered data sge
 * @prot_sge:  already-registered protection sge (used only when the
 *             command actually carries protection sg entries)
 * @sig_sge:   out - sge describing the combined signature region
 *
 * If the signature key is stale, a LOCAL_INV work request is chained in
 * front of the IB_WR_REG_SIG_MR request and the rkey is bumped first.
 * Both WRs are posted in a single ib_post_send() call.
 *
 * Returns 0 on success, errno on failure.
 */
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_pi_context *pi_ctx = desc->pi_ctx;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
	if (ret)
		goto err;

	/* invalidate the old key before reusing the signature MR */
	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = data_sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = prot_sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	/* chain the registration behind the invalidate, if one was built */
	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;

	sig_sge->lkey = pi_ctx->sig_mr->lkey;
	sig_sge->addr = 0;
	sig_sge->length = data_sge->length + prot_sge->length;
	/* for INSERT/STRIP ops the HCA generates the 8-byte DIF tuple per
	 * sector, so the wire length grows accordingly */
	if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
	    scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
		sig_sge->length += (data_sge->length /
				   iser_task->sc->device->sector_size) * 8;
	}

	iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
		 sig_sge->addr, sig_sge->length,
		 sig_sge->lkey);
err:
	return ret;
}
|
||||
|
||||
/*
 * iser_fast_reg_mr - register @mem with a fast registration work request.
 * @iser_task: task owning the buffer
 * @regd_buf:  registration descriptor; reg.mem_h holds the fastreg desc
 * @mem:       dma-mapped buffer to register
 * @ind:       which key this registration uses (data vs. protection)
 * @sge:       out - sge describing the registered region
 *
 * A single-entry sg-list is served directly by the device-wide DMA MR
 * with no work request. Otherwise the page vector is built, a stale key
 * is invalidated (LOCAL_INV chained ahead of the FAST_REG_MR WR), and
 * both are posted in one ib_post_send() call.
 *
 * Returns 0 on success, errno on failure.
 */
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_regd_buf *regd_buf,
			    struct iser_data_buf *mem,
			    enum iser_reg_indicator ind,
			    struct ib_sge *sge)
{
	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret, offset, size, plen;

	/* if there a single dma entry, dma mr suffices */
	if (mem->dma_nents == 1) {
		struct scatterlist *sg = (struct scatterlist *)mem->buf;

		sge->lkey = device->mr->lkey;
		sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
		sge->length = ib_sg_dma_len(ibdev, &sg[0]);

		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
			 sge->lkey, sge->addr, sge->length);
		return 0;
	}

	/* pick the MR / page list matching the requested key */
	if (ind == ISER_DATA_KEY_VALID) {
		mr = desc->data_mr;
		frpl = desc->data_frpl;
	} else {
		mr = desc->pi_ctx->prot_mr;
		frpl = desc->pi_ctx->prot_frpl;
	}

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	/* invalidate the old key before reusing this MR */
	if (!(desc->reg_indicators & ind)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	/* chain the registration behind the invalidate, if one was built */
	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	desc->reg_indicators &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + offset;
	sge->length = size;

	return ret;
}
|
||||
|
||||
/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	struct ib_sge data_sge;
	int err, aligned_len;
	unsigned long flags;

	/* unaligned sg-list: fall back to a bounce buffer first */
	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev, mem,
					 &iser_task->data_copy[cmd_dir],
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* a fastreg descriptor is needed for multi-entry sg-lists and for
	 * any command that carries protection information */
	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		spin_lock_irqsave(&ib_conn->lock, flags);
		desc = list_first_entry(&ib_conn->fastreg.pool,
					struct fast_reg_descriptor, list);
		list_del(&desc->list);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
		regd_buf->reg.mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, regd_buf, mem,
			       ISER_DATA_KEY_VALID, &data_sge);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct ib_sge prot_sge, sig_sge;

		memset(&prot_sge, 0, sizeof(prot_sge));
		if (scsi_prot_sg_count(iser_task->sc)) {
			/* register the protection buffer as well, bouncing
			 * it too if it is unaligned */
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, ibdev, mem,
							 &iser_task->prot_copy[cmd_dir],
							 cmd_dir, aligned_len);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
				mem = &iser_task->prot_copy[cmd_dir];
			}

			err = iser_fast_reg_mr(iser_task, regd_buf, mem,
					       ISER_PROT_KEY_VALID, &prot_sge);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc, &data_sge,
				      &prot_sge, &sig_sge);
		if (err) {
			iser_err("Failed to register signature mr\n");
			return err;
		}
		desc->reg_indicators |= ISER_FASTREG_PROTECTED;

		/* remote side sees the signature MR */
		regd_buf->reg.lkey = sig_sge.lkey;
		regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
		regd_buf->reg.va = sig_sge.addr;
		regd_buf->reg.len = sig_sge.length;
		regd_buf->reg.is_mr = 1;
	} else {
		if (desc) {
			regd_buf->reg.rkey = desc->data_mr->rkey;
			regd_buf->reg.is_mr = 1;
		} else {
			/* single entry, no PI: the global DMA MR was used */
			regd_buf->reg.rkey = device->mr->rkey;
			regd_buf->reg.is_mr = 0;
		}

		regd_buf->reg.lkey = data_sge.lkey;
		regd_buf->reg.va = data_sge.addr;
		regd_buf->reg.len = data_sge.length;
	}

	return 0;
err_reg:
	/* return the unused descriptor to the pool */
	if (desc) {
		spin_lock_irqsave(&ib_conn->lock, flags);
		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
	}

	return err;
}
|
1313
drivers/infiniband/ulp/iser/iser_verbs.c
Normal file
1313
drivers/infiniband/ulp/iser/iser_verbs.c
Normal file
File diff suppressed because it is too large
Load diff
5
drivers/infiniband/ulp/isert/Kconfig
Normal file
5
drivers/infiniband/ulp/isert/Kconfig
Normal file
|
@ -0,0 +1,5 @@
|
|||
config INFINIBAND_ISERT
|
||||
tristate "iSCSI Extensions for RDMA (iSER) target support"
|
||||
depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
|
||||
---help---
|
||||
Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics.
|
2
drivers/infiniband/ulp/isert/Makefile
Normal file
2
drivers/infiniband/ulp/isert/Makefile
Normal file
|
@ -0,0 +1,2 @@
|
|||
ccflags-y := -Idrivers/target -Idrivers/target/iscsi
|
||||
obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o
|
3505
drivers/infiniband/ulp/isert/ib_isert.c
Normal file
3505
drivers/infiniband/ulp/isert/ib_isert.c
Normal file
File diff suppressed because it is too large
Load diff
201
drivers/infiniband/ulp/isert/ib_isert.h
Normal file
201
drivers/infiniband/ulp/isert/ib_isert.h
Normal file
|
@ -0,0 +1,201 @@
|
|||
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define ISERT_RDMA_LISTEN_BACKLOG	10
#define ISCSI_ISER_SG_TABLESIZE		256
/* wr_id used for local-invalidate / fast-reg WRs that need no completion */
#define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL

/* kind of PDU carried by a tx descriptor */
enum isert_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_DATAIN
};

enum iser_ib_op_code {
	ISER_IB_RECV,
	ISER_IB_SEND,
	ISER_IB_RDMA_WRITE,
	ISER_IB_RDMA_READ,
};

/* connection life-cycle states */
enum iser_conn_state {
	ISER_CONN_INIT,
	ISER_CONN_UP,
	ISER_CONN_FULL_FEATURE,
	ISER_CONN_TERMINATING,
	ISER_CONN_DOWN,
};

/* receive descriptor: iSER + iSCSI headers followed by immediate data,
 * padded so descriptors do not share cachelines (see ISER_RX_PAD_SIZE) */
struct iser_rx_desc {
	struct iser_hdr iser_header;
	struct iscsi_hdr iscsi_header;
	char data[ISER_RECV_DATA_SEG_LEN];
	u64 dma_addr;
	struct ib_sge rx_sg;
	char pad[ISER_RX_PAD_SIZE];
} __packed;

/* transmit descriptor for control / data-in PDUs */
struct iser_tx_desc {
	struct iser_hdr iser_header;
	struct iscsi_hdr iscsi_header;
	enum isert_desc_type type;
	u64 dma_addr;
	struct ib_sge tx_sg[2];
	int num_sge;
	struct isert_cmd *isert_cmd;
	struct llist_node *comp_llnode_batch;
	struct llist_node comp_llnode;
	bool llnode_active;
	struct ib_send_wr send_wr;
} __packed;

/* per-descriptor state bits for the fast registration pool */
enum isert_indicator {
	ISERT_PROTECTED		= 1 << 0,
	ISERT_DATA_KEY_VALID	= 1 << 1,
	ISERT_PROT_KEY_VALID	= 1 << 2,
	ISERT_SIG_KEY_VALID	= 1 << 3,
};

/* protection-information (T10-DIF) registration resources */
struct pi_context {
	struct ib_mr *prot_mr;
	struct ib_fast_reg_page_list *prot_frpl;
	struct ib_mr *sig_mr;
};

struct fast_reg_descriptor {
	struct list_head list;
	struct ib_mr *data_mr;
	struct ib_fast_reg_page_list *data_frpl;
	u8 ind;
	struct pi_context *pi_ctx;
};

/* dma-mapped view of one sg-list (data or protection) of a command */
struct isert_data_buf {
	struct scatterlist *sg;
	int nents;
	u32 sg_off;
	u32 len; /* cur_rdma_length */
	u32 offset;
	unsigned int dma_nents;
	enum dma_data_direction dma_dir;
};

/* indices into isert_rdma_wr.ib_sg[] */
enum {
	DATA = 0,
	PROT = 1,
	SIG = 2,
};

/* per-command RDMA work-request context */
struct isert_rdma_wr {
	struct list_head wr_list;
	struct isert_cmd *isert_cmd;
	enum iser_ib_op_code iser_ib_op;
	struct ib_sge *ib_sge;
	struct ib_sge s_ib_sge;
	int send_wr_num;
	struct ib_send_wr *send_wr;
	struct ib_send_wr s_send_wr;
	struct ib_sge ib_sg[3];
	struct isert_data_buf data;
	struct isert_data_buf prot;
	struct fast_reg_descriptor *fr_desc;
};

struct isert_cmd {
	uint32_t read_stag;
	uint32_t write_stag;
	uint64_t read_va;
	uint64_t write_va;
	u64 pdu_buf_dma;
	u32 pdu_buf_len;
	u32 read_va_off;
	u32 write_va_off;
	u32 rdma_wr_num;
	struct isert_conn *conn;
	struct iscsi_cmd *iscsi_cmd;
	struct iser_tx_desc tx_desc;
	struct isert_rdma_wr rdma_wr;
	struct work_struct comp_work;
};

struct isert_device;

struct isert_conn {
	enum iser_conn_state state;
	int post_recv_buf_count;
	atomic_t post_send_buf_count;
	u32 responder_resources;
	u32 initiator_depth;
	bool pi_support;
	u32 max_sge;
	char *login_buf;
	char *login_req_buf;
	char *login_rsp_buf;
	u64 login_req_dma;
	int login_req_len;
	u64 login_rsp_dma;
	unsigned int conn_rx_desc_head;
	struct iser_rx_desc *conn_rx_descs;
	struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX];
	struct iscsi_conn *conn;
	struct list_head conn_accept_node;
	struct completion conn_login_comp;
	struct completion login_req_comp;
	struct iser_tx_desc conn_login_tx_desc;
	struct rdma_cm_id *conn_cm_id;
	struct ib_pd *conn_pd;
	struct ib_mr *conn_mr;
	struct ib_qp *conn_qp;
	struct isert_device *conn_device;
	struct mutex conn_mutex;
	struct completion conn_wait;
	struct completion conn_wait_comp_err;
	struct kref conn_kref;
	struct list_head conn_fr_pool;
	int conn_fr_pool_size;
	/* lock to protect fastreg pool */
	spinlock_t conn_lock;
	struct work_struct release_work;
#define ISERT_COMP_BATCH_COUNT	8
	int conn_comp_batch;
	struct llist_head conn_comp_llist;
};

#define ISERT_MAX_CQ 64

/* per-CQ deferred completion processing context */
struct isert_cq_desc {
	struct isert_device *device;
	int cq_index;
	struct work_struct cq_rx_work;
	struct work_struct cq_tx_work;
};

/* per-HCA state shared by all connections on the device */
struct isert_device {
	int use_fastreg;
	bool pi_capable;
	int cqs_used;
	int refcount;
	int cq_active_qps[ISERT_MAX_CQ];
	struct ib_device *ib_device;
	struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
	struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
	struct isert_cq_desc *cq_desc;
	struct list_head dev_node;
	struct ib_device_attr dev_attr;
	/* memory-registration strategy hooks (FRWR vs. plain DMA MR) */
	int (*reg_rdma_mem)(struct iscsi_conn *conn,
				    struct iscsi_cmd *cmd,
				    struct isert_rdma_wr *wr);
	void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
			       struct isert_conn *isert_conn);
};

/* per-network-portal listener state */
struct isert_np {
	struct iscsi_np *np;
	struct semaphore np_sem;
	struct rdma_cm_id *np_cm_id;
	struct mutex np_accept_mutex;
	struct list_head np_accept_list;
	struct completion np_login_comp;
};
|
47
drivers/infiniband/ulp/isert/isert_proto.h
Normal file
47
drivers/infiniband/ulp/isert/isert_proto.h
Normal file
|
@ -0,0 +1,47 @@
|
|||
/* From iscsi_iser.h */

/* on-the-wire iSER header that precedes every iSCSI PDU */
struct iser_hdr {
	u8 flags;
	u8 rsvd[3];
	__be32 write_stag; /* write rkey */
	__be64 write_va;
	__be32 read_stag; /* read rkey */
	__be64 read_va;
} __packed;

/*Constant PDU lengths calculations */
#define ISER_HEADERS_LEN	(sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	8192
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISERT_MAX_TX_MISC_PDUS	4 /* NOOP_IN(2) , ASYNC_EVENT(2) */

#define ISERT_MAX_RX_MISC_PDUS	6 /* NOOP_OUT(2), TEXT(1), *
				   * SCSI_TMFUNC(2), LOGOUT(1) */

#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */

#define ISERT_QP_MAX_RECV_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX)

#define ISERT_MIN_POSTED_RX	(ISCSI_DEF_XMIT_CMDS_MAX >> 2)

#define ISERT_INFLIGHT_DATAOUTS	8

#define ISERT_QP_MAX_REQ_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX * \
				(1 + ISERT_INFLIGHT_DATAOUTS) + \
				ISERT_MAX_TX_MISC_PDUS + \
				ISERT_MAX_RX_MISC_PDUS)

/* pad iser_rx_desc up to a 4 KiB multiple */
#define ISER_RX_PAD_SIZE	(ISER_RECV_DATA_SEG_LEN + 4096 - \
		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))

/* iser_hdr.flags values */
#define ISER_VER	0x10
#define ISER_WSV	0x08
#define ISER_RSV	0x04
#define ISCSI_CTRL	0x10
#define ISER_HELLO	0x20
#define ISER_HELLORPLY	0x30
|
1
drivers/infiniband/ulp/srp/Kbuild
Normal file
1
drivers/infiniband/ulp/srp/Kbuild
Normal file
|
@ -0,0 +1 @@
|
|||
obj-$(CONFIG_INFINIBAND_SRP) += ib_srp.o
|
12
drivers/infiniband/ulp/srp/Kconfig
Normal file
12
drivers/infiniband/ulp/srp/Kconfig
Normal file
|
@ -0,0 +1,12 @@
|
|||
config INFINIBAND_SRP
|
||||
tristate "InfiniBand SCSI RDMA Protocol"
|
||||
depends on SCSI
|
||||
select SCSI_SRP_ATTRS
|
||||
---help---
|
||||
Support for the SCSI RDMA Protocol over InfiniBand. This
|
||||
allows you to access storage devices that speak SRP over
|
||||
InfiniBand.
|
||||
|
||||
The SRP protocol is defined by the INCITS T10 technical
|
||||
committee. See <http://www.t10.org/>.
|
||||
|
3373
drivers/infiniband/ulp/srp/ib_srp.c
Normal file
3373
drivers/infiniband/ulp/srp/ib_srp.c
Normal file
File diff suppressed because it is too large
Load diff
279
drivers/infiniband/ulp/srp/ib_srp.h
Normal file
279
drivers/infiniband/ulp/srp/ib_srp.h
Normal file
|
@ -0,0 +1,279 @@
|
|||
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,
	SRP_ABORT_TIMEOUT_MS	= 5000,

	/* CM rejection sub-codes understood by the driver */
	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_MAX_LUN		= 512,
	SRP_DEF_SG_TABLESIZE	= 12,

	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ		= ~0U,
	SRP_TAG_TSK_MGMT	= 1U << 31,

	SRP_MAX_PAGES_PER_MR	= 512,

	/* wr_id tag bits distinguishing invalidate / fast-reg completions */
	LOCAL_INV_WR_ID_MASK	= 1,
	FAST_REG_WR_ID_MASK	= 2,
};

enum srp_target_state {
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
 */
struct srp_device {
	struct list_head dev_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	struct ib_mr *mr;
	u64 mr_page_mask;
	int mr_page_size;
	int mr_max_size;
	int max_pages_per_mr;
	bool has_fmr;
	bool has_fr;
	bool use_fast_reg;
};

/* one SRP initiator port on an HCA */
struct srp_host {
	struct srp_device *srp_dev;
	u8 port;
	struct device dev;
	struct list_head target_list;
	spinlock_t target_lock;
	struct completion released;
	struct list_head list;
	struct mutex add_target_mutex;
};

/* per-outstanding-command bookkeeping */
struct srp_request {
	struct list_head list;
	struct scsi_cmnd *scmnd;
	struct srp_iu *cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64 *map_page;
	struct srp_direct_buf *indirect_desc;
	dma_addr_t indirect_dma_addr;
	short nmdesc;
	short index;
};

struct srp_target_port {
	/* These are RW in the hot path, and commonly used together */
	struct list_head free_tx;
	struct list_head free_reqs;
	spinlock_t lock;
	s32 req_lim;

	/* These are read-only in the hot path */
	struct ib_cq *send_cq ____cacheline_aligned_in_smp;
	struct ib_cq *recv_cq;
	struct ib_qp *qp;
	union {
		struct ib_fmr_pool *fmr_pool;
		struct srp_fr_pool *fr_pool;
	};
	u32 lkey;
	u32 rkey;
	enum srp_target_state state;
	unsigned int max_iu_len;
	unsigned int cmd_sg_cnt;
	unsigned int indirect_size;
	bool allow_ext_sg;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	__be64 id_ext;
	__be64 ioc_guid;
	__be64 service_id;
	__be64 initiator_ext;
	u16 io_class;
	struct srp_host *srp_host;
	struct Scsi_Host *scsi_host;
	struct srp_rport *rport;
	char target_name[32];
	unsigned int scsi_id;
	unsigned int sg_tablesize;
	int queue_size;
	int req_ring_size;
	int comp_vector;
	int tl_retry_count;

	struct ib_sa_path_rec path;
	__be16 orig_dgid[8];
	struct ib_sa_query *path_query;
	int path_query_id;

	u32 rq_tmo_jiffies;
	bool connected;

	struct ib_cm_id *cm_id;

	int max_ti_iu_len;

	int zero_req_lim;

	struct srp_iu **tx_ring;
	struct srp_iu **rx_ring;
	struct srp_request *req_ring;

	struct work_struct tl_err_work;
	struct work_struct remove_work;

	struct list_head list;
	struct completion done;
	int status;
	bool qp_in_error;

	struct completion tsk_mgmt_done;
	u8 tsk_mgmt_status;
};

/* information unit: a dma-mapped message buffer */
struct srp_iu {
	struct list_head list;
	u64 dma;
	void *buf;
	size_t size;
	enum dma_data_direction direction;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 * @frpl:  Fast registration page list.
 */
struct srp_fr_desc {
	struct list_head entry;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:              Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:              Protects free_list.
 * @free_list:         List of free descriptors.
 * @desc:              Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int size;
	int max_page_list_len;
	spinlock_t lock;
	struct list_head free_list;
	struct srp_fr_desc desc[0];
};

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:           Pointer to the element of the SRP buffer descriptor array
 *                  that is being filled in.
 * @pages:          Array with DMA addresses of pages being considered for
 *                  memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:        Number of bytes that will be registered with the next
 *                  FMR or FR memory registration call.
 * @total_len:      Total number of bytes in the sg-list being mapped.
 * @npages:         Number of page addresses in the pages[] array.
 * @nmdesc:         Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:          Number of SRP buffer descriptors that have been filled in.
 * @unmapped_sg:    First element of the sg-list that is mapped via FMR or FR.
 * @unmapped_index: Index of the first element mapped via FMR or FR.
 * @unmapped_addr:  DMA address of the first element mapped via FMR or FR.
 */
struct srp_map_state {
	union {
		struct ib_pool_fmr **next_fmr;
		struct srp_fr_desc **next_fr;
	};
	struct srp_direct_buf *desc;
	u64 *pages;
	dma_addr_t base_dma_addr;
	u32 dma_len;
	u32 total_len;
	unsigned int npages;
	unsigned int nmdesc;
	unsigned int ndesc;
	struct scatterlist *unmapped_sg;
	int unmapped_index;
	dma_addr_t unmapped_addr;
};

#endif /* IB_SRP_H */
|
12
drivers/infiniband/ulp/srpt/Kconfig
Normal file
12
drivers/infiniband/ulp/srpt/Kconfig
Normal file
|
@ -0,0 +1,12 @@
|
|||
config INFINIBAND_SRPT
|
||||
tristate "InfiniBand SCSI RDMA Protocol target support"
|
||||
depends on INFINIBAND && TARGET_CORE
|
||||
---help---
|
||||
|
||||
Support for the SCSI RDMA Protocol (SRP) Target driver. The
|
||||
SRP protocol is a protocol that allows an initiator to access
|
||||
a block storage device on another host (target) over a network
|
||||
that supports the RDMA protocol. Currently the RDMA protocol is
|
||||
supported by InfiniBand and by iWARP network hardware. More
|
||||
information about the SRP protocol can be found on the website
|
||||
of the INCITS T10 technical committee (http://www.t10.org/).
|
2
drivers/infiniband/ulp/srpt/Makefile
Normal file
2
drivers/infiniband/ulp/srpt/Makefile
Normal file
|
@ -0,0 +1,2 @@
|
|||
ccflags-y := -Idrivers/target
|
||||
obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
|
139
drivers/infiniband/ulp/srpt/ib_dm_mad.h
Normal file
139
drivers/infiniband/ulp/srpt/ib_dm_mad.h
Normal file
|
@ -0,0 +1,139 @@
|
|||
/*
|
||||
* Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef IB_DM_MAD_H
|
||||
#define IB_DM_MAD_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <rdma/ib_mad.h>
|
||||
|
||||
/* Device management MAD status codes and attribute IDs. */
enum {
	/*
	 * MAD response status values. See also section 13.4.7 Status Field,
	 * table 115 MAD Common Status Field Bit Values and also section
	 * 16.3.1.1 Status Field in the InfiniBand Architecture Specification.
	 */
	DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
	DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
	DM_MAD_STATUS_INVALID_FIELD = 0x001c,
	DM_MAD_STATUS_NO_IOC = 0x0100,

	/*
	 * Attribute IDs for device management queries. See also the Device
	 * Management chapter, section 16.3.3 Attributes, table 279 Device
	 * Management Attributes in the InfiniBand Architecture Specification.
	 */
	DM_ATTR_CLASS_PORT_INFO = 0x01,
	DM_ATTR_IOU_INFO = 0x10,
	DM_ATTR_IOC_PROFILE = 0x11,
	DM_ATTR_SVC_ENTRIES = 0x12
};
|
||||
|
||||
/*
 * Device management datagram header. NOTE(review): the 28 reserved bytes
 * appear to exist only to pad struct ib_dm_mad below to the expected DM
 * header size -- confirm against the IB Architecture Specification.
 */
struct ib_dm_hdr {
	u8	reserved[28];
};
|
||||
|
||||
/*
 * Structure of management datagram sent by the SRP target implementation.
 * Contains a management datagram header, reliable multi-packet transaction
 * protocol (RMPP) header and ib_dm_hdr. Notes:
 * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
 *   management datagrams.
 * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
 *   is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
 * - The maximum supported size for a management datagram when not using RMPP
 *   is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
 */
struct ib_dm_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	struct ib_dm_hdr	dm_hdr;
	u8			data[IB_MGMT_DEVICE_DATA];	/* attribute payload */
};
|
||||
|
||||
/*
 * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
 * Architecture Specification. All multi-byte fields are big-endian (wire
 * format).
 */
struct ib_dm_iou_info {
	__be16	change_id;
	u8	max_controllers;
	u8	op_rom;
	/*
	 * NOTE(review): presumably a packed list indicating which controller
	 * slots are present -- confirm the encoding against spec 16.3.3.3.
	 */
	u8	controller_list[128];
};
|
||||
|
||||
/*
 * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
 * the InfiniBand Architecture Specification. All __be* fields are big-endian
 * as transmitted on the wire.
 */
struct ib_dm_ioc_profile {
	__be64	guid;
	__be32	vendor_id;
	__be32	device_id;
	__be16	device_version;
	__be16	reserved1;
	__be32	subsys_vendor_id;
	__be32	subsys_device_id;
	__be16	io_class;
	__be16	io_subclass;
	__be16	protocol;
	__be16	protocol_version;
	__be16	service_conn;
	__be16	initiators_supported;
	__be16	send_queue_depth;
	u8	reserved2;
	u8	rdma_read_depth;
	__be32	send_size;
	__be32	rdma_size;
	u8	op_cap_mask;
	u8	svc_cap_mask;
	u8	num_svc_entries;
	u8	reserved3[9];
	u8	id_string[64];
};
|
||||
|
||||
/* One service entry: a service name and its big-endian service ID. */
struct ib_dm_svc_entry {
	u8	name[40];
	__be64	id;
};
|
||||
|
||||
/*
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
 */
struct ib_dm_svc_entries {
	struct ib_dm_svc_entry	service_entries[4];
};
|
||||
|
||||
#endif
|
4051
drivers/infiniband/ulp/srpt/ib_srpt.c
Normal file
4051
drivers/infiniband/ulp/srpt/ib_srpt.c
Normal file
File diff suppressed because it is too large
Load diff
443
drivers/infiniband/ulp/srpt/ib_srpt.h
Normal file
443
drivers/infiniband/ulp/srpt/ib_srpt.h
Normal file
|
@ -0,0 +1,443 @@
|
|||
/*
|
||||
* Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
|
||||
* Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef IB_SRPT_H
|
||||
#define IB_SRPT_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_sa.h>
|
||||
#include <rdma/ib_cm.h>
|
||||
|
||||
#include <scsi/srp.h>
|
||||
|
||||
#include "ib_dm_mad.h"
|
||||
|
||||
/*
|
||||
* The prefix the ServiceName field must start with in the device management
|
||||
* ServiceEntries attribute pair. See also the SRP specification.
|
||||
*/
|
||||
#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
|
||||
|
||||
/* Protocol constants and driver tuning parameters for the SRP target. */
enum {
	/*
	 * SRP IOControllerProfile attributes for SRP target ports that have
	 * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
	 * in the SRP specification.
	 */
	SRP_PROTOCOL = 0x0108,
	SRP_PROTOCOL_VERSION = 0x0001,
	SRP_IO_SUBCLASS = 0x609e,
	SRP_SEND_TO_IOC = 0x01,
	SRP_SEND_FROM_IOC = 0x02,
	SRP_RDMA_READ_FROM_IOC = 0x08,
	SRP_RDMA_WRITE_FROM_IOC = 0x20,

	/*
	 * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
	 * specification.
	 */
	SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
	SRP_LOSOLNT = 0x10, /* logout solicited notification */
	SRP_CRSOLNT = 0x20, /* credit request solicited notification */
	SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */

	/*
	 * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
	 * 18 and 20 in the SRP specification.
	 */
	SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
	SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */

	/*
	 * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
	 * 16 and 22 in the SRP specification.
	 */
	SRP_SOLNT = 0x01, /* SOLNT = solicited notification */

	/* Task management response codes. See also table 24 in the SRP
	 * specification. */
	SRP_TSK_MGMT_SUCCESS = 0x00,
	SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
	SRP_TSK_MGMT_FAILED = 0x05,

	/* Task attribute values. See also table 21 in the SRP
	 * specification. */
	SRP_CMD_SIMPLE_Q = 0x0,
	SRP_CMD_HEAD_OF_Q = 0x1,
	SRP_CMD_ORDERED_Q = 0x2,
	SRP_CMD_ACA = 0x4,

	/* SRP_LOGIN_RSP multi-channel result codes. */
	SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
	SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
	SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,

	/* Scatter/gather limits. */
	SRPT_DEF_SG_TABLESIZE = 128,
	SRPT_DEF_SG_PER_WQE = 16,

	/* Queue sizing defaults and limits. */
	MIN_SRPT_SQ_SIZE = 16,
	DEF_SRPT_SQ_SIZE = 4096,
	SRPT_RQ_SIZE = 128,
	MIN_SRPT_SRQ_SIZE = 4,
	DEFAULT_SRPT_SRQ_SIZE = 4095,
	MAX_SRPT_SRQ_SIZE = 65535,
	MAX_SRPT_RDMA_SIZE = 1U << 24,
	MAX_SRPT_RSP_SIZE = 1024,

	/*
	 * Request/response message size limits. The inline comments give the
	 * size in bytes of each component structure.
	 */
	MIN_MAX_REQ_SIZE = 996,
	DEFAULT_MAX_REQ_SIZE
		= sizeof(struct srp_cmd)/*48*/
		+ sizeof(struct srp_indirect_buf)/*20*/
		+ 128 * sizeof(struct srp_direct_buf)/*16*/,

	MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
	DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */

	DEFAULT_MAX_RDMA_SIZE = 65536,
};
|
||||
|
||||
/*
 * Work request type. Encoded into the upper 32 bits of the IB work request
 * ID by encode_wr_id() so that completion handlers can tell what kind of
 * operation completed.
 */
enum srpt_opcode {
	SRPT_RECV,
	SRPT_SEND,
	SRPT_RDMA_MID,
	SRPT_RDMA_ABORT,
	SRPT_RDMA_READ_LAST,
	SRPT_RDMA_WRITE_LAST,
};
|
||||
|
||||
/**
 * encode_wr_id - pack an opcode and an I/O context index into a wr_id
 * @opcode: enum srpt_opcode value; stored in the upper 32 bits.
 * @idx:    I/O context ring index; stored in the lower 32 bits.
 *
 * Inverse of opcode_from_wr_id() / idx_from_wr_id().
 */
static inline u64 encode_wr_id(u8 opcode, u32 idx)
{
	u64 wr_id = opcode;

	wr_id <<= 32;
	return wr_id | idx;
}
|
||||
/**
 * opcode_from_wr_id - extract the opcode from a work request ID
 * @wr_id: value produced by encode_wr_id().
 */
static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
{
	return (enum srpt_opcode)(wr_id >> 32);
}
|
||||
/**
 * idx_from_wr_id - extract the I/O context index from a work request ID
 * @wr_id: value produced by encode_wr_id().
 */
static inline u32 idx_from_wr_id(u64 wr_id)
{
	return (u32)(wr_id & 0xffffffffULL);
}
|
||||
|
||||
/*
 * Per-transfer RDMA information unit: the remote buffer (address + rkey)
 * and the local scatter/gather list used for one RDMA operation.
 */
struct rdma_iu {
	u64		raddr;		/* remote (initiator) buffer address */
	u32		rkey;		/* remote key for that buffer */
	struct ib_sge	*sge;		/* local SGE array */
	u32		sge_cnt;	/* number of entries in @sge */
	int		mem_id;
};
|
||||
|
||||
/**
 * enum srpt_command_state - SCSI command state managed by SRPT.
 * @SRPT_STATE_NEW:           New command arrived and is being processed.
 * @SRPT_STATE_NEED_DATA:     Processing a write or bidir command and waiting
 *                            for data arrival.
 * @SRPT_STATE_DATA_IN:       Data for the write or bidir command arrived and is
 *                            being processed.
 * @SRPT_STATE_CMD_RSP_SENT:  SRP_RSP for SRP_CMD has been sent.
 * @SRPT_STATE_MGMT:          Processing a SCSI task management command.
 * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
 * @SRPT_STATE_DONE:          Command processing finished successfully, command
 *                            processing has been aborted or command processing
 *                            failed.
 */
enum srpt_command_state {
	SRPT_STATE_NEW = 0,
	SRPT_STATE_NEED_DATA = 1,
	SRPT_STATE_DATA_IN = 2,
	SRPT_STATE_CMD_RSP_SENT = 3,
	SRPT_STATE_MGMT = 4,
	SRPT_STATE_MGMT_RSP_SENT = 5,
	SRPT_STATE_DONE = 6,
};
|
||||
|
||||
/**
 * struct srpt_ioctx - Shared SRPT I/O context information.
 * @buf:   Pointer to the buffer.
 * @dma:   DMA address of the buffer.
 * @index: Index of the I/O context in its ioctx_ring array.
 */
struct srpt_ioctx {
	void			*buf;
	dma_addr_t		dma;
	uint32_t		index;
};
|
||||
|
||||
/**
 * struct srpt_recv_ioctx - SRPT receive I/O context.
 * @ioctx:     See struct srpt_ioctx above.
 * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
 */
struct srpt_recv_ioctx {
	struct srpt_ioctx	ioctx;
	struct list_head	wait_list;
};
|
||||
|
||||
/**
 * struct srpt_send_ioctx - SRPT send I/O context.
 * @ioctx:        See struct srpt_ioctx above.
 * @ch:           Channel pointer.
 * @free_list:    Node in srpt_rdma_ch.free_list.
 * @n_rbuf:       Number of data buffers in the received SRP command.
 * @rbufs:        Pointer to SRP data buffer array.
 * @single_rbuf:  SRP data buffer if the command has only a single buffer.
 * @sg:           Pointer to sg-list associated with this I/O context.
 * @sg_cnt:       SG-list size.
 * @mapped_sg_count: ib_dma_map_sg() return value.
 * @n_rdma_ius:   Number of elements in the rdma_ius array.
 * @rdma_ius:     Array with information about the RDMA mapping.
 * @n_rdma:       NOTE(review): presumably the number of RDMA work requests
 *                posted for this command -- confirm against ib_srpt.c.
 * @tag:          Tag of the received SRP information unit.
 * @spinlock:     Protects 'state'.
 * @state:        I/O context state.
 * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
 *                the already initiated transfers have finished.
 * @cmd:          Target core command data structure.
 * @tx_done:      NOTE(review): completion presumably signalled when the send
 *                has finished -- confirm against ib_srpt.c.
 * @queue_status_only: NOTE(review): presumably set when only a status
 *                response (no data) must be queued -- confirm.
 * @sense_data:   SCSI sense data.
 */
struct srpt_send_ioctx {
	struct srpt_ioctx	ioctx;
	struct srpt_rdma_ch	*ch;
	struct rdma_iu		*rdma_ius;
	struct srp_direct_buf	*rbufs;
	struct srp_direct_buf	single_rbuf;
	struct scatterlist	*sg;
	struct list_head	free_list;
	spinlock_t		spinlock;
	enum srpt_command_state	state;
	bool			rdma_aborted;
	struct se_cmd		cmd;
	struct completion	tx_done;
	u64			tag;
	int			sg_cnt;
	int			mapped_sg_count;
	u16			n_rdma_ius;
	u8			n_rdma;
	u8			n_rbuf;
	bool			queue_status_only;
	u8			sense_data[SCSI_SENSE_BUFFERSIZE];
};
|
||||
|
||||
/**
 * enum rdma_ch_state - SRP channel state.
 * @CH_CONNECTING:    QP is in RTR state; waiting for RTU.
 * @CH_LIVE:          QP is in RTS state.
 * @CH_DISCONNECTING: DREQ has been received, or a DREQ has been sent and the
 *                    driver is waiting for a DREP.
 * @CH_DRAINING:      QP is in ERR state; waiting for last WQE event.
 * @CH_RELEASING:     Last WQE event has been received; releasing resources.
 */
enum rdma_ch_state {
	CH_CONNECTING,
	CH_LIVE,
	CH_DISCONNECTING,
	CH_DRAINING,
	CH_RELEASING
};
|
||||
|
||||
/**
 * struct srpt_rdma_ch - RDMA channel.
 * @wait_queue:    Allows the kernel thread to wait for more work.
 * @thread:        Kernel thread that processes the IB queues associated with
 *                 the channel.
 * @cm_id:         IB CM ID associated with the channel.
 * @qp:            IB queue pair used for communicating over this channel.
 * @cq:            IB completion queue for this channel.
 * @rq_size:       IB receive queue size.
 * @rsp_size:      IB response message size in bytes.
 * @sq_wr_avail:   number of work requests available in the send queue.
 * @sport:         pointer to the information of the HCA port used by this
 *                 channel.
 * @i_port_id:     128-bit initiator port identifier copied from SRP_LOGIN_REQ.
 * @t_port_id:     128-bit target port identifier copied from SRP_LOGIN_REQ.
 * @max_ti_iu_len: maximum target-to-initiator information unit length.
 * @req_lim:       request limit: maximum number of requests that may be sent
 *                 by the initiator without having received a response.
 * @req_lim_delta: Number of credits not yet sent back to the initiator.
 * @spinlock:      Protects free_list and state.
 * @free_list:     Head of list with free send I/O contexts.
 * @state:         channel state. See also enum rdma_ch_state.
 * @ioctx_ring:    Send ring.
 * @wc:            IB work completion array for srpt_process_completion().
 * @list:          Node for insertion in the srpt_device.rch_list list.
 * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
 *                 list contains struct srpt_ioctx elements and is protected
 *                 against concurrent modification by the cm_id spinlock.
 * @sess:          Session information associated with this SRP channel.
 * @sess_name:     Session name.
 * @release_work:  Allows scheduling of srpt_release_channel().
 * @release_done:  Enables waiting for srpt_release_channel() completion.
 * @in_shutdown:   NOTE(review): presumably set once channel shutdown has been
 *                 initiated -- confirm against ib_srpt.c.
 */
struct srpt_rdma_ch {
	wait_queue_head_t	wait_queue;
	struct task_struct	*thread;
	struct ib_cm_id		*cm_id;
	struct ib_qp		*qp;
	struct ib_cq		*cq;
	int			rq_size;
	u32			rsp_size;
	atomic_t		sq_wr_avail;
	struct srpt_port	*sport;
	u8			i_port_id[16];
	u8			t_port_id[16];
	int			max_ti_iu_len;
	atomic_t		req_lim;
	atomic_t		req_lim_delta;
	spinlock_t		spinlock;
	struct list_head	free_list;
	enum rdma_ch_state	state;
	struct srpt_send_ioctx	**ioctx_ring;
	struct ib_wc		wc[16];
	struct list_head	list;
	struct list_head	cmd_wait_list;
	struct se_session	*sess;
	u8			sess_name[36];
	struct work_struct	release_work;
	struct completion	*release_done;
	bool			in_shutdown;
};
|
||||
|
||||
/**
 * struct srpt_port_attrib - Attributes for SRPT port
 * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
 * @srp_max_rsp_size:  Maximum size of SRP response messages in bytes.
 * @srp_sq_size:       Shared receive queue (SRQ) size. NOTE(review): the
 *                     field name suggests this is the send queue size; the
 *                     original comment says SRQ -- confirm against ib_srpt.c.
 */
struct srpt_port_attrib {
	u32			srp_max_rdma_size;
	u32			srp_max_rsp_size;
	u32			srp_sq_size;
};
|
||||
|
||||
/**
 * struct srpt_port - Information associated by SRPT with a single IB port.
 * @sdev:          backpointer to the HCA information.
 * @mad_agent:     per-port management datagram processing information.
 * @enabled:       Whether or not this target port is enabled.
 * @port_guid:     ASCII representation of Port GUID
 * @port:          one-based port number.
 * @sm_lid:        cached value of the port's sm_lid.
 * @lid:           cached value of the port's lid.
 * @gid:           cached value of the port's gid.
 * @port_acl_lock: Protects port_acl_list.
 * @work:          work structure for refreshing the aforementioned cached
 *                 values.
 * @port_tpg_1:    Target portal group = 1 data.
 * @port_wwn:      Target core WWN data.
 * @port_acl_list: Head of the list with all node ACLs for this port.
 * @port_attrib:   Port attributes (see struct srpt_port_attrib).
 */
struct srpt_port {
	struct srpt_device	*sdev;
	struct ib_mad_agent	*mad_agent;
	bool			enabled;
	u8			port_guid[64];
	u8			port;
	u16			sm_lid;
	u16			lid;
	union ib_gid		gid;
	spinlock_t		port_acl_lock;
	struct work_struct	work;
	struct se_portal_group	port_tpg_1;
	struct se_wwn		port_wwn;
	struct list_head	port_acl_list;
	struct srpt_port_attrib port_attrib;
};
|
||||
|
||||
/**
 * struct srpt_device - Information associated by SRPT with a single HCA.
 * @device:        Backpointer to the struct ib_device managed by the IB core.
 * @pd:            IB protection domain.
 * @mr:            L_Key (local key) with write access to all local memory.
 * @srq:           Per-HCA SRQ (shared receive queue).
 * @cm_id:         Connection identifier.
 * @dev_attr:      Attributes of the InfiniBand device as obtained during the
 *                 ib_client.add() callback.
 * @srq_size:      SRQ size.
 * @ioctx_ring:    Ring of receive I/O contexts for the per-HCA SRQ.
 * @rch_list:      Per-device channel list -- see also srpt_rdma_ch.list.
 * @ch_releaseQ:   Enables waiting for removal from rch_list.
 * @spinlock:      Protects rch_list and tpg.
 * @port:          Information about the ports owned by this HCA.
 * @event_handler: Per-HCA asynchronous IB event handler.
 * @list:          Node in srpt_dev_list.
 */
struct srpt_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct ib_srq		*srq;
	struct ib_cm_id		*cm_id;
	struct ib_device_attr	dev_attr;
	int			srq_size;
	struct srpt_recv_ioctx	**ioctx_ring;
	struct list_head	rch_list;
	wait_queue_head_t	ch_releaseQ;
	spinlock_t		spinlock;
	struct srpt_port	port[2];
	struct ib_event_handler	event_handler;
	struct list_head	list;
};
|
||||
|
||||
/**
 * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
 * @i_port_id: 128-bit SRP initiator port ID.
 * @sport:     port information.
 * @nacl:      Target core node ACL information.
 * @list:      Element of the per-HCA ACL list.
 */
struct srpt_node_acl {
	u8			i_port_id[16];
	struct srpt_port	*sport;
	struct se_node_acl	nacl;
	struct list_head	list;
};
|
||||
|
||||
/*
 * SRP-related SCSI persistent reservation definitions.
 *
 * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
 * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
 * SCSI over an RDMA interface).
 */

enum {
	/* PROTOCOL IDENTIFIER value for SRP in a SCSI TransportID. */
	SCSI_TRANSPORTID_PROTOCOLID_SRP	= 4,
};
|
||||
|
||||
/*
 * On-the-wire TransportID format for SCSI-over-RDMA initiator ports; see the
 * SPC4r28 references above struct declaration of this file section.
 */
struct spc_rdma_transport_id {
	uint8_t protocol_identifier;	/* SCSI_TRANSPORTID_PROTOCOLID_SRP */
	uint8_t reserved[7];
	uint8_t i_port_id[16];		/* 128-bit SRP initiator port ID */
};
|
||||
|
||||
#endif /* IB_SRPT_H */
|
Loading…
Add table
Add a link
Reference in a new issue