Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,12 @@
config INFINIBAND_ISER
tristate "iSCSI Extensions for RDMA (iSER)"
depends on SCSI && INET && INFINIBAND_ADDR_TRANS
select SCSI_ISCSI_ATTRS
---help---
Support for the iSCSI Extensions for RDMA (iSER) Protocol
over InfiniBand. This allows you to access storage devices
that speak iSCSI over iSER over InfiniBand.
The iSER protocol is defined by IETF.
See <http://www.ietf.org/rfc/rfc5046.txt>
and <http://members.infinibandta.org/kwspub/spec/Annex_iSER.PDF>


@@ -0,0 +1,4 @@
obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o
ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \
iscsi_iser.o

File diff suppressed because it is too large


@@ -0,0 +1,670 @@
/*
* iSER transport for the Open iSCSI Initiator & iSER transport internals
*
* Copyright (C) 2004 Dmitry Yusupov
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* based on code maintained by open-iscsi@googlegroups.com
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__
#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
#define DRV_VER "1.4.8"
#define iser_dbg(fmt, arg...) \
do { \
if (iser_debug_level > 2) \
printk(KERN_DEBUG PFX "%s: " fmt,\
__func__ , ## arg); \
} while (0)
#define iser_warn(fmt, arg...) \
do { \
if (iser_debug_level > 0) \
pr_warn(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define iser_info(fmt, arg...) \
do { \
if (iser_debug_level > 1) \
pr_info(PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
#define iser_err(fmt, arg...) \
do { \
printk(KERN_ERR PFX "%s: " fmt, \
__func__ , ## arg); \
} while (0)
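/* Verbosity summary for the macros above: iser_err always prints;
 * iser_warn needs iser_debug_level > 0, iser_info > 1 and iser_dbg > 2.
 * E.g. loading with iser_debug_level=2 enables err, warn and info but
 * still suppresses the per-command iser_dbg traces. */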
#define SHIFT_4K 12
#define SIZE_4K (1ULL << SHIFT_4K)
#define MASK_4K (~(SIZE_4K-1))
/* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
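/* Worked out: 0x80000 bytes = 512KB, so 0x80000 >> SHIFT_4K (12) = 128
 * scatter-gather entries of 4K pages per RDMA operation. */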
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
#define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
#else
#define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
* to have at max for SCSI command. The tx posting & completion handling code *
* supports -EAGAIN scheme where tx is suspended till the QP has room for more *
* send WR. D=8 comes from 64K/8K */
#define ISER_INFLIGHT_DATAOUTS 8
#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
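/* Worked example with the 512-command default: max_send_wr =
 * 512 * (1 + 8) + 6 + 4 = 4618 work requests on the send queue. */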
/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD 5
/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
(1 + ISER_MAX_REG_WR_PER_CMD) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
#define ISER_WC_BATCH_COUNT 16
#define ISER_SIGNAL_CMD_COUNT 32
#define ISER_VER 0x10
#define ISER_WSV 0x08
#define ISER_RSV 0x04
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
#define ISER_BEACON_WRID 0xfffffffffffffffeULL
/**
* struct iser_hdr - iSER header
*
* @flags: flags support (zbva, remote_inv)
* @rsvd: reserved
* @write_stag: write rkey
* @write_va: write virtual address
* @read_stag: read rkey
* @read_va: read virtual address
*/
struct iser_hdr {
u8 flags;
u8 rsvd[3];
__be32 write_stag;
__be64 write_va;
__be32 read_stag;
__be64 read_va;
} __attribute__((packed));
#define ISER_ZBVA_NOT_SUPPORTED 0x80
#define ISER_SEND_W_INV_NOT_SUPPORTED 0x40
struct iser_cm_hdr {
u8 flags;
u8 rsvd[3];
} __packed;
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
#define ISER_RECV_DATA_SEG_LEN 128
#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64
enum iser_conn_state {
ISER_CONN_INIT, /* descriptor allocated, no conn */
ISER_CONN_PENDING, /* in the process of being established */
ISER_CONN_UP, /* up and running */
ISER_CONN_TERMINATING, /* in the process of being terminated */
ISER_CONN_DOWN, /* shut down */
ISER_CONN_STATES_NUM
};
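/* Sketch of the typical lifecycle as driven by the connection code:
 * INIT -> PENDING (rdma_cm connect in flight) -> UP ->
 * TERMINATING -> DOWN; a failed connect typically moves
 * PENDING straight to DOWN. */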
enum iser_task_status {
ISER_TASK_STATUS_INIT = 0,
ISER_TASK_STATUS_STARTED,
ISER_TASK_STATUS_COMPLETED
};
enum iser_data_dir {
ISER_DIR_IN = 0, /* to initiator */
ISER_DIR_OUT, /* from initiator */
ISER_DIRS_NUM
};
/**
* struct iser_data_buf - iSER data buffer
*
* @buf: pointer to the sg list
* @size: num entries of this sg
* @data_len: total buffer byte len
* @dma_nents: returned by dma_map_sg
* @copy_buf: bounce buffer allocated for SGs that are
* unaligned for rdma and must be copied
* @sg_single: SG-ified clone of a non-SG SC or of an
* unaligned SG
*/
struct iser_data_buf {
void *buf;
unsigned int size;
unsigned long data_len;
unsigned int dma_nents;
char *copy_buf;
struct scatterlist sg_single;
};
/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
/**
* struct iser_mem_reg - iSER memory registration info
*
* @lkey: MR local key
* @rkey: MR remote key
* @va: MR start address (buffer va)
* @len: MR length
* @mem_h: pointer to registration context (FMR/Fastreg)
* @is_mr: indicates whether we registered the buffer
*/
struct iser_mem_reg {
u32 lkey;
u32 rkey;
u64 va;
u64 len;
void *mem_h;
int is_mr;
};
/**
* struct iser_regd_buf - iSER buffer registration desc
*
* @reg: memory registration info
* @virt_addr: virtual address of buffer
* @device: reference to iser device
* @direction: dma direction (for dma_unmap)
* @data_size: data buffer size in bytes
*/
struct iser_regd_buf {
struct iser_mem_reg reg;
void *virt_addr;
struct iser_device *device;
enum dma_data_direction direction;
unsigned int data_size;
};
enum iser_desc_type {
ISCSI_TX_CONTROL,
ISCSI_TX_SCSI_COMMAND,
ISCSI_TX_DATAOUT
};
/**
* struct iser_tx_desc - iSER TX descriptor (for send wr_id)
*
* @iser_header: iser header
* @iscsi_header: iscsi header
* @type: command/control/dataout
* @dma_addr: header buffer dma_address
* @tx_sg: sg[0] points to iser/iscsi headers
* sg[1] optionally points to immediate data,
* unsolicited data-out or control
* @num_sge: number of sges used on this TX task
*/
struct iser_tx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
enum iser_desc_type type;
u64 dma_addr;
struct ib_sge tx_sg[2];
int num_sge;
};
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
sizeof(u64) + sizeof(struct ib_sge)))
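/* Size check (assuming the usual 48-byte iSCSI BHS and a 16-byte ib_sge):
 * ISER_HEADERS_LEN = 28 + 48 = 76, ISER_RX_PAYLOAD_SIZE = 76 + 128 = 204,
 * hence ISER_RX_PAD_SIZE = 256 - (204 + 8 + 16) = 28 and the packed
 * iser_rx_desc below is exactly 256 bytes. */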
/**
* struct iser_rx_desc - iSER RX descriptor (for recv wr_id)
*
* @iser_header: iser header
* @iscsi_header: iscsi header
* @data: received data segment
* @dma_addr: receive buffer dma address
* @rx_sg: ib_sge of receive buffer
* @pad: for sense data TODO: Modify to maximum sense length supported
*/
struct iser_rx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
char data[ISER_RECV_DATA_SEG_LEN];
u64 dma_addr;
struct ib_sge rx_sg;
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
#define ISER_MAX_CQ 4
struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;
/**
* struct iser_comp - iSER completion context
*
* @device: pointer to device handle
* @cq: completion queue
* @wcs: work completion array
* @tasklet: Tasklet handle
* @active_qps: Number of active QPs attached
* to completion context
*/
struct iser_comp {
struct iser_device *device;
struct ib_cq *cq;
struct ib_wc wcs[ISER_WC_BATCH_COUNT];
struct tasklet_struct tasklet;
int active_qps;
};
/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
* @pd: Protection Domain for this device
* @dev_attr: Device attributes container
* @mr: Global DMA memory region
* @event_handler: IB events handle routine
* @ig_list: entry in devices list
* @refcount: Reference counter, dominated by open iser connections
* @comps_used: Number of completion contexts used, min between online
* cpus and device max completion vectors
* @comps: Dynamically allocated array of completion handlers
* Memory registration pool function pointers (FMR or Fastreg):
* @iser_alloc_rdma_reg_res: Allocation of memory regions pool
* @iser_free_rdma_reg_res: Free of memory regions pool
* @iser_reg_rdma_mem: Memory registration routine
* @iser_unreg_rdma_mem: Memory deregistration routine
*/
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
struct ib_device_attr dev_attr;
struct ib_mr *mr;
struct ib_event_handler event_handler;
struct list_head ig_list;
int refcount;
int comps_used;
struct iser_comp comps[ISER_MAX_CQ];
int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
};
#define ISER_CHECK_GUARD 0xc0
#define ISER_CHECK_REFTAG 0x0f
#define ISER_CHECK_APPTAG 0x30
enum iser_reg_indicator {
ISER_DATA_KEY_VALID = 1 << 0,
ISER_PROT_KEY_VALID = 1 << 1,
ISER_SIG_KEY_VALID = 1 << 2,
ISER_FASTREG_PROTECTED = 1 << 3,
};
/**
* struct iser_pi_context - Protection information context
*
* @prot_mr: protection memory region
* @prot_frpl: protection fastreg page list
* @sig_mr: signature feature enabled memory region
*/
struct iser_pi_context {
struct ib_mr *prot_mr;
struct ib_fast_reg_page_list *prot_frpl;
struct ib_mr *sig_mr;
};
/**
* struct fast_reg_descriptor - Fast registration descriptor
*
* @list: entry in connection fastreg pool
* @data_mr: data memory region
* @data_frpl: data fastreg page list
* @pi_ctx: protection information context
* @reg_indicators: fast registration indicators
*/
struct fast_reg_descriptor {
struct list_head list;
struct ib_mr *data_mr;
struct ib_fast_reg_page_list *data_frpl;
struct iser_pi_context *pi_ctx;
u8 reg_indicators;
};
/**
* struct ib_conn - Infiniband related objects
*
* @cma_id: rdma_cm connection manager handle
* @qp: Connection Queue-pair
* @post_recv_buf_count: post receive counter
* @sig_count: send work request signal count
* @rx_wr: receive work request for batch posts
* @device: reference to iser device
* @comp: iser completion context
* @pi_support: Indicate device T10-PI support
* @beacon: beacon send wr to signal all flush errors were drained
* @flush_comp: completes when all connection completions consumed
* @lock: protects fmr/fastreg pool
* @union.fmr:
* @pool: FMR pool for fast registrations
* @page_vec: page vector to hold mapped commands pages
* used for registration
* @union.fastreg:
* @pool: Fast registration descriptors pool for fast
* registrations
* @pool_size: Size of pool
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
struct ib_qp *qp;
int post_recv_buf_count;
u8 sig_count;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_comp *comp;
bool pi_support;
struct ib_send_wr beacon;
struct completion flush_comp;
spinlock_t lock;
union {
struct {
struct ib_fmr_pool *pool;
struct iser_page_vec *page_vec;
} fmr;
struct {
struct list_head pool;
int pool_size;
} fastreg;
};
};
/**
* struct iser_conn - iSER connection context
*
* @ib_conn: connection RDMA resources
* @iscsi_conn: link to matching iscsi connection
* @ep: transport handle
* @state: connection logical state
* @qp_max_recv_dtos: maximum number of data outs, corresponds
* to max number of post recvs
* @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
* @min_posted_rx: (qp_max_recv_dtos >> 2)
* @name: connection peer portal
* @release_work: deferred work for the release job
* @state_mutex: protects iser connection state
* @stop_completion: conn_stop completion
* @ib_completion: RDMA cleanup completion
* @up_completion: connection establishment completed
* (state is ISER_CONN_UP)
* @conn_list: entry in ig conn list
* @login_buf: login data buffer (stores login parameters)
* @login_req_buf: login request buffer
* @login_req_dma: login request buffer dma address
* @login_resp_buf: login response buffer
* @login_resp_dma: login response buffer dma address
* @rx_desc_head: head of rx_descs cyclic buffer
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
*/
struct iser_conn {
struct ib_conn ib_conn;
struct iscsi_conn *iscsi_conn;
struct iscsi_endpoint *ep;
enum iser_conn_state state;
unsigned qp_max_recv_dtos;
unsigned qp_max_recv_dtos_mask;
unsigned min_posted_rx;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
struct mutex state_mutex;
struct completion stop_completion;
struct completion ib_completion;
struct completion up_completion;
struct list_head conn_list;
char *login_buf;
char *login_req_buf, *login_resp_buf;
u64 login_req_dma, login_resp_dma;
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
};
/**
* struct iscsi_iser_task - iser task context
*
* @desc: TX descriptor
* @iser_conn: link to iser connection
* @status: current task status
* @sc: link to scsi command
* @command_sent: indicate if command was sent
* @dir: iser data direction
* @rdma_regd: task rdma registration desc
* @data: iser data buffer desc
* @data_copy: iser data copy buffer desc (bounce buffer)
* @prot: iser protection buffer desc
* @prot_copy: iser protection copy buffer desc (bounce buffer)
*/
struct iscsi_iser_task {
struct iser_tx_desc desc;
struct iser_conn *iser_conn;
enum iser_task_status status;
struct scsi_cmnd *sc;
int command_sent;
int dir[ISER_DIRS_NUM];
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];
struct iser_data_buf data[ISER_DIRS_NUM];
struct iser_data_buf data_copy[ISER_DIRS_NUM];
struct iser_data_buf prot[ISER_DIRS_NUM];
struct iser_data_buf prot_copy[ISER_DIRS_NUM];
};
struct iser_page_vec {
u64 *pages;
int length;
int offset;
int data_size;
};
/**
* struct iser_global: iSER global context
*
* @device_list_mutex: protects device_list
* @device_list: iser devices global list
* @connlist_mutex: protects connlist
* @connlist: iser connections global list
* @desc_cache: kmem cache for tx dataout
*/
struct iser_global {
struct mutex device_list_mutex;
struct list_head device_list;
struct mutex connlist_mutex;
struct list_head connlist;
struct kmem_cache *desc_cache;
};
extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task);
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr);
void iscsi_iser_recv(struct iscsi_conn *conn,
struct iscsi_hdr *hdr,
char *rx_data,
int rx_data_len);
void iser_conn_init(struct iser_conn *iser_conn);
void iser_conn_release(struct iser_conn *iser_conn);
int iser_conn_terminate(struct iser_conn *iser_conn);
void iser_release_work(struct work_struct *work);
void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len,
struct ib_conn *ib_conn);
void iser_snd_completion(struct iser_tx_desc *desc,
struct ib_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
int iser_reg_page_vec(struct ib_conn *ib_conn,
struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg);
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum dma_data_direction dir);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
#endif


@@ -0,0 +1,732 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Data size is stored in
* task->data[ISER_DIR_IN].data_len; protection size
* is stored in task->prot[ISER_DIR_IN].data_len
*/
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
err = iser_dma_map_task_data(iser_task,
buf_in,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
if (scsi_prot_sg_count(iser_task->sc)) {
struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
err = iser_dma_map_task_data(iser_task,
pbuf_in,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
}
err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
}
regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
hdr->flags |= ISER_RSV;
hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
hdr->read_va = cpu_to_be64(regd_buf->reg.va);
iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
task->itt, regd_buf->reg.rkey,
(unsigned long long)regd_buf->reg.va);
return 0;
}
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Data size is stored in
* task->data[ISER_DIR_OUT].data_len, Protection size
* is stored in task->prot[ISER_DIR_OUT].data_len
*/
static int
iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int imm_sz,
unsigned int unsol_sz,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
buf_out,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
if (scsi_prot_sg_count(iser_task->sc)) {
struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
err = iser_dma_map_task_data(iser_task,
pbuf_out,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
}
err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
}
regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz);
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
task->itt, regd_buf->reg.rkey,
(unsigned long long)regd_buf->reg.va, unsol_sz);
}
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
tx_dsg->addr = regd_buf->reg.va;
tx_dsg->length = imm_sz;
tx_dsg->lkey = regd_buf->reg.lkey;
iser_task->desc.num_sge = 2;
}
return 0;
}
/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
struct iser_tx_desc *tx_desc)
{
struct iser_device *device = iser_conn->ib_conn.device;
ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
tx_desc->iser_header.flags = ISER_VER;
tx_desc->num_sge = 1;
if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
}
}
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
struct iser_device *device = iser_conn->ib_conn.device;
if (!iser_conn->login_buf)
return;
if (iser_conn->login_req_dma)
ib_dma_unmap_single(device->ib_device,
iser_conn->login_req_dma,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
if (iser_conn->login_resp_dma)
ib_dma_unmap_single(device->ib_device,
iser_conn->login_resp_dma,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
kfree(iser_conn->login_buf);
/* make sure we never redo any unmapping */
iser_conn->login_req_dma = 0;
iser_conn->login_resp_dma = 0;
iser_conn->login_buf = NULL;
}
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
struct iser_device *device = iser_conn->ib_conn.device;
int req_err, resp_err;
BUG_ON(device == NULL);
iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!iser_conn->login_buf)
goto out_err;
iser_conn->login_req_buf = iser_conn->login_buf;
iser_conn->login_resp_buf = iser_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
iser_conn->login_req_buf,
ISCSI_DEF_MAX_RECV_SEG_LEN,
DMA_TO_DEVICE);
iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
iser_conn->login_resp_buf,
ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
req_err = ib_dma_mapping_error(device->ib_device,
iser_conn->login_req_dma);
resp_err = ib_dma_mapping_error(device->ib_device,
iser_conn->login_resp_dma);
if (req_err || resp_err) {
if (req_err)
iser_conn->login_req_dma = 0;
if (resp_err)
iser_conn->login_resp_dma = 0;
goto free_login_buf;
}
return 0;
free_login_buf:
iser_free_login_buf(iser_conn);
out_err:
iser_err("unable to alloc or map login buf\n");
return -ENOMEM;
}
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session)
{
int i, j;
u64 dma_addr;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max;
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
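/* Because cmds_max is a power of two, the cyclic rx ring can wrap its
 * index with a cheap mask instead of a modulo; e.g. with cmds_max = 512,
 * qp_max_recv_dtos_mask = 511 and next = (head + 1) & 511. */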
if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
goto alloc_login_buf_fail;
iser_conn->num_rx_descs = session->cmds_max;
iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!iser_conn->rx_descs)
goto rx_desc_alloc_fail;
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
goto rx_desc_dma_map_failed;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->mr->lkey;
}
iser_conn->rx_desc_head = 0;
return 0;
rx_desc_dma_map_failed:
rx_desc = iser_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(iser_conn->rx_descs);
iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
}
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
int i;
struct iser_rx_desc *rx_desc;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
if (device->iser_free_rdma_reg_res)
device->iser_free_rdma_reg_res(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(iser_conn->rx_descs);
/* make sure we never redo any unmapping */
iser_conn->rx_descs = NULL;
iser_free_login_buf(iser_conn);
}
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iser_conn *iser_conn = conn->dd_data;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iscsi_session *session = conn->session;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
return 0;
/*
* Check that there is one posted recv buffer
* (for the last login response).
*/
WARN_ON(ib_conn->post_recv_buf_count != 1);
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
return 0;
} else
iser_info("Normal session, posting batch of RX %d buffers\n",
iser_conn->min_posted_rx);
/* Initial post receive buffers */
if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
return -ENOMEM;
return 0;
}
static inline bool iser_signal_comp(u8 sig_count)
{
return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}
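/* Illustration: with ISER_SIGNAL_CMD_COUNT = 32, only every 32nd send
 * is posted signaled (the 'signal' argument to iser_post_send), so a
 * burst of 1000 commands generates roughly 31 send completions
 * instead of 1000. */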
/**
* iser_send_command - send command PDU
*/
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
unsigned long edtl;
int err;
struct iser_data_buf *data_buf, *prot_buf;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
tx_desc->type = ISCSI_TX_SCSI_COMMAND;
iser_create_send_desc(iser_conn, tx_desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN];
prot_buf = &iser_task->prot[ISER_DIR_IN];
} else {
data_buf = &iser_task->data[ISER_DIR_OUT];
prot_buf = &iser_task->prot[ISER_DIR_OUT];
}
if (scsi_sg_count(sc)) { /* using a scatter list */
data_buf->buf = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
data_buf->data_len = scsi_bufflen(sc);
if (scsi_prot_sg_count(sc)) {
prot_buf->buf = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
prot_buf->data_len = (data_buf->data_len >>
ilog2(sc->device->sector_size)) * 8;
}
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
err = iser_prepare_read_cmd(task);
if (err)
goto send_command_error;
}
if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
err = iser_prepare_write_cmd(task,
task->imm_count,
task->imm_count +
task->unsol_r2t.data_length,
edtl);
if (err)
goto send_command_error;
}
iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(&iser_conn->ib_conn, tx_desc,
iser_signal_comp(sig_count));
if (!err)
return 0;
send_command_error:
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
/**
* iser_send_data_out - send data out PDU
*/
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL;
struct iser_regd_buf *regd_buf;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
int err = 0;
struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
data_seg_len = ntoh24(hdr->dlength);
buf_offset = ntohl(hdr->offset);
iser_dbg("%s itt %d dseg_len %d offset %d\n",
__func__, (int)itt, (int)data_seg_len, (int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
if (tx_desc == NULL) {
iser_err("Failed to alloc desc for post dataout\n");
return -ENOMEM;
}
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
/* build the tx desc */
iser_initialize_task_headers(task, tx_desc);
regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
tx_dsg->addr = regd_buf->reg.va + buf_offset;
tx_dsg->length = data_seg_len;
tx_dsg->lkey = regd_buf->reg.lkey;
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out "
"inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
goto send_data_out_error;
}
iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
itt, buf_offset, data_seg_len);
err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
if (!err)
return 0;
send_data_out_error:
kmem_cache_free(ig.desc_cache, tx_desc);
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
struct iser_device *device;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
iser_create_send_desc(iser_conn, mdesc);
device = iser_conn->ib_conn.device;
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
if (task != conn->login_task) {
iser_err("data present on non login task!!!\n");
goto send_control_error;
}
ib_dma_sync_single_for_cpu(device->ib_device,
iser_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE);
memcpy(iser_conn->login_req_buf, task->data, task->data_count);
ib_dma_sync_single_for_device(device->ib_device,
iser_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE);
tx_dsg->addr = iser_conn->login_req_dma;
tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
}
if (task == conn->login_task) {
iser_dbg("op %x dsl %lx, posting login rx buffer\n",
task->hdr->opcode, data_seg_len);
err = iser_post_recvl(iser_conn);
if (err)
goto send_control_error;
err = iser_post_rx_bufs(conn, task->hdr);
if (err)
goto send_control_error;
}
err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
if (!err)
return 0;
send_control_error:
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
/**
* iser_rcv_completion - recv DTO completion
*/
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
unsigned long rx_xfer_len,
struct ib_conn *ib_conn)
{
struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
ib_conn);
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding, count, err;
/* differentiate between login and all other PDUs */
if ((char *)rx_desc == iser_conn->login_resp_buf) {
rx_dma = iser_conn->login_resp_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
} else {
rx_dma = rx_desc->dma_addr;
rx_buflen = ISER_RX_PAYLOAD_SIZE;
}
ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE);
hdr = &rx_desc->iscsi_header;
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
rx_xfer_len - ISER_HEADERS_LEN);
ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
ib_conn->post_recv_buf_count--;
if (rx_dma == iser_conn->login_resp_dma)
return;
outstanding = ib_conn->post_recv_buf_count;
if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
count = min(iser_conn->qp_max_recv_dtos - outstanding,
iser_conn->min_posted_rx);
err = iser_post_recvm(iser_conn, count);
if (err)
iser_err("posting %d rx bufs err %d\n", count, err);
}
}
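/* Flow-control example with the 512-command default: qp_max_recv_dtos =
 * 512 and min_posted_rx = 512 >> 2 = 128, so once the outstanding posted
 * recvs drop to 384 or fewer, a batch of up to 128 buffers is reposted. */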
void iser_snd_completion(struct iser_tx_desc *tx_desc,
struct ib_conn *ib_conn)
{
struct iscsi_task *task;
struct iser_device *device = ib_conn->device;
if (tx_desc->type == ISCSI_TX_DATAOUT) {
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
tx_desc = NULL;
}
if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
if (task->hdr->itt == RESERVED_ITT)
iscsi_put_task(task);
}
}
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
iser_task->status = ISER_TASK_STATUS_INIT;
iser_task->dir[ISER_DIR_IN] = 0;
iser_task->dir[ISER_DIR_OUT] = 0;
iser_task->data[ISER_DIR_IN].data_len = 0;
iser_task->data[ISER_DIR_OUT].data_len = 0;
iser_task->prot[ISER_DIR_IN].data_len = 0;
iser_task->prot[ISER_DIR_OUT].data_len = 0;
memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
sizeof(struct iser_regd_buf));
memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
sizeof(struct iser_regd_buf));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
int is_rdma_data_aligned = 1;
int is_rdma_prot_aligned = 1;
int prot_count = scsi_prot_sg_count(iser_task->sc);
/* if we were reading, copy back to the unaligned sglist;
* in any case dma_unmap and free the copy
*/
if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
is_rdma_data_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->data[ISER_DIR_IN],
&iser_task->data_copy[ISER_DIR_IN],
ISER_DIR_IN);
}
if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
is_rdma_data_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->data[ISER_DIR_OUT],
&iser_task->data_copy[ISER_DIR_OUT],
ISER_DIR_OUT);
}
if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
is_rdma_prot_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->prot[ISER_DIR_IN],
&iser_task->prot_copy[ISER_DIR_IN],
ISER_DIR_IN);
}
if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
is_rdma_prot_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->prot[ISER_DIR_OUT],
&iser_task->prot_copy[ISER_DIR_OUT],
ISER_DIR_OUT);
}
if (iser_task->dir[ISER_DIR_IN]) {
device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
if (is_rdma_data_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN],
DMA_FROM_DEVICE);
if (prot_count && is_rdma_prot_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->prot[ISER_DIR_IN],
DMA_FROM_DEVICE);
}
if (iser_task->dir[ISER_DIR_OUT]) {
device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
if (is_rdma_data_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT],
DMA_TO_DEVICE);
if (prot_count && is_rdma_prot_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->prot[ISER_DIR_OUT],
DMA_TO_DEVICE);
}
}


@@ -0,0 +1,800 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
/**
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
char *mem = NULL;
unsigned long cmd_data_len = 0;
int dma_nents, i;
for_each_sg(sgl, sg, data->size, i)
cmd_data_len += ib_sg_dma_len(dev, sg);
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
mem = (void *)__get_free_pages(GFP_ATOMIC,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
mem = kmalloc(cmd_data_len, GFP_ATOMIC);
if (mem == NULL) {
iser_err("Failed to allocate mem size %d %d for copying sglist\n",
data->size, (int)cmd_data_len);
return -ENOMEM;
}
if (cmd_dir == ISER_DIR_OUT) {
/* copy the unaligned sg into the buffer used for RDMA */
int i;
char *p, *from;
sgl = (struct scatterlist *)data->buf;
p = mem;
for_each_sg(sgl, sg, data->size, i) {
from = kmap_atomic(sg_page(sg));
memcpy(p,
from + sg->offset,
sg->length);
kunmap_atomic(from);
p += sg->length;
}
}
sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
data_copy->buf = &data_copy->sg_single;
data_copy->size = 1;
data_copy->copy_buf = mem;
dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
data_copy->dma_nents = dma_nents;
data_copy->data_len = cmd_data_len;
return 0;
}
/**
* iser_finalize_rdma_unaligned_sg
*/
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
struct ib_device *dev;
unsigned long cmd_data_len;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (cmd_dir == ISER_DIR_IN) {
char *mem;
struct scatterlist *sgl, *sg;
unsigned char *p, *to;
unsigned int sg_size;
int i;
/* copy back read RDMA to unaligned sg */
mem = data_copy->copy_buf;
sgl = (struct scatterlist *)data->buf;
sg_size = data->size;
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
to = kmap_atomic(sg_page(sg));
memcpy(to + sg->offset,
p,
sg->length);
kunmap_atomic(to);
p += sg->length;
}
}
cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
free_pages((unsigned long)data_copy->copy_buf,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
kfree(data_copy->copy_buf);
data_copy->copy_buf = NULL;
}
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
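/* e.g. 0x10000 is 4K aligned (low 12 bits clear) while 0x10200 is not. */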
/**
* iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
* and returns the length of resulting physical address array (may be less than
* the original due to possible compaction).
*
* we build a "page vec" under the assumption that the SG meets the RDMA
* alignment requirements. Other than the first and last SG elements, all
* the "internal" elements can be compacted into a list whose elements are
* dma addresses of physical pages. The code supports also the weird case
* where --few fragments of the same page-- are present in the SG as
* consecutive elements. Also, it handles one entry SG.
*/
static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct ib_device *ibdev, u64 *pages,
int *offset, int *data_size)
{
struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
u64 start_addr, end_addr, page, chunk_start = 0;
unsigned long total_sz = 0;
unsigned int dma_len;
int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */
*offset = (u64) sgl[0].offset & ~MASK_4K;
new_chunk = 1;
cur_page = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
start_addr = ib_sg_dma_address(ibdev, sg);
if (new_chunk)
chunk_start = start_addr;
dma_len = ib_sg_dma_len(ibdev, sg);
end_addr = start_addr + dma_len;
total_sz += dma_len;
/* collect page fragments until aligned or end of SG list */
if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
new_chunk = 0;
continue;
}
new_chunk = 1;
/* address of the first page in the contiguous chunk;
masking relevant for the very first SG entry,
which might be unaligned */
page = chunk_start & MASK_4K;
do {
pages[cur_page++] = page;
page += SIZE_4K;
} while (page < end_addr);
}
*data_size = total_sz;
iser_dbg("page_vec->data_size:%d cur_page %d\n",
*data_size, cur_page);
return cur_page;
}
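/* Worked example: two dma-contiguous entries [0x10000, len 0x1800) and
 * [0x11800, len 0x800) (first-page offset 0) form one chunk covering
 * 0x10000-0x12000 and compact to pages {0x10000, 0x11000}, returning
 * cur_page = 2 with *offset = 0 and *data_size = 0x2000. */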
/**
* iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
* scatter-gather list of memory buffers that is correctly aligned for RDMA,
* and returns the number of entries which are aligned correctly. Supports the
* case where consecutive SG elements are actually fragments of the same
* physical page.
*/
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
struct ib_device *ibdev)
{
struct scatterlist *sgl, *sg, *next_sg = NULL;
u64 start_addr, end_addr;
int i, ret_len, start_check = 0;
if (data->dma_nents == 1)
return 1;
sgl = (struct scatterlist *)data->buf;
start_addr = ib_sg_dma_address(ibdev, sgl);
for_each_sg(sgl, sg, data->dma_nents, i) {
if (start_check && !IS_4K_ALIGNED(start_addr))
break;
next_sg = sg_next(sg);
if (!next_sg)
break;
end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
start_addr = ib_sg_dma_address(ibdev, next_sg);
if (end_addr == start_addr) {
start_check = 0;
continue;
} else
start_check = 1;
if (!IS_4K_ALIGNED(end_addr))
break;
}
ret_len = (next_sg) ? i : i+1;
iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
ret_len, data->dma_nents, data);
return ret_len;
}
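/* Example: entries A [0x10000, len 0x1000) and B starting at 0x11000 are
 * dma-contiguous, so both count and the function returns 2; if B instead
 * started at 0x11800, B's unaligned start address would stop the scan and
 * only A (return value 1) would be considered RDMA-aligned. */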
static void iser_data_buf_dump(struct iser_data_buf *data,
struct ib_device *ibdev)
{
struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, data->dma_nents, i)
iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
sg_page(sg), sg->offset,
sg->length, ib_sg_dma_len(ibdev, sg));
}
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
int i;
iser_err("page vec length %d data size %d\n",
page_vec->length, page_vec->data_size);
for (i = 0; i < page_vec->length; i++)
iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
}
static void iser_page_vec_build(struct iser_data_buf *data,
struct iser_page_vec *page_vec,
struct ib_device *ibdev)
{
int page_vec_len = 0;
page_vec->length = 0;
page_vec->offset = 0;
iser_dbg("Translating sg sz: %d\n", data->dma_nents);
page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
&page_vec->offset,
&page_vec->data_size);
iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
page_vec->length = page_vec_len;
if (page_vec_len * SIZE_4K < page_vec->data_size) {
iser_err("page_vec too short to hold this SG\n");
iser_data_buf_dump(data, ibdev);
iser_dump_page_vec(page_vec);
BUG();
}
}
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
return 0;
}
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum dma_data_direction dir)
{
struct ib_device *dev;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
ib_dma_unmap_sg(dev, data->buf, data->size, dir);
}
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
struct ib_device *ibdev,
struct iser_data_buf *mem,
struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir,
int aligned_len)
{
struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
iscsi_conn->fmr_unalign_cnt++;
iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
aligned_len, mem->size);
if (iser_debug_level > 0)
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_task, mem,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
/* allocate copy buf; if we are writing, copy the
* unaligned scatterlist, then dma map the copy */
if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
return -ENOMEM;
return 0;
}
/**
* iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
* using FMR (if possible) obtaining rkey and va
*
* returns 0 on success, errno code on failure
*/
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf;
int aligned_len;
int err;
int i;
struct scatterlist *sg;
regd_buf = &iser_task->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
err = fall_to_bounce_buf(iser_task, ibdev, mem,
&iser_task->data_copy[cmd_dir],
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
mem = &iser_task->data_copy[cmd_dir];
}
/* if there is a single dma entry, FMR is not needed */
if (mem->dma_nents == 1) {
sg = (struct scatterlist *)mem->buf;
regd_buf->reg.lkey = device->mr->lkey;
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
regd_buf->reg.is_mr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
"va: 0x%08lX sz: %ld]\n",
(unsigned int)regd_buf->reg.lkey,
(unsigned int)regd_buf->reg.rkey,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
&regd_buf->reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->fmr.page_vec->data_size,
ib_conn->fmr.page_vec->length,
ib_conn->fmr.page_vec->offset);
for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long)ib_conn->fmr.page_vec->pages[i]);
}
if (err)
return err;
}
return 0;
}
static inline void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = sc->device->sector_size;
domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff;
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
*/
domain->sig.dif.apptag_check_mask = 0xffff;
domain->sig.dif.app_escape = true;
domain->sig.dif.ref_escape = true;
if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 ||
scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2)
domain->sig.dif.ref_remap = true;
}
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_STRIP:
sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
/*
* At the moment we use this modparam to tell what is
* the memory bg_type, in the future we will take it
* from sc.
*/
sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
/*
* At the moment we use this modparam to tell what is
* the memory bg_type, in the future we will take it
* from sc.
*/
sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM :
IB_T10DIF_CRC;
break;
default:
iser_err("Unsupported PI operation %d\n",
scsi_get_prot_op(sc));
return -EINVAL;
}
return 0;
}
static int
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
switch (scsi_get_prot_type(sc)) {
case SCSI_PROT_DIF_TYPE0:
break;
case SCSI_PROT_DIF_TYPE1:
case SCSI_PROT_DIF_TYPE2:
*mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
break;
case SCSI_PROT_DIF_TYPE3:
*mask = ISER_CHECK_GUARD;
break;
default:
iser_err("Unsupported protection type %d\n",
scsi_get_prot_type(sc));
return -EINVAL;
}
return 0;
}
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_pi_context *pi_ctx = desc->pi_ctx;
struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
struct ib_sig_attrs sig_attrs;
int ret;
u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
if (ret)
goto err;
ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
if (ret)
goto err;
if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
memset(&inv_wr, 0, sizeof(inv_wr));
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
wr = &inv_wr;
/* Bump the key */
key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.opcode = IB_WR_REG_SIG_MR;
sig_wr.wr_id = ISER_FASTREG_LI_WRID;
sig_wr.sg_list = data_sge;
sig_wr.num_sge = 1;
sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (scsi_prot_sg_count(iser_task->sc))
sig_wr.wr.sig_handover.prot = prot_sge;
sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
if (!wr)
wr = &sig_wr;
else
wr->next = &sig_wr;
ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
if (ret) {
iser_err("reg_sig_mr failed, ret:%d\n", ret);
goto err;
}
desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
sig_sge->lkey = pi_ctx->sig_mr->lkey;
sig_sge->addr = 0;
sig_sge->length = data_sge->length + prot_sge->length;
if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
sig_sge->length += (data_sge->length /
iser_task->sc->device->sector_size) * 8;
}
iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
sig_sge->addr, sig_sge->length,
sig_sge->lkey);
err:
return ret;
}
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_regd_buf *regd_buf,
struct iser_data_buf *mem,
enum iser_reg_indicator ind,
struct ib_sge *sge)
{
struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
u8 key;
int ret, offset, size, plen;
/* if there is a single dma entry, dma mr suffices */
if (mem->dma_nents == 1) {
struct scatterlist *sg = (struct scatterlist *)mem->buf;
sge->lkey = device->mr->lkey;
sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
sge->length = ib_sg_dma_len(ibdev, &sg[0]);
iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
sge->lkey, sge->addr, sge->length);
return 0;
}
if (ind == ISER_DATA_KEY_VALID) {
mr = desc->data_mr;
frpl = desc->data_frpl;
} else {
mr = desc->pi_ctx->prot_mr;
frpl = desc->pi_ctx->prot_frpl;
}
plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
&offset, &size);
if (plen * SIZE_4K < size) {
iser_err("fast reg page_list too short to hold this SG\n");
return -EINVAL;
}
if (!(desc->reg_indicators & ind)) {
memset(&inv_wr, 0, sizeof(inv_wr));
inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.ex.invalidate_rkey = mr->rkey;
wr = &inv_wr;
/* Bump the key */
key = (u8)(mr->rkey & 0x000000FF);
ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
memset(&fastreg_wr, 0, sizeof(fastreg_wr));
fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
fastreg_wr.opcode = IB_WR_FAST_REG_MR;
fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
fastreg_wr.wr.fast_reg.page_list = frpl;
fastreg_wr.wr.fast_reg.page_list_len = plen;
fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
fastreg_wr.wr.fast_reg.length = size;
fastreg_wr.wr.fast_reg.rkey = mr->rkey;
fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
if (!wr)
wr = &fastreg_wr;
else
wr->next = &fastreg_wr;
ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
if (ret) {
iser_err("fast registration failed, ret:%d\n", ret);
return ret;
}
desc->reg_indicators &= ~ind;
sge->lkey = mr->lkey;
sge->addr = frpl->page_list[0] + offset;
sge->length = size;
return ret;
}
/**
* iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
* using Fast Registration WR (if possible) obtaining rkey and va
*
* returns 0 on success, errno code on failure
*/
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
struct fast_reg_descriptor *desc = NULL;
struct ib_sge data_sge;
int err, aligned_len;
unsigned long flags;
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
err = fall_to_bounce_buf(iser_task, ibdev, mem,
&iser_task->data_copy[cmd_dir],
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
mem = &iser_task->data_copy[cmd_dir];
}
if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
spin_lock_irqsave(&ib_conn->lock, flags);
desc = list_first_entry(&ib_conn->fastreg.pool,
struct fast_reg_descriptor, list);
list_del(&desc->list);
spin_unlock_irqrestore(&ib_conn->lock, flags);
regd_buf->reg.mem_h = desc;
}
err = iser_fast_reg_mr(iser_task, regd_buf, mem,
ISER_DATA_KEY_VALID, &data_sge);
if (err)
goto err_reg;
if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
struct ib_sge prot_sge, sig_sge;
memset(&prot_sge, 0, sizeof(prot_sge));
if (scsi_prot_sg_count(iser_task->sc)) {
mem = &iser_task->prot[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
err = fall_to_bounce_buf(iser_task, ibdev, mem,
&iser_task->prot_copy[cmd_dir],
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
mem = &iser_task->prot_copy[cmd_dir];
}
err = iser_fast_reg_mr(iser_task, regd_buf, mem,
ISER_PROT_KEY_VALID, &prot_sge);
if (err)
goto err_reg;
}
err = iser_reg_sig_mr(iser_task, desc, &data_sge,
&prot_sge, &sig_sge);
if (err) {
iser_err("Failed to register signature mr\n");
return err;
}
desc->reg_indicators |= ISER_FASTREG_PROTECTED;
regd_buf->reg.lkey = sig_sge.lkey;
regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
regd_buf->reg.va = sig_sge.addr;
regd_buf->reg.len = sig_sge.length;
regd_buf->reg.is_mr = 1;
} else {
if (desc) {
regd_buf->reg.rkey = desc->data_mr->rkey;
regd_buf->reg.is_mr = 1;
} else {
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.is_mr = 0;
}
regd_buf->reg.lkey = data_sge.lkey;
regd_buf->reg.va = data_sge.addr;
regd_buf->reg.len = data_sge.length;
}
return 0;
err_reg:
if (desc) {
spin_lock_irqsave(&ib_conn->lock, flags);
list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_irqrestore(&ib_conn->lock, flags);
}
return err;
}

File diff suppressed because it is too large