Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,5 @@
# TCM (target-core) fabric plugin for Fibre Channel, layered on libfc.
config TCM_FC
	tristate "TCM_FC fabric Plugin"
	depends on LIBFC
	help
	  Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM

View file

@ -0,0 +1,6 @@
# Objects composing tcm_fc.ko: command path, configfs glue, data I/O,
# and session management.
tcm_fc-y +=		tfc_cmd.o \
			tfc_conf.o \
			tfc_io.o \
			tfc_sess.o

obj-$(CONFIG_TCM_FC)	+= tcm_fc.o

View file

@ -0,0 +1,183 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
#define FT_VERSION "0.4"

#define FT_NAMELEN 32		/* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32	/* max length of TPG name */
#define FT_LUN_NAMELEN 32	/* max length of LUN name */
#define TCM_FC_DEFAULT_TAGS 512	/* tags used for per-session preallocation */

/*
 * FC transport ID in wire format (hence packed layout with explicit
 * reserved fields), as used for persistent-reservation transport IDs.
 */
struct ft_transport_id {
	__u8	format;
	__u8	__resvd1[7];
	__u8	wwpn[8];	/* world-wide port name bytes */
	__u8	__resvd2[8];
} __attribute__((__packed__));

/*
 * Session (remote port).
 */
struct ft_sess {
	u32 port_id;			/* for hash lookup use only */
	u32 params;			/* service parameters - presumably from PRLI; confirm */
	u16 max_frame;			/* maximum frame size */
	u64 port_name;			/* port name for transport ID */
	struct ft_tport *tport;		/* owning local-port data */
	struct se_session *se_sess;	/* target-core session */
	struct hlist_node hash;		/* linkage in ft_sess_hash table */
	struct rcu_head rcu;		/* deferred free of this struct */
	struct kref kref;		/* ref for hash and outstanding I/Os */
};

/*
 * Hash table of sessions per local port.
 * Hash lookup by remote port FC_ID.
 */
#define FT_SESS_HASH_BITS	6
#define FT_SESS_HASH_SIZE	(1 << FT_SESS_HASH_BITS)

/*
 * Per local port data.
 * This is created only after a TPG exists that allows target function
 * for the local port.  If the TPG exists, this is allocated when
 * we're notified that the local port has been created, or when
 * the first PRLI provider callback is received.
 */
struct ft_tport {
	struct fc_lport *lport;
	struct ft_tpg *tpg;		/* NULL if TPG deleted before tport */
	u32 sess_count;			/* number of sessions in hash */
	struct rcu_head rcu;
	struct hlist_head hash[FT_SESS_HASH_SIZE];	/* list of sessions */
};

/*
 * Node ID and authentication.
 */
struct ft_node_auth {
	u64	port_name;	/* initiator WWPN */
	u64	node_name;	/* initiator WWNN */
};

/*
 * Node ACL for FC remote port session.
 */
struct ft_node_acl {
	struct ft_node_auth node_auth;
	struct se_node_acl se_node_acl;
};

struct ft_lun {
	u32 index;
	char name[FT_LUN_NAMELEN];
};

/*
 * Target portal group (local port).
 */
struct ft_tpg {
	u32 index;
	struct ft_lport_wwn *lport_wwn;
	struct ft_tport *tport;		/* active tport or NULL */
	struct list_head lun_list;	/* head of LUNs */
	struct se_portal_group se_tpg;
	struct workqueue_struct *workqueue;	/* per-TPG command processing */
};

struct ft_lport_wwn {
	u64 wwpn;
	char name[FT_NAMELEN];		/* formatted WWPN (ft_format_wwn) */
	struct list_head ft_wwn_node;	/* linkage in ft_wwn_list */
	struct ft_tpg *tpg;
	struct se_wwn se_wwn;
};

/*
 * Commands
 */
struct ft_cmd {
	struct ft_sess *sess;		/* session held for cmd */
	struct fc_seq *seq;		/* sequence in exchange mgr */
	struct se_cmd se_cmd;		/* Local TCM I/O descriptor */
	struct fc_frame *req_frame;	/* received FCP_CMND frame */
	u32 write_data_len;		/* data received on writes */
	struct work_struct work;	/* deferred processing (ft_send_work) */
	/* Local sense buffer */
	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
	u32 aborted:1;			/* Set if aborted by reset or timeout */
	struct scatterlist *sg;		/* Set only if DDP is setup */
	u32 sg_cnt;			/* No. of item in scatterlist */
};
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;
/*
* Fabric methods.
*/
/*
* Session ops.
*/
void ft_sess_put(struct ft_sess *);
int ft_sess_shutdown(struct se_session *);
void ft_sess_close(struct se_session *);
u32 ft_sess_get_index(struct se_session *);
u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
void ft_lport_add(struct fc_lport *, void *);
void ft_lport_del(struct fc_lport *, void *);
int ft_lport_notify(struct notifier_block *, unsigned long, void *);
/*
* IO methods.
*/
int ft_check_stop_free(struct se_cmd *);
void ft_release_cmd(struct se_cmd *);
int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
void ft_aborted_task(struct se_cmd *);
/*
* other internal functions.
*/
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
void ft_dump_cmd(struct ft_cmd *, const char *caller);
ssize_t ft_format_wwn(char *, size_t, u64);
/*
* Underlying HW specific helper function
*/
void ft_invl_hw_context(struct ft_cmd *);
#endif /* __TCM_FC_H__ */

View file

@ -0,0 +1,585 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
/*
 * Dump cmd state for debugging.
 * Slow path only: reached through ft_dump_cmd() when the debug_logging
 * module parameter is set.
 */
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct scatterlist *sg;
	int count;

	se_cmd = &cmd->se_cmd;
	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
		caller, cmd, cmd->sess, cmd->seq, se_cmd);
	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
		caller, cmd, se_cmd->t_data_nents,
		se_cmd->data_length, se_cmd->se_cmd_flags);

	/* One debug line per scatterlist entry. */
	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
		pr_debug("%s: cmd %p sg %p page %p "
			"len 0x%x off 0x%x\n",
			caller, cmd, sg,
			sg_page(sg), sg->length, sg->offset);

	/* The sequence may already be gone, e.g. after an abort. */
	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		pr_debug("%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			sp->id, ep->esb_stat);
	}
}
/*
 * Debug-dump entry point; a no-op unless the debug_logging module
 * parameter is set, keeping the fast path cheap.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	if (likely(!ft_debug_logging))
		return;
	_ft_dump_cmd(cmd, caller);
}
/*
 * Release everything attached to a command: the request frame (and its
 * sequence, if one was assigned), the preallocated session tag, and the
 * session reference taken at receive time.  NULL-safe.
 */
static void ft_free_cmd(struct ft_cmd *cmd)
{
	struct fc_frame *fp;
	struct fc_lport *lport;
	struct ft_sess *sess;

	if (!cmd)
		return;
	sess = cmd->sess;
	fp = cmd->req_frame;
	lport = fr_dev(fp);
	if (fr_seq(fp))
		lport->tt.seq_release(fr_seq(fp));
	fc_frame_free(fp);
	/* cmd storage comes from the session tag map (see ft_recv_cmd),
	 * so release the tag only after we are done with cmd. */
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	ft_sess_put(sess);	/* undo get from lookup at recv */
}
/* target-core release_cmd callback: free our per-command state. */
void ft_release_cmd(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	ft_free_cmd(cmd);
}

/*
 * target-core check_stop_free callback: drop the command's single
 * reference; nonzero tells core this fabric handled the release.
 */
int ft_check_stop_free(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}
/*
 * Send response.
 * Build and send the FCP_RSP frame carrying SCSI status, optional sense
 * data, and residual information.  target-core queue_status callback.
 * Returns 0 on success or -ENOMEM so core requeues the response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;
	int rc;

	/* Aborted commands send no response. */
	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}

	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	/* Sense data, if any, follows immediately after the response. */
	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	rc = lport->tt.seq_send(lport, cmd->seq, fp);
	if (rc) {
		pr_info_ratelimited("%s: Failed to send response frame %p, "
				    "xid <0x%x>\n", __func__, fp, ep->xid);
		/*
		 * Generate a TASK_SET_FULL status to notify the initiator
		 * to reduce its queue_depth after the se_cmd response has
		 * been re-queued by target-core.
		 */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}
	lport->tt.exch_done(cmd->seq);
	return 0;
}
/*
 * target-core write_pending_status callback.
 * Returns nonzero while write data is still outstanding for the command.
 */
int ft_write_pending_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return se_cmd->data_length != cmd->write_data_len;
}
/*
 * Send TX_RDY (transfer ready).
 * For writes, also try to set up direct data placement (DDP) so the HW
 * can land incoming data directly in the command's scatterlist.
 * target-core write_pending callback.
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	/* Request the entire expected transfer in one burst. */
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to 'exchange initiator (initiator)'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
			    lport->tt.ddp_target(lport, ep->xid,
						 se_cmd->t_data_sg,
						 se_cmd->t_data_nents))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
/*
 * target-core get_task_tag callback: use the exchange RX_ID as the tag.
 * Returns ~0 for aborted commands, whose sequence may be gone.
 */
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	if (cmd->aborted)
		return ~0;
	return fc_seq_exch(cmd->seq)->rxid;
}

/* target-core get_cmd_state callback: no fabric-private state to report. */
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 * Installed via seq_set_resp() in ft_send_work(); fp is an ERR_PTR for
 * abort/timeout notifications.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct ft_cmd *cmd = arg;
	struct fc_frame_header *fh;

	if (unlikely(IS_ERR(fp))) {
		/* XXX need to find cmd if queued */
		cmd->seq = NULL;
		cmd->aborted = true;
		return;
	}

	fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_SOL_DATA:	/* write data */
		ft_recv_write_data(cmd, fp);
		break;
	case FC_RCTL_DD_UNSOL_CTL:	/* command */
	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	default:
		/* Unexpected frame: invalidate any HW context and drop. */
		pr_debug("%s: unhandled frame r_ctl %x\n",
			 __func__, fh->fh_r_ctl);
		ft_invl_hw_context(cmd);
		fc_frame_free(fp);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	}
}
/*
 * Send a FCP response including SCSI status and optional FCP rsp_code.
 * status is SAM_STAT_GOOD (zero) iff code is valid.
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
				const struct fc_frame *rx_fp,
				u32 status, enum fcp_resp_rsp_codes code)
{
	struct fc_frame *fp;
	struct fc_seq *sp;
	const struct fc_frame_header *fh;
	size_t len;
	struct fcp_resp_with_ext *fcp;
	struct fcp_resp_rsp_info *info;

	fh = fc_frame_header_get(rx_fp);
	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
		 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
	len = sizeof(*fcp);
	/* Only a GOOD status carries the FCP_RSP_INFO extension. */
	if (status == SAM_STAT_GOOD)
		len += sizeof(*info);
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return;		/* drop silently; nothing else we can do */
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = status;
	if (status == SAM_STAT_GOOD) {
		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
		info = (struct fcp_resp_rsp_info *)(fcp + 1);
		info->rsp_code = code;
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
	sp = fr_seq(fp);
	if (sp) {
		/* A sequence was assigned: send on it and complete it. */
		lport->tt.seq_send(lport, sp, fp);
		lport->tt.exch_done(sp);
	} else {
		lport->tt.frame_send(lport, fp);
	}
}
/*
 * Send error or task management response.
 * Thin wrapper: a GOOD SCSI status with the given FCP rsp_code.
 */
static void ft_send_resp_code(struct ft_cmd *cmd,
			      enum fcp_resp_rsp_codes code)
{
	ft_send_resp_status(cmd->sess->tport->lport,
			    cmd->req_frame, SAM_STAT_GOOD, code);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
				       enum fcp_resp_rsp_codes code)
{
	ft_send_resp_code(cmd, code);
	ft_free_cmd(cmd);
}
/*
 * Handle Task Management Request.
 * Map the FCP TM flag onto a TCM TMR function and submit it to core;
 * invalid flag combinations get an immediate error response.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct fcp_cmnd *fcp;
	int rc;
	u8 tm_func;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP4r01 indicates having a combination of
		 * tm_flags set is invalid.
		 */
		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	/* FIXME: Add referenced task tag for ABORT_TASK */
	rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
		&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
		cmd, tm_func, GFP_KERNEL, 0, 0);
	if (rc < 0)
		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
/*
 * Send status from completed task management request.
 * Maps the TCM TMR response code onto an FCP rsp_code; aborted commands
 * send nothing.
 */
void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
	enum fcp_resp_rsp_codes code;

	if (cmd->aborted)
		return;
	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		code = FCP_TMF_CMPL;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		code = FCP_TMF_INVALID_LUN;
		break;
	case TMR_FUNCTION_REJECTED:
		code = FCP_TMF_REJECTED;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
	default:
		code = FCP_TMF_FAILED;
		break;
	}
	pr_debug("tmr fn %d resp %d fcp code %d\n",
		 tmr->function, tmr->response, code);
	ft_send_resp_code(cmd, code);
}
/* target-core aborted_task callback: no fabric-specific work needed. */
void ft_aborted_task(struct se_cmd *se_cmd)
{
}
static void ft_send_work(struct work_struct *work);
/*
 * Handle incoming FCP command.
 * Allocate per-command state out of the session's preallocated tag map,
 * bind an exchange sequence, and hand off to the TPG workqueue.
 * Any resource failure is answered with SAM_STAT_BUSY.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
	struct ft_cmd *cmd;
	struct fc_lport *lport = sess->tport->lport;
	struct se_session *se_sess = sess->se_sess;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		goto busy;

	/* The tag doubles as the index of cmd storage in sess_cmd_map. */
	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct ft_cmd));

	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->seq = lport->tt.seq_assign(lport, fp);
	if (!cmd->seq) {
		percpu_ida_free(&se_sess->sess_tag_pool, tag);
		goto busy;
	}
	cmd->req_frame = fp;		/* hold frame during cmd */

	INIT_WORK(&cmd->work, ft_send_work);
	queue_work(sess->tport->tpg->workqueue, &cmd->work);
	return;

busy:
	pr_debug("cmd or seq allocation failure - sending BUSY\n");
	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
	fc_frame_free(fp);
	ft_sess_put(sess);		/* undo get from lookup */
}
/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 * Only unsolicited commands are expected; anything else is dropped and
 * the session reference taken at lookup is released.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_UNSOL_CMD:	/* command */
		ft_recv_cmd(sess, fp);
		break;
	case FC_RCTL_DD_SOL_DATA:	/* write data */
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
			 __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		ft_sess_put(sess);	/* undo get from lookup */
		break;
	}
}
/*
 * Send new command to target.
 * Runs on the TPG workqueue (queued by ft_recv_cmd).  Decodes the
 * FCP_CMND payload, dispatches task management separately, then submits
 * the SCSI command to target-core.  Malformed commands get an immediate
 * FCP_CMND_FIELDS_INVALID response and are freed.
 */
static void ft_send_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct fcp_cmnd *fcp;
	int data_dir = 0;
	int task_attr;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;	/* not handling longer CDBs yet */

	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	/* Map the read/write flag pair onto a DMA data direction. */
	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		data_dir = DMA_NONE;
		break;
	case FCP_CFL_RDDATA:
		data_dir = DMA_FROM_DEVICE;
		break;
	case FCP_CFL_WRDATA:
		data_dir = DMA_TO_DEVICE;
		break;
	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
		goto err;	/* TBD not supported by tcm_fc yet */
	}
	/*
	 * Locate the SAM Task Attr from fc_pri_ta
	 */
	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_HEADQ:
		task_attr = MSG_HEAD_TAG;
		break;
	case FCP_PTA_ORDERED:
		task_attr = MSG_ORDERED_TAG;
		break;
	case FCP_PTA_ACA:
		task_attr = MSG_ACA_TAG;
		break;
	case FCP_PTA_SIMPLE:	/* Fallthrough */
	default:
		task_attr = MSG_SIMPLE_TAG;
	}

	/* From here on, data and abort sequences for this exchange
	 * are delivered to ft_recv_seq(). */
	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	/*
	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
	 * directly from ft_check_stop_free callback in response path.
	 */
	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			      &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
			      ntohl(fcp->fc_dl), task_attr, data_dir, 0))
		goto err;

	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}

View file

@ -0,0 +1,642 @@
/*******************************************************************************
* Filename: tcm_fc.c
*
* This file contains the configfs implementation for TCM_fc fabric node.
* Based on tcm_loop_configfs.c
*
* Copyright (c) 2010 Cisco Systems, Inc.
* Copyright (c) 2009,2010 Rising Tide, Inc.
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
/* Fabric configfs instance registered with target-core (see ft_init). */
struct target_fabric_configfs *ft_configfs;

/* All configured local-port WWNs; both protected by ft_lport_lock. */
static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);

/* Nonzero enables _ft_dump_cmd() debug output (module parameter). */
unsigned int ft_debug_logging;
module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
/*
 * Parse WWN.
 * If strict, we require lower-case hex and colon separators to be sure
 * the name is the same as what would be generated by ft_format_wwn()
 * so the name and wwn are mapped one-to-one.
 *
 * Returns the number of characters consumed, or -1 on failure (the
 * err codes below are only visible in the debug log).
 */
static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 byte = 0;
	u32 pos = 0;
	u32 err;
	int val;

	*wwn = 0;
	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
		c = *cp;
		/* Tolerate a single trailing newline (configfs writes). */
		if (c == '\n' && cp[1] == '\0')
			continue;

		/* In strict mode every third character must be a colon. */
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			/* Strict mode requires exactly 8 bytes. */
			if (strict && byte != 8)
				goto fail;
			return cp - name;
		}
		err = 3;
		val = hex_to_bin(c);
		if (val < 0 || (strict && isupper(c)))
			goto fail;
		*wwn = (*wwn << 4) | val;
	}
	err = 4;	/* too long / unterminated within FT_NAMELEN */
fail:
	pr_debug("err %u len %zu pos %u byte %u\n",
		 err, cp - name, pos, byte);
	return -1;
}
/*
 * Format a 64-bit WWN as colon-separated lower-case hex byte pairs,
 * the inverse of strict ft_parse_wwn().  Returns snprintf()'s result.
 */
ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
{
	u8 b[8];
	int i;

	/* Serialize big-endian so b[0] is the most significant byte. */
	for (i = 0; i < 8; i++)
		b[i] = (u8)(wwn >> (56 - 8 * i));
	return snprintf(buf, len,
		 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
/*
 * configfs "show" helper for a u64 WWN attribute: format as
 * colon-separated hex plus a trailing newline.
 */
static ssize_t ft_wwn_show(void *arg, char *buf)
{
	u64 *wwn = arg;
	ssize_t len;

	len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
	buf[len++] = '\n';
	return len;
}

/*
 * configfs "store" helper: parse (non-strict) and update the WWN.
 * NOTE(review): on parse failure this propagates -1, which userspace
 * sees as -EPERM; -EINVAL would be more accurate — confirm callers
 * before changing.
 */
static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
{
	ssize_t ret;
	u64 wwn;

	ret = ft_parse_wwn(buf, &wwn, 0);
	if (ret > 0)
		*(u64 *)arg = wwn;
	return ret;
}
/*
 * ACL auth ops.
 * configfs show/store handlers for the per-ACL port_name and node_name
 * attributes; all four delegate to the generic WWN helpers above.
 */
static ssize_t ft_nacl_show_port_name(
	struct se_node_acl *se_nacl,
	char *page)
{
	struct ft_node_acl *acl = container_of(se_nacl,
			struct ft_node_acl, se_node_acl);

	return ft_wwn_show(&acl->node_auth.port_name, page);
}

static ssize_t ft_nacl_store_port_name(
	struct se_node_acl *se_nacl,
	const char *page,
	size_t count)
{
	struct ft_node_acl *acl = container_of(se_nacl,
			struct ft_node_acl, se_node_acl);

	return ft_wwn_store(&acl->node_auth.port_name, page, count);
}

TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);

static ssize_t ft_nacl_show_node_name(
	struct se_node_acl *se_nacl,
	char *page)
{
	struct ft_node_acl *acl = container_of(se_nacl,
			struct ft_node_acl, se_node_acl);

	return ft_wwn_show(&acl->node_auth.node_name, page);
}

static ssize_t ft_nacl_store_node_name(
	struct se_node_acl *se_nacl,
	const char *page,
	size_t count)
{
	struct ft_node_acl *acl = container_of(se_nacl,
			struct ft_node_acl, se_node_acl);

	return ft_wwn_store(&acl->node_auth.node_name, page, count);
}

TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);

/* Attributes exposed under each nodeacl configfs directory. */
static struct configfs_attribute *ft_nacl_base_attrs[] = {
	&ft_nacl_port_name.attr,
	&ft_nacl_node_name.attr,
	NULL,
};
/*
 * ACL ops.
 */

/*
 * Add ACL for an initiator.  The ACL is named arbitrarily.
 * The port_name and/or node_name are attributes.
 * The configfs name must itself parse as a strict WWPN, which becomes
 * the initial port_name.
 */
static struct se_node_acl *ft_add_acl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct ft_node_acl *acl;
	struct ft_tpg *tpg;
	u64 wwpn;
	u32 q_depth;

	pr_debug("add acl %s\n", name);
	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);

	if (ft_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);
	acl->node_auth.port_name = wwpn;

	q_depth = 32;		/* XXX bogus default - get from tpg? */
	return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
				&acl->se_node_acl, name, q_depth);
}
/*
 * configfs drop_nodeacl callback: unregister the ACL from the TPG and
 * free it.
 */
static void ft_del_acl(struct se_node_acl *se_acl)
{
	struct se_portal_group *se_tpg = se_acl->se_tpg;
	struct ft_tpg *tpg;
	struct ft_node_acl *acl = container_of(se_acl,
				struct ft_node_acl, se_node_acl);

	pr_debug("del acl %s\n",
		 config_item_name(&se_acl->acl_group.cg_item));

	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
	pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
		 acl, se_acl, tpg, &tpg->se_tpg);

	core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
	kfree(acl);
}
/*
 * Find the ACL matching a remote port by WWPN or WWNN.
 * Returns NULL when the initiator is not authorized.  The returned ACL
 * is not reference-counted (see the XXX below).
 */
struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
{
	struct ft_node_acl *found = NULL;
	struct ft_node_acl *acl;
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_node_acl *se_acl;

	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
		pr_debug("acl %p port_name %llx\n",
			 acl, (unsigned long long)acl->node_auth.port_name);
		if (acl->node_auth.port_name == rdata->ids.port_name ||
		    acl->node_auth.node_name == rdata->ids.node_name) {
			pr_debug("acl %p port_name %llx matched\n", acl,
				 (unsigned long long)rdata->ids.port_name);
			found = acl;
			/* XXX need to hold onto ACL */
			break;
		}
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);
	return found;
}
/* target-core callback: allocate an empty fabric-specific node ACL. */
static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct ft_node_acl *acl;

	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
	if (!acl) {
		pr_err("Unable to allocate struct ft_node_acl\n");
		return NULL;
	}
	pr_debug("acl %p\n", acl);
	return &acl->se_node_acl;
}

/* target-core callback: free an ACL from ft_tpg_alloc_fabric_acl(). */
static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
				      struct se_node_acl *se_acl)
{
	struct ft_node_acl *acl = container_of(se_acl,
				struct ft_node_acl, se_node_acl);

	pr_debug("acl %p\n", acl);
	kfree(acl);
}
/*
 * local_port port_group (tpg) ops.
 */

/*
 * Create a target portal group for a local-port WWN.
 * The configfs name must be "tpgt_<index>", and only index 1 is
 * accepted since a single TPG maps to the HW port.  Registers the TPG
 * with target-core and allocates the workqueue (max_active = 1) used to
 * process incoming commands.
 *
 * NOTE(review): most failure paths return NULL (configfs reports a
 * generic error); only the bad-index case returns a specific ERR_PTR.
 */
static struct se_portal_group *ft_add_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct ft_lport_wwn *ft_wwn;
	struct ft_tpg *tpg;
	struct workqueue_struct *wq;
	unsigned long index;
	int ret;

	pr_debug("tcm_fc: add tpg %s\n", name);

	/*
	 * Name must be "tpgt_" followed by the index.
	 */
	if (strstr(name, "tpgt_") != name)
		return NULL;

	ret = kstrtoul(name + 5, 10, &index);
	if (ret)
		return NULL;
	if (index > UINT_MAX)
		return NULL;

	/* was "if ((index != 1))" - drop the accidental double parens */
	if (index != 1) {
		pr_err("Error, a single TPG=1 is used for HW port mappings\n");
		return ERR_PTR(-ENOSYS);
	}

	ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return NULL;
	tpg->index = index;
	tpg->lport_wwn = ft_wwn;
	INIT_LIST_HEAD(&tpg->lun_list);

	wq = alloc_workqueue("tcm_fc", 0, 1);
	if (!wq) {
		kfree(tpg);
		return NULL;
	}

	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
				tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		destroy_workqueue(wq);
		kfree(tpg);
		return NULL;
	}
	tpg->workqueue = wq;

	/* Publish the TPG on the WWN under the lport lock. */
	mutex_lock(&ft_lport_lock);
	ft_wwn->tpg = tpg;
	mutex_unlock(&ft_lport_lock);

	return &tpg->se_tpg;
}
/*
 * Tear down a TPG: drain and destroy its workqueue, unhook it from the
 * WWN and tport under ft_lport_lock, then deregister and free it.
 */
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
	struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;

	pr_debug("del tpg %s\n",
		 config_item_name(&tpg->se_tpg.tpg_group.cg_item));

	destroy_workqueue(tpg->workqueue);

	/* Wait for sessions to be freed thru RCU, for BUG_ON below */
	synchronize_rcu();

	mutex_lock(&ft_lport_lock);
	ft_wwn->tpg = NULL;
	if (tpg->tport) {
		tpg->tport->tpg = NULL;
		tpg->tport = NULL;
	}
	mutex_unlock(&ft_lport_lock);

	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
/*
* Verify that an lport is configured to use the tcm_fc module, and return
* the target port group that should be used.
*
* The caller holds ft_lport_lock.
*/
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
{
struct ft_lport_wwn *ft_wwn;
list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
if (ft_wwn->wwpn == lport->wwpn)
return ft_wwn->tpg;
}
return NULL;
}
/*
 * target config instance ops.
 */

/*
 * Add lport to allowed config.
 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
 * Fails (NULL) on a malformed name or a duplicate WWPN.
 */
static struct se_wwn *ft_add_wwn(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct ft_lport_wwn *ft_wwn;
	struct ft_lport_wwn *old_ft_wwn;
	u64 wwpn;

	pr_debug("add wwn %s\n", name);
	if (ft_parse_wwn(name, &wwpn, 1) < 0)
		return NULL;
	ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
	if (!ft_wwn)
		return NULL;
	ft_wwn->wwpn = wwpn;

	/* Duplicate check and list insertion are atomic under the lock. */
	mutex_lock(&ft_lport_lock);
	list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
		if (old_ft_wwn->wwpn == wwpn) {
			mutex_unlock(&ft_lport_lock);
			kfree(ft_wwn);
			return NULL;
		}
	}
	list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
	ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
	mutex_unlock(&ft_lport_lock);

	return &ft_wwn->se_wwn;
}
/* configfs drop_wwn callback: unlink the lport WWN entry and free it. */
static void ft_del_wwn(struct se_wwn *wwn)
{
	struct ft_lport_wwn *lwwn;

	lwwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
	pr_debug("del wwn %s\n", lwwn->name);

	mutex_lock(&ft_lport_lock);
	list_del(&lwwn->ft_wwn_node);
	mutex_unlock(&ft_lport_lock);

	kfree(lwwn);
}
/* configfs "version" attribute: module version plus kernel identity. */
static ssize_t ft_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
		       ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
}

TF_WWN_ATTR_RO(ft, version);

/* Attributes exposed at the fabric (wwn) level. */
static struct configfs_attribute *ft_wwn_attrs[] = {
	&ft_wwn_version.attr,
	NULL,
};
/* Fabric name used in configfs paths. */
static char *ft_get_fabric_name(void)
{
	return "fc";
}

/* Formatted WWPN of the local port backing this TPG. */
static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;

	return tpg->lport_wwn->name;
}

static u16 ft_get_tag(struct se_portal_group *se_tpg)
{
	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;

	/*
	 * This tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tpg->index;
}

/* target-core tpg_get_default_depth callback. */
static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

/* Shared "always false" answer for the tpg_check_* predicates. */
static int ft_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

/* No fabric-specific node attributes to initialize. */
static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
{
}

static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;

	return tpg->index;
}
/*
 * Fabric callback table handed to target-core.  The fc_get_* helpers
 * for protocol identifier and PR transport IDs come from libfc.
 */
static struct target_core_fabric_ops ft_fabric_ops = {
	.get_fabric_name =		ft_get_fabric_name,
	.get_fabric_proto_ident =	fc_get_fabric_proto_ident,
	.tpg_get_wwn =			ft_get_fabric_wwn,
	.tpg_get_tag =			ft_get_tag,
	.tpg_get_default_depth =	ft_get_default_depth,
	.tpg_get_pr_transport_id =	fc_get_pr_transport_id,
	.tpg_get_pr_transport_id_len =	fc_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
	.tpg_check_demo_mode =		ft_check_false,
	.tpg_check_demo_mode_cache =	ft_check_false,
	.tpg_check_demo_mode_write_protect = ft_check_false,
	.tpg_check_prod_mode_write_protect = ft_check_false,
	.tpg_alloc_fabric_acl =		ft_tpg_alloc_fabric_acl,
	.tpg_release_fabric_acl =	ft_tpg_release_fabric_acl,
	.tpg_get_inst_index =		ft_tpg_get_inst_index,
	.check_stop_free =		ft_check_stop_free,
	.release_cmd =			ft_release_cmd,
	.shutdown_session =		ft_sess_shutdown,
	.close_session =		ft_sess_close,
	.sess_get_index =		ft_sess_get_index,
	.sess_get_initiator_sid =	NULL,
	.write_pending =		ft_write_pending,
	.write_pending_status =		ft_write_pending_status,
	.set_default_node_attributes =	ft_set_default_node_attr,
	.get_task_tag =			ft_get_task_tag,
	.get_cmd_state =		ft_get_cmd_state,
	.queue_data_in =		ft_queue_data_in,
	.queue_status =			ft_queue_status,
	.queue_tm_rsp =			ft_queue_tm_resp,
	.aborted_task =			ft_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn =		&ft_add_wwn,
	.fabric_drop_wwn =		&ft_del_wwn,
	.fabric_make_tpg =		&ft_add_tpg,
	.fabric_drop_tpg =		&ft_del_tpg,
	.fabric_post_link =		NULL,
	.fabric_pre_unlink =		NULL,
	.fabric_make_np =		NULL,
	.fabric_drop_np =		NULL,
	.fabric_make_nodeacl =		&ft_add_acl,
	.fabric_drop_nodeacl =		&ft_del_acl,
};
/*
 * Register the "fc" fabric with the TCM configfs infrastructure.
 *
 * On success, stores the fabric handle in ft_configfs and returns 0.
 * On failure, returns a negative errno (previously a hard-coded -1,
 * which discarded the real error from target_fabric_configfs_register()).
 */
static int ft_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
	if (IS_ERR(fabric)) {
		pr_err("%s: target_fabric_configfs_init() failed!\n",
		       __func__);
		return PTR_ERR(fabric);
	}
	fabric->tf_ops = ft_fabric_ops;

	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
						ft_nacl_base_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;

	/*
	 * register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_debug("target_fabric_configfs_register() for"
			 " FC Target failed!\n");
		target_fabric_configfs_free(fabric);
		return ret;	/* propagate the real error code */
	}

	/*
	 * Setup our local pointer to *fabric.
	 */
	ft_configfs = fabric;
	return 0;
}
/*
 * Undo ft_register_configfs(); safe to call when nothing is registered.
 */
static void ft_deregister_configfs(void)
{
	if (ft_configfs) {
		target_fabric_configfs_deregister(ft_configfs);
		ft_configfs = NULL;
	}
}
/* Notifier for libfc local-port add/delete events (see ft_lport_notify). */
static struct notifier_block ft_notifier = {
	.notifier_call = ft_lport_notify
};
/*
 * Module init: register the fabric with configfs, register as the FCP
 * provider with libfc, then subscribe to lport events and attach to any
 * lports that already exist.
 *
 * Returns 0 on success or a negative errno.  Previously both failure
 * paths returned a hard-coded -1, discarding the underlying error code.
 */
static int __init ft_init(void)
{
	int ret;

	ret = ft_register_configfs();
	if (ret)
		return ret;
	ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
	if (ret) {
		ft_deregister_configfs();
		return ret;
	}
	blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
	/* Pick up lports created before we registered the notifier. */
	fc_lport_iterate(ft_lport_add, NULL);
	return 0;
}
/*
 * Module exit: tear down in the reverse order of ft_init().
 * The order is significant: stop new lport events first, then unhook
 * from libfc, then detach from the remaining lports and configfs.
 */
static void __exit ft_exit(void)
{
	blocking_notifier_chain_unregister(&fc_lport_notifier_head,
					   &ft_notifier);
	fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
	fc_lport_iterate(ft_lport_del, NULL);
	ft_deregister_configfs();
	synchronize_rcu();	/* wait out RCU readers of tport/session data */
}
/* Module metadata and entry points. */
MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
MODULE_LICENSE("GPL");
module_init(ft_init);
module_exit(ft_exit);

View file

@ -0,0 +1,380 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
*
* Copyright (c) 2007 Intel Corporation. All rights reserved.
* Copyright (c) 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2008 Mike Christie
* Copyright (c) 2009 Rising Tide, Inc.
* Copyright (c) 2009 Linux-iSCSI.org
* Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
/*
 * Deliver read data back to initiator.
 * Walks the command's scatterlist and sends it as a sequence of FC data
 * frames; uses skb page fragments when the length is word-aligned,
 * otherwise copies into linear frame buffers.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;	/* offset into current sg entry */
	u32 fh_off = 0;		/* relative offset carried in the FC header */
	u32 frame_off = 0;	/* running offset of the next frame's data */
	size_t frame_len = 0;	/* bytes still to be placed in current frame */
	size_t mem_len = 0;	/* bytes left in the current sg entry */
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

	/* A prior send failure set TASK_SET_FULL; skip the data phase. */
	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			/* advance to the next scatterlist entry */
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If lport's has capability of Large Send Offload LSO)
			 * , then allow 'frame_len' to be as big as 'lso_max'
			 * if indicated transfer length is >= lport->lso_max
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							   cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Setup the frame's max payload which is used by base
			 * driver to indicate HW about max frame size, so that
			 * HW can do fragmentation appropriately based on
			 * "gso_max_size" of underline netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			/* attach the sg page to the skb as a fragment */
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			/* copy into the linear frame buffer, one page at a time */
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		/* keep filling this frame while fragment slots remain */
		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
					    "xid <0x%x>, remaining %zu, "
					    "lso_max <0x%x>\n",
					    __func__, fp, ep->xid,
					    remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status ignoring the
			 * rest of the DataIN, and immediately attempt to
			 * send the response via ft_queue_status() in order
			 * to notify the initiator that it should reduce it's
			 * per LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}

queue_status:
	return ft_queue_status(se_cmd);
}
/*
 * Workqueue callback: hand a fully-received command to the TCM core.
 */
static void ft_execute_work(struct work_struct *work)
{
	struct ft_cmd *cmd;

	cmd = container_of(work, struct ft_cmd, work);
	target_execute_cmd(&cmd->se_cmd);
}
/*
 * Receive write data frame.
 * Copies the frame payload into the command's scatterlist at the relative
 * offset from the FC header, or — when DDP was set up — validates that the
 * hardware delivered the payload directly and only a header arrived here.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct fc_seq *seq = cmd->seq;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct scatterlist *sg = NULL;
	u32 mem_off = 0;
	u32 rel_off;		/* relative offset from the frame header */
	size_t frame_len;
	size_t mem_len = 0;
	size_t tlen;
	struct page *page = NULL;
	void *page_addr;
	void *from;
	void *to;
	u32 f_ctl;
	void *buf;

	fh = fc_frame_header_get(fp);
	/* frames without a relative offset cannot be placed; drop them */
	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
		goto drop;

	f_ctl = ntoh24(fh->fh_f_ctl);
	ep = fc_seq_exch(seq);
	lport = ep->lp;
	if (cmd->was_ddp_setup) {
		BUG_ON(!ep);
		BUG_ON(!lport);
		/*
		 * Since DDP (Large Rx offload) was setup for this request,
		 * payload is expected to be copied directly to user buffers.
		 */
		buf = fc_frame_payload_get(fp, 1);
		if (buf)
			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
				"cmd->sg_cnt 0x%x. DDP was setup"
				" hence not expected to receive frame with "
				"payload, Frame will be dropped if"
				"'Sequence Initiative' bit in f_ctl is"
				"not set\n", __func__, ep->xid, f_ctl,
				se_cmd->t_data_sg, se_cmd->t_data_nents);
		/*
		 * Invalidate HW DDP context if it was setup for respective
		 * command. Invalidation of HW DDP context is requited in both
		 * situation (success and error).
		 */
		ft_invl_hw_context(cmd);

		/*
		 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
		 * write data frame is received successfully where payload is
		 * posted directly to user buffer and only the last frame's
		 * header is posted in receive queue.
		 *
		 * If "Sequence Initiative (TSI)" bit is not set, means error
		 * condition w.r.t. DDP, hence drop the packet and let explict
		 * ABORTS from other end of exchange timer trigger the recovery.
		 */
		if (f_ctl & FC_FC_SEQ_INIT)
			goto last_frame;
		else
			goto drop;
	}

	rel_off = ntohl(fh->fh_parm_offset);
	frame_len = fr_len(fp);
	/* must carry at least some payload beyond the header */
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
	if (rel_off >= se_cmd->data_length)
		goto drop;
	/* clamp payload to the expected transfer length */
	if (frame_len + rel_off > se_cmd->data_length)
		frame_len = se_cmd->data_length - rel_off;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(frame_len && !se_cmd->t_data_sg);
	if (frame_len) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	while (frame_len) {
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = sg->length;
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (rel_off >= mem_len) {
			/* skip whole sg entries until we reach rel_off */
			rel_off -= mem_len;
			mem_len = 0;
			continue;
		}
		mem_off += rel_off;
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min(mem_len, frame_len);

		/* copy one page's worth at a time via a temporary mapping */
		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
		page_addr = to;
		to += mem_off & ~PAGE_MASK;
		tlen = min(tlen, (size_t)(PAGE_SIZE -
					  (mem_off & ~PAGE_MASK)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr);

		from += tlen;
		frame_len -= tlen;
		mem_off += tlen;
		mem_len -= tlen;
		cmd->write_data_len += tlen;
	}
last_frame:
	/* all write data received: queue the command for execution */
	if (cmd->write_data_len == se_cmd->data_length) {
		INIT_WORK(&cmd->work, ft_execute_work);
		queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
	}
drop:
	fc_frame_free(fp);
}
/*
 * Handle and cleanup any HW specific resources if
 * received ABORTS, errors, timeouts.
 *
 * Invalidates the hardware DDP context for this command's exchange, if
 * one was set up, and records the number of bytes the HW received.
 */
void ft_invl_hw_context(struct ft_cmd *cmd)
{
	struct fc_seq *seq;
	struct fc_exch *ep;
	struct fc_lport *lport;

	BUG_ON(!cmd);
	seq = cmd->seq;

	/* Nothing to do unless DDP was set up and a sequence exists. */
	if (!cmd->was_ddp_setup || !seq)
		return;
	ep = fc_seq_exch(seq);
	if (!ep)
		return;
	lport = ep->lp;
	if (!lport || ep->xid > lport->lro_xid)
		return;

	/*
	 * "ddp_done" trigger invalidation of HW
	 * specific DDP context
	 */
	cmd->write_data_len = lport->tt.ddp_done(lport, ep->xid);

	/*
	 * Resetting same variable to indicate HW's
	 * DDP context has been invalidated to avoid
	 * re_invalidation of same context (context is
	 * identified using ep->xid)
	 */
	cmd->was_ddp_setup = 0;
}

View file

@ -0,0 +1,504 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kref.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
static void ft_sess_delete_all(struct ft_tport *);
/*
 * Lookup or allocate target local port.
 * Caller holds ft_lport_lock.
 *
 * Returns the (possibly newly created) tport linked to the lport's tpg,
 * or NULL if no tpg exists for this lport or allocation fails.
 */
static struct ft_tport *ft_tport_get(struct fc_lport *lport)
{
	struct ft_tpg *tpg;
	struct ft_tport *tport;
	int i;

	/* deref is safe without rcu_read_lock: we hold ft_lport_lock */
	tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
					  lockdep_is_held(&ft_lport_lock));
	if (tport && tport->tpg)
		return tport;

	tpg = ft_lport_find_tpg(lport);
	if (!tpg)
		return NULL;

	if (tport) {
		/* tport existed without a tpg: just link the two */
		tport->tpg = tpg;
		tpg->tport = tport;
		return tport;
	}

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport)
		return NULL;

	tport->lport = lport;
	tport->tpg = tpg;
	tpg->tport = tport;
	for (i = 0; i < FT_SESS_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tport->hash[i]);

	/* publish for RCU readers only after full initialization */
	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
	return tport;
}
/*
 * Delete a target local port.
 * Caller holds ft_lport_lock.
 *
 * Tears down all sessions, unpublishes the tport from the lport's
 * provider slot, and frees it after an RCU grace period.
 */
static void ft_tport_delete(struct ft_tport *tport)
{
	struct fc_lport *lport;
	struct ft_tpg *tpg;

	ft_sess_delete_all(tport);
	lport = tport->lport;
	BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
	RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);

	tpg = tport->tpg;
	if (tpg) {
		/* break the tpg <-> tport link in both directions */
		tpg->tport = NULL;
		tport->tpg = NULL;
	}
	kfree_rcu(tport, rcu);	/* defer free past concurrent RCU readers */
}
/*
 * Add local port.
 * Called thru fc_lport_iterate().
 * Creates (or finds) the tport for this lport under ft_lport_lock.
 */
void ft_lport_add(struct fc_lport *lport, void *arg)
{
	mutex_lock(&ft_lport_lock);
	ft_tport_get(lport);
	mutex_unlock(&ft_lport_lock);
}
/*
 * Delete local port.
 * Called thru fc_lport_iterate().
 * Removes the tport attached to this lport, if any, under ft_lport_lock.
 */
void ft_lport_del(struct fc_lport *lport, void *arg)
{
	struct ft_tport *tport;

	mutex_lock(&ft_lport_lock);
	tport = lport->prov[FC_TYPE_FCP];
	if (tport)
		ft_tport_delete(tport);
	mutex_unlock(&ft_lport_lock);
}
/*
 * Notification of local port change from libfc.
 * Create or delete local port and associated tport.
 */
int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
{
	struct fc_lport *lport = arg;

	if (event == FC_LPORT_EV_ADD)
		ft_lport_add(lport, NULL);
	else if (event == FC_LPORT_EV_DEL)
		ft_lport_del(lport, NULL);
	return NOTIFY_DONE;
}
/*
 * Hash function for FC_IDs: map a 24-bit port_id to a session hash bucket.
 */
static u32 ft_sess_hash(u32 port_id)
{
	u32 bucket = hash_32(port_id, FT_SESS_HASH_BITS);

	return bucket;
}
/*
 * Find session in local port.
 * Sessions and hash lists are RCU-protected.
 * A reference is taken which must be eventually freed.
 *
 * Returns the session with an extra kref, or NULL if not found.
 */
static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
	struct ft_tport *tport;
	struct hlist_head *head;
	struct ft_sess *sess;

	rcu_read_lock();
	tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
	if (!tport)
		goto out;

	head = &tport->hash[ft_sess_hash(port_id)];
	hlist_for_each_entry_rcu(sess, head, hash) {
		if (sess->port_id == port_id) {
			/* take the reference before leaving the RCU section */
			kref_get(&sess->kref);
			rcu_read_unlock();
			pr_debug("port_id %x found %p\n", port_id, sess);
			return sess;
		}
	}
out:
	rcu_read_unlock();
	pr_debug("port_id %x not found\n", port_id);
	return NULL;
}
/*
 * Allocate session and enter it in the hash for the local port.
 * Caller holds ft_lport_lock.
 *
 * Returns the existing session for port_id if one is already hashed,
 * otherwise a newly created and registered session, or NULL on failure.
 */
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
				      struct ft_node_acl *acl)
{
	struct ft_sess *sess;
	struct hlist_head *head;

	/* reuse an existing session for this port_id, if present */
	head = &tport->hash[ft_sess_hash(port_id)];
	hlist_for_each_entry_rcu(sess, head, hash)
		if (sess->port_id == port_id)
			return sess;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return NULL;

	/* preallocate per-session command tags (TCM_FC_DEFAULT_TAGS) */
	sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
						    sizeof(struct ft_cmd),
						    TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		kfree(sess);
		return NULL;
	}
	sess->se_sess->se_node_acl = &acl->se_node_acl;
	sess->tport = tport;
	sess->port_id = port_id;
	kref_init(&sess->kref);	/* ref for table entry */
	hlist_add_head_rcu(&sess->hash, head);
	tport->sess_count++;

	pr_debug("port_id %x sess %p\n", port_id, sess);

	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
				   sess->se_sess, sess);
	return sess;
}
/*
 * Unhash the session.
 * Caller holds ft_lport_lock.
 * The session itself stays alive until its kref drops to zero.
 */
static void ft_sess_unhash(struct ft_sess *sess)
{
	struct ft_tport *tport = sess->tport;

	hlist_del_rcu(&sess->hash);
	BUG_ON(!tport->sess_count);
	tport->sess_count--;
	sess->port_id = -1;	/* mark as no longer addressable */
	sess->params = 0;
}
/*
 * Delete session from hash.
 * Caller holds ft_lport_lock.
 *
 * Returns the unhashed session (caller owns the table's reference),
 * or NULL if no session exists for port_id.
 */
static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
{
	struct hlist_head *head;
	struct ft_sess *sess;

	head = &tport->hash[ft_sess_hash(port_id)];
	hlist_for_each_entry_rcu(sess, head, hash) {
		if (sess->port_id == port_id) {
			ft_sess_unhash(sess);
			return sess;
		}
	}
	return NULL;	/* not found */
}
/*
 * Delete all sessions from tport.
 * Caller holds ft_lport_lock.
 * Walks every hash bucket, unhashing each session and dropping the
 * table's reference on it.
 */
static void ft_sess_delete_all(struct ft_tport *tport)
{
	struct hlist_head *head;
	struct ft_sess *sess;

	for (head = tport->hash;
	     head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
		hlist_for_each_entry_rcu(sess, head, hash) {
			ft_sess_unhash(sess);
			transport_deregister_session_configfs(sess->se_sess);
			ft_sess_put(sess);	/* release from table */
		}
	}
}
/*
* TCM ops for sessions.
*/
/*
 * Determine whether session is allowed to be shutdown in the current context.
 * Returns non-zero if the session should be shutdown.
 * This fabric always permits shutdown.
 */
int ft_sess_shutdown(struct se_session *se_sess)
{
	struct ft_sess *sess = se_sess->fabric_sess_ptr;

	pr_debug("port_id %x\n", sess->port_id);
	return 1;
}
/*
 * Remove session and send PRLO.
 * This is called when the ACL is being deleted or queue depth is changing.
 */
void ft_sess_close(struct se_session *se_sess)
{
	struct ft_sess *sess = se_sess->fabric_sess_ptr;
	u32 port_id;

	mutex_lock(&ft_lport_lock);
	port_id = sess->port_id;
	if (port_id == -1) {
		/* already unhashed by another path; nothing to do */
		mutex_unlock(&ft_lport_lock);
		return;
	}
	pr_debug("port_id %x\n", port_id);
	ft_sess_unhash(sess);
	mutex_unlock(&ft_lport_lock);
	transport_deregister_session_configfs(se_sess);
	ft_sess_put(sess);	/* drop the hash table's reference */
	/* XXX Send LOGO or PRLO */
	synchronize_rcu();		/* let transport deregister happen */
}
/*
 * Return an index identifying this session to the TCM core.
 */
u32 ft_sess_get_index(struct se_session *se_sess)
{
	struct ft_sess *sess;

	sess = se_sess->fabric_sess_ptr;
	/* XXX TBD probably not what is needed */
	return sess->port_id;
}
/*
 * Format the session's remote port WWPN into buf; returns formatted length.
 */
u32 ft_sess_get_port_name(struct se_session *se_sess,
			  unsigned char *buf, u32 len)
{
	struct ft_sess *sess;

	sess = se_sess->fabric_sess_ptr;
	return ft_format_wwn(buf, len, sess->port_name);
}
/*
 * libfc ops involving sessions.
 */

/*
 * Handle PRLI (process login) under ft_lport_lock.
 * Validates the received service parameter page, creates the session if
 * an image pair is requested by an initiator, and fills in our response
 * service parameters.  Returns an FC_SPP_RESP_* code, or 0 when this
 * lport/remote is not a target.
 */
static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
			  const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
	struct ft_tport *tport;
	struct ft_sess *sess;
	struct ft_node_acl *acl;
	u32 fcp_parm;

	tport = ft_tport_get(rdata->local_port);
	if (!tport)
		goto not_target;	/* not a target for this local port */

	acl = ft_acl_get(tport->tpg, rdata);
	if (!acl)
		goto not_target;	/* no target for this remote */

	if (!rspp)
		goto fill;		/* outgoing PRLI: just fill our params */

	if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
		return FC_SPP_RESP_NO_PA;

	/*
	 * If both target and initiator bits are off, the SPP is invalid.
	 */
	fcp_parm = ntohl(rspp->spp_params);
	if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
		return FC_SPP_RESP_INVL;

	/*
	 * Create session (image pair) only if requested by
	 * EST_IMG_PAIR flag and if the requestor is an initiator.
	 */
	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
		spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
		if (!(fcp_parm & FCP_SPPF_INIT_FCN))
			return FC_SPP_RESP_CONF;
		sess = ft_sess_create(tport, rdata->ids.port_id, acl);
		if (!sess)
			return FC_SPP_RESP_RES;
		if (!sess->params)
			rdata->prli_count++;	/* first PRLI for this session */
		sess->params = fcp_parm;
		sess->port_name = rdata->ids.port_name;
		sess->max_frame = rdata->maxframe_size;

		/* XXX TBD - clearing actions. unit attn, see 4.10 */
	}

	/*
	 * OR in our service parameters with other provider (initiator), if any.
	 */
fill:
	fcp_parm = ntohl(spp->spp_params);
	fcp_parm &= ~FCP_SPPF_RETRY;
	spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
	return FC_SPP_RESP_ACK;

not_target:
	/* clear the target function bit in the response parameters */
	fcp_parm = ntohl(spp->spp_params);
	fcp_parm &= ~FCP_SPPF_TARG_FCN;
	spp->spp_params = htonl(fcp_parm);
	return 0;
}
/**
 * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
 * @rdata: remote port private
 * @spp_len: service parameter page length
 * @rspp: received service parameter page (NULL for outgoing PRLI)
 * @spp: response service parameter page
 *
 * Returns spp response code.
 */
static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
		   const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
	int ret;

	/* serialize against tport and session setup/teardown */
	mutex_lock(&ft_lport_lock);
	ret = ft_prli_locked(rdata, spp_len, rspp, spp);
	mutex_unlock(&ft_lport_lock);
	pr_debug("port_id %x flags %x ret %x\n",
		 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
	return ret;
}
/*
 * Final release of a session, called when its kref drops to zero.
 * Deregisters the se_session and frees the session after an RCU
 * grace period, since readers may still be traversing the hash.
 */
static void ft_sess_free(struct kref *kref)
{
	struct ft_sess *sess = container_of(kref, struct ft_sess, kref);

	transport_deregister_session(sess->se_sess);
	kfree_rcu(sess, rcu);
}
/*
 * Drop a reference on the session; the last put frees it via ft_sess_free.
 */
void ft_sess_put(struct ft_sess *sess)
{
	/* caller must still hold a reference; count may not already be zero */
	int sess_held = atomic_read(&sess->kref.refcount);

	BUG_ON(!sess_held);
	kref_put(&sess->kref, ft_sess_free);
}
/*
 * Handle PRLO (process logout) from the remote port:
 * remove its session from the hash and drop the table's reference.
 */
static void ft_prlo(struct fc_rport_priv *rdata)
{
	struct ft_sess *sess;
	struct ft_tport *tport;

	mutex_lock(&ft_lport_lock);
	tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
					  lockdep_is_held(&ft_lport_lock));

	if (!tport) {
		mutex_unlock(&ft_lport_lock);
		return;		/* not a target for this lport */
	}
	sess = ft_sess_delete(tport, rdata->ids.port_id);
	if (!sess) {
		mutex_unlock(&ft_lport_lock);
		return;		/* no session for this remote port */
	}
	mutex_unlock(&ft_lport_lock);
	transport_deregister_session_configfs(sess->se_sess);
	ft_sess_put(sess);	/* release from table */
	rdata->prli_count--;
	/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
/*
 * Handle incoming FCP request.
 * Caller has verified that the frame is type FCP.
 */
static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	u32 sid = fc_frame_sid(fp);
	struct ft_sess *sess;

	pr_debug("sid %x\n", sid);

	sess = ft_sess_get(lport, sid);
	if (sess) {
		ft_recv_req(sess, fp);	/* must do ft_sess_put() */
		return;
	}
	pr_debug("sid %x sess lookup failed\n", sid);
	/* TBD XXX - if FCP_CMND, send PRLO */
	fc_frame_free(fp);
}
/*
 * Provider ops for libfc.
 * Registered for FC_TYPE_FCP in ft_init() via fc_fc4_register_provider().
 */
struct fc4_prov ft_prov = {
	.prli = ft_prli,	/* process login */
	.prlo = ft_prlo,	/* process logout */
	.recv = ft_recv,	/* incoming FCP frames */
	.module = THIS_MODULE,
};