Fixed MTP to work with TWRP

commit f6dfaef42e
Author: awab228
Date:   2018-06-19 23:16:04 +02:00

50820 changed files with 20846062 additions and 0 deletions

drivers/target/Kconfig (new file)

@@ -0,0 +1,46 @@
menuconfig TARGET_CORE
        tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
        depends on SCSI && BLOCK
        select CONFIGFS_FS
        select CRC_T10DIF
        default n
        help
          Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
          control path for target_core_mod. This includes built-in TCM RAMDISK
          subsystem logic for virtual LUN 0 access

if TARGET_CORE

config TCM_IBLOCK
        tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
        select BLK_DEV_INTEGRITY
        help
          Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
          access to Linux/Block devices using BIO

config TCM_FILEIO
        tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
        help
          Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
          access to Linux/VFS struct file or struct block_device

config TCM_PSCSI
        tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
        help
          Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
          passthrough access to Linux/SCSI device

config TCM_USER
        tristate "TCM/USER Subsystem Plugin for Linux"
        depends on UIO && NET
        help
          Say Y here to enable the TCM/USER subsystem plugin for a userspace
          process to handle requests

source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
source "drivers/target/iscsi/Kconfig"
source "drivers/target/sbp/Kconfig"

endif

drivers/target/Makefile (new file)

@@ -0,0 +1,31 @@
target_core_mod-y := target_core_configfs.o \
                     target_core_device.o \
                     target_core_fabric_configfs.o \
                     target_core_fabric_lib.o \
                     target_core_hba.o \
                     target_core_pr.o \
                     target_core_alua.o \
                     target_core_tmr.o \
                     target_core_tpg.o \
                     target_core_transport.o \
                     target_core_sbc.o \
                     target_core_spc.o \
                     target_core_ua.o \
                     target_core_rd.o \
                     target_core_stat.o \
                     target_core_xcopy.o

obj-$(CONFIG_TARGET_CORE) += target_core_mod.o

# Subsystem modules
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
obj-$(CONFIG_TCM_USER) += target_core_user.o

# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
obj-$(CONFIG_TCM_FC) += tcm_fc/
obj-$(CONFIG_ISCSI_TARGET) += iscsi/
obj-$(CONFIG_SBP_TARGET) += sbp/

drivers/target/iscsi/Kconfig (new file)

@@ -0,0 +1,9 @@
config ISCSI_TARGET
        tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
        depends on NET
        select CRYPTO
        select CRYPTO_CRC32C
        select CRYPTO_CRC32C_INTEL if X86
        help
          Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
          Target Mode Stack.

drivers/target/iscsi/Makefile (new file)

@@ -0,0 +1,21 @@
iscsi_target_mod-y += iscsi_target_parameters.o \
                      iscsi_target_seq_pdu_list.o \
                      iscsi_target_tq.o \
                      iscsi_target_auth.o \
                      iscsi_target_datain_values.o \
                      iscsi_target_device.o \
                      iscsi_target_erl0.o \
                      iscsi_target_erl1.o \
                      iscsi_target_erl2.o \
                      iscsi_target_login.o \
                      iscsi_target_nego.o \
                      iscsi_target_nodeattrib.o \
                      iscsi_target_tmr.o \
                      iscsi_target_tpg.o \
                      iscsi_target_util.o \
                      iscsi_target.o \
                      iscsi_target_configfs.o \
                      iscsi_target_stat.o \
                      iscsi_target_transport.o

obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o

[File diff suppressed because it is too large]

drivers/target/iscsi/iscsi_target.h (new file)

@@ -0,0 +1,50 @@
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
extern void iscsit_login_kref_put(struct kref *);
extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
struct iscsi_tpg_np *);
extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
struct iscsi_np *, int);
extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
char *, int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
struct iscsi_portal_group *, bool);
extern int iscsit_del_np(struct iscsi_np *);
extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsi_conn *);
extern int iscsit_close_session(struct iscsi_session *);
extern void iscsit_fail_session(struct iscsi_session *);
extern int iscsit_free_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
extern struct iscsit_global *iscsit_global;
extern struct target_fabric_configfs *lio_target_fabric_configfs;
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
extern struct kmem_cache *lio_qr_cache;
extern struct kmem_cache *lio_r2t_cache;
extern struct idr sess_idr;
extern struct mutex auth_id_lock;
extern spinlock_t sess_idr_lock;
#endif /*** ISCSI_TARGET_H ***/

drivers/target/iscsi/iscsi_target_auth.c (new file)

@@ -0,0 +1,502 @@
/*******************************************************************************
* This file houses the main functions for the iSCSI CHAP support
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include "iscsi_target_core.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
int j = DIV_ROUND_UP(len, 2), rc;
rc = hex2bin(dst, src, j);
if (rc < 0)
pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
}
static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
{
int i;
for (i = 0; i < src_len; i++) {
sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
}
}
static void chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
char *c_str,
unsigned int *c_len)
{
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
*c_len += 1;
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
}
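The *c_len += 1 after the sprintf() above is what gives the iSCSI login text its wire format: key=value pairs separated by NUL bytes rather than newlines. A minimal userspace sketch of that encoding, with illustrative names and buffer sizes not taken from the driver:

#include <stdio.h>
#include <string.h>

/* Append "key=value" plus its terminating NUL to buf, returning the new
 * used length -- the same bookkeeping chap_gen_challenge() does on *c_len. */
static unsigned int append_kv(char *buf, unsigned int len,
                              const char *key, const char *value)
{
        len += sprintf(buf + len, "%s=%s", key, value);
        return len + 1;                 /* count the NUL separator too */
}

int main(void)
{
        char buf[256];
        unsigned int len = 0;

        len = append_kv(buf, len, "CHAP_A", "5");
        len = append_kv(buf, len, "CHAP_I", "1");
        /* buf now holds "CHAP_A=5\0CHAP_I=1\0" and len counts both NULs */
        printf("encoded %u bytes\n", len);
        return 0;
}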
static int chap_check_algorithm(const char *a_str)
{
char *tmp, *orig, *token;
tmp = kstrdup(a_str, GFP_KERNEL);
if (!tmp) {
pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
return CHAP_DIGEST_UNKNOWN;
}
orig = tmp;
token = strsep(&tmp, "=");
if (!token)
goto out;
if (strcmp(token, "CHAP_A")) {
pr_err("Unable to locate CHAP_A key\n");
goto out;
}
while (token) {
token = strsep(&tmp, ",");
if (!token)
goto out;
if (!strncmp(token, "5", 1)) {
pr_debug("Selected MD5 Algorithm\n");
kfree(orig);
return CHAP_DIGEST_MD5;
}
}
out:
kfree(orig);
return CHAP_DIGEST_UNKNOWN;
}
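For reference, chap_check_algorithm() scans the comma-separated CHAP_A value list for the one digest this implementation supports. Illustrative inputs and outcomes (assumed examples, not strings from the driver):

/*
 * "CHAP_A=5"   -> CHAP_DIGEST_MD5      (MD5 selected)
 * "CHAP_A=6,5" -> CHAP_DIGEST_MD5      (list scanned until a match)
 * "CHAP_A=6"   -> CHAP_DIGEST_UNKNOWN  (SHA is defined but not implemented)
 * "FOO_A=5"    -> CHAP_DIGEST_UNKNOWN  (key must be CHAP_A)
 */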
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
const char *a_str,
char *aic_str,
unsigned int *aic_len)
{
int ret;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
!(auth->naf_flags & NAF_PASSWORD_SET)) {
pr_err("CHAP user or password not set for"
" Initiator ACL\n");
return NULL;
}
conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
if (!conn->auth_protocol)
return NULL;
chap = conn->auth_protocol;
ret = chap_check_algorithm(a_str);
switch (ret) {
case CHAP_DIGEST_MD5:
pr_debug("[server] Got CHAP_A=5\n");
/*
* Send back CHAP_A set to MD5.
*/
*aic_len = sprintf(aic_str, "CHAP_A=5");
*aic_len += 1;
chap->digest_type = CHAP_DIGEST_MD5;
pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
break;
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
return NULL;
}
/*
* Set Identifier.
*/
chap->id = conn->tpg->tpg_chap_id++;
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
*aic_len += 1;
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
/*
* Generate Challenge.
*/
chap_gen_challenge(conn, 1, aic_str, aic_len);
return chap;
}
static void chap_close(struct iscsi_conn *conn)
{
kfree(conn->auth_protocol);
conn->auth_protocol = NULL;
}
static int chap_server_compute_md5(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
char *nr_out_ptr,
unsigned int *nr_out_len)
{
unsigned long id;
unsigned char id_as_uchar;
unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
unsigned char *challenge_binhex = NULL;
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
struct scatterlist sg;
int auth_ret = -1, ret, challenge_len;
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
memset(digest, 0, MD5_SIGNATURE_SIZE);
memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
memset(client_digest, 0, MD5_SIGNATURE_SIZE);
memset(server_digest, 0, MD5_SIGNATURE_SIZE);
challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
if (!challenge) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
if (!challenge_binhex) {
pr_err("Unable to allocate challenge_binhex buffer\n");
goto out;
}
/*
* Extract CHAP_N.
*/
if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
&type) < 0) {
pr_err("Could not find CHAP_N.\n");
goto out;
}
if (type == HEX) {
pr_err("Could not find CHAP_N.\n");
goto out;
}
/* Include the terminating NULL in the compare */
compare_len = strlen(auth->userid) + 1;
if (strncmp(chap_n, auth->userid, compare_len) != 0) {
pr_err("CHAP_N values do not match!\n");
goto out;
}
pr_debug("[server] Got CHAP_N=%s\n", chap_n);
/*
* Extract CHAP_R.
*/
if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
&type) < 0) {
pr_err("Could not find CHAP_R.\n");
goto out;
}
if (type != HEX) {
pr_err("Could not find CHAP_R.\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
pr_err("Unable to allocate struct crypto_hash\n");
goto out;
}
desc.tfm = tfm;
desc.flags = 0;
ret = crypto_hash_init(&desc);
if (ret < 0) {
pr_err("crypto_hash_init() failed\n");
crypto_free_hash(tfm);
goto out;
}
sg_init_one(&sg, &chap->id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
crypto_free_hash(tfm);
goto out;
}
sg_init_one(&sg, &auth->password, strlen(auth->password));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
if (ret < 0) {
pr_err("crypto_hash_update() failed for password\n");
crypto_free_hash(tfm);
goto out;
}
sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
if (ret < 0) {
pr_err("crypto_hash_update() failed for challenge\n");
crypto_free_hash(tfm);
goto out;
}
ret = crypto_hash_final(&desc, server_digest);
if (ret < 0) {
pr_err("crypto_hash_final() failed for server digest\n");
crypto_free_hash(tfm);
goto out;
}
crypto_free_hash(tfm);
chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
pr_debug("[server] MD5 Digests do not match!\n\n");
goto out;
} else
pr_debug("[server] MD5 Digests match, CHAP connetication"
" successful.\n\n");
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
*/
if (!auth->authenticate_target) {
kfree(challenge);
kfree(challenge_binhex);
return 0;
}
/*
* Get CHAP_I.
*/
if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
pr_err("Could not find CHAP_I.\n");
goto out;
}
if (type == HEX)
ret = kstrtoul(&identifier[2], 0, &id);
else
ret = kstrtoul(identifier, 0, &id);
if (ret < 0) {
pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
goto out;
}
if (id > 255) {
pr_err("chap identifier: %lu greater than 255\n", id);
goto out;
}
/*
* RFC 1994 says Identifier is no more than octet (8 bits).
*/
pr_debug("[server] Got CHAP_I=%lu\n", id);
/*
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
challenge, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
if (type != HEX) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
pr_debug("[server] Got CHAP_C=%s\n", challenge);
challenge_len = chap_string_to_hex(challenge_binhex, challenge,
strlen(challenge));
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
if (challenge_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
* the target.
*/
if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
pr_err("initiator CHAP_C matches target CHAP_C, failing"
" login attempt\n");
goto out;
}
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
pr_err("Unable to allocate struct crypto_hash\n");
goto out;
}
desc.tfm = tfm;
desc.flags = 0;
ret = crypto_hash_init(&desc);
if (ret < 0) {
pr_err("crypto_hash_init() failed\n");
crypto_free_hash(tfm);
goto out;
}
/* To handle both endiannesses */
id_as_uchar = id;
sg_init_one(&sg, &id_as_uchar, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
crypto_free_hash(tfm);
goto out;
}
sg_init_one(&sg, auth->password_mutual,
strlen(auth->password_mutual));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
if (ret < 0) {
pr_err("crypto_hash_update() failed for"
" password_mutual\n");
crypto_free_hash(tfm);
goto out;
}
/*
* Convert received challenge to binary hex.
*/
sg_init_one(&sg, challenge_binhex, challenge_len);
ret = crypto_hash_update(&desc, &sg, challenge_len);
if (ret < 0) {
pr_err("crypto_hash_update() failed for ma challenge\n");
crypto_free_hash(tfm);
goto out;
}
ret = crypto_hash_final(&desc, digest);
if (ret < 0) {
pr_err("crypto_hash_final() failed for ma digest\n");
crypto_free_hash(tfm);
goto out;
}
crypto_free_hash(tfm);
/*
* Generate CHAP_N and CHAP_R.
*/
*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
*nr_out_len += 1;
pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
/*
* Convert response from binary to ASCII hex.
*/
chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
pr_debug("[server] Sending CHAP_R=0x%s\n", response);
auth_ret = 0;
out:
kfree(challenge);
kfree(challenge_binhex);
return auth_ret;
}
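The digest computed on both sides is plain RFC 1994 CHAP: MD5 over the one-byte identifier, the shared secret, and the challenge, in that order, exactly as the three crypto_hash_update() calls above show. A self-contained userspace sketch of the same computation using OpenSSL's MD5 API (illustrative only -- the driver uses the kernel crypto_hash interface, and the helper name here is invented; build with -lcrypto):

#include <string.h>
#include <openssl/md5.h>

/* response = MD5(id || secret || challenge), as in chap_server_compute_md5() */
static void chap_md5_response(unsigned char id, const char *secret,
                              const unsigned char *challenge, size_t challenge_len,
                              unsigned char digest[MD5_DIGEST_LENGTH])
{
        MD5_CTX ctx;

        MD5_Init(&ctx);
        MD5_Update(&ctx, &id, 1);                       /* CHAP_I identifier */
        MD5_Update(&ctx, secret, strlen(secret));       /* shared secret     */
        MD5_Update(&ctx, challenge, challenge_len);     /* CHAP_C challenge  */
        MD5_Final(digest, &ctx);
}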
static int chap_got_response(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
char *nr_out_ptr,
unsigned int *nr_out_len)
{
struct iscsi_chap *chap = conn->auth_protocol;
switch (chap->digest_type) {
case CHAP_DIGEST_MD5:
if (chap_server_compute_md5(conn, auth, nr_in_ptr,
nr_out_ptr, nr_out_len) < 0)
return -1;
return 0;
default:
pr_err("Unknown CHAP digest type %d!\n",
chap->digest_type);
return -1;
}
}
u32 chap_main_loop(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *in_text,
char *out_text,
int *in_len,
int *out_len)
{
struct iscsi_chap *chap = conn->auth_protocol;
if (!chap) {
chap = chap_server_open(conn, auth, in_text, out_text, out_len);
if (!chap)
return 2;
chap->chap_state = CHAP_STAGE_SERVER_AIC;
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
if (chap_got_response(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
}
if (auth->authenticate_target)
chap->chap_state = CHAP_STAGE_SERVER_NR;
else
*out_len = 0;
chap_close(conn);
return 1;
}
return 2;
}
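chap_main_loop() is a two-step server state machine: the first call allocates the context and emits CHAP_A/CHAP_I/CHAP_C, the second verifies CHAP_R (and answers a mutual challenge when configured). A hedged sketch of the return-value contract a caller would rely on, with an invented wrapper name:

/* Assumed convention, read from the code above:
 *   0 -> exchange still in progress, send out_text to the initiator
 *   1 -> authentication complete
 *   2 -> authentication failed
 */
static int handle_chap_step(struct iscsi_conn *conn, struct iscsi_node_auth *auth,
                            char *in, char *out, int *in_len, int *out_len)
{
        switch (chap_main_loop(conn, auth, in, out, in_len, out_len)) {
        case 0:  return 0;      /* wait for the next login PDU */
        case 1:  return 1;      /* proceed with login */
        default: return -1;     /* reject the login attempt */
        }
}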

drivers/target/iscsi/iscsi_target_auth.h (new file)

@@ -0,0 +1,32 @@
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6
#define CHAP_CHALLENGE_LENGTH 16
#define CHAP_CHALLENGE_STR_LEN 4096
#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
#define MAX_CHAP_N_SIZE 512
#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
#define CHAP_STAGE_CLIENT_A 1
#define CHAP_STAGE_SERVER_AIC 2
#define CHAP_STAGE_CLIENT_NR 3
#define CHAP_STAGE_CLIENT_NRIC 4
#define CHAP_STAGE_SERVER_NR 5
extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
int *, int *);
struct iscsi_chap {
unsigned char digest_type;
unsigned char id;
unsigned char challenge[CHAP_CHALLENGE_LENGTH];
unsigned int authenticate_target;
unsigned int chap_state;
} ____cacheline_aligned;
#endif /*** _ISCSI_CHAP_H_ ***/

[File diff suppressed because it is too large]

drivers/target/iscsi/iscsi_target_configfs.h (new file)

@@ -0,0 +1,7 @@
#ifndef ISCSI_TARGET_CONFIGFS_H
#define ISCSI_TARGET_CONFIGFS_H
extern int iscsi_target_register_configfs(void);
extern void iscsi_target_deregister_configfs(void);
#endif /* ISCSI_TARGET_CONFIGFS_H */

drivers/target/iscsi/iscsi_target_core.h (new file)

@@ -0,0 +1,884 @@
#ifndef ISCSI_TARGET_CORE_H
#define ISCSI_TARGET_CORE_H
#include <linux/in.h>
#include <linux/configfs.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#define ISCSIT_VERSION "v4.1.0"
#define ISCSI_MAX_DATASN_MISSING_COUNT 16
#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
#define SECONDS_FOR_ASYNC_LOGOUT 10
#define SECONDS_FOR_ASYNC_TEXT 10
#define SECONDS_FOR_LOGOUT_COMP 15
#define WHITE_SPACE " \t\v\f\n\r"
#define ISCSIT_MIN_TAGS 16
#define ISCSIT_EXTRA_TAGS 8
#define ISCSIT_TCP_BACKLOG 256
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
#define NA_DATAOUT_TIMEOUT_MAX 60
#define NA_DATAOUT_TIMEOUT_MIX 2
#define NA_DATAOUT_TIMEOUT_RETRIES 5
#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
#define NA_NOPIN_TIMEOUT 15
#define NA_NOPIN_TIMEOUT_MAX 60
#define NA_NOPIN_TIMEOUT_MIN 3
#define NA_NOPIN_RESPONSE_TIMEOUT 30
#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
#define NA_RANDOM_R2T_OFFSETS 0
/* struct iscsi_tpg_attrib sanity values */
#define TA_AUTHENTICATION 1
#define TA_LOGIN_TIMEOUT 15
#define TA_LOGIN_TIMEOUT_MAX 30
#define TA_LOGIN_TIMEOUT_MIN 5
#define TA_NETIF_TIMEOUT 2
#define TA_NETIF_TIMEOUT_MAX 15
#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
#define TA_CACHE_DYNAMIC_ACLS 0
/* Enabled by default in demo mode (generic_node_acls=1) */
#define TA_DEMO_MODE_WRITE_PROTECT 1
/* Disabled by default in production mode w/ explicit ACLs */
#define TA_PROD_MODE_WRITE_PROTECT 0
#define TA_DEMO_MODE_DISCOVERY 1
#define TA_DEFAULT_ERL 0
#define TA_CACHE_CORE_NPS 0
/* T10 protection information disabled by default */
#define TA_DEFAULT_T10_PI 0
#define ISCSI_IOV_DATA_BUFFER 5
enum iscsit_transport_type {
ISCSI_TCP = 0,
ISCSI_SCTP_TCP = 1,
ISCSI_SCTP_UDP = 2,
ISCSI_IWARP_TCP = 3,
ISCSI_IWARP_SCTP = 4,
ISCSI_INFINIBAND = 5,
};
/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
enum target_conn_state_table {
TARG_CONN_STATE_FREE = 0x1,
TARG_CONN_STATE_XPT_UP = 0x3,
TARG_CONN_STATE_IN_LOGIN = 0x4,
TARG_CONN_STATE_LOGGED_IN = 0x5,
TARG_CONN_STATE_IN_LOGOUT = 0x6,
TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
};
/* RFC-3720 7.3.2 Session State Diagram for a Target */
enum target_sess_state_table {
TARG_SESS_STATE_FREE = 0x1,
TARG_SESS_STATE_ACTIVE = 0x2,
TARG_SESS_STATE_LOGGED_IN = 0x3,
TARG_SESS_STATE_FAILED = 0x4,
TARG_SESS_STATE_IN_CONTINUE = 0x5,
};
/* struct iscsi_data_count->type */
enum data_count_type {
ISCSI_RX_DATA = 1,
ISCSI_TX_DATA = 2,
};
/* struct iscsi_datain_req->dr_complete */
enum datain_req_comp_table {
DATAIN_COMPLETE_NORMAL = 1,
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
};
/* struct iscsi_datain_req->recovery */
enum datain_req_rec_table {
DATAIN_WITHIN_COMMAND_RECOVERY = 1,
DATAIN_CONNECTION_RECOVERY = 2,
};
/* struct iscsi_portal_group->state */
enum tpg_state_table {
TPG_STATE_FREE = 0,
TPG_STATE_ACTIVE = 1,
TPG_STATE_INACTIVE = 2,
TPG_STATE_COLD_RESET = 3,
};
/* struct iscsi_tiqn->tiqn_state */
enum tiqn_state_table {
TIQN_STATE_ACTIVE = 1,
TIQN_STATE_SHUTDOWN = 2,
};
/* struct iscsi_cmd->cmd_flags */
enum cmd_flags_table {
ICF_GOT_LAST_DATAOUT = 0x00000001,
ICF_GOT_DATACK_SNACK = 0x00000002,
ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
ICF_SENT_LAST_R2T = 0x00000008,
ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
IFC_SENDTARGETS_ALL = 0x00000100,
IFC_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
enum cmd_i_state_table {
ISTATE_NO_STATE = 0,
ISTATE_NEW_CMD = 1,
ISTATE_DEFERRED_CMD = 2,
ISTATE_UNSOLICITED_DATA = 3,
ISTATE_RECEIVE_DATAOUT = 4,
ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
ISTATE_RECEIVED_LAST_DATAOUT = 6,
ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
ISTATE_IN_CONNECTION_RECOVERY = 8,
ISTATE_RECEIVED_TASKMGT = 9,
ISTATE_SEND_ASYNCMSG = 10,
ISTATE_SENT_ASYNCMSG = 11,
ISTATE_SEND_DATAIN = 12,
ISTATE_SEND_LAST_DATAIN = 13,
ISTATE_SENT_LAST_DATAIN = 14,
ISTATE_SEND_LOGOUTRSP = 15,
ISTATE_SENT_LOGOUTRSP = 16,
ISTATE_SEND_NOPIN = 17,
ISTATE_SENT_NOPIN = 18,
ISTATE_SEND_REJECT = 19,
ISTATE_SENT_REJECT = 20,
ISTATE_SEND_R2T = 21,
ISTATE_SENT_R2T = 22,
ISTATE_SEND_R2T_RECOVERY = 23,
ISTATE_SENT_R2T_RECOVERY = 24,
ISTATE_SEND_LAST_R2T = 25,
ISTATE_SENT_LAST_R2T = 26,
ISTATE_SEND_LAST_R2T_RECOVERY = 27,
ISTATE_SENT_LAST_R2T_RECOVERY = 28,
ISTATE_SEND_STATUS = 29,
ISTATE_SEND_STATUS_BROKEN_PC = 30,
ISTATE_SENT_STATUS = 31,
ISTATE_SEND_STATUS_RECOVERY = 32,
ISTATE_SENT_STATUS_RECOVERY = 33,
ISTATE_SEND_TASKMGTRSP = 34,
ISTATE_SENT_TASKMGTRSP = 35,
ISTATE_SEND_TEXTRSP = 36,
ISTATE_SENT_TEXTRSP = 37,
ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
ISTATE_REMOVE = 41,
ISTATE_FREE = 42,
};
/* Used for iscsi_recover_cmdsn() return values */
enum recover_cmdsn_ret_table {
CMDSN_ERROR_CANNOT_RECOVER = -1,
CMDSN_NORMAL_OPERATION = 0,
CMDSN_LOWER_THAN_EXP = 1,
CMDSN_HIGHER_THAN_EXP = 2,
CMDSN_MAXCMDSN_OVERRUN = 3,
};
/* Used for iscsi_handle_immediate_data() return values */
enum immedate_data_ret_table {
IMMEDIATE_DATA_CANNOT_RECOVER = -1,
IMMEDIATE_DATA_NORMAL_OPERATION = 0,
IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
};
/* Used for iscsi_decide_dataout_action() return values */
enum dataout_action_ret_table {
DATAOUT_CANNOT_RECOVER = -1,
DATAOUT_NORMAL = 0,
DATAOUT_SEND_R2T = 1,
DATAOUT_SEND_TO_TRANSPORT = 2,
DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
};
/* Used for struct iscsi_node_auth->naf_flags */
enum naf_flags_table {
NAF_USERID_SET = 0x01,
NAF_PASSWORD_SET = 0x02,
NAF_USERID_IN_SET = 0x04,
NAF_PASSWORD_IN_SET = 0x08,
};
/* Used by various struct timer_list to manage iSCSI specific state */
enum iscsi_timer_flags_table {
ISCSI_TF_RUNNING = 0x01,
ISCSI_TF_STOP = 0x02,
ISCSI_TF_EXPIRED = 0x04,
};
/* Used for struct iscsi_np->np_flags */
enum np_flags_table {
NPF_IP_NETWORK = 0x00,
};
/* Used for struct iscsi_np->np_thread_state */
enum np_thread_state_table {
ISCSI_NP_THREAD_ACTIVE = 1,
ISCSI_NP_THREAD_INACTIVE = 2,
ISCSI_NP_THREAD_RESET = 3,
ISCSI_NP_THREAD_SHUTDOWN = 4,
ISCSI_NP_THREAD_EXIT = 5,
};
struct iscsi_conn_ops {
u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
u8 DataDigest; /* [0,1] == [None,CRC32C] */
u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
u8 OFMarker; /* [0,1] == [No,Yes] */
u8 IFMarker; /* [0,1] == [No,Yes] */
u32 OFMarkInt; /* [1..65535] */
u32 IFMarkInt; /* [1..65535] */
/*
* iSER specific connection parameters
*/
u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
};
struct iscsi_sess_ops {
char InitiatorName[224];
char InitiatorAlias[256];
char TargetName[224];
char TargetAlias[256];
char TargetAddress[256];
u16 TargetPortalGroupTag; /* [0..65535] */
u16 MaxConnections; /* [1..65535] */
u8 InitialR2T; /* [0,1] == [No,Yes] */
u8 ImmediateData; /* [0,1] == [No,Yes] */
u32 MaxBurstLength; /* [512..2**24-1] */
u32 FirstBurstLength; /* [512..2**24-1] */
u16 DefaultTime2Wait; /* [0..3600] */
u16 DefaultTime2Retain; /* [0..3600] */
u16 MaxOutstandingR2T; /* [1..65535] */
u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
u8 ErrorRecoveryLevel; /* [0..2] */
u8 SessionType; /* [0,1] == [Normal,Discovery]*/
/*
* iSER specific session parameters
*/
u8 RDMAExtensions; /* [0,1] == [No,Yes] */
};
struct iscsi_queue_req {
int state;
struct iscsi_cmd *cmd;
struct list_head qr_list;
};
struct iscsi_data_count {
int data_length;
int sync_and_steering;
enum data_count_type type;
u32 iov_count;
u32 ss_iov_count;
u32 ss_marker_count;
struct kvec *iov;
};
struct iscsi_param_list {
bool iser;
struct list_head param_list;
struct list_head extra_response_list;
};
struct iscsi_datain_req {
enum datain_req_comp_table dr_complete;
int generate_recovery_values;
enum datain_req_rec_table recovery;
u32 begrun;
u32 runlength;
u32 data_length;
u32 data_offset;
u32 data_sn;
u32 next_burst_len;
u32 read_data_done;
u32 seq_send_order;
struct list_head cmd_datain_node;
} ____cacheline_aligned;
struct iscsi_ooo_cmdsn {
u16 cid;
u32 batch_count;
u32 cmdsn;
u32 exp_cmdsn;
struct iscsi_cmd *cmd;
struct list_head ooo_list;
} ____cacheline_aligned;
struct iscsi_datain {
u8 flags;
u32 data_sn;
u32 length;
u32 offset;
} ____cacheline_aligned;
struct iscsi_r2t {
int seq_complete;
int recovery_r2t;
int sent_r2t;
u32 r2t_sn;
u32 offset;
u32 targ_xfer_tag;
u32 xfer_len;
struct list_head r2t_list;
} ____cacheline_aligned;
struct iscsi_cmd {
enum iscsi_timer_flags_table dataout_timer_flags;
/* DataOUT timeout retries */
u8 dataout_timeout_retries;
/* Within command recovery count */
u8 error_recovery_count;
/* iSCSI dependent state for out of order CmdSNs */
enum cmd_i_state_table deferred_i_state;
/* iSCSI dependent state */
enum cmd_i_state_table i_state;
/* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
u8 immediate_cmd;
/* Immediate data present */
u8 immediate_data;
/* iSCSI Opcode */
u8 iscsi_opcode;
/* iSCSI Response Code */
u8 iscsi_response;
/* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
u8 logout_reason;
/* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
u8 logout_response;
/* MaxCmdSN has been incremented */
u8 maxcmdsn_inc;
/* Immediate Unsolicited Dataout */
u8 unsolicited_data;
/* Reject reason code */
u8 reject_reason;
/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
u16 logout_cid;
/* Command flags */
enum cmd_flags_table cmd_flags;
/* Initiator Task Tag assigned from Initiator */
itt_t init_task_tag;
/* Target Transfer Tag assigned from Target */
u32 targ_xfer_tag;
/* CmdSN assigned from Initiator */
u32 cmd_sn;
/* ExpStatSN assigned from Initiator */
u32 exp_stat_sn;
/* StatSN assigned to this ITT */
u32 stat_sn;
/* DataSN Counter */
u32 data_sn;
/* R2TSN Counter */
u32 r2t_sn;
/* Last DataSN acknowledged via DataAck SNACK */
u32 acked_data_sn;
/* Used for echoing NOPOUT ping data */
u32 buf_ptr_size;
/* Used to store DataDigest */
u32 data_crc;
/* Counter for MaxOutstandingR2T */
u32 outstanding_r2ts;
/* Next R2T Offset when DataSequenceInOrder=Yes */
u32 r2t_offset;
/* Iovec current and orig count for iscsi_cmd->iov_data */
u32 iov_data_count;
u32 orig_iov_data_count;
/* Number of miscellaneous iovecs used for IP stack calls */
u32 iov_misc_count;
/* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_count;
/* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
u32 seq_count;
/* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
u32 seq_no;
/* Lowest offset in current DataOUT sequence */
u32 seq_start_offset;
/* Highest offset in current DataOUT sequence */
u32 seq_end_offset;
/* Total size in bytes received so far of READ data */
u32 read_data_done;
/* Total size in bytes received so far of WRITE data */
u32 write_data_done;
/* Counter for FirstBurstLength key */
u32 first_burst_len;
/* Counter for MaxBurstLength key */
u32 next_burst_len;
/* Transfer size used for IP stack calls */
u32 tx_size;
/* Buffer used for various purposes */
void *buf_ptr;
/* Used by SendTargets=[iqn.,eui.] discovery */
void *text_in_ptr;
/* See include/linux/dma-mapping.h */
enum dma_data_direction data_direction;
/* iSCSI PDU Header + CRC */
unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
/* Number of times struct iscsi_cmd is present in immediate queue */
atomic_t immed_queue_count;
atomic_t response_queue_count;
spinlock_t datain_lock;
spinlock_t dataout_timeout_lock;
/* spinlock for protecting struct iscsi_cmd->i_state */
spinlock_t istate_lock;
/* spinlock for adding within command recovery entries */
spinlock_t error_lock;
/* spinlock for adding R2Ts */
spinlock_t r2t_lock;
/* DataIN List */
struct list_head datain_list;
/* R2T List */
struct list_head cmd_r2t_list;
/* Timer for DataOUT */
struct timer_list dataout_timer;
/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
struct kvec *iov_data;
/* Iovecs for miscellaneous purposes */
#define ISCSI_MISC_IOVECS 5
struct kvec iov_misc[ISCSI_MISC_IOVECS];
/* Array of struct iscsi_pdu used for DataPDUInOrder=No */
struct iscsi_pdu *pdu_list;
/* Current struct iscsi_pdu used for DataPDUInOrder=No */
struct iscsi_pdu *pdu_ptr;
/* Array of struct iscsi_seq used for DataSequenceInOrder=No */
struct iscsi_seq *seq_list;
/* Current struct iscsi_seq used for DataSequenceInOrder=No */
struct iscsi_seq *seq_ptr;
/* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
struct iscsi_tmr_req *tmr_req;
/* Connection this command is allegiant to */
struct iscsi_conn *conn;
/* Pointer to connection recovery entry */
struct iscsi_conn_recovery *cr;
/* Session the command is part of, used for connection recovery */
struct iscsi_session *sess;
/* list_head for connection list */
struct list_head i_conn_node;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd se_cmd;
/* Sense buffer that will be mapped into outgoing status */
#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
u32 padding;
u8 pad_bytes[4];
struct scatterlist *first_data_sg;
u32 first_data_sg_off;
u32 kmapped_nents;
sense_reason_t sense_reason;
} ____cacheline_aligned;
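The embedded se_cmd is the bridge to the generic target core: when TCM completes an I/O it hands back the struct se_cmd, and the iSCSI fabric recovers its own descriptor with container_of(), as the comment above notes. A minimal sketch (the helper name is illustrative):

static inline struct iscsi_cmd *iscsit_se_cmd_to_cmd(struct se_cmd *se_cmd)
{
        /* se_cmd is embedded in struct iscsi_cmd, so pointer arithmetic
         * recovers the containing descriptor without any lookup table. */
        return container_of(se_cmd, struct iscsi_cmd, se_cmd);
}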
struct iscsi_tmr_req {
bool task_reassign:1;
u32 exp_data_sn;
struct iscsi_cmd *ref_cmd;
struct iscsi_conn_recovery *conn_recovery;
struct se_tmr_req *se_tmr_req;
};
struct iscsi_conn {
wait_queue_head_t queues_wq;
/* Authentication Successful for this connection */
u8 auth_complete;
/* State connection is currently in */
u8 conn_state;
u8 conn_logout_reason;
u8 network_transport;
enum iscsi_timer_flags_table nopin_timer_flags;
enum iscsi_timer_flags_table nopin_response_timer_flags;
/* Used to know what thread encountered a transport failure */
u8 which_thread;
/* connection id assigned by the Initiator */
u16 cid;
/* Remote TCP Port */
u16 login_port;
u16 local_port;
int net_size;
int login_family;
u32 auth_id;
u32 conn_flags;
/* Used for iscsi_tx_login_rsp() */
itt_t login_itt;
u32 exp_statsn;
/* Per connection status sequence number */
u32 stat_sn;
/* IFMarkInt's Current Value */
u32 if_marker;
/* OFMarkInt's Current Value */
u32 of_marker;
/* Used for calculating OFMarker offset to next PDU */
u32 of_marker_offset;
#define IPV6_ADDRESS_SPACE 48
unsigned char login_ip[IPV6_ADDRESS_SPACE];
unsigned char local_ip[IPV6_ADDRESS_SPACE];
int conn_usage_count;
int conn_waiting_on_uc;
atomic_t check_immediate_queue;
atomic_t conn_logout_remove;
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
struct completion conn_post_wait_comp;
struct completion conn_wait_comp;
struct completion conn_wait_rcfr_comp;
struct completion conn_waiting_on_uc_comp;
struct completion conn_logout_comp;
struct completion tx_half_close_comp;
struct completion rx_half_close_comp;
/* socket used by this connection */
struct socket *sock;
void (*orig_data_ready)(struct sock *);
void (*orig_state_change)(struct sock *);
#define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2
#define LOGIN_FLAGS_READY 4
unsigned long login_flags;
struct delayed_work login_work;
struct delayed_work login_cleanup_work;
struct iscsi_login *login;
struct timer_list nopin_timer;
struct timer_list nopin_response_timer;
struct timer_list transport_timer;
struct task_struct *login_kworker;
/* Spinlock used for add/deleting cmd's from conn_cmd_list */
spinlock_t cmd_lock;
spinlock_t conn_usage_lock;
spinlock_t immed_queue_lock;
spinlock_t nopin_timer_lock;
spinlock_t response_queue_lock;
spinlock_t state_lock;
/* libcrypto RX and TX contexts for crc32c */
struct hash_desc conn_rx_hash;
struct hash_desc conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
unsigned int conn_rx_reset_cpumask:1;
unsigned int conn_tx_reset_cpumask:1;
/* list_head of struct iscsi_cmd for this connection */
struct list_head conn_cmd_list;
struct list_head immed_queue_list;
struct list_head response_queue_list;
struct iscsi_conn_ops *conn_ops;
struct iscsi_login *conn_login;
struct iscsit_transport *conn_transport;
struct iscsi_param_list *param_list;
/* Used for per connection auth state machine */
void *auth_protocol;
void *context;
struct iscsi_login_thread_s *login_thread;
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
/* Pointer to parent session */
struct iscsi_session *sess;
/* Pointer to thread_set in use for this conn's threads */
struct iscsi_thread_set *thread_set;
/* list_head for session connection list */
struct list_head conn_list;
} ____cacheline_aligned;
struct iscsi_conn_recovery {
u16 cid;
u32 cmd_count;
u32 maxrecvdatasegmentlength;
u32 maxxmitdatasegmentlength;
int ready_for_reallegiance;
struct list_head conn_recovery_cmd_list;
spinlock_t conn_recovery_cmd_lock;
struct timer_list time2retain_timer;
struct iscsi_session *sess;
struct list_head cr_list;
} ____cacheline_aligned;
struct iscsi_session {
u8 initiator_vendor;
u8 isid[6];
enum iscsi_timer_flags_table time2retain_timer_flags;
u8 version_active;
u16 cid_called;
u16 conn_recovery_count;
u16 tsih;
/* state session is currently in */
u32 session_state;
/* session wide counter: initiator assigned task tag */
itt_t init_task_tag;
/* session wide counter: target assigned task tag */
u32 targ_xfer_tag;
u32 cmdsn_window;
/* protects cmdsn values */
struct mutex cmdsn_mutex;
/* session wide counter: expected command sequence number */
u32 exp_cmd_sn;
/* session wide counter: maximum allowed command sequence number */
u32 max_cmd_sn;
struct list_head sess_ooo_cmdsn_list;
/* LIO specific session ID */
u32 sid;
char auth_type[8];
/* unique within the target */
int session_index;
/* Used for session reference counting */
int session_usage_count;
int session_waiting_on_uc;
atomic_long_t cmd_pdus;
atomic_long_t rsp_pdus;
atomic_long_t tx_data_octets;
atomic_long_t rx_data_octets;
atomic_long_t conn_digest_errors;
atomic_long_t conn_timeout_errors;
u64 creation_time;
/* Number of active connections */
atomic_t nconn;
atomic_t session_continuation;
atomic_t session_fall_back_to_erl0;
atomic_t session_logout;
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
struct list_head cr_inactive_list;
spinlock_t conn_lock;
spinlock_t cr_a_lock;
spinlock_t cr_i_lock;
spinlock_t session_usage_lock;
spinlock_t ttt_lock;
struct completion async_msg_comp;
struct completion reinstatement_comp;
struct completion session_wait_comp;
struct completion session_waiting_on_uc_comp;
struct timer_list time2retain_timer;
struct iscsi_sess_ops *sess_ops;
struct se_session *se_sess;
struct iscsi_portal_group *tpg;
} ____cacheline_aligned;
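Sessions, connections, and commands are tied together with intrusive lists: sess_conn_list chains each connection's conn_list, and each connection's conn_cmd_list chains iscsi_cmd->i_conn_node. A hedged traversal sketch under conn_lock (the function name is illustrative):

static void iscsit_dump_sess_conns(struct iscsi_session *sess)
{
        struct iscsi_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
                pr_debug("cid: %hu conn_state: %d\n",
                         conn->cid, conn->conn_state);
        spin_unlock_bh(&sess->conn_lock);
}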
struct iscsi_login {
u8 auth_complete;
u8 checked_for_existing;
u8 current_stage;
u8 leading_connection;
u8 first_request;
u8 version_min;
u8 version_max;
u8 login_complete;
u8 login_failed;
bool zero_tsih;
char isid[6];
u32 cmd_sn;
itt_t init_task_tag;
u32 initial_exp_statsn;
u32 rsp_length;
u16 cid;
u16 tsih;
char req[ISCSI_HDR_LEN];
char rsp[ISCSI_HDR_LEN];
char *req_buf;
char *rsp_buf;
struct iscsi_conn *conn;
struct iscsi_np *np;
} ____cacheline_aligned;
struct iscsi_node_attrib {
u32 dataout_timeout;
u32 dataout_timeout_retries;
u32 default_erl;
u32 nopin_timeout;
u32 nopin_response_timeout;
u32 random_datain_pdu_offsets;
u32 random_datain_seq_offsets;
u32 random_r2t_offsets;
u32 tmr_cold_reset;
u32 tmr_warm_reset;
struct iscsi_node_acl *nacl;
};
struct se_dev_entry_s;
struct iscsi_node_auth {
enum naf_flags_table naf_flags;
int authenticate_target;
/* Used for iscsit_global->discovery_auth,
* set to zero (auth disabled) by default */
int enforce_discovery_auth;
#define MAX_USER_LEN 256
#define MAX_PASS_LEN 256
char userid[MAX_USER_LEN];
char password[MAX_PASS_LEN];
char userid_mutual[MAX_USER_LEN];
char password_mutual[MAX_PASS_LEN];
};
#include "iscsi_target_stat.h"
struct iscsi_node_stat_grps {
struct config_group iscsi_sess_stats_group;
struct config_group iscsi_conn_stats_group;
};
struct iscsi_node_acl {
struct iscsi_node_attrib node_attrib;
struct iscsi_node_auth node_auth;
struct iscsi_node_stat_grps node_stat_grps;
struct se_node_acl se_node_acl;
};
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
u32 netif_timeout;
u32 generate_node_acls;
u32 cache_dynamic_acls;
u32 default_cmdsn_depth;
u32 demo_mode_write_protect;
u32 prod_mode_write_protect;
u32 demo_mode_discovery;
u32 default_erl;
u8 t10_pi;
struct iscsi_portal_group *tpg;
};
struct iscsi_np {
int np_network_transport;
int np_ip_proto;
int np_sock_type;
enum np_thread_state_table np_thread_state;
bool enabled;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
unsigned char np_ip[IPV6_ADDRESS_SPACE];
u16 np_port;
spinlock_t np_thread_lock;
struct completion np_restart_comp;
struct socket *np_socket;
struct __kernel_sockaddr_storage np_sockaddr;
struct task_struct *np_thread;
struct timer_list np_login_timer;
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
struct iscsi_tpg_np *tpg_np;
} ____cacheline_aligned;
struct iscsi_tpg_np {
struct iscsi_np *tpg_np;
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np_parent;
struct list_head tpg_np_list;
struct list_head tpg_np_child_list;
struct list_head tpg_np_parent_list;
struct se_tpg_np se_tpg_np;
spinlock_t tpg_np_parent_lock;
struct completion tpg_np_comp;
struct kref tpg_np_kref;
};
struct iscsi_portal_group {
unsigned char tpg_chap_id;
/* TPG State */
enum tpg_state_table tpg_state;
/* Target Portal Group Tag */
u16 tpgt;
/* Id assigned to target sessions */
u16 ntsih;
/* Number of active sessions */
u32 nsessions;
/* Number of Network Portals available for this TPG */
u32 num_tpg_nps;
/* Per TPG LIO specific session ID. */
u32 sid;
/* Spinlock for adding/removing Network Portals */
spinlock_t tpg_np_lock;
spinlock_t tpg_state_lock;
struct se_portal_group tpg_se_tpg;
struct mutex tpg_access_lock;
struct semaphore np_login_sem;
struct iscsi_tpg_attrib tpg_attrib;
struct iscsi_node_auth tpg_demo_auth;
/* Pointer to default list of iSCSI parameters for TPG */
struct iscsi_param_list *param_list;
struct iscsi_tiqn *tpg_tiqn;
struct list_head tpg_gnp_list;
struct list_head tpg_list;
} ____cacheline_aligned;
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
struct config_group iscsi_sess_err_group;
struct config_group iscsi_tgt_attr_group;
struct config_group iscsi_login_stats_group;
struct config_group iscsi_logout_stats_group;
};
struct iscsi_tiqn {
#define ISCSI_IQN_LEN 224
unsigned char tiqn[ISCSI_IQN_LEN];
enum tiqn_state_table tiqn_state;
int tiqn_access_count;
u32 tiqn_active_tpgs;
u32 tiqn_ntpgs;
u32 tiqn_num_tpg_nps;
u32 tiqn_nsessions;
struct list_head tiqn_list;
struct list_head tiqn_tpg_list;
spinlock_t tiqn_state_lock;
spinlock_t tiqn_tpg_lock;
struct se_wwn tiqn_wwn;
struct iscsi_wwn_stat_grps tiqn_stat_grps;
int tiqn_index;
struct iscsi_sess_err_stats sess_err_stats;
struct iscsi_login_stats login_stats;
struct iscsi_logout_stats logout_stats;
} ____cacheline_aligned;
struct iscsit_global {
/* In core shutdown */
u32 in_shutdown;
u32 active_ts;
/* Unique identifier used for the authentication daemon */
u32 auth_id;
u32 inactive_ts;
/* Thread Set bitmap count */
int ts_bitmap_count;
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
};
#endif /* ISCSI_TARGET_CORE_H */

drivers/target/iscsi/iscsi_target_datain_values.c (new file)

@@ -0,0 +1,526 @@
/*******************************************************************************
* This file contains the iSCSI Target DataIN value generation functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <scsi/iscsi_proto.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_datain_values.h"
struct iscsi_datain_req *iscsit_allocate_datain_req(void)
{
struct iscsi_datain_req *dr;
dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
if (!dr) {
pr_err("Unable to allocate memory for"
" struct iscsi_datain_req\n");
return NULL;
}
INIT_LIST_HEAD(&dr->cmd_datain_node);
return dr;
}
void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
spin_unlock(&cmd->datain_lock);
}
void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_del(&dr->cmd_datain_node);
spin_unlock(&cmd->datain_lock);
kmem_cache_free(lio_dr_cache, dr);
}
void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
{
struct iscsi_datain_req *dr, *dr_tmp;
spin_lock(&cmd->datain_lock);
list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
list_del(&dr->cmd_datain_node);
kmem_cache_free(lio_dr_cache, dr);
}
spin_unlock(&cmd->datain_lock);
}
struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
{
if (list_empty(&cmd->datain_list)) {
pr_err("cmd->datain_list is empty for ITT:"
" 0x%08x\n", cmd->init_task_tag);
return NULL;
}
return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
cmd_datain_node);
}
/*
* For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
struct iscsi_cmd *cmd,
struct iscsi_datain *datain)
{
u32 next_burst_len, read_data_done, read_data_left;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
next_burst_len = (!dr->recovery) ?
cmd->next_burst_len : dr->next_burst_len;
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return NULL;
}
if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
(read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
next_burst_len))) {
datain->length = read_data_left;
datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
} else {
if ((next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
datain->length =
conn->conn_ops->MaxRecvDataSegmentLength;
next_burst_len += datain->length;
} else {
datain->length = (conn->sess->sess_ops->MaxBurstLength -
next_burst_len);
next_burst_len = 0;
datain->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
}
}
datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
datain->offset = read_data_done;
if (!dr->recovery) {
cmd->next_burst_len = next_burst_len;
cmd->read_data_done += datain->length;
} else {
dr->next_burst_len = next_burst_len;
dr->read_data_done += datain->length;
}
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
/*
* For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
struct iscsi_cmd *cmd,
struct iscsi_datain *datain)
{
u32 offset, read_data_done, read_data_left, seq_send_order;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_seq *seq;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return NULL;
}
seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
if (!seq)
return NULL;
seq->sent = 1;
if (!dr->recovery && !seq->next_burst_len)
seq->first_datasn = cmd->data_sn;
offset = (seq->offset + seq->next_burst_len);
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
cmd->se_cmd.data_length) {
datain->length = (cmd->se_cmd.data_length - offset);
datain->offset = offset;
datain->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
seq->next_burst_len = 0;
seq_send_order++;
} else {
if ((seq->next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
datain->length =
conn->conn_ops->MaxRecvDataSegmentLength;
datain->offset = (seq->offset + seq->next_burst_len);
seq->next_burst_len += datain->length;
} else {
datain->length = (conn->sess->sess_ops->MaxBurstLength -
seq->next_burst_len);
datain->offset = (seq->offset + seq->next_burst_len);
datain->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
datain->flags |= ISCSI_FLAG_DATA_ACK;
seq->next_burst_len = 0;
seq_send_order++;
}
}
if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
datain->flags |= ISCSI_FLAG_DATA_STATUS;
datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
if (!dr->recovery) {
cmd->seq_send_order = seq_send_order;
cmd->read_data_done += datain->length;
} else {
dr->seq_send_order = seq_send_order;
dr->read_data_done += datain->length;
}
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_CMD_FINAL)
seq->last_datasn = datain->data_sn;
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
/*
* For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
struct iscsi_cmd *cmd,
struct iscsi_datain *datain)
{
u32 next_burst_len, read_data_done, read_data_left;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_pdu *pdu;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
next_burst_len = (!dr->recovery) ?
cmd->next_burst_len : dr->next_burst_len;
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return dr;
}
pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
if (!pdu)
return dr;
if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
next_burst_len = 0;
} else {
if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength)
next_burst_len += pdu->length;
else {
pdu->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
next_burst_len = 0;
}
}
pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
if (!dr->recovery) {
cmd->next_burst_len = next_burst_len;
cmd->read_data_done += pdu->length;
} else {
dr->next_burst_len = next_burst_len;
dr->read_data_done += pdu->length;
}
datain->flags = pdu->flags;
datain->length = pdu->length;
datain->offset = pdu->offset;
datain->data_sn = pdu->data_sn;
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
/*
* For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
struct iscsi_cmd *cmd,
struct iscsi_datain *datain)
{
u32 read_data_done, read_data_left, seq_send_order;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_pdu *pdu;
struct iscsi_seq *seq = NULL;
dr = iscsit_get_datain_req(cmd);
if (!dr)
return NULL;
if (dr->recovery && dr->generate_recovery_values) {
if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
cmd, dr) < 0)
return NULL;
dr->generate_recovery_values = 0;
}
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
return NULL;
}
seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
if (!seq)
return NULL;
seq->sent = 1;
if (!dr->recovery && !seq->next_burst_len)
seq->first_datasn = cmd->data_sn;
pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
if (!pdu)
return NULL;
if (seq->pdu_send_order == seq->pdu_count) {
pdu->flags |= ISCSI_FLAG_CMD_FINAL;
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
seq->next_burst_len = 0;
seq_send_order++;
} else
seq->next_burst_len += pdu->length;
if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
pdu->flags |= ISCSI_FLAG_DATA_STATUS;
pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
if (!dr->recovery) {
cmd->seq_send_order = seq_send_order;
cmd->read_data_done += pdu->length;
} else {
dr->seq_send_order = seq_send_order;
dr->read_data_done += pdu->length;
}
datain->flags = pdu->flags;
datain->length = pdu->length;
datain->offset = pdu->offset;
datain->data_sn = pdu->data_sn;
if (!dr->recovery) {
if (datain->flags & ISCSI_FLAG_CMD_FINAL)
seq->last_datasn = datain->data_sn;
if (datain->flags & ISCSI_FLAG_DATA_STATUS)
dr->dr_complete = DATAIN_COMPLETE_NORMAL;
return dr;
}
if (!dr->runlength) {
if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
} else {
if ((dr->begrun + dr->runlength) == dr->data_sn) {
dr->dr_complete =
(dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
DATAIN_COMPLETE_CONNECTION_RECOVERY;
}
}
return dr;
}
struct iscsi_datain_req *iscsit_get_datain_values(
struct iscsi_cmd *cmd,
struct iscsi_datain *datain)
{
struct iscsi_conn *conn = cmd->conn;
if (conn->sess->sess_ops->DataSequenceInOrder &&
conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_yes_and_yes(cmd, datain);
else if (!conn->sess->sess_ops->DataSequenceInOrder &&
conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_no_and_yes(cmd, datain);
else if (conn->sess->sess_ops->DataSequenceInOrder &&
!conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_yes_and_no(cmd, datain);
else if (!conn->sess->sess_ops->DataSequenceInOrder &&
!conn->sess->sess_ops->DataPDUInOrder)
return iscsit_set_datain_values_no_and_no(cmd, datain);
return NULL;
}
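To make the yes-and-yes path above concrete: with MaxRecvDataSegmentLength=8192, MaxBurstLength=65536, and a 102400-byte READ (all illustrative negotiation results, not driver defaults), the target emits 8 KB DataIN PDUs, sets the F bit each time a burst fills, and sets F plus the S (status) bit on the final PDU. A standalone loop mirroring that arithmetic:

#include <stdio.h>

int main(void)
{
        const unsigned mrdsl = 8192, max_burst = 65536, total = 102400;
        unsigned done = 0, burst = 0, datasn = 0;

        while (done < total) {
                unsigned left = total - done, len;
                int final = 0;

                if (left <= mrdsl && left <= max_burst - burst) {
                        len = left;                     /* last PDU: F|S */
                        final = 1;
                } else if (burst + mrdsl < max_burst) {
                        len = mrdsl;                    /* mid-burst PDU */
                        burst += len;
                } else {
                        len = max_burst - burst;        /* burst ends: F */
                        burst = 0;
                }
                printf("DataSN %u offset %u length %u%s\n",
                       datasn++, done, len, final ? " F|S" : "");
                done += len;
        }
        return 0;
}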

drivers/target/iscsi/iscsi_target_datain_values.h (new file)

@@ -0,0 +1,12 @@
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H
extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
struct iscsi_datain *);
#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/

View file

@ -0,0 +1,66 @@
/*******************************************************************************
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic related functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
{
struct se_node_acl *se_nacl;
/*
* This is a discovery session, the single queue slot was already
* assigned in iscsi_login_zero_tsih(). Since only Logout and
* Text Opcodes are allowed during discovery we do not have to worry
* about the HBA's queue depth here.
*/
if (sess->sess_ops->SessionType)
return;
se_nacl = sess->se_sess->se_node_acl;
/*
* This is a normal session, set the Session's CmdSN window to the
* struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
* has already been validated as a legal value in
* core_set_queue_depth_for_node().
*/
sess->cmdsn_window = se_nacl->queue_depth;
sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
}
void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
{
if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
return;
cmd->maxcmdsn_inc = 1;
mutex_lock(&sess->cmdsn_mutex);
sess->max_cmd_sn += 1;
pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
mutex_unlock(&sess->cmdsn_mutex);
}
EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
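/*
 * Editorial sketch (not part of the original file): the two functions above
 * implement the CmdSN sliding window. iscsit_determine_maxcmdsn() sizes the
 * window to the ACL queue depth, and iscsit_increment_maxcmdsn() opens one
 * slot per completed command. A minimal userspace model of the arithmetic,
 * using unsigned 32-bit serial numbers that are allowed to wrap:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t queue_depth = 32;
	uint32_t exp_cmd_sn = 0xfffffff0;	/* near the 2^32 wrap on purpose */
	uint32_t max_cmd_sn = exp_cmd_sn + queue_depth - 1;

	/* Window size survives the wrap because of modular unsigned math. */
	printf("open slots: %u\n", max_cmd_sn - exp_cmd_sn + 1);

	/*
	 * Receiving a command advances ExpCmdSN; completing it advances
	 * MaxCmdSN, so the window stays at queue_depth slots.
	 */
	exp_cmd_sn += 1;
	max_cmd_sn += 1;
	printf("open slots after one command: %u\n",
	       max_cmd_sn - exp_cmd_sn + 1);
	return 0;
}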

View file

@ -0,0 +1,7 @@
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
#endif /* ISCSI_TARGET_DEVICE_H */

File diff suppressed because it is too large

View file

@ -0,0 +1,15 @@
#ifndef ISCSI_TARGET_ERL0_H
#define ISCSI_TARGET_ERL0_H
extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
extern void iscsit_start_time2retain_handler(struct iscsi_session *);
extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
#endif /*** ISCSI_TARGET_ERL0_H ***/

File diff suppressed because it is too large

View file

@ -0,0 +1,26 @@
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H
extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
struct iscsi_cmd *, struct iscsi_datain_req *);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
struct iscsi_cmd *, struct iscsi_datain_req *);
extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
itt_t, u32, u32, u32);
extern int iscsit_handle_status_snack(struct iscsi_conn *, itt_t, u32,
u32, u32);
extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
#endif /* ISCSI_TARGET_ERL1_H */

View file

@ -0,0 +1,436 @@
/*******************************************************************************
* This file contains error recovery level two functions used by
* the iSCSI Target driver.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_util.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
/*
* FIXME: Does RData SNACK apply here as well?
*/
void iscsit_create_conn_recovery_datain_values(
struct iscsi_cmd *cmd,
__be32 exp_data_sn)
{
u32 data_sn = 0;
struct iscsi_conn *conn = cmd->conn;
cmd->next_burst_len = 0;
cmd->read_data_done = 0;
while (be32_to_cpu(exp_data_sn) > data_sn) {
if ((cmd->next_burst_len +
conn->conn_ops->MaxRecvDataSegmentLength) <
conn->sess->sess_ops->MaxBurstLength) {
cmd->read_data_done +=
conn->conn_ops->MaxRecvDataSegmentLength;
cmd->next_burst_len +=
conn->conn_ops->MaxRecvDataSegmentLength;
} else {
cmd->read_data_done +=
(conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len);
cmd->next_burst_len = 0;
}
data_sn++;
}
}
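/*
 * Editorial sketch (not part of the original file): the loop above rebuilds
 * read_data_done and next_burst_len from the initiator's acknowledged DataSN
 * by replaying full-size Data-In PDUs against MaxBurstLength. The same replay
 * in plain C, with example parameter values:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mrdsl = 8192, max_burst = 65536;	/* example values */
	uint32_t exp_data_sn = 10;	/* initiator acknowledged DataSNs 0..9 */
	uint32_t data_sn = 0, read_data_done = 0, next_burst_len = 0;

	while (exp_data_sn > data_sn) {
		if (next_burst_len + mrdsl < max_burst) {
			read_data_done += mrdsl;
			next_burst_len += mrdsl;
		} else {
			/* Last PDU of a burst carries the remaining space. */
			read_data_done += max_burst - next_burst_len;
			next_burst_len = 0;
		}
		data_sn++;
	}
	printf("read_data_done=%u next_burst_len=%u\n",
	       read_data_done, next_burst_len);
	return 0;
}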
void iscsit_create_conn_recovery_dataout_values(
struct iscsi_cmd *cmd)
{
u32 write_data_done = 0;
struct iscsi_conn *conn = cmd->conn;
cmd->data_sn = 0;
cmd->next_burst_len = 0;
while (cmd->write_data_done > write_data_done) {
if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
cmd->write_data_done)
write_data_done += conn->sess->sess_ops->MaxBurstLength;
else
break;
}
cmd->write_data_done = write_data_done;
}
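/*
 * Editorial sketch (not part of the original file): the loop above is simply
 * a round-down of write_data_done to a MaxBurstLength boundary, so Data-Out
 * recovery restarts at the last fully received burst:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_burst = 65536, write_data_done = 200000;	/* examples */
	uint32_t restart = (write_data_done / max_burst) * max_burst;

	printf("restart Data-Out at offset %u\n", restart);	/* 196608 */
	return 0;
}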
static int iscsit_attach_active_connection_recovery_entry(
struct iscsi_session *sess,
struct iscsi_conn_recovery *cr)
{
spin_lock(&sess->cr_a_lock);
list_add_tail(&cr->cr_list, &sess->cr_active_list);
spin_unlock(&sess->cr_a_lock);
return 0;
}
static int iscsit_attach_inactive_connection_recovery_entry(
struct iscsi_session *sess,
struct iscsi_conn_recovery *cr)
{
spin_lock(&sess->cr_i_lock);
list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
sess->conn_recovery_count++;
pr_debug("Incremented connection recovery count to %u for"
" SID: %u\n", sess->conn_recovery_count, sess->sid);
spin_unlock(&sess->cr_i_lock);
return 0;
}
struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsi_session *sess,
u16 cid)
{
struct iscsi_conn_recovery *cr;
spin_lock(&sess->cr_i_lock);
list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
if (cr->cid == cid) {
spin_unlock(&sess->cr_i_lock);
return cr;
}
}
spin_unlock(&sess->cr_i_lock);
return NULL;
}
void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
{
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr, *cr_tmp;
spin_lock(&sess->cr_a_lock);
list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
list_del(&cr->cr_list);
spin_unlock(&sess->cr_a_lock);
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock(&sess->cr_a_lock);
kfree(cr);
}
spin_unlock(&sess->cr_a_lock);
spin_lock(&sess->cr_i_lock);
list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
list_del(&cr->cr_list);
spin_unlock(&sess->cr_i_lock);
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock(&sess->cr_i_lock);
kfree(cr);
}
spin_unlock(&sess->cr_i_lock);
}
int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
struct iscsi_session *sess)
{
spin_lock(&sess->cr_a_lock);
list_del(&cr->cr_list);
sess->conn_recovery_count--;
pr_debug("Decremented connection recovery count to %u for"
" SID: %u\n", sess->conn_recovery_count, sess->sid);
spin_unlock(&sess->cr_a_lock);
kfree(cr);
return 0;
}
static void iscsit_remove_inactive_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
struct iscsi_session *sess)
{
spin_lock(&sess->cr_i_lock);
list_del(&cr->cr_list);
spin_unlock(&sess->cr_i_lock);
}
/*
* Called with cr->conn_recovery_cmd_lock held.
*/
int iscsit_remove_cmd_from_connection_recovery(
struct iscsi_cmd *cmd,
struct iscsi_session *sess)
{
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
" is NULL!\n", cmd->init_task_tag);
BUG();
}
cr = cmd->cr;
list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
void iscsit_discard_cr_cmds_by_expstatsn(
struct iscsi_conn_recovery *cr,
u32 exp_statsn)
{
u32 dropped_count = 0;
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_session *sess = cr->sess;
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
(cmd->deferred_i_state != ISTATE_REMOVE)) ||
(cmd->stat_sn >= exp_statsn)) {
continue;
}
dropped_count++;
pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
" 0x%08x, CID: %hu.\n", cmd->init_task_tag,
cmd->stat_sn, cr->cid);
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
pr_debug("Dropped %u total acknowledged commands on"
" CID: %hu less than old ExpStatSN: 0x%08x\n",
dropped_count, cr->cid, exp_statsn);
if (!cr->cmd_count) {
pr_debug("No commands to be reassigned for failed"
" connection CID: %hu on SID: %u\n",
cr->cid, sess->sid);
iscsit_remove_inactive_connection_recovery_entry(cr, sess);
iscsit_attach_active_connection_recovery_entry(sess, cr);
pr_debug("iSCSI connection recovery successful for CID:"
" %hu on SID: %u\n", cr->cid, sess->sid);
iscsit_remove_active_connection_recovery_entry(cr, sess);
} else {
iscsit_remove_inactive_connection_recovery_entry(cr, sess);
iscsit_attach_active_connection_recovery_entry(sess, cr);
}
}
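/*
 * Editorial sketch (not part of the original file): the filter above keeps a
 * command queued for reallegiance unless its status was both sent and
 * acknowledged, i.e. its StatSN is below the new connection's ExpStatSN. The
 * predicate in isolation:
 */
#include <stdint.h>
#include <stdio.h>

static int cmd_is_acknowledged(int status_sent, uint32_t stat_sn,
			       uint32_t exp_statsn)
{
	return status_sent && stat_sn < exp_statsn;
}

int main(void)
{
	printf("%d\n", cmd_is_acknowledged(1, 5, 7));	/* 1: safe to drop */
	printf("%d\n", cmd_is_acknowledged(1, 7, 7));	/* 0: not yet acked */
	printf("%d\n", cmd_is_acknowledged(0, 5, 7));	/* 0: status not sent */
	return 0;
}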
int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
{
u32 dropped_count = 0;
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
struct iscsi_session *sess = conn->sess;
mutex_lock(&sess->cmdsn_mutex);
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
&sess->sess_ooo_cmdsn_list, ooo_list) {
if (ooo_cmdsn->cid != conn->cid)
continue;
dropped_count++;
pr_debug("Dropping unacknowledged CmdSN:"
" 0x%08x during connection recovery on CID: %hu\n",
ooo_cmdsn->cmdsn, conn->cid);
iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
}
mutex_unlock(&sess->cmdsn_mutex);
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
pr_debug("Dropped %u total unacknowledged commands on CID:"
" %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
sess->exp_cmd_sn);
return 0;
}
int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn)
{
u32 cmd_count = 0;
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr;
/*
* Allocate a struct iscsi_conn_recovery for this connection.
* Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
* (struct iscsi_cmd->cr) so we need to allocate this before preparing the
* connection's command list for connection recovery.
*/
cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
if (!cr) {
pr_err("Unable to allocate memory for"
" struct iscsi_conn_recovery.\n");
return -1;
}
INIT_LIST_HEAD(&cr->cr_list);
INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
spin_lock_init(&cr->conn_recovery_cmd_lock);
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
* list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
* sending the TMR response.
*/
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
(cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
pr_debug("Not performing realligence on"
" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
/*
* Special case where commands greater than or equal to
* the session's ExpCmdSN are attached to the connection
* list but not to the out of order CmdSN list. The one
* obvious case is when a command with immediate data
* attached must only check the CmdSN against ExpCmdSN
* after the data is received. The special case below
* is when the connection fails before data is received,
* but also may apply to other PDUs, so it has been
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
continue;
}
cmd_count++;
pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
" realligence.\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
conn->cid);
cmd->deferred_i_state = cmd->i_state;
cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
cmd->sess = conn->sess;
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
transport_wait_for_tasks(&cmd->se_cmd);
/*
* Add the struct iscsi_cmd to the connection recovery cmd list
*/
spin_lock(&cr->conn_recovery_cmd_lock);
list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock_bh(&conn->cmd_lock);
cmd->cr = cr;
cmd->conn = NULL;
}
spin_unlock_bh(&conn->cmd_lock);
/*
* Fill in the various values in the preallocated struct iscsi_conn_recovery.
*/
cr->cid = conn->cid;
cr->cmd_count = cmd_count;
cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
cr->maxxmitdatasegmentlength = conn->conn_ops->MaxXmitDataSegmentLength;
cr->sess = conn->sess;
iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
return 0;
}
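/*
 * Editorial sketch (not part of the original file): the ExpCmdSN comparison
 * above uses iscsi_sna_gte(), i.e. RFC 1982 serial number arithmetic over
 * 32-bit CmdSNs that may wrap. Assuming the usual signed-difference
 * formulation, a minimal userspace model is:
 */
#include <stdint.h>
#include <stdio.h>

static int sna_gte(uint32_t a, uint32_t b)
{
	/* a >= b in serial arithmetic iff (a - b) mod 2^32 < 2^31 */
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	printf("%d\n", sna_gte(5, 3));			/* 1 */
	printf("%d\n", sna_gte(2, 0xfffffffeU));	/* 1: wrapped past */
	printf("%d\n", sna_gte(0xfffffffeU, 2));	/* 0 */
	return 0;
}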
int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
{
atomic_set(&conn->connection_recovery, 1);
if (iscsit_close_connection(conn) < 0)
return -1;
return 0;
}

View file

@ -0,0 +1,18 @@
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsi_session *, u16);
extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
extern int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *, struct iscsi_session *);
extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
struct iscsi_session *);
extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
extern int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *);
extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
#endif /*** ISCSI_TARGET_ERL2_H ***/

File diff suppressed because it is too large

View file

@ -0,0 +1,21 @@
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H
extern int iscsi_login_setup_crypto(struct iscsi_conn *);
extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
extern int iscsit_setup_np(struct iscsi_np *,
struct __kernel_sockaddr_storage *);
extern int iscsi_target_setup_login_socket(struct iscsi_np *,
struct __kernel_sockaddr_storage *);
extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
bool, bool);
extern int iscsi_target_login_thread(void *);
extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
#endif /*** ISCSI_TARGET_LOGIN_H ***/

File diff suppressed because it is too large

View file

@ -0,0 +1,20 @@
#ifndef ISCSI_TARGET_NEGO_H
#define ISCSI_TARGET_NEGO_H
#define DECIMAL 0
#define HEX 1
extern void convert_null_to_semi(char *, int);
extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
extern int iscsi_target_check_login_request(struct iscsi_conn *,
struct iscsi_login *);
extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
struct iscsi_login *);
extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
struct iscsi_login *);
extern int iscsi_target_start_negotiation(
struct iscsi_login *, struct iscsi_conn *);
extern void iscsi_target_nego_release(struct iscsi_conn *);
#endif /* ISCSI_TARGET_NEGO_H */

View file

@ -0,0 +1,261 @@
/*******************************************************************************
* This file contains the main functions related to Initiator Node Attributes.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <target/target_core_base.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target_nodeattrib.h"
static inline char *iscsit_na_get_initiatorname(
struct iscsi_node_acl *nacl)
{
struct se_node_acl *se_nacl = &nacl->se_node_acl;
return &se_nacl->initiatorname[0];
}
void iscsit_set_default_node_attributes(
struct iscsi_node_acl *acl,
struct iscsi_portal_group *tpg)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
a->dataout_timeout = NA_DATAOUT_TIMEOUT;
a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
a->nopin_timeout = NA_NOPIN_TIMEOUT;
a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
a->default_erl = tpg->tpg_attrib.default_erl;
}
int iscsit_na_dataout_timeout(
struct iscsi_node_acl *acl,
u32 dataout_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
pr_err("Requested DataOut Timeout %u larger than"
" maximum %u\n", dataout_timeout,
NA_DATAOUT_TIMEOUT_MAX);
return -EINVAL;
} else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
pr_err("Requested DataOut Timeout %u smaller than"
" minimum %u\n", dataout_timeout,
NA_DATAOUT_TIMEOUT_MIX);
return -EINVAL;
}
a->dataout_timeout = dataout_timeout;
pr_debug("Set DataOut Timeout to %u for Initiator Node"
" %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_dataout_timeout_retries(
struct iscsi_node_acl *acl,
u32 dataout_timeout_retries)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
pr_err("Requested DataOut Timeout Retries %u larger"
" than maximum %u", dataout_timeout_retries,
NA_DATAOUT_TIMEOUT_RETRIES_MAX);
return -EINVAL;
} else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
pr_err("Requested DataOut Timeout Retries %u smaller"
" than minimum %u", dataout_timeout_retries,
NA_DATAOUT_TIMEOUT_RETRIES_MIN);
return -EINVAL;
}
a->dataout_timeout_retries = dataout_timeout_retries;
pr_debug("Set DataOut Timeout Retries to %u for"
" Initiator Node %s\n", a->dataout_timeout_retries,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_nopin_timeout(
struct iscsi_node_acl *acl,
u32 nopin_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
struct iscsi_session *sess;
struct iscsi_conn *conn;
struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
struct se_session *se_sess;
u32 orig_nopin_timeout = a->nopin_timeout;
if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
pr_err("Requested NopIn Timeout %u larger than maximum"
" %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
return -EINVAL;
} else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
(nopin_timeout != 0)) {
pr_err("Requested NopIn Timeout %u smaller than"
" minimum %u and not 0\n", nopin_timeout,
NA_NOPIN_TIMEOUT_MIN);
return -EINVAL;
}
a->nopin_timeout = nopin_timeout;
pr_debug("Set NopIn Timeout to %u for Initiator"
" Node %s\n", a->nopin_timeout,
iscsit_na_get_initiatorname(acl));
/*
* Reenable disabled nopin_timeout timer for all iSCSI connections.
*/
if (!orig_nopin_timeout) {
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list,
conn_list) {
if (conn->conn_state !=
TARG_CONN_STATE_LOGGED_IN)
continue;
spin_lock(&conn->nopin_timer_lock);
__iscsit_start_nopin_timer(conn);
spin_unlock(&conn->nopin_timer_lock);
}
spin_unlock(&sess->conn_lock);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
}
return 0;
}
int iscsit_na_nopin_response_timeout(
struct iscsi_node_acl *acl,
u32 nopin_response_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
pr_err("Requested NopIn Response Timeout %u larger"
" than maximum %u\n", nopin_response_timeout,
NA_NOPIN_RESPONSE_TIMEOUT_MAX);
return -EINVAL;
} else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
pr_err("Requested NopIn Response Timeout %u smaller"
" than minimum %u\n", nopin_response_timeout,
NA_NOPIN_RESPONSE_TIMEOUT_MIN);
return -EINVAL;
}
a->nopin_response_timeout = nopin_response_timeout;
pr_debug("Set NopIn Response Timeout to %u for"
" Initiator Node %s\n", a->nopin_timeout,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_random_datain_pdu_offsets(
struct iscsi_node_acl *acl,
u32 random_datain_pdu_offsets)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
pr_err("Requested Random DataIN PDU Offsets: %u not"
" 0 or 1\n", random_datain_pdu_offsets);
return -EINVAL;
}
a->random_datain_pdu_offsets = random_datain_pdu_offsets;
pr_debug("Set Random DataIN PDU Offsets to %u for"
" Initiator Node %s\n", a->random_datain_pdu_offsets,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_random_datain_seq_offsets(
struct iscsi_node_acl *acl,
u32 random_datain_seq_offsets)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
pr_err("Requested Random DataIN Sequence Offsets: %u"
" not 0 or 1\n", random_datain_seq_offsets);
return -EINVAL;
}
a->random_datain_seq_offsets = random_datain_seq_offsets;
pr_debug("Set Random DataIN Sequence Offsets to %u for"
" Initiator Node %s\n", a->random_datain_seq_offsets,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_random_r2t_offsets(
struct iscsi_node_acl *acl,
u32 random_r2t_offsets)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
pr_err("Requested Random R2T Offsets: %u not"
" 0 or 1\n", random_r2t_offsets);
return -EINVAL;
}
a->random_r2t_offsets = random_r2t_offsets;
pr_debug("Set Random R2T Offsets to %u for"
" Initiator Node %s\n", a->random_r2t_offsets,
iscsit_na_get_initiatorname(acl));
return 0;
}
int iscsit_na_default_erl(
struct iscsi_node_acl *acl,
u32 default_erl)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
pr_err("Requested default ERL: %u not 0, 1, or 2\n",
default_erl);
return -EINVAL;
}
a->default_erl = default_erl;
pr_debug("Set use ERL0 flag to %u for Initiator"
" Node %s\n", a->default_erl,
iscsit_na_get_initiatorname(acl));
return 0;
}

View file

@ -0,0 +1,15 @@
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
extern void iscsit_set_default_node_attributes(struct iscsi_node_acl *,
struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
#endif /* ISCSI_TARGET_NODEATTRIB_H */

File diff suppressed because it is too large

View file

@ -0,0 +1,290 @@
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
#include <scsi/iscsi_proto.h>
struct iscsi_extra_response {
char key[KEY_MAXLEN];
char value[32];
struct list_head er_list;
} ____cacheline_aligned;
struct iscsi_param {
char *name;
char *value;
u8 set_param;
u8 phase;
u8 scope;
u8 sender;
u8 type;
u8 use;
u16 type_range;
u32 state;
struct list_head p_list;
} ____cacheline_aligned;
extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
extern void iscsi_print_params(struct iscsi_param_list *);
extern int iscsi_create_default_params(struct iscsi_param_list **);
extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
extern int iscsi_copy_param_list(struct iscsi_param_list **,
struct iscsi_param_list *, int);
extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
extern void iscsi_release_param_list(struct iscsi_param_list *);
extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
struct iscsi_param_list *);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
struct iscsi_param_list *, int);
#define YES "Yes"
#define NO "No"
#define ALL "All"
#define IRRELEVANT "Irrelevant"
#define NONE "None"
#define NOTUNDERSTOOD "NotUnderstood"
#define REJECT "Reject"
/*
* The Parameter Names.
*/
#define AUTHMETHOD "AuthMethod"
#define HEADERDIGEST "HeaderDigest"
#define DATADIGEST "DataDigest"
#define MAXCONNECTIONS "MaxConnections"
#define SENDTARGETS "SendTargets"
#define TARGETNAME "TargetName"
#define INITIATORNAME "InitiatorName"
#define TARGETALIAS "TargetAlias"
#define INITIATORALIAS "InitiatorAlias"
#define TARGETADDRESS "TargetAddress"
#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
#define INITIALR2T "InitialR2T"
#define IMMEDIATEDATA "ImmediateData"
#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
#define MAXXMITDATASEGMENTLENGTH "MaxXmitDataSegmentLength"
#define MAXBURSTLENGTH "MaxBurstLength"
#define FIRSTBURSTLENGTH "FirstBurstLength"
#define DEFAULTTIME2WAIT "DefaultTime2Wait"
#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
#define DATAPDUINORDER "DataPDUInOrder"
#define DATASEQUENCEINORDER "DataSequenceInOrder"
#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
#define SESSIONTYPE "SessionType"
#define IFMARKER "IFMarker"
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
#define X_EXTENSIONKEY "X-com.sbei.version"
#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
* Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
*/
#define RDMAEXTENSIONS "RDMAExtensions"
#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
/*
* For AuthMethod.
*/
#define KRB5 "KRB5"
#define SPKM1 "SPKM1"
#define SPKM2 "SPKM2"
#define SRP "SRP"
#define CHAP "CHAP"
/*
* Initial values for Parameter Negotiation.
*/
#define INITIAL_AUTHMETHOD CHAP
#define INITIAL_HEADERDIGEST "CRC32C,None"
#define INITIAL_DATADIGEST "CRC32C,None"
#define INITIAL_MAXCONNECTIONS "1"
#define INITIAL_SENDTARGETS ALL
#define INITIAL_TARGETNAME "LIO.Target"
#define INITIAL_INITIATORNAME "LIO.Initiator"
#define INITIAL_TARGETALIAS "LIO Target"
#define INITIAL_INITIATORALIAS "LIO Initiator"
#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
#define INITIAL_TARGETPORTALGROUPTAG "1"
#define INITIAL_INITIALR2T YES
#define INITIAL_IMMEDIATEDATA YES
#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
/*
* Match outgoing MXDSL default to incoming Open-iSCSI default
*/
#define INITIAL_MAXXMITDATASEGMENTLENGTH "262144"
#define INITIAL_MAXBURSTLENGTH "262144"
#define INITIAL_FIRSTBURSTLENGTH "65536"
#define INITIAL_DEFAULTTIME2WAIT "2"
#define INITIAL_DEFAULTTIME2RETAIN "20"
#define INITIAL_MAXOUTSTANDINGR2T "1"
#define INITIAL_DATAPDUINORDER YES
#define INITIAL_DATASEQUENCEINORDER YES
#define INITIAL_ERRORRECOVERYLEVEL "0"
#define INITIAL_SESSIONTYPE NORMAL
#define INITIAL_IFMARKER NO
#define INITIAL_OFMARKER NO
#define INITIAL_IFMARKINT "2048~65535"
#define INITIAL_OFMARKINT "2048~65535"
/*
* Initial values for iSER parameters following RFC-5046 Section 6
*/
#define INITIAL_RDMAEXTENSIONS NO
#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
/*
* For [Header,Data]Digests.
*/
#define CRC32C "CRC32C"
/*
* For SessionType.
*/
#define DISCOVERY "Discovery"
#define NORMAL "Normal"
/*
* struct iscsi_param->use
*/
#define USE_LEADING_ONLY 0x01
#define USE_INITIAL_ONLY 0x02
#define USE_ALL 0x04
#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
#define IS_USE_ALL(p) ((p)->use & USE_ALL)
#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
/*
* struct iscsi_param->sender
*/
#define SENDER_INITIATOR 0x01
#define SENDER_TARGET 0x02
#define SENDER_BOTH 0x03
/* Used in iscsi_check_key() */
#define SENDER_RECEIVER 0x04
#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
/*
* struct iscsi_param->scope
*/
#define SCOPE_CONNECTION_ONLY 0x01
#define SCOPE_SESSION_WIDE 0x02
#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
/*
* struct iscsi_param->phase
*/
#define PHASE_SECURITY 0x01
#define PHASE_OPERATIONAL 0x02
#define PHASE_DECLARATIVE 0x04
#define PHASE_FFP0 0x08
#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
/*
* struct iscsi_param->type
*/
#define TYPE_BOOL_AND 0x01
#define TYPE_BOOL_OR 0x02
#define TYPE_NUMBER 0x04
#define TYPE_NUMBER_RANGE 0x08
#define TYPE_STRING 0x10
#define TYPE_VALUE_LIST 0x20
#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
/*
* struct iscsi_param->type_range
*/
#define TYPERANGE_BOOL_AND 0x0001
#define TYPERANGE_BOOL_OR 0x0002
#define TYPERANGE_0_TO_2 0x0004
#define TYPERANGE_0_TO_3600 0x0008
#define TYPERANGE_0_TO_32767 0x0010
#define TYPERANGE_0_TO_65535 0x0020
#define TYPERANGE_1_TO_65535 0x0040
#define TYPERANGE_2_TO_3600 0x0080
#define TYPERANGE_512_TO_16777215 0x0100
#define TYPERANGE_AUTH 0x0200
#define TYPERANGE_DIGEST 0x0400
#define TYPERANGE_ISCSINAME 0x0800
#define TYPERANGE_MARKINT 0x1000
#define TYPERANGE_SESSIONTYPE 0x2000
#define TYPERANGE_TARGETADDRESS 0x4000
#define TYPERANGE_UTF8 0x8000
#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
TYPERANGE_512_TO_16777215)
#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
TYPERANGE_SESSIONTYPE)
/*
* struct iscsi_param->state
*/
#define PSTATE_ACCEPTOR 0x01
#define PSTATE_NEGOTIATE 0x02
#define PSTATE_PROPOSER 0x04
#define PSTATE_IRRELEVANT 0x08
#define PSTATE_REJECT 0x10
#define PSTATE_REPLY_OPTIONAL 0x20
#define PSTATE_RESPONSE_GOT 0x40
#define PSTATE_RESPONSE_SENT 0x80
#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
#endif /* ISCSI_PARAMETERS_H */
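/*
 * Editorial sketch (not part of the original file): the IS_*() and SET_*()
 * helpers above all follow the same test-bit/set-bit pattern over a flag
 * field. A minimal userspace model of the pattern:
 */
#include <stdio.h>

#define PSTATE_ACCEPTOR		0x01
#define PSTATE_NEGOTIATE	0x02

struct param { unsigned int state; };

#define IS_PSTATE_NEGOTIATE(p)	((p)->state & PSTATE_NEGOTIATE)
#define SET_PSTATE_NEGOTIATE(p)	((p)->state |= PSTATE_NEGOTIATE)

int main(void)
{
	struct param p = { .state = PSTATE_ACCEPTOR };

	SET_PSTATE_NEGOTIATE(&p);
	printf("negotiating: %s\n", IS_PSTATE_NEGOTIATE(&p) ? "yes" : "no");
	return 0;
}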

View file

@ -0,0 +1,700 @@
/*******************************************************************************
* This file contains main functions related to iSCSI DataSequenceInOrder=No
* and DataPDUInOrder=No.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <linux/slab.h>
#include <linux/random.h>
#include "iscsi_target_core.h"
#include "iscsi_target_util.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
#define OFFLOAD_BUF_SIZE 32768
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
int i;
struct iscsi_seq *seq;
pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
cmd->init_task_tag);
for (i = 0; i < cmd->seq_count; i++) {
seq = &cmd->seq_list[i];
pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
" offset: %d, xfer_len: %d, seq_send_order: %d,"
" seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
seq->offset, seq->xfer_len, seq->seq_send_order,
seq->seq_no);
}
}
static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
{
int i;
struct iscsi_pdu *pdu;
pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
cmd->init_task_tag);
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
pr_debug("i: %d, offset: %d, length: %d,"
" pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
pdu->length, pdu->pdu_send_order, pdu->seq_no);
}
}
#else
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
#endif
static void iscsit_ordered_seq_lists(
struct iscsi_cmd *cmd,
u8 type)
{
u32 i, seq_count = 0;
for (i = 0; i < cmd->seq_count; i++) {
if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
continue;
cmd->seq_list[i].seq_send_order = seq_count++;
}
}
static void iscsit_ordered_pdu_lists(
struct iscsi_cmd *cmd,
u8 type)
{
u32 i, pdu_send_order = 0, seq_no = 0;
for (i = 0; i < cmd->pdu_count; i++) {
redo:
if (cmd->pdu_list[i].seq_no == seq_no) {
cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
continue;
}
seq_no++;
pdu_send_order = 0;
goto redo;
}
}
/*
* Generate count random values into array.
* Use 0x80000000 to mark generated values in array[].
*/
static void iscsit_create_random_array(u32 *array, u32 count)
{
int i, j, k;
if (count == 1) {
array[0] = 0;
return;
}
for (i = 0; i < count; i++) {
redo:
get_random_bytes(&j, sizeof(u32));
j = (1 + (int) (9999 + 1) - j) % count;
for (k = 0; k < i + 1; k++) {
j |= 0x80000000;
if ((array[k] & 0x80000000) && (array[k] == j))
goto redo;
}
array[i] = j;
}
for (i = 0; i < count; i++)
array[i] &= ~0x80000000;
}
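/*
 * Editorial sketch (not part of the original file): the rejection loop above
 * produces a random permutation of 0..count-1, retrying whenever a draw
 * collides with an earlier slot. An equivalent O(n) userspace construction
 * is a Fisher-Yates shuffle (illustrative RNG only):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void random_permutation(uint32_t *array, uint32_t count)
{
	uint32_t i, j, tmp;

	for (i = 0; i < count; i++)
		array[i] = i;
	for (i = count - 1; i > 0; i--) {
		j = (uint32_t)rand() % (i + 1);	/* pick from 0..i */
		tmp = array[i];
		array[i] = array[j];
		array[j] = tmp;
	}
}

int main(void)
{
	uint32_t a[8], i;

	random_permutation(a, 8);
	for (i = 0; i < 8; i++)
		printf("%u ", a[i]);
	printf("\n");
	return 0;
}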
static int iscsit_randomize_pdu_lists(
struct iscsi_cmd *cmd,
u8 type)
{
int i = 0;
u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
redo:
if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
seq_count++;
continue;
}
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory"
" for random array.\n");
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
for (i = 0; i < seq_count; i++)
cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
kfree(array);
seq_offset += seq_count;
seq_count = 0;
seq_no++;
goto redo;
}
if (seq_count) {
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for"
" random array.\n");
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
for (i = 0; i < seq_count; i++)
cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
kfree(array);
}
return 0;
}
static int iscsit_randomize_seq_lists(
struct iscsi_cmd *cmd,
u8 type)
{
int i, j = 0;
u32 *array, seq_count = cmd->seq_count;
if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
seq_count--;
else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
seq_count -= 2;
if (!seq_count)
return 0;
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for random array.\n");
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
for (i = 0; i < cmd->seq_count; i++) {
if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
continue;
cmd->seq_list[i].seq_send_order = array[j++];
}
kfree(array);
return 0;
}
static void iscsit_determine_counts_for_list(
struct iscsi_cmd *cmd,
struct iscsi_build_list *bl,
u32 *seq_count,
u32 *pdu_count)
{
int check_immediate = 0;
u32 burstlength = 0, offset = 0;
u32 unsolicited_data_length = 0;
u32 mdsl;
struct iscsi_conn *conn = cmd->conn;
if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
else
mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
if ((bl->type == PDULIST_IMMEDIATE) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
check_immediate = 1;
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
unsolicited_data_length = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
while (offset < cmd->se_cmd.data_length) {
*pdu_count += 1;
if (check_immediate) {
check_immediate = 0;
offset += bl->immediate_data_length;
*seq_count += 1;
if (unsolicited_data_length)
unsolicited_data_length -=
bl->immediate_data_length;
continue;
}
if (unsolicited_data_length > 0) {
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
unsolicited_data_length -=
(cmd->se_cmd.data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((offset + mdsl)
>= conn->sess->sess_ops->FirstBurstLength) {
unsolicited_data_length -=
(conn->sess->sess_ops->FirstBurstLength -
offset);
offset += (conn->sess->sess_ops->FirstBurstLength -
offset);
burstlength = 0;
*seq_count += 1;
continue;
}
offset += mdsl;
unsolicited_data_length -= mdsl;
continue;
}
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((burstlength + mdsl) >=
conn->sess->sess_ops->MaxBurstLength) {
offset += (conn->sess->sess_ops->MaxBurstLength -
burstlength);
burstlength = 0;
*seq_count += 1;
continue;
}
burstlength += mdsl;
offset += mdsl;
}
}
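/*
 * Editorial sketch (not part of the original file): for the common solicited
 * case (no immediate and no unsolicited data), the counting above reduces to
 * ceiling divisions by MDSL and MaxBurstLength, assuming MDSL evenly divides
 * MaxBurstLength:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data_length = 200000, mdsl = 8192, max_burst = 65536;
	uint32_t pdu_count = (data_length + mdsl - 1) / mdsl;
	uint32_t seq_count = (data_length + max_burst - 1) / max_burst;

	printf("pdus=%u seqs=%u\n", pdu_count, seq_count);	/* 25 and 4 */
	return 0;
}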
/*
* Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
* or DataPDUInOrder=No.
*/
static int iscsit_do_build_pdu_and_seq_lists(
struct iscsi_cmd *cmd,
struct iscsi_build_list *bl)
{
int check_immediate = 0, datapduinorder, datasequenceinorder;
u32 burstlength = 0, offset = 0, i = 0, mdsl;
u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = cmd->pdu_list;
struct iscsi_seq *seq = cmd->seq_list;
if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
else
mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength;
datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
if ((bl->type == PDULIST_IMMEDIATE) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
check_immediate = 1;
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
unsolicited_data_length = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
while (offset < cmd->se_cmd.data_length) {
pdu_count++;
if (!datapduinorder) {
pdu[i].offset = offset;
pdu[i].seq_no = seq_no;
}
if (!datasequenceinorder && (pdu_count == 1)) {
seq[seq_no].pdu_start = i;
seq[seq_no].seq_no = seq_no;
seq[seq_no].offset = offset;
seq[seq_no].orig_offset = offset;
}
if (check_immediate) {
check_immediate = 0;
if (!datapduinorder) {
pdu[i].type = PDUTYPE_IMMEDIATE;
pdu[i++].length = bl->immediate_data_length;
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_IMMEDIATE;
seq[seq_no].pdu_count = 1;
seq[seq_no].xfer_len =
bl->immediate_data_length;
}
offset += bl->immediate_data_length;
pdu_count = 0;
seq_no++;
if (unsolicited_data_length)
unsolicited_data_length -=
bl->immediate_data_length;
continue;
}
if (unsolicited_data_length > 0) {
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i].length =
(cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_UNSOLICITED;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(cmd->se_cmd.data_length - offset));
}
unsolicited_data_length -=
(cmd->se_cmd.data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((offset + mdsl) >=
conn->sess->sess_ops->FirstBurstLength) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i++].length =
(conn->sess->sess_ops->FirstBurstLength -
offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_UNSOLICITED;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(conn->sess->sess_ops->FirstBurstLength -
offset));
}
unsolicited_data_length -=
(conn->sess->sess_ops->FirstBurstLength -
offset);
offset += (conn->sess->sess_ops->FirstBurstLength -
offset);
burstlength = 0;
pdu_count = 0;
seq_no++;
continue;
}
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i++].length = mdsl;
}
burstlength += mdsl;
offset += mdsl;
unsolicited_data_length -= mdsl;
continue;
}
if ((offset + mdsl) >= cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i].length = (cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_NORMAL;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(cmd->se_cmd.data_length - offset));
}
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((burstlength + mdsl) >=
conn->sess->sess_ops->MaxBurstLength) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i++].length =
(conn->sess->sess_ops->MaxBurstLength -
burstlength);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_NORMAL;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(conn->sess->sess_ops->MaxBurstLength -
burstlength));
}
offset += (conn->sess->sess_ops->MaxBurstLength -
burstlength);
burstlength = 0;
pdu_count = 0;
seq_no++;
continue;
}
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i++].length = mdsl;
}
burstlength += mdsl;
offset += mdsl;
}
if (!datasequenceinorder) {
if (bl->data_direction & ISCSI_PDU_WRITE) {
if (bl->randomize & RANDOM_R2T_OFFSETS) {
if (iscsit_randomize_seq_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_seq_lists(cmd, bl->type);
} else if (bl->data_direction & ISCSI_PDU_READ) {
if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
if (iscsit_randomize_seq_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_seq_lists(cmd, bl->type);
}
iscsit_dump_seq_list(cmd);
}
if (!datapduinorder) {
if (bl->data_direction & ISCSI_PDU_WRITE) {
if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
if (iscsit_randomize_pdu_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_pdu_lists(cmd, bl->type);
} else if (bl->data_direction & ISCSI_PDU_READ) {
if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
if (iscsit_randomize_pdu_lists(cmd, bl->type)
< 0)
return -1;
} else
iscsit_ordered_pdu_lists(cmd, bl->type);
}
iscsit_dump_pdu_list(cmd);
}
return 0;
}
int iscsit_build_pdu_and_seq_lists(
struct iscsi_cmd *cmd,
u32 immediate_data_length)
{
struct iscsi_build_list bl;
u32 pdu_count = 0, seq_count = 1;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na;
/*
* Do nothing if no OOO shenanigans
*/
if (sess->sess_ops->DataSequenceInOrder &&
sess->sess_ops->DataPDUInOrder)
return 0;
if (cmd->data_direction == DMA_NONE)
return 0;
na = iscsit_tpg_get_node_attrib(sess);
memset(&bl, 0, sizeof(struct iscsi_build_list));
if (cmd->data_direction == DMA_FROM_DEVICE) {
bl.data_direction = ISCSI_PDU_READ;
bl.type = PDULIST_NORMAL;
if (na->random_datain_pdu_offsets)
bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
if (na->random_datain_seq_offsets)
bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
} else {
bl.data_direction = ISCSI_PDU_WRITE;
bl.immediate_data_length = immediate_data_length;
if (na->random_r2t_offsets)
bl.randomize |= RANDOM_R2T_OFFSETS;
if (!cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_NORMAL;
else if (cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE;
else if (!cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_UNSOLICITED;
else if (cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
}
iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
if (!conn->sess->sess_ops->DataSequenceInOrder) {
seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
if (!seq) {
pr_err("Unable to allocate struct iscsi_seq list\n");
return -ENOMEM;
}
cmd->seq_list = seq;
cmd->seq_count = seq_count;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
if (!pdu) {
pr_err("Unable to allocate struct iscsi_pdu list.\n");
kfree(seq);
return -ENOMEM;
}
cmd->pdu_list = pdu;
cmd->pdu_count = pdu_count;
}
return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
}
struct iscsi_pdu *iscsit_get_pdu_holder(
struct iscsi_cmd *cmd,
u32 offset,
u32 length)
{
u32 i;
struct iscsi_pdu *pdu = NULL;
if (!cmd->pdu_list) {
pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
return NULL;
}
pdu = &cmd->pdu_list[0];
for (i = 0; i < cmd->pdu_count; i++)
if ((pdu[i].offset == offset) && (pdu[i].length == length))
return &pdu[i];
pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
" %u, Length: %u\n", cmd->init_task_tag, offset, length);
return NULL;
}
struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
struct iscsi_cmd *cmd,
struct iscsi_seq *seq)
{
u32 i;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
if (!cmd->pdu_list) {
pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
return NULL;
}
if (conn->sess->sess_ops->DataSequenceInOrder) {
redo:
pdu = &cmd->pdu_list[cmd->pdu_start];
for (i = 0; pdu[i].seq_no == cmd->seq_no; i++) {
pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
"_send_order: %d, pdu[i].offset: %d,"
" pdu[i].length: %d\n", pdu[i].seq_no,
pdu[i].pdu_send_order, pdu[i].offset,
pdu[i].length);
if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
cmd->pdu_send_order++;
return &pdu[i];
}
}
cmd->pdu_start += cmd->pdu_send_order;
cmd->pdu_send_order = 0;
cmd->seq_no++;
if (cmd->pdu_start < cmd->pdu_count)
goto redo;
pr_err("Command ITT: 0x%08x unable to locate"
" struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
cmd->init_task_tag, cmd->pdu_send_order);
return NULL;
} else {
if (!seq) {
pr_err("struct iscsi_seq is NULL!\n");
return NULL;
}
pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
seq->seq_no);
pdu = &cmd->pdu_list[seq->pdu_start];
if (seq->pdu_send_order == seq->pdu_count) {
pr_err("Command ITT: 0x%08x seq->pdu_send"
"_order: %u equals seq->pdu_count: %u\n",
cmd->init_task_tag, seq->pdu_send_order,
seq->pdu_count);
return NULL;
}
for (i = 0; i < seq->pdu_count; i++) {
if (pdu[i].pdu_send_order == seq->pdu_send_order) {
seq->pdu_send_order++;
return &pdu[i];
}
}
pr_err("Command ITT: 0x%08x unable to locate iscsi"
"_pdu_t for seq->pdu_send_order: %u.\n",
cmd->init_task_tag, seq->pdu_send_order);
return NULL;
}
return NULL;
}
struct iscsi_seq *iscsit_get_seq_holder(
struct iscsi_cmd *cmd,
u32 offset,
u32 length)
{
u32 i;
if (!cmd->seq_list) {
pr_err("struct iscsi_cmd->seq_list is NULL!\n");
return NULL;
}
for (i = 0; i < cmd->seq_count; i++) {
pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
"xfer_len: %d, seq_list[i].seq_no %u\n",
cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
cmd->seq_list[i].seq_no);
if ((cmd->seq_list[i].orig_offset +
cmd->seq_list[i].xfer_len) >=
(offset + length))
return &cmd->seq_list[i];
}
pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
" Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
length);
return NULL;
}

View file

@ -0,0 +1,86 @@
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H
/* struct iscsi_pdu->status */
#define DATAOUT_PDU_SENT 1
/* struct iscsi_seq->type */
#define SEQTYPE_IMMEDIATE 1
#define SEQTYPE_UNSOLICITED 2
#define SEQTYPE_NORMAL 3
/* struct iscsi_seq->status */
#define DATAOUT_SEQUENCE_GOT_R2T 1
#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
#define DATAOUT_SEQUENCE_COMPLETE 3
/* iscsi_determine_counts_for_list() type */
#define PDULIST_NORMAL 1
#define PDULIST_IMMEDIATE 2
#define PDULIST_UNSOLICITED 3
#define PDULIST_IMMEDIATE_AND_UNSOLICITED 4
/* struct iscsi_pdu->type */
#define PDUTYPE_IMMEDIATE 1
#define PDUTYPE_UNSOLICITED 2
#define PDUTYPE_NORMAL 3
/* struct iscsi_pdu->status */
#define ISCSI_PDU_NOT_RECEIVED 0
#define ISCSI_PDU_RECEIVED_OK 1
#define ISCSI_PDU_CRC_FAILED 2
#define ISCSI_PDU_TIMED_OUT 3
/* struct iscsi_build_list->randomize */
#define RANDOM_DATAIN_PDU_OFFSETS 0x01
#define RANDOM_DATAIN_SEQ_OFFSETS 0x02
#define RANDOM_DATAOUT_PDU_OFFSETS 0x04
#define RANDOM_R2T_OFFSETS 0x08
/* struct iscsi_build_list->data_direction */
#define ISCSI_PDU_READ 0x01
#define ISCSI_PDU_WRITE 0x02
struct iscsi_build_list {
int data_direction;
int randomize;
int type;
int immediate_data_length;
};
struct iscsi_pdu {
int status;
int type;
u8 flags;
u32 data_sn;
u32 length;
u32 offset;
u32 pdu_send_order;
u32 seq_no;
} ____cacheline_aligned;
struct iscsi_seq {
int sent;
int status;
int type;
u32 data_sn;
u32 first_datasn;
u32 last_datasn;
u32 next_burst_len;
u32 pdu_start;
u32 pdu_count;
u32 offset;
u32 orig_offset;
u32 pdu_send_order;
u32 r2t_sn;
u32 seq_send_order;
u32 seq_no;
u32 xfer_len;
} ____cacheline_aligned;
extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
#endif /* ISCSI_SEQ_AND_PDU_LIST_H */

View file

@ -0,0 +1,949 @@
/*******************************************************************************
* Modern ConfigFS group context specific iSCSI statistics based on original
* iscsi_target_mib.c code
*
* Copyright (c) 2011-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <linux/configfs.h>
#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/configfs_macros.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target_stat.h"
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
/* Instance Attributes Table */
#define ISCSI_INST_NUM_NODES 1
#define ISCSI_INST_DESCR "Storage Engine Target"
#define ISCSI_INST_LAST_FAILURE_TYPE 0
#define ISCSI_DISCONTINUITY_TIME 0
#define ISCSI_NODE_INDEX 1
#define ISPRINT(a)   (((a) >= ' ') && ((a) <= '~'))
/****************************************************************************
* iSCSI MIB Tables
****************************************************************************/
/*
* Instance Attributes Table
*/
CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
static struct iscsi_stat_instance_attribute \
iscsi_stat_instance_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
iscsi_stat_instance_show_attr_##_name, \
iscsi_stat_instance_store_attr_##_name);
#define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
static struct iscsi_stat_instance_attribute \
iscsi_stat_instance_##_name = \
__CONFIGFS_EATTR_RO(_name, \
iscsi_stat_instance_show_attr_##_name);
static ssize_t iscsi_stat_instance_show_attr_inst(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_INSTANCE_ATTR_RO(inst);
static ssize_t iscsi_stat_instance_show_attr_min_ver(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
static ssize_t iscsi_stat_instance_show_attr_max_ver(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
static ssize_t iscsi_stat_instance_show_attr_portals(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
}
ISCSI_STAT_INSTANCE_ATTR_RO(portals);
static ssize_t iscsi_stat_instance_show_attr_nodes(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
}
ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
static ssize_t iscsi_stat_instance_show_attr_sessions(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
}
ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
static ssize_t iscsi_stat_instance_show_attr_fail_sess(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
u32 sess_err_count;
spin_lock_bh(&sess_err->lock);
sess_err_count = (sess_err->digest_errors +
sess_err->cxn_timeout_errors +
sess_err->pdu_format_errors);
spin_unlock_bh(&sess_err->lock);
return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
}
ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
static ssize_t iscsi_stat_instance_show_attr_fail_type(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n",
sess_err->last_sess_failure_type);
}
ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%s\n",
sess_err->last_sess_fail_rem_name[0] ?
sess_err->last_sess_fail_rem_name : NONE);
}
ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
static ssize_t iscsi_stat_instance_show_attr_disc_time(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
}
ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
static ssize_t iscsi_stat_instance_show_attr_description(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
}
ISCSI_STAT_INSTANCE_ATTR_RO(description);
static ssize_t iscsi_stat_instance_show_attr_vendor(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
}
ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
static ssize_t iscsi_stat_instance_show_attr_version(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
}
ISCSI_STAT_INSTANCE_ATTR_RO(version);
CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
iscsi_instance_group);
static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
&iscsi_stat_instance_inst.attr,
&iscsi_stat_instance_min_ver.attr,
&iscsi_stat_instance_max_ver.attr,
&iscsi_stat_instance_portals.attr,
&iscsi_stat_instance_nodes.attr,
&iscsi_stat_instance_sessions.attr,
&iscsi_stat_instance_fail_sess.attr,
&iscsi_stat_instance_fail_type.attr,
&iscsi_stat_instance_fail_rem_name.attr,
&iscsi_stat_instance_disc_time.attr,
&iscsi_stat_instance_description.attr,
&iscsi_stat_instance_vendor.attr,
&iscsi_stat_instance_version.attr,
NULL,
};
static struct configfs_item_operations iscsi_stat_instance_item_ops = {
.show_attribute = iscsi_stat_instance_attr_show,
.store_attribute = iscsi_stat_instance_attr_store,
};
struct config_item_type iscsi_stat_instance_cit = {
.ct_item_ops = &iscsi_stat_instance_item_ops,
.ct_attrs = iscsi_stat_instance_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Instance Session Failure Stats Table
*/
CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
static struct iscsi_stat_sess_err_attribute \
iscsi_stat_sess_err_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
iscsi_stat_sess_err_show_attr_##_name, \
iscsi_stat_sess_err_store_attr_##_name);
#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
static struct iscsi_stat_sess_err_attribute \
iscsi_stat_sess_err_##_name = \
__CONFIGFS_EATTR_RO(_name, \
iscsi_stat_sess_err_show_attr_##_name);
static ssize_t iscsi_stat_sess_err_show_attr_inst(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
}
ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
iscsi_sess_err_group);
static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
&iscsi_stat_sess_err_inst.attr,
&iscsi_stat_sess_err_digest_errors.attr,
&iscsi_stat_sess_err_cxn_errors.attr,
&iscsi_stat_sess_err_format_errors.attr,
NULL,
};
static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
.show_attribute = iscsi_stat_sess_err_attr_show,
.store_attribute = iscsi_stat_sess_err_attr_store,
};
struct config_item_type iscsi_stat_sess_err_cit = {
.ct_item_ops = &iscsi_stat_sess_err_item_ops,
.ct_attrs = iscsi_stat_sess_err_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Target Attributes Table
*/
CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
#define ISCSI_STAT_TGT_ATTR(_name, _mode) \
static struct iscsi_stat_tgt_attr_attribute \
iscsi_stat_tgt_attr_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
	iscsi_stat_tgt_attr_show_attr_##_name,		\
iscsi_stat_tgt_attr_store_attr_##_name);
#define ISCSI_STAT_TGT_ATTR_RO(_name) \
static struct iscsi_stat_tgt_attr_attribute \
iscsi_stat_tgt_attr_##_name = \
__CONFIGFS_EATTR_RO(_name, \
iscsi_stat_tgt_attr_show_attr_##_name);
static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_TGT_ATTR_RO(inst);
static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
ISCSI_STAT_TGT_ATTR_RO(indx);
static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 fail_count;
spin_lock(&lstat->lock);
fail_count = (lstat->redirects + lstat->authorize_fails +
lstat->authenticate_fails + lstat->negotiate_fails +
lstat->other_fails);
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
}
ISCSI_STAT_TGT_ATTR_RO(login_fails);
static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 last_fail_time;
spin_lock(&lstat->lock);
last_fail_time = lstat->last_fail_time ?
(u32)(((u32)lstat->last_fail_time -
INITIAL_JIFFIES) * 100 / HZ) : 0;
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
}
ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 last_fail_type;
spin_lock(&lstat->lock);
last_fail_type = lstat->last_fail_type;
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
}
ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
unsigned char buf[224];
spin_lock(&lstat->lock);
snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
lstat->last_intr_fail_name : NONE);
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%s\n", buf);
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
int ret;
spin_lock(&lstat->lock);
if (lstat->last_intr_fail_ip_family == AF_INET6)
ret = snprintf(page, PAGE_SIZE, "ipv6\n");
else
ret = snprintf(page, PAGE_SIZE, "ipv4\n");
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
int ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
iscsi_tgt_attr_group);
static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
&iscsi_stat_tgt_attr_inst.attr,
&iscsi_stat_tgt_attr_indx.attr,
&iscsi_stat_tgt_attr_login_fails.attr,
&iscsi_stat_tgt_attr_last_fail_time.attr,
&iscsi_stat_tgt_attr_last_fail_type.attr,
&iscsi_stat_tgt_attr_fail_intr_name.attr,
&iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
&iscsi_stat_tgt_attr_fail_intr_addr.attr,
NULL,
};
static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
.show_attribute = iscsi_stat_tgt_attr_attr_show,
.store_attribute = iscsi_stat_tgt_attr_attr_store,
};
struct config_item_type iscsi_stat_tgt_attr_cit = {
.ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
.ct_attrs = iscsi_stat_tgt_attr_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Target Login Stats Table
*/
CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
#define ISCSI_STAT_LOGIN(_name, _mode) \
static struct iscsi_stat_login_attribute \
iscsi_stat_login_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
iscsi_stat_login_show_attr_##_name, \
iscsi_stat_login_store_attr_##_name);
#define ISCSI_STAT_LOGIN_RO(_name) \
static struct iscsi_stat_login_attribute \
iscsi_stat_login_##_name = \
__CONFIGFS_EATTR_RO(_name, \
iscsi_stat_login_show_attr_##_name);
static ssize_t iscsi_stat_login_show_attr_inst(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_LOGIN_RO(inst);
static ssize_t iscsi_stat_login_show_attr_indx(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
ISCSI_STAT_LOGIN_RO(indx);
static ssize_t iscsi_stat_login_show_attr_accepts(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_LOGIN_RO(accepts);
static ssize_t iscsi_stat_login_show_attr_other_fails(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_LOGIN_RO(other_fails);
static ssize_t iscsi_stat_login_show_attr_redirects(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_LOGIN_RO(redirects);
static ssize_t iscsi_stat_login_show_attr_authorize_fails(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_LOGIN_RO(authorize_fails);
static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_LOGIN_RO(authenticate_fails);
static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
spin_lock(&lstat->lock);
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
spin_unlock(&lstat->lock);
return ret;
}
ISCSI_STAT_LOGIN_RO(negotiate_fails);
CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
iscsi_login_stats_group);
static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
&iscsi_stat_login_inst.attr,
&iscsi_stat_login_indx.attr,
&iscsi_stat_login_accepts.attr,
&iscsi_stat_login_other_fails.attr,
&iscsi_stat_login_redirects.attr,
&iscsi_stat_login_authorize_fails.attr,
&iscsi_stat_login_authenticate_fails.attr,
&iscsi_stat_login_negotiate_fails.attr,
NULL,
};
static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
.show_attribute = iscsi_stat_login_attr_show,
.store_attribute = iscsi_stat_login_attr_store,
};
struct config_item_type iscsi_stat_login_cit = {
.ct_item_ops = &iscsi_stat_login_stats_item_ops,
.ct_attrs = iscsi_stat_login_stats_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Target Logout Stats Table
*/
CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
#define ISCSI_STAT_LOGOUT(_name, _mode) \
static struct iscsi_stat_logout_attribute \
iscsi_stat_logout_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
iscsi_stat_logout_show_attr_##_name, \
iscsi_stat_logout_store_attr_##_name);
#define ISCSI_STAT_LOGOUT_RO(_name) \
static struct iscsi_stat_logout_attribute \
iscsi_stat_logout_##_name = \
__CONFIGFS_EATTR_RO(_name, \
iscsi_stat_logout_show_attr_##_name);
static ssize_t iscsi_stat_logout_show_attr_inst(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_LOGOUT_RO(inst);
static ssize_t iscsi_stat_logout_show_attr_indx(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
ISCSI_STAT_LOGOUT_RO(indx);
static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
}
ISCSI_STAT_LOGOUT_RO(normal_logouts);
static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
struct iscsi_wwn_stat_grps *igrps, char *page)
{
struct iscsi_tiqn *tiqn = container_of(igrps,
struct iscsi_tiqn, tiqn_stat_grps);
struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
}
ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
iscsi_logout_stats_group);
static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
&iscsi_stat_logout_inst.attr,
&iscsi_stat_logout_indx.attr,
&iscsi_stat_logout_normal_logouts.attr,
&iscsi_stat_logout_abnormal_logouts.attr,
NULL,
};
static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
.show_attribute = iscsi_stat_logout_attr_show,
.store_attribute = iscsi_stat_logout_attr_store,
};
struct config_item_type iscsi_stat_logout_cit = {
.ct_item_ops = &iscsi_stat_logout_stats_item_ops,
.ct_attrs = iscsi_stat_logout_stats_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Session Stats Table
*/
CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
#define ISCSI_STAT_SESS(_name, _mode) \
static struct iscsi_stat_sess_attribute \
iscsi_stat_sess_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
iscsi_stat_sess_show_attr_##_name, \
iscsi_stat_sess_store_attr_##_name);
#define ISCSI_STAT_SESS_RO(_name) \
static struct iscsi_stat_sess_attribute \
iscsi_stat_sess_##_name = \
__CONFIGFS_EATTR_RO(_name, \
iscsi_stat_sess_show_attr_##_name);
static ssize_t iscsi_stat_sess_show_attr_inst(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
struct iscsi_tiqn *tiqn = container_of(wwn,
struct iscsi_tiqn, tiqn_wwn);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
ISCSI_STAT_SESS_RO(inst);
static ssize_t iscsi_stat_sess_show_attr_node(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(node);
static ssize_t iscsi_stat_sess_show_attr_indx(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->session_index);
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(indx);
static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->cmd_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(cmd_pdus);
static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->rsp_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(rsp_pdus);
static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->tx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(txdata_octs);
static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->rx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(rxdata_octs);
static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->conn_digest_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(conn_digest_errors);
static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
struct iscsi_node_stat_grps *igrps, char *page)
{
struct iscsi_node_acl *acl = container_of(igrps,
struct iscsi_node_acl, node_stat_grps);
struct se_node_acl *se_nacl = &acl->se_node_acl;
struct iscsi_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&sess->conn_timeout_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
ISCSI_STAT_SESS_RO(conn_timeout_errors);
CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
iscsi_sess_stats_group);
static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
&iscsi_stat_sess_inst.attr,
&iscsi_stat_sess_node.attr,
&iscsi_stat_sess_indx.attr,
&iscsi_stat_sess_cmd_pdus.attr,
&iscsi_stat_sess_rsp_pdus.attr,
&iscsi_stat_sess_txdata_octs.attr,
&iscsi_stat_sess_rxdata_octs.attr,
&iscsi_stat_sess_conn_digest_errors.attr,
&iscsi_stat_sess_conn_timeout_errors.attr,
NULL,
};
static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
.show_attribute = iscsi_stat_sess_attr_show,
.store_attribute = iscsi_stat_sess_attr_store,
};
struct config_item_type iscsi_stat_sess_cit = {
.ct_item_ops = &iscsi_stat_sess_stats_item_ops,
.ct_attrs = iscsi_stat_sess_stats_attrs,
.ct_owner = THIS_MODULE,
};

drivers/target/iscsi/iscsi_target_stat.h Normal file

@ -0,0 +1,64 @@
#ifndef ISCSI_TARGET_STAT_H
#define ISCSI_TARGET_STAT_H
/*
* For struct iscsi_tiqn->tiqn_wwn default groups
*/
extern struct config_item_type iscsi_stat_instance_cit;
extern struct config_item_type iscsi_stat_sess_err_cit;
extern struct config_item_type iscsi_stat_tgt_attr_cit;
extern struct config_item_type iscsi_stat_login_cit;
extern struct config_item_type iscsi_stat_logout_cit;
/*
* For struct iscsi_session->se_sess default groups
*/
extern struct config_item_type iscsi_stat_sess_cit;
/* iSCSI session error types */
#define ISCSI_SESS_ERR_UNKNOWN 0
#define ISCSI_SESS_ERR_DIGEST 1
#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
#define ISCSI_SESS_ERR_PDU_FORMAT 3
/* iSCSI session error stats */
struct iscsi_sess_err_stats {
spinlock_t lock;
u32 digest_errors;
u32 cxn_timeout_errors;
u32 pdu_format_errors;
u32 last_sess_failure_type;
char last_sess_fail_rem_name[224];
} ____cacheline_aligned;
/* iSCSI login failure types (sub oids) */
#define ISCSI_LOGIN_FAIL_OTHER 2
#define ISCSI_LOGIN_FAIL_REDIRECT 3
#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
/* iSCSI login stats */
struct iscsi_login_stats {
spinlock_t lock;
u32 accepts;
u32 other_fails;
u32 redirects;
u32 authorize_fails;
u32 authenticate_fails;
u32 negotiate_fails; /* used for notifications */
u64 last_fail_time; /* time stamp (jiffies) */
u32 last_fail_type;
int last_intr_fail_ip_family;
unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
char last_intr_fail_name[224];
} ____cacheline_aligned;
/* iSCSI logout stats */
struct iscsi_logout_stats {
spinlock_t lock;
u32 normal_logouts;
u32 abnormal_logouts;
} ____cacheline_aligned;
#endif /*** ISCSI_TARGET_STAT_H ***/

drivers/target/iscsi/iscsi_target_tmr.c Normal file

@ -0,0 +1,849 @@
/*******************************************************************************
* This file contains the iSCSI Target specific Task Management functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_device.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
u8 iscsit_tmr_abort_task(
struct iscsi_cmd *cmd,
unsigned char *buf)
{
struct iscsi_cmd *ref_cmd;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
if (!ref_cmd) {
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
pr_err("RefCmdSN 0x%08x does not equal"
" task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
hdr->refcmdsn, ref_cmd->cmd_sn);
return ISCSI_TMF_RSP_REJECTED;
}
se_tmr->ref_task_tag = (__force u32)hdr->rtt;
tmr_req->ref_cmd = ref_cmd;
tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
return ISCSI_TMF_RSP_COMPLETE;
}
/*
* Called from iscsit_handle_task_mgt_cmd().
*/
int iscsit_tmr_task_warm_reset(
struct iscsi_conn *conn,
struct iscsi_tmr_req *tmr_req,
unsigned char *buf)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (!na->tmr_warm_reset) {
pr_err("TMR Opcode TARGET_WARM_RESET authorization"
" failed for Initiator Node: %s\n",
sess->se_sess->se_node_acl->initiatorname);
return -1;
}
/*
* Do the real work in transport_generic_do_tmr().
*/
return 0;
}
int iscsit_tmr_task_cold_reset(
struct iscsi_conn *conn,
struct iscsi_tmr_req *tmr_req,
unsigned char *buf)
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (!na->tmr_cold_reset) {
pr_err("TMR Opcode TARGET_COLD_RESET authorization"
" failed for Initiator Node: %s\n",
sess->se_sess->se_node_acl->initiatorname);
return -1;
}
/*
* Do the real work in transport_generic_do_tmr().
*/
return 0;
}
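/*
 * Handle a TASK_REASSIGN TMR for ERL=2 connection recovery: the
 * referenced command must sit in the connection recovery list, and
 * reassignment is rejected across differing Max*DataSegmentLength
 * values or a differing LUN.
 */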
u8 iscsit_tmr_task_reassign(
struct iscsi_cmd *cmd,
unsigned char *buf)
{
struct iscsi_cmd *ref_cmd = NULL;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_conn_recovery *cr = NULL;
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
int ret, ref_lun;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
" ignoring request.\n");
return ISCSI_TMF_RSP_NOT_SUPPORTED;
}
ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
if (ret == -2) {
pr_err("Command ITT: 0x%08x is still alligent to CID:"
" %hu\n", ref_cmd->init_task_tag, cr->cid);
return ISCSI_TMF_RSP_TASK_ALLEGIANT;
} else if (ret == -1) {
pr_err("Unable to locate RefTaskTag: 0x%08x in"
" connection recovery command list.\n", hdr->rtt);
return ISCSI_TMF_RSP_NO_TASK;
}
/*
* Temporary check to prevent connection recovery for
* connections with a differing Max*DataSegmentLength.
*/
if (cr->maxrecvdatasegmentlength !=
conn->conn_ops->MaxRecvDataSegmentLength) {
pr_err("Unable to perform connection recovery for"
" differing MaxRecvDataSegmentLength, rejecting"
" TMR TASK_REASSIGN.\n");
return ISCSI_TMF_RSP_REJECTED;
}
if (cr->maxxmitdatasegmentlength !=
conn->conn_ops->MaxXmitDataSegmentLength) {
pr_err("Unable to perform connection recovery for"
" differing MaxXmitDataSegmentLength, rejecting"
" TMR TASK_REASSIGN.\n");
return ISCSI_TMF_RSP_REJECTED;
}
ref_lun = scsilun_to_int(&hdr->lun);
if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
pr_err("Unable to perform connection recovery for"
" differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
ref_lun, ref_cmd->se_cmd.orig_fe_lun);
return ISCSI_TMF_RSP_REJECTED;
}
se_tmr->ref_task_tag = (__force u32)hdr->rtt;
tmr_req->ref_cmd = ref_cmd;
tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn);
tmr_req->conn_recovery = cr;
tmr_req->task_reassign = 1;
/*
* Command can now be reassigned to a new connection.
* The task management response must be sent before the
* reassignment actually happens. See iscsi_tmr_post_handler().
*/
return ISCSI_TMF_RSP_COMPLETE;
}
static void iscsit_task_reassign_remove_cmd(
struct iscsi_cmd *cmd,
struct iscsi_conn_recovery *cr,
struct iscsi_session *sess)
{
int ret;
spin_lock(&cr->conn_recovery_cmd_lock);
ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
if (!ret) {
pr_debug("iSCSI connection recovery successful for CID:"
" %hu on SID: %u\n", cr->cid, sess->sid);
iscsit_remove_active_connection_recovery_entry(cr, sess);
}
}
static int iscsit_task_reassign_complete_nop_out(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
" is NULL!\n", cmd->init_task_tag);
return -1;
}
cr = cmd->cr;
/*
 * Reset the StatSN so a new one for this command's new connection
* will be assigned.
* Reset the ExpStatSN as well so we may receive Status SNACKs.
*/
cmd->stat_sn = cmd->exp_stat_sn = 0;
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
cmd->i_state = ISTATE_SEND_NOPIN;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
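/*
 * Restart a reassigned WRITE on its new connection: adjust the DataAck
 * SNACK state from ExpDataSN, hand the command to the transport when
 * all DataOUT has already been received, and otherwise rebuild the
 * R2Ts needed to re-request the outstanding DataOUT.
 */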
static int iscsit_task_reassign_complete_write(
struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
int no_build_r2ts = 0;
u32 length = 0, offset = 0;
struct iscsi_conn *conn = cmd->conn;
struct se_cmd *se_cmd = &cmd->se_cmd;
/*
 * The Initiator must not send an R2T SNACK with a BegRun less than
* the TMR TASK_REASSIGN's ExpDataSN.
*/
if (!tmr_req->exp_data_sn) {
cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = 0;
} else {
cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
}
/*
* The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
* Initiator is expecting. The Target controls all WRITE operations
 * so if we have received all DataOUT we can safely ignore the Initiator.
*/
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
target_execute_cmd(se_cmd);
return 0;
}
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
/*
 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
* Unsolicited DataOut.
*/
if (cmd->unsolicited_data) {
cmd->unsolicited_data = 0;
offset = cmd->next_burst_len = cmd->write_data_done;
if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
cmd->se_cmd.data_length) {
no_build_r2ts = 1;
length = (cmd->se_cmd.data_length - offset);
} else
length = (conn->sess->sess_ops->FirstBurstLength - offset);
spin_lock_bh(&cmd->r2t_lock);
if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
cmd->outstanding_r2ts++;
spin_unlock_bh(&cmd->r2t_lock);
if (no_build_r2ts)
return 0;
}
/*
* iscsit_build_r2ts_for_cmd() can handle the rest from here.
*/
return conn->conn_transport->iscsit_get_dataout(conn, cmd, true);
}
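/*
 * Restart a reassigned READ on its new connection: verify the command
 * was sent to and completed by the transport, then attach a recovery
 * struct iscsi_datain_req beginning at the Initiator's ExpDataSN so
 * DataIN transmission can resume from there.
 */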
static int iscsit_task_reassign_complete_read(
struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
struct iscsi_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct se_cmd *se_cmd = &cmd->se_cmd;
/*
* The Initiator must not send a Data SNACK with a BegRun less than
* the TMR TASK_REASSIGN's ExpDataSN.
*/
if (!tmr_req->exp_data_sn) {
cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = 0;
} else {
cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
}
if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
" transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
transport_handle_cdb_direct(se_cmd);
return 0;
}
if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
" from transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
return -1;
}
dr = iscsit_allocate_datain_req();
if (!dr)
return -1;
/*
* The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
* Initiator is expecting.
*/
dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
dr->runlength = 0;
dr->generate_recovery_values = 1;
dr->recovery = DATAIN_CONNECTION_RECOVERY;
iscsit_attach_datain_req(cmd, dr);
cmd->i_state = ISTATE_SEND_DATAIN;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
static int iscsit_task_reassign_complete_none(
struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
struct iscsi_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
static int iscsit_task_reassign_complete_scsi_cmnd(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
" is NULL!\n", cmd->init_task_tag);
return -1;
}
cr = cmd->cr;
/*
 * Reset the StatSN so a new one for this command's new connection
* will be assigned.
* Reset the ExpStatSN as well so we may receive Status SNACKs.
*/
cmd->stat_sn = cmd->exp_stat_sn = 0;
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
}
switch (cmd->data_direction) {
case DMA_TO_DEVICE:
return iscsit_task_reassign_complete_write(cmd, tmr_req);
case DMA_FROM_DEVICE:
return iscsit_task_reassign_complete_read(cmd, tmr_req);
case DMA_NONE:
return iscsit_task_reassign_complete_none(cmd, tmr_req);
default:
pr_err("Unknown cmd->data_direction: 0x%02x\n",
cmd->data_direction);
return -1;
}
return 0;
}
static int iscsit_task_reassign_complete(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd;
int ret = 0;
if (!tmr_req->ref_cmd) {
pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
return -1;
}
cmd = tmr_req->ref_cmd;
cmd->conn = conn;
switch (cmd->iscsi_opcode) {
case ISCSI_OP_NOOP_OUT:
ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
break;
case ISCSI_OP_SCSI_CMD:
ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
break;
default:
pr_err("Illegal iSCSI Opcode 0x%02x during"
" command realligence\n", cmd->iscsi_opcode);
return -1;
}
if (ret != 0)
return ret;
pr_debug("Completed connection realligence for Opcode: 0x%02x,"
" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
cmd->init_task_tag, conn->cid);
return 0;
}
/*
* Handles special after-the-fact actions related to TMRs.
 * Right now the only one it is really needed for is
 * connection recovery related TASK_REASSIGN.
*/
int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
if (tmr_req->task_reassign &&
(se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
return iscsit_task_reassign_complete(tmr_req, conn);
return 0;
}
EXPORT_SYMBOL(iscsit_tmr_post_handler);
/*
* Nothing to do here, but leave it for good measure. :-)
*/
static int iscsit_task_reassign_prepare_read(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
return 0;
}
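/*
 * Rewind the Unsolicited DataOUT accounting for a command whose first
 * burst was still in flight when the old connection failed, so the
 * data can be re-requested and re-received on the new connection.
 */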
static void iscsit_task_reassign_prepare_unsolicited_dataout(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
int i, j;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
if (conn->sess->sess_ops->DataSequenceInOrder) {
cmd->data_sn = 0;
if (cmd->immediate_data)
cmd->r2t_offset += (cmd->first_burst_len -
cmd->seq_start_offset);
if (conn->sess->sess_ops->DataPDUInOrder) {
cmd->write_data_done -= (cmd->immediate_data) ?
(cmd->first_burst_len -
cmd->seq_start_offset) :
cmd->first_burst_len;
cmd->first_burst_len = 0;
return;
}
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
if ((pdu->offset >= cmd->seq_start_offset) &&
((pdu->offset + pdu->length) <=
cmd->seq_end_offset)) {
cmd->first_burst_len -= pdu->length;
cmd->write_data_done -= pdu->length;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
} else {
for (i = 0; i < cmd->seq_count; i++) {
seq = &cmd->seq_list[i];
if (seq->type != SEQTYPE_UNSOLICITED)
continue;
cmd->write_data_done -=
(seq->offset - seq->orig_offset);
cmd->first_burst_len = 0;
seq->data_sn = 0;
seq->offset = seq->orig_offset;
seq->next_burst_len = 0;
seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
if (conn->sess->sess_ops->DataPDUInOrder)
continue;
for (j = 0; j < seq->pdu_count; j++) {
pdu = &cmd->pdu_list[j+seq->pdu_start];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
}
}
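/*
 * Walk the command's R2T list, rewinding offsets, burst lengths and
 * PDU status for any DataOUT sequence left incomplete by the failed
 * connection, then drop all unacknowledged R2Ts at or above the TMR
 * TASK_REASSIGN ExpDataSN so they can be reissued.
 */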
static int iscsit_task_reassign_prepare_write(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_pdu *pdu = NULL;
struct iscsi_r2t *r2t = NULL, *r2t_tmp;
int first_incomplete_r2t = 1, i = 0;
/*
* The command was in the process of receiving Unsolicited DataOUT when
* the connection failed.
*/
if (cmd->unsolicited_data)
iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
/*
* The Initiator is requesting R2Ts starting from zero, skip
 * checking acknowledged R2Ts and start checking struct iscsi_r2t
 * entries greater than zero.
*/
if (!tmr_req->exp_data_sn)
goto drop_unacknowledged_r2ts;
/*
* We now check that the PDUs in DataOUT sequences below
* the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
* expecting next) have all the DataOUT they require to complete
* the DataOUT sequence. First scan from R2TSN 0 to TMR
* TASK_REASSIGN ExpDataSN-1.
*
* If we have not received all DataOUT in question, we must
* make sure to make the appropriate changes to values in
* struct iscsi_cmd (and elsewhere depending on session parameters)
* so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
* will resend a new R2T for the DataOUT sequences in question.
*/
spin_lock_bh(&cmd->r2t_lock);
if (list_empty(&cmd->cmd_r2t_list)) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
if (r2t->r2t_sn >= tmr_req->exp_data_sn)
continue;
/*
* Safely ignore Recovery R2Ts and R2Ts that have completed
* DataOUT sequences.
*/
if (r2t->seq_complete)
continue;
if (r2t->recovery_r2t)
continue;
/*
* DataSequenceInOrder=Yes:
*
* Taking into account the iSCSI implementation requirement of
* MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
* DataSequenceInOrder=Yes, we must take into consideration
* the following:
*
* DataSequenceInOrder=No:
*
* Taking into account that the Initiator controls the (possibly
* random) PDU Order in (possibly random) Sequence Order of
* DataOUT the target requests with R2Ts, we must take into
* consideration the following:
*
* DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
*
* While processing non-complete R2T DataOUT sequence requests
* the Target will re-request only the total sequence length
* minus current received offset. This is because we must
* assume the initiator will continue sending DataOUT from the
* last PDU before the connection failed.
*
* DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
*
* While processing non-complete R2T DataOUT sequence requests
* the Target will re-request the entire DataOUT sequence if
* any single PDU is missing from the sequence. This is because
* we have no logical method to determine the next PDU offset,
* and we must assume the Initiator will be sending any random
* PDU offset in the current sequence after TASK_REASSIGN
* has completed.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
if (!first_incomplete_r2t) {
cmd->r2t_offset -= r2t->xfer_len;
goto next;
}
if (conn->sess->sess_ops->DataPDUInOrder) {
cmd->data_sn = 0;
cmd->r2t_offset -= (r2t->xfer_len -
cmd->next_burst_len);
first_incomplete_r2t = 0;
goto next;
}
cmd->data_sn = 0;
cmd->r2t_offset -= r2t->xfer_len;
for (i = 0; i < cmd->pdu_count; i++) {
pdu = &cmd->pdu_list[i];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
if ((pdu->offset >= r2t->offset) &&
(pdu->offset < (r2t->offset +
r2t->xfer_len))) {
cmd->next_burst_len -= pdu->length;
cmd->write_data_done -= pdu->length;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
first_incomplete_r2t = 0;
} else {
struct iscsi_seq *seq;
seq = iscsit_get_seq_holder(cmd, r2t->offset,
r2t->xfer_len);
if (!seq) {
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
cmd->write_data_done -=
(seq->offset - seq->orig_offset);
seq->data_sn = 0;
seq->offset = seq->orig_offset;
seq->next_burst_len = 0;
seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
cmd->seq_send_order--;
if (conn->sess->sess_ops->DataPDUInOrder)
goto next;
for (i = 0; i < seq->pdu_count; i++) {
pdu = &cmd->pdu_list[i+seq->pdu_start];
if (pdu->status != ISCSI_PDU_RECEIVED_OK)
continue;
pdu->status = ISCSI_PDU_NOT_RECEIVED;
}
}
next:
cmd->outstanding_r2ts--;
}
spin_unlock_bh(&cmd->r2t_lock);
/*
 * We now drop all unacknowledged R2Ts, i.e. ExpDataSN from TMR
 * TASK_REASSIGN to the last R2T in the list. We are also careful
* to check that the Initiator is not requesting R2Ts for DataOUT
* sequences it has already completed.
*
* Free each R2T in question and adjust values in struct iscsi_cmd
 * accordingly so iscsit_build_r2ts_for_cmd() can do the rest of
* the work after the TMR TASK_REASSIGN Response is sent.
*/
drop_unacknowledged_r2ts:
cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
cmd->r2t_sn = tmr_req->exp_data_sn;
spin_lock_bh(&cmd->r2t_lock);
list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
/*
* Skip up to the R2T Sequence number provided by the
* iSCSI TASK_REASSIGN TMR
*/
if (r2t->r2t_sn < tmr_req->exp_data_sn)
continue;
if (r2t->seq_complete) {
pr_err("Initiator is requesting R2Ts from"
" R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
" Length: %u is already complete."
" BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
tmr_req->exp_data_sn, r2t->r2t_sn,
r2t->offset, r2t->xfer_len);
spin_unlock_bh(&cmd->r2t_lock);
return -1;
}
if (r2t->recovery_r2t) {
iscsit_free_r2t(r2t, cmd);
continue;
}
/* DataSequenceInOrder=Yes:
*
* Taking into account the iSCSI implementation requirement of
* MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
 * entire transfer length from the command's R2T offset marker.
*
* DataSequenceInOrder=No:
*
* We subtract the difference from struct iscsi_seq between the
* current offset and original offset from cmd->write_data_done
 * to account for DataOUT PDUs already received. Then reset
* the current offset to the original and zero out the current
* burst length, to make sure we re-request the entire DataOUT
* sequence.
*/
if (conn->sess->sess_ops->DataSequenceInOrder)
cmd->r2t_offset -= r2t->xfer_len;
else
cmd->seq_send_order--;
cmd->outstanding_r2ts--;
iscsit_free_r2t(r2t, cmd);
}
spin_unlock_bh(&cmd->r2t_lock);
return 0;
}
/*
 * Performs sanity checks on the TMR TASK_REASSIGN's ExpDataSN for
* a given struct iscsi_cmd.
*/
int iscsit_check_task_reassign_expdatasn(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
return 0;
if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
return 0;
if (ref_cmd->data_direction == DMA_NONE)
return 0;
/*
* For READs the TMR TASK_REASSIGNs ExpDataSN contains the next DataSN
* of DataIN the Initiator is expecting.
*
* Also check that the Initiator is not re-requesting DataIN that has
* already been acknowledged with a DataAck SNACK.
*/
if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
pr_err("Received ExpDataSN: 0x%08x for READ"
" in TMR TASK_REASSIGN greater than command's"
" DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
ref_cmd->data_sn);
return -1;
}
if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
(tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
pr_err("Received ExpDataSN: 0x%08x for READ"
" in TMR TASK_REASSIGN for previously"
" acknowledged DataIN: 0x%08x,"
" protocol error\n", tmr_req->exp_data_sn,
ref_cmd->acked_data_sn);
return -1;
}
return iscsit_task_reassign_prepare_read(tmr_req, conn);
}
/*
* For WRITEs the TMR TASK_REASSIGNs ExpDataSN contains the next R2TSN
* for R2Ts the Initiator is expecting.
*
* Do the magic in iscsit_task_reassign_prepare_write().
*/
if (ref_cmd->data_direction == DMA_TO_DEVICE) {
if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
pr_err("Received ExpDataSN: 0x%08x for WRITE"
" in TMR TASK_REASSIGN greater than command's"
" R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
ref_cmd->r2t_sn);
return -1;
}
return iscsit_task_reassign_prepare_write(tmr_req, conn);
}
pr_err("Unknown iSCSI data_direction: 0x%02x\n",
ref_cmd->data_direction);
return -1;
}

drivers/target/iscsi/iscsi_target_tmr.h Normal file

@ -0,0 +1,14 @@
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H
extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
unsigned char *);
extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
unsigned char *);
extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
struct iscsi_conn *);
#endif /* ISCSI_TARGET_TMR_H */

drivers/target/iscsi/iscsi_target_tpg.c Normal file

@ -0,0 +1,881 @@
/*******************************************************************************
* This file contains iSCSI Target Portal Group related functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "iscsi_target_core.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_login.h"
#include "iscsi_target_nodeattrib.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_parameters.h"
#include <target/iscsi/iscsi_transport.h>
struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
{
struct iscsi_portal_group *tpg;
tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct iscsi_portal_group\n");
return NULL;
}
tpg->tpgt = tpgt;
tpg->tpg_state = TPG_STATE_FREE;
tpg->tpg_tiqn = tiqn;
INIT_LIST_HEAD(&tpg->tpg_gnp_list);
INIT_LIST_HEAD(&tpg->tpg_list);
mutex_init(&tpg->tpg_access_lock);
sema_init(&tpg->np_login_sem, 1);
spin_lock_init(&tpg->tpg_state_lock);
spin_lock_init(&tpg->tpg_np_lock);
return tpg;
}
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
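/*
 * Allocate and register the single well-known discovery TPG, with
 * authentication disabled by default and AuthMethod set to "CHAP,None".
 */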
int iscsit_load_discovery_tpg(void)
{
struct iscsi_param *param;
struct iscsi_portal_group *tpg;
int ret;
tpg = iscsit_alloc_portal_group(NULL, 1);
if (!tpg) {
pr_err("Unable to allocate struct iscsi_portal_group\n");
return -1;
}
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
NULL, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_DISCOVERY);
if (ret < 0) {
kfree(tpg);
return -1;
}
tpg->sid = 1; /* First Assigned LIO Session ID */
iscsit_set_default_tpg_attribs(tpg);
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto out;
/*
* By default we disable authentication for discovery sessions,
* this can be changed with:
*
* /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
goto out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
goto out;
tpg->tpg_attrib.authentication = 0;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_ACTIVE;
spin_unlock(&tpg->tpg_state_lock);
iscsit_global->discovery_tpg = tpg;
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
return -1;
}
void iscsit_release_discovery_tpg(void)
{
struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
if (!tpg)
return;
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
iscsit_global->discovery_tpg = NULL;
}
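/*
 * Return the active portal group bound to the given network portal,
 * taking a kref on the matching struct iscsi_tpg_np before returning.
 */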
struct iscsi_portal_group *iscsit_get_tpg_from_np(
struct iscsi_tiqn *tiqn,
struct iscsi_np *np,
struct iscsi_tpg_np **tpg_np_out)
{
struct iscsi_portal_group *tpg = NULL;
struct iscsi_tpg_np *tpg_np;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (tpg_np->tpg_np == np) {
*tpg_np_out = tpg_np;
kref_get(&tpg_np->tpg_np_kref);
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
return tpg;
}
}
spin_unlock(&tpg->tpg_np_lock);
}
spin_unlock(&tiqn->tiqn_tpg_lock);
return NULL;
}
int iscsit_get_tpg(
struct iscsi_portal_group *tpg)
{
int ret;
ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
return ((ret != 0) || signal_pending(current)) ? -1 : 0;
}
void iscsit_put_tpg(struct iscsi_portal_group *tpg)
{
mutex_unlock(&tpg->tpg_access_lock);
}
static void iscsit_clear_tpg_np_login_thread(
struct iscsi_tpg_np *tpg_np,
struct iscsi_portal_group *tpg,
bool shutdown)
{
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
return;
}
if (shutdown)
tpg_np->tpg_np->enabled = false;
iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
static void iscsit_clear_tpg_np_login_threads(
struct iscsi_portal_group *tpg,
bool shutdown)
{
struct iscsi_tpg_np *tpg_np;
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
continue;
}
spin_unlock(&tpg->tpg_np_lock);
iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
spin_lock(&tpg->tpg_np_lock);
}
spin_unlock(&tpg->tpg_np_lock);
}
void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
{
iscsi_print_params(tpg->param_list);
}
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
a->authentication = TA_AUTHENTICATION;
a->login_timeout = TA_LOGIN_TIMEOUT;
a->netif_timeout = TA_NETIF_TIMEOUT;
a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
a->generate_node_acls = TA_GENERATE_NODE_ACLS;
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
a->default_erl = TA_DEFAULT_ERL;
a->t10_pi = TA_DEFAULT_T10_PI;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
{
if (tpg->tpg_state != TPG_STATE_FREE) {
pr_err("Unable to add iSCSI Target Portal Group: %d"
" while not in TPG_STATE_FREE state.\n", tpg->tpgt);
return -EEXIST;
}
iscsit_set_default_tpg_attribs(tpg);
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto err_out;
tpg->tpg_attrib.tpg = tpg;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tiqn->tiqn_tpg_lock);
list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
tiqn->tiqn_ntpgs++;
pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
tiqn->tiqn, tpg->tpgt);
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
err_out:
if (tpg->param_list) {
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
kfree(tpg);
return -ENOMEM;
}
int iscsit_tpg_del_portal_group(
struct iscsi_tiqn *tiqn,
struct iscsi_portal_group *tpg,
int force)
{
u8 old_state = tpg->tpg_state;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
pr_err("Unable to delete iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n",
tpg->tpgt);
tpg->tpg_state = old_state;
return -EPERM;
}
core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
if (tpg->param_list) {
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
core_tpg_deregister(&tpg->tpg_se_tpg);
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_FREE;
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tiqn->tiqn_tpg_lock);
tiqn->tiqn_ntpgs--;
list_del(&tpg->tpg_list);
spin_unlock(&tiqn->tiqn_tpg_lock);
pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
tiqn->tiqn, tpg->tpgt);
kfree(tpg);
return 0;
}
int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
{
struct iscsi_param *param;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
int ret;
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
pr_err("iSCSI target portal group: %hu is already"
" active, ignoring request.\n", tpg->tpgt);
spin_unlock(&tpg->tpg_state_lock);
return -EINVAL;
}
/*
* Make sure that AuthMethod does not contain None as an option
* unless explicitly disabled. Set the default to CHAP if authentication
* is enforced (as per default), and remove the NONE option.
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param) {
spin_unlock(&tpg->tpg_state_lock);
return -EINVAL;
}
if (tpg->tpg_attrib.authentication) {
if (!strcmp(param->value, NONE)) {
ret = iscsi_update_param_value(param, CHAP);
if (ret)
goto err;
}
ret = iscsit_ta_authentication(tpg, 1);
if (ret < 0)
goto err;
}
tpg->tpg_state = TPG_STATE_ACTIVE;
spin_unlock(&tpg->tpg_state_lock);
spin_lock(&tiqn->tiqn_tpg_lock);
tiqn->tiqn_active_tpgs++;
pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
tpg->tpgt);
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
err:
spin_unlock(&tpg->tpg_state_lock);
return ret;
}
int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
{
struct iscsi_tiqn *tiqn;
u8 old_state = tpg->tpg_state;
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_INACTIVE) {
pr_err("iSCSI Target Portal Group: %hu is already"
" inactive, ignoring request.\n", tpg->tpgt);
spin_unlock(&tpg->tpg_state_lock);
return -EINVAL;
}
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
iscsit_clear_tpg_np_login_threads(tpg, false);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = old_state;
spin_unlock(&tpg->tpg_state_lock);
pr_err("Unable to disable iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n",
tpg->tpgt);
return -EPERM;
}
tiqn = tpg->tpg_tiqn;
if (!tiqn || (tpg == iscsit_global->discovery_tpg))
return 0;
spin_lock(&tiqn->tiqn_tpg_lock);
tiqn->tiqn_active_tpgs--;
pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
tpg->tpgt);
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
}
struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
struct iscsi_session *sess)
{
struct se_session *se_sess = sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
se_node_acl);
return &acl->node_attrib;
}
struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
struct iscsi_tpg_np *tpg_np,
int network_transport)
{
struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
spin_lock(&tpg_np->tpg_np_parent_lock);
list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
&tpg_np->tpg_np_parent_list, tpg_np_child_list) {
if (tpg_np_child->tpg_np->np_network_transport ==
network_transport) {
spin_unlock(&tpg_np->tpg_np_parent_lock);
return tpg_np_child;
}
}
spin_unlock(&tpg_np->tpg_np_parent_lock);
return NULL;
}
static bool iscsit_tpg_check_network_portal(
struct iscsi_tiqn *tiqn,
struct __kernel_sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
struct iscsi_np *np;
bool match = false;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
np = tpg_np->tpg_np;
match = iscsit_check_np_match(sockaddr, np,
network_transport);
if (match)
break;
}
spin_unlock(&tpg->tpg_np_lock);
}
spin_unlock(&tiqn->tiqn_tpg_lock);
return match;
}
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
struct __kernel_sockaddr_storage *sockaddr,
char *ip_str,
struct iscsi_tpg_np *tpg_np_parent,
int network_transport)
{
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np;
if (!tpg_np_parent) {
if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
network_transport)) {
pr_err("Network Portal: %s already exists on a"
" different TPG on %s\n", ip_str,
tpg->tpg_tiqn->tiqn);
return ERR_PTR(-EEXIST);
}
}
tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
if (!tpg_np) {
pr_err("Unable to allocate memory for"
" struct iscsi_tpg_np.\n");
return ERR_PTR(-ENOMEM);
}
np = iscsit_add_np(sockaddr, ip_str, network_transport);
if (IS_ERR(np)) {
kfree(tpg_np);
return ERR_CAST(np);
}
INIT_LIST_HEAD(&tpg_np->tpg_np_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
spin_lock_init(&tpg_np->tpg_np_parent_lock);
init_completion(&tpg_np->tpg_np_comp);
kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
np->tpg_np = tpg_np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
tpg->num_tpg_nps++;
if (tpg->tpg_tiqn)
tpg->tpg_tiqn->tiqn_num_tpg_nps++;
spin_unlock(&tpg->tpg_np_lock);
if (tpg_np_parent) {
tpg_np->tpg_np_parent = tpg_np_parent;
spin_lock(&tpg_np_parent->tpg_np_parent_lock);
list_add_tail(&tpg_np->tpg_np_child_list,
&tpg_np_parent->tpg_np_parent_list);
spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
}
pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
np->np_transport->name);
return tpg_np;
}
static int iscsit_tpg_release_np(
struct iscsi_tpg_np *tpg_np,
struct iscsi_portal_group *tpg,
struct iscsi_np *np)
{
iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
np->np_transport->name);
tpg_np->tpg_np = NULL;
tpg_np->tpg = NULL;
kfree(tpg_np);
/*
* iscsit_del_np() will shut down the struct iscsi_np when the last TPG reference is released.
*/
return iscsit_del_np(np);
}
int iscsit_tpg_del_network_portal(
struct iscsi_portal_group *tpg,
struct iscsi_tpg_np *tpg_np)
{
struct iscsi_np *np;
struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
int ret = 0;
np = tpg_np->tpg_np;
if (!np) {
pr_err("Unable to locate struct iscsi_np from"
" struct iscsi_tpg_np\n");
return -EINVAL;
}
if (!tpg_np->tpg_np_parent) {
/*
* We are the parent tpg network portal. Release all of the
* child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
* list first.
*/
list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
&tpg_np->tpg_np_parent_list,
tpg_np_child_list) {
ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
if (ret < 0)
pr_err("iscsit_tpg_del_network_portal()"
" failed: %d\n", ret);
}
} else {
/*
* We are a child network portal (e.g. a non-ISCSI_TCP one), so
* remove ourselves from the parent's child list.
*/
spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
list_del(&tpg_np->tpg_np_child_list);
spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
}
spin_lock(&tpg->tpg_np_lock);
list_del(&tpg_np->tpg_np_list);
tpg->num_tpg_nps--;
if (tpg->tpg_tiqn)
tpg->tpg_tiqn->tiqn_num_tpg_nps--;
spin_unlock(&tpg->tpg_np_lock);
return iscsit_tpg_release_np(tpg_np, tpg, np);
}
int iscsit_tpg_set_initiator_node_queue_depth(
struct iscsi_portal_group *tpg,
unsigned char *initiatorname,
u32 queue_depth,
int force)
{
return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
initiatorname, queue_depth, force);
}
int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
{
unsigned char buf1[256], buf2[256], *none = NULL;
int len;
struct iscsi_param *param;
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((authentication != 1) && (authentication != 0)) {
pr_err("Illegal value for authentication parameter:"
" %u, ignoring request.\n", authentication);
return -EINVAL;
}
memset(buf1, 0, sizeof(buf1));
memset(buf2, 0, sizeof(buf2));
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
return -EINVAL;
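/*
* Enforcing authentication strips "None" from the AuthMethod value,
* e.g. "CHAP,None" -> "CHAP" and "None,CHAP" -> "CHAP" (hypothetical
* values); disabling it appends ",None" when not already present.
*/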
if (authentication) {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
if (!none)
goto out;
if (!strncmp(none + 4, ",", 1)) {
if (!strcmp(buf1, none))
sprintf(buf2, "%s", none+5);
else {
none--;
*none = '\0';
len = sprintf(buf2, "%s", buf1);
none += 5;
sprintf(buf2 + len, "%s", none);
}
} else {
none--;
*none = '\0';
sprintf(buf2, "%s", buf1);
}
if (iscsi_update_param_value(param, buf2) < 0)
return -EINVAL;
} else {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
if (none)
goto out;
strncat(buf1, ",", strlen(","));
strncat(buf1, NONE, strlen(NONE));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
out:
a->authentication = authentication;
pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
return 0;
}
int iscsit_ta_login_timeout(
struct iscsi_portal_group *tpg,
u32 login_timeout)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
pr_err("Requested Login Timeout %u larger than maximum"
" %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
return -EINVAL;
} else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
pr_err("Requested Logout Timeout %u smaller than"
" minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
return -EINVAL;
}
a->login_timeout = login_timeout;
pr_debug("Set Logout Timeout to %u for Target Portal Group"
" %hu\n", a->login_timeout, tpg->tpgt);
return 0;
}
int iscsit_ta_netif_timeout(
struct iscsi_portal_group *tpg,
u32 netif_timeout)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
pr_err("Requested Network Interface Timeout %u larger"
" than maximum %u\n", netif_timeout,
TA_NETIF_TIMEOUT_MAX);
return -EINVAL;
} else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
pr_err("Requested Network Interface Timeout %u smaller"
" than minimum %u\n", netif_timeout,
TA_NETIF_TIMEOUT_MIN);
return -EINVAL;
}
a->netif_timeout = netif_timeout;
pr_debug("Set Network Interface Timeout to %u for"
" Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
return 0;
}
int iscsit_ta_generate_node_acls(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->generate_node_acls = flag;
pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
if (flag == 1 && a->cache_dynamic_acls == 0) {
pr_debug("Explicitly setting cache_dynamic_acls=1 when "
"generate_node_acls=1\n");
a->cache_dynamic_acls = 1;
}
return 0;
}
int iscsit_ta_default_cmdsn_depth(
struct iscsi_portal_group *tpg,
u32 tcq_depth)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
pr_err("Requested Default Queue Depth: %u larger"
" than maximum %u\n", tcq_depth,
TA_DEFAULT_CMDSN_DEPTH_MAX);
return -EINVAL;
} else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
pr_err("Requested Default Queue Depth: %u smaller"
" than minimum %u\n", tcq_depth,
TA_DEFAULT_CMDSN_DEPTH_MIN);
return -EINVAL;
}
a->default_cmdsn_depth = tcq_depth;
pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
tpg->tpgt, a->default_cmdsn_depth);
return 0;
}
int iscsit_ta_cache_dynamic_acls(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (a->generate_node_acls == 1 && flag == 0) {
pr_debug("Skipping cache_dynamic_acls=0 when"
" generate_node_acls=1\n");
return 0;
}
a->cache_dynamic_acls = flag;
pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
" ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
"Enabled" : "Disabled");
return 0;
}
int iscsit_ta_demo_mode_write_protect(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->demo_mode_write_protect = flag;
pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
return 0;
}
int iscsit_ta_prod_mode_write_protect(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->prod_mode_write_protect = flag;
pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
" %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
"ON" : "OFF");
return 0;
}
int iscsit_ta_demo_mode_discovery(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->demo_mode_discovery = flag;
pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
" %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
"ON" : "OFF");
return 0;
}
int iscsit_ta_default_erl(
struct iscsi_portal_group *tpg,
u32 default_erl)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
pr_err("Illegal value for default_erl: %u\n", default_erl);
return -EINVAL;
}
a->default_erl = default_erl;
pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
return 0;
}
int iscsit_ta_t10_pi(
struct iscsi_portal_group *tpg,
u32 flag)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
a->t10_pi = flag;
pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
" %s\n", tpg->tpgt, (a->t10_pi) ?
"ON" : "OFF");
return 0;
}


@ -0,0 +1,43 @@
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H
extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
int);
extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
struct iscsi_portal_group *, const char *, u32);
extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
struct se_node_acl *);
extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
int);
extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
unsigned char *, u32, int);
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */


@ -0,0 +1,495 @@
/*******************************************************************************
* This file contains the iSCSI Login Thread and Thread Queue functions.
*
* (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include "iscsi_target_core.h"
#include "iscsi_target_tq.h"
#include "iscsi_target.h"
static LIST_HEAD(inactive_ts_list);
static DEFINE_SPINLOCK(inactive_ts_lock);
static DEFINE_SPINLOCK(ts_bitmap_lock);
static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
{
if (!list_empty(&ts->ts_list)) {
WARN_ON(1);
return;
}
spin_lock(&inactive_ts_lock);
list_add_tail(&ts->ts_list, &inactive_ts_list);
iscsit_global->inactive_ts++;
spin_unlock(&inactive_ts_lock);
}
static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
{
struct iscsi_thread_set *ts;
spin_lock(&inactive_ts_lock);
if (list_empty(&inactive_ts_list)) {
spin_unlock(&inactive_ts_lock);
return NULL;
}
ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
list_del_init(&ts->ts_list);
iscsit_global->inactive_ts--;
spin_unlock(&inactive_ts_lock);
return ts;
}
int iscsi_allocate_thread_sets(u32 thread_pair_count)
{
int allocated_thread_pair_count = 0, i, thread_id;
struct iscsi_thread_set *ts = NULL;
for (i = 0; i < thread_pair_count; i++) {
ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
if (!ts) {
pr_err("Unable to allocate memory for"
" thread set.\n");
return allocated_thread_pair_count;
}
/*
* Locate the next available region in the thread_set_bitmap
*/
spin_lock(&ts_bitmap_lock);
thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
iscsit_global->ts_bitmap_count, get_order(1));
spin_unlock(&ts_bitmap_lock);
if (thread_id < 0) {
pr_err("bitmap_find_free_region() failed for"
" thread_set_bitmap\n");
kfree(ts);
return allocated_thread_pair_count;
}
ts->thread_id = thread_id;
ts->status = ISCSI_THREAD_SET_FREE;
INIT_LIST_HEAD(&ts->ts_list);
spin_lock_init(&ts->ts_state_lock);
init_completion(&ts->rx_restart_comp);
init_completion(&ts->tx_restart_comp);
init_completion(&ts->rx_start_comp);
init_completion(&ts->tx_start_comp);
sema_init(&ts->ts_activate_sem, 0);
ts->create_threads = 1;
ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
ISCSI_TX_THREAD_NAME);
if (IS_ERR(ts->tx_thread)) {
dump_stack();
pr_err("Unable to start iscsi_target_tx_thread\n");
break;
}
ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
ISCSI_RX_THREAD_NAME);
if (IS_ERR(ts->rx_thread)) {
kthread_stop(ts->tx_thread);
pr_err("Unable to start iscsi_target_rx_thread\n");
break;
}
ts->create_threads = 0;
iscsi_add_ts_to_inactive_list(ts);
allocated_thread_pair_count++;
}
pr_debug("Spawned %d thread set(s) (%d total threads).\n",
allocated_thread_pair_count, allocated_thread_pair_count * 2);
return allocated_thread_pair_count;
}
static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts)
{
spin_lock_bh(&ts->ts_state_lock);
ts->status = ISCSI_THREAD_SET_DIE;
if (ts->rx_thread) {
complete(&ts->rx_start_comp);
spin_unlock_bh(&ts->ts_state_lock);
kthread_stop(ts->rx_thread);
spin_lock_bh(&ts->ts_state_lock);
}
if (ts->tx_thread) {
complete(&ts->tx_start_comp);
spin_unlock_bh(&ts->ts_state_lock);
kthread_stop(ts->tx_thread);
spin_lock_bh(&ts->ts_state_lock);
}
spin_unlock_bh(&ts->ts_state_lock);
/*
* Release this thread_id in the thread_set_bitmap
*/
spin_lock(&ts_bitmap_lock);
bitmap_release_region(iscsit_global->ts_bitmap,
ts->thread_id, get_order(1));
spin_unlock(&ts_bitmap_lock);
kfree(ts);
}
void iscsi_deallocate_thread_sets(void)
{
struct iscsi_thread_set *ts = NULL;
u32 released_count = 0;
while ((ts = iscsi_get_ts_from_inactive_list())) {
iscsi_deallocate_thread_one(ts);
released_count++;
}
if (released_count)
pr_debug("Stopped %d thread set(s) (%d total threads)."
"\n", released_count, released_count * 2);
}
static void iscsi_deallocate_extra_thread_sets(void)
{
u32 orig_count, released_count = 0;
struct iscsi_thread_set *ts = NULL;
orig_count = TARGET_THREAD_SET_COUNT;
while ((iscsit_global->inactive_ts + 1) > orig_count) {
ts = iscsi_get_ts_from_inactive_list();
if (!ts)
break;
iscsi_deallocate_thread_one(ts);
released_count++;
}
if (released_count)
pr_debug("Stopped %d thread set(s) (%d total threads)."
"\n", released_count, released_count * 2);
}
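/*
* Hand a connection to a thread set: publish the conn pointer, mark the
* set ACTIVE, wake the parked RX/TX threads, then block on
* ts_activate_sem until a pre-handler below signals that the threads
* have picked up the connection.
*/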
void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
{
spin_lock_bh(&ts->ts_state_lock);
conn->thread_set = ts;
ts->conn = conn;
ts->status = ISCSI_THREAD_SET_ACTIVE;
spin_unlock_bh(&ts->ts_state_lock);
complete(&ts->rx_start_comp);
complete(&ts->tx_start_comp);
down(&ts->ts_activate_sem);
}
struct iscsi_thread_set *iscsi_get_thread_set(void)
{
struct iscsi_thread_set *ts;
get_set:
ts = iscsi_get_ts_from_inactive_list();
if (!ts) {
iscsi_allocate_thread_sets(1);
goto get_set;
}
ts->delay_inactive = 1;
ts->signal_sent = 0;
ts->thread_count = 2;
init_completion(&ts->rx_restart_comp);
init_completion(&ts->tx_restart_comp);
sema_init(&ts->ts_activate_sem, 0);
return ts;
}
void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
{
struct iscsi_thread_set *ts = NULL;
if (!conn->thread_set) {
pr_err("struct iscsi_conn->thread_set is NULL\n");
return;
}
ts = conn->thread_set;
spin_lock_bh(&ts->ts_state_lock);
ts->thread_clear &= ~thread_clear;
if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
(ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
complete(&ts->rx_restart_comp);
else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
(ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
complete(&ts->tx_restart_comp);
spin_unlock_bh(&ts->ts_state_lock);
}
void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
{
struct iscsi_thread_set *ts = NULL;
if (!conn->thread_set) {
pr_err("struct iscsi_conn->thread_set is NULL\n");
return;
}
ts = conn->thread_set;
spin_lock_bh(&ts->ts_state_lock);
ts->signal_sent |= signal_sent;
spin_unlock_bh(&ts->ts_state_lock);
}
int iscsi_release_thread_set(struct iscsi_conn *conn)
{
int thread_called = 0;
struct iscsi_thread_set *ts = NULL;
if (!conn || !conn->thread_set) {
pr_err("connection or thread set pointer is NULL\n");
BUG();
}
ts = conn->thread_set;
spin_lock_bh(&ts->ts_state_lock);
ts->status = ISCSI_THREAD_SET_RESET;
if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
strlen(ISCSI_RX_THREAD_NAME)))
thread_called = ISCSI_RX_THREAD;
else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
strlen(ISCSI_TX_THREAD_NAME)))
thread_called = ISCSI_TX_THREAD;
if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
(ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
send_sig(SIGINT, ts->rx_thread, 1);
ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
}
ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
wait_for_completion(&ts->rx_restart_comp);
spin_lock_bh(&ts->ts_state_lock);
ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
}
if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
(ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
send_sig(SIGINT, ts->tx_thread, 1);
ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
}
ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
wait_for_completion(&ts->tx_restart_comp);
spin_lock_bh(&ts->ts_state_lock);
ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
}
ts->conn = NULL;
ts->status = ISCSI_THREAD_SET_FREE;
spin_unlock_bh(&ts->ts_state_lock);
return 0;
}
int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
{
struct iscsi_thread_set *ts;
if (!conn->thread_set)
return -1;
ts = conn->thread_set;
spin_lock_bh(&ts->ts_state_lock);
if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
spin_unlock_bh(&ts->ts_state_lock);
return -1;
}
if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
send_sig(SIGINT, ts->tx_thread, 1);
ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
}
if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
send_sig(SIGINT, ts->rx_thread, 1);
ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
}
spin_unlock_bh(&ts->ts_state_lock);
return 0;
}
static void iscsi_check_to_add_additional_sets(void)
{
int thread_sets_add;
spin_lock(&inactive_ts_lock);
thread_sets_add = iscsit_global->inactive_ts;
spin_unlock(&inactive_ts_lock);
if (thread_sets_add == 1)
iscsi_allocate_thread_sets(1);
}
static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
{
spin_lock_bh(&ts->ts_state_lock);
if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() ||
signal_pending(current)) {
spin_unlock_bh(&ts->ts_state_lock);
return -1;
}
spin_unlock_bh(&ts->ts_state_lock);
return 0;
}
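/*
* Called from the top of the RX thread main loop: requeue the set if it
* is going inactive, park on rx_start_comp until a connection is
* attached, and return the active conn (or NULL if the thread must exit).
*/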
struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
{
int ret;
spin_lock_bh(&ts->ts_state_lock);
if (ts->create_threads) {
spin_unlock_bh(&ts->ts_state_lock);
goto sleep;
}
if (ts->status != ISCSI_THREAD_SET_DIE)
flush_signals(current);
if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
if (!iscsit_global->in_shutdown)
iscsi_deallocate_extra_thread_sets();
iscsi_add_ts_to_inactive_list(ts);
spin_lock_bh(&ts->ts_state_lock);
}
if ((ts->status == ISCSI_THREAD_SET_RESET) &&
(ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
complete(&ts->rx_restart_comp);
ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
sleep:
ret = wait_for_completion_interruptible(&ts->rx_start_comp);
if (ret != 0)
return NULL;
if (iscsi_signal_thread_pre_handler(ts) < 0)
return NULL;
iscsi_check_to_add_additional_sets();
spin_lock_bh(&ts->ts_state_lock);
if (!ts->conn) {
pr_err("struct iscsi_thread_set->conn is NULL for"
" RX thread_id: %s/%d\n", current->comm, current->pid);
spin_unlock_bh(&ts->ts_state_lock);
return NULL;
}
ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
up(&ts->ts_activate_sem);
return ts->conn;
}
struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
{
int ret;
spin_lock_bh(&ts->ts_state_lock);
if (ts->create_threads) {
spin_unlock_bh(&ts->ts_state_lock);
goto sleep;
}
if (ts->status != ISCSI_THREAD_SET_DIE)
flush_signals(current);
if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
if (!iscsit_global->in_shutdown)
iscsi_deallocate_extra_thread_sets();
iscsi_add_ts_to_inactive_list(ts);
spin_lock_bh(&ts->ts_state_lock);
}
if ((ts->status == ISCSI_THREAD_SET_RESET) &&
(ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
complete(&ts->tx_restart_comp);
ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
sleep:
ret = wait_for_completion_interruptible(&ts->tx_start_comp);
if (ret != 0)
return NULL;
if (iscsi_signal_thread_pre_handler(ts) < 0)
return NULL;
iscsi_check_to_add_additional_sets();
spin_lock_bh(&ts->ts_state_lock);
if (!ts->conn) {
pr_err("struct iscsi_thread_set->conn is NULL for"
" TX thread_id: %s/%d\n", current->comm, current->pid);
spin_unlock_bh(&ts->ts_state_lock);
return NULL;
}
ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
spin_unlock_bh(&ts->ts_state_lock);
up(&ts->ts_activate_sem);
return ts->conn;
}
int iscsi_thread_set_init(void)
{
int size;
iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
if (!iscsit_global->ts_bitmap) {
pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
return -ENOMEM;
}
return 0;
}
void iscsi_thread_set_free(void)
{
kfree(iscsit_global->ts_bitmap);
}


@ -0,0 +1,84 @@
#ifndef ISCSI_THREAD_QUEUE_H
#define ISCSI_THREAD_QUEUE_H
/*
* Defines for thread sets.
*/
extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
extern int iscsi_allocate_thread_sets(u32);
extern void iscsi_deallocate_thread_sets(void);
extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
extern struct iscsi_thread_set *iscsi_get_thread_set(void);
extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
extern int iscsi_release_thread_set(struct iscsi_conn *);
extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
extern int iscsi_thread_set_init(void);
extern void iscsi_thread_set_free(void);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
#define TARGET_THREAD_SET_COUNT 4
#define ISCSI_RX_THREAD 1
#define ISCSI_TX_THREAD 2
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
#define ISCSI_BLOCK_RX_THREAD 0x1
#define ISCSI_BLOCK_TX_THREAD 0x2
#define ISCSI_CLEAR_RX_THREAD 0x1
#define ISCSI_CLEAR_TX_THREAD 0x2
#define ISCSI_SIGNAL_RX_THREAD 0x1
#define ISCSI_SIGNAL_TX_THREAD 0x2
/* struct iscsi_thread_set->status */
#define ISCSI_THREAD_SET_FREE 1
#define ISCSI_THREAD_SET_ACTIVE 2
#define ISCSI_THREAD_SET_DIE 3
#define ISCSI_THREAD_SET_RESET 4
#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
/* By default allow a maximum of 32K iSCSI connections */
#define ISCSI_TS_BITMAP_BITS 32768
struct iscsi_thread_set {
/* flags used for blocking and restarting sets */
int blocked_threads;
/* flag for creating threads */
int create_threads;
/* flag for delaying re-adding to the inactive list */
int delay_inactive;
/* status for thread set */
int status;
/* which threads have had signals sent */
int signal_sent;
/* flag for which threads exited first */
int thread_clear;
/* Active threads in the thread set */
int thread_count;
/* Unique thread ID */
u32 thread_id;
/* pointer to connection if set is active */
struct iscsi_conn *conn;
/* used for controlling ts state accesses */
spinlock_t ts_state_lock;
/* used for restarting thread queue */
struct completion rx_restart_comp;
/* used for restarting thread queue */
struct completion tx_restart_comp;
/* used to block the RX thread while the set is unused */
struct completion rx_start_comp;
/* used to block the TX thread while the set is unused */
struct completion tx_start_comp;
/* OS descriptor for rx thread */
struct task_struct *rx_thread;
/* OS descriptor for tx thread */
struct task_struct *tx_thread;
/* list head for linking onto the inactive thread set list */
struct list_head ts_list;
struct semaphore ts_activate_sem;
};
#endif /*** ISCSI_THREAD_QUEUE_H ***/


@ -0,0 +1,55 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_transport_list);
static DEFINE_MUTEX(transport_mutex);
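/*
* Registry of pluggable iSCSI transports (e.g. TCP, or RDMA/iSER from a
* separate module). A lookup takes a module reference so the transport
* cannot be unloaded while in use; iscsit_put_transport() drops it.
*/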
struct iscsit_transport *iscsit_get_transport(int type)
{
struct iscsit_transport *t;
mutex_lock(&transport_mutex);
list_for_each_entry(t, &g_transport_list, t_node) {
if (t->transport_type == type) {
if (t->owner && !try_module_get(t->owner)) {
t = NULL;
}
mutex_unlock(&transport_mutex);
return t;
}
}
mutex_unlock(&transport_mutex);
return NULL;
}
void iscsit_put_transport(struct iscsit_transport *t)
{
if (t->owner)
module_put(t->owner);
}
int iscsit_register_transport(struct iscsit_transport *t)
{
INIT_LIST_HEAD(&t->t_node);
mutex_lock(&transport_mutex);
list_add_tail(&t->t_node, &g_transport_list);
mutex_unlock(&transport_mutex);
pr_debug("Registered iSCSI transport: %s\n", t->name);
return 0;
}
EXPORT_SYMBOL(iscsit_register_transport);
void iscsit_unregister_transport(struct iscsit_transport *t)
{
mutex_lock(&transport_mutex);
list_del(&t->t_node);
mutex_unlock(&transport_mutex);
pr_debug("Unregistered iSCSI transport: %s\n", t->name);
}
EXPORT_SYMBOL(iscsit_unregister_transport);

File diff suppressed because it is too large


@ -0,0 +1,62 @@
#ifndef ISCSI_TARGET_UTIL_H
#define ISCSI_TARGET_UTIL_H
#define MARKER_SIZE 8
extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
unsigned char *, __be32 cmdsn);
extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
itt_t, u32);
extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
struct iscsi_conn_recovery **, itt_t);
extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
extern void iscsit_start_nopin_timer(struct iscsi_conn *);
extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
extern void iscsit_print_session_params(struct iscsi_session *);
extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
#endif /*** ISCSI_TARGET_UTIL_H ***/


@ -0,0 +1,5 @@
config LOOPBACK_TARGET
tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.


@ -0,0 +1 @@
obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o

File diff suppressed because it is too large


@ -0,0 +1,62 @@
#define TCM_LOOP_VERSION "v2.1-rc2"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
/*
* Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
*/
#define TL_SCSI_MAX_CMD_LEN 32
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
/* Tagged command queueing */
u32 sc_cmd_tag;
/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tl_se_cmd;
struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
};
struct tcm_loop_tmr {
atomic_t tmr_complete;
wait_queue_head_t tl_tmr_wait;
};
struct tcm_loop_nexus {
/*
* Pointer to TCM session for I_T Nexus
*/
struct se_session *se_sess;
};
struct tcm_loop_nacl {
struct se_node_acl se_node_acl;
};
#define TCM_TRANSPORT_ONLINE 0
#define TCM_TRANSPORT_OFFLINE 1
struct tcm_loop_tpg {
unsigned short tl_tpgt;
unsigned short tl_transport_status;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
};
struct tcm_loop_hba {
u8 tl_proto_id;
unsigned char tl_wwn_address[TL_WWN_ADDR_LEN];
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
struct device dev;
struct Scsi_Host *sh;
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
struct se_wwn tl_hba_wwn;
};


@ -0,0 +1,11 @@
config SBP_TARGET
tristate "FireWire SBP-2 fabric module"
depends on FIREWIRE
help
Say Y or M here to enable SCSI target functionality over FireWire.
This enables you to expose SCSI devices to other nodes on the FireWire
bus, for example hard disks. This is similar to the FireWire Target
Disk mode found on many Apple computers.
To compile this driver as a module, say M here: The module will be
called sbp-target.


@ -0,0 +1 @@
obj-$(CONFIG_SBP_TARGET) += sbp_target.o

File diff suppressed because it is too large


@ -0,0 +1,251 @@
#ifndef _SBP_BASE_H
#define _SBP_BASE_H
#include <linux/firewire.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <target/target_core_base.h>
#define SBP_VERSION "v0.1"
#define SBP_NAMELEN 32
#define SBP_ORB_FETCH_SIZE 8
#define MANAGEMENT_AGENT_STATE_IDLE 0
#define MANAGEMENT_AGENT_STATE_BUSY 1
#define ORB_NOTIFY(v) (((v) >> 31) & 0x01)
#define ORB_REQUEST_FORMAT(v) (((v) >> 29) & 0x03)
#define MANAGEMENT_ORB_FUNCTION(v) (((v) >> 16) & 0x0f)
#define MANAGEMENT_ORB_FUNCTION_LOGIN 0x0
#define MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS 0x1
#define MANAGEMENT_ORB_FUNCTION_RECONNECT 0x3
#define MANAGEMENT_ORB_FUNCTION_SET_PASSWORD 0x4
#define MANAGEMENT_ORB_FUNCTION_LOGOUT 0x7
#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK 0xb
#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET 0xc
#define MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET 0xe
#define MANAGEMENT_ORB_FUNCTION_TARGET_RESET 0xf
#define LOGIN_ORB_EXCLUSIVE(v) (((v) >> 28) & 0x01)
#define LOGIN_ORB_RESERVED(v) (((v) >> 24) & 0x0f)
#define LOGIN_ORB_RECONNECT(v) (((v) >> 20) & 0x0f)
#define LOGIN_ORB_LUN(v) (((v) >> 0) & 0xffff)
#define LOGIN_ORB_PASSWORD_LENGTH(v) (((v) >> 16) & 0xffff)
#define LOGIN_ORB_RESPONSE_LENGTH(v) (((v) >> 0) & 0xffff)
#define RECONNECT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff)
#define LOGOUT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff)
#define CMDBLK_ORB_DIRECTION(v) (((v) >> 27) & 0x01)
#define CMDBLK_ORB_SPEED(v) (((v) >> 24) & 0x07)
#define CMDBLK_ORB_MAX_PAYLOAD(v) (((v) >> 20) & 0x0f)
#define CMDBLK_ORB_PG_TBL_PRESENT(v) (((v) >> 19) & 0x01)
#define CMDBLK_ORB_PG_SIZE(v) (((v) >> 16) & 0x07)
#define CMDBLK_ORB_DATA_SIZE(v) (((v) >> 0) & 0xffff)
#define STATUS_BLOCK_SRC(v) (((v) & 0x03) << 30)
#define STATUS_BLOCK_RESP(v) (((v) & 0x03) << 28)
#define STATUS_BLOCK_DEAD(v) (((v) ? 1 : 0) << 27)
#define STATUS_BLOCK_LEN(v) (((v) & 0x07) << 24)
#define STATUS_BLOCK_SBP_STATUS(v) (((v) & 0xff) << 16)
#define STATUS_BLOCK_ORB_OFFSET_HIGH(v) (((v) & 0xffff) << 0)
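/*
* The *_ORB_* macros above extract bitfields from an ORB "misc" word
* (after conversion to CPU byte order), while the STATUS_BLOCK_* macros
* compose fields into a status word. Hypothetical example: misc =
* 0x88450000 gives ORB_NOTIFY() == 1, CMDBLK_ORB_DIRECTION() == 1
* (write) and CMDBLK_ORB_SPEED() == 0.
*/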
#define STATUS_SRC_ORB_CONTINUING 0
#define STATUS_SRC_ORB_FINISHED 1
#define STATUS_SRC_UNSOLICITED 2
#define STATUS_RESP_REQUEST_COMPLETE 0
#define STATUS_RESP_TRANSPORT_FAILURE 1
#define STATUS_RESP_ILLEGAL_REQUEST 2
#define STATUS_RESP_VENDOR_DEPENDENT 3
#define SBP_STATUS_OK 0
#define SBP_STATUS_REQ_TYPE_NOTSUPP 1
#define SBP_STATUS_SPEED_NOTSUPP 2
#define SBP_STATUS_PAGE_SIZE_NOTSUPP 3
#define SBP_STATUS_ACCESS_DENIED 4
#define SBP_STATUS_LUN_NOTSUPP 5
#define SBP_STATUS_PAYLOAD_TOO_SMALL 6
/* 7 is reserved */
#define SBP_STATUS_RESOURCES_UNAVAIL 8
#define SBP_STATUS_FUNCTION_REJECTED 9
#define SBP_STATUS_LOGIN_ID_UNKNOWN 10
#define SBP_STATUS_DUMMY_ORB_COMPLETE 11
#define SBP_STATUS_REQUEST_ABORTED 12
#define SBP_STATUS_UNSPECIFIED_ERROR 0xff
#define AGENT_STATE_RESET 0
#define AGENT_STATE_ACTIVE 1
#define AGENT_STATE_SUSPENDED 2
#define AGENT_STATE_DEAD 3
struct sbp2_pointer {
__be32 high;
__be32 low;
};
struct sbp_command_block_orb {
struct sbp2_pointer next_orb;
struct sbp2_pointer data_descriptor;
__be32 misc;
u8 command_block[12];
};
struct sbp_page_table_entry {
__be16 segment_length;
__be16 segment_base_hi;
__be32 segment_base_lo;
};
struct sbp_management_orb {
struct sbp2_pointer ptr1;
struct sbp2_pointer ptr2;
__be32 misc;
__be32 length;
struct sbp2_pointer status_fifo;
};
struct sbp_status_block {
__be32 status;
__be32 orb_low;
u8 data[24];
};
struct sbp_login_response_block {
__be32 misc;
struct sbp2_pointer command_block_agent;
__be32 reconnect_hold;
};
struct sbp_login_descriptor {
struct sbp_session *sess;
struct list_head link;
struct se_lun *lun;
u64 status_fifo_addr;
int exclusive;
u16 login_id;
struct sbp_target_agent *tgt_agt;
};
struct sbp_session {
spinlock_t lock;
struct se_session *se_sess;
struct list_head login_list;
struct delayed_work maint_work;
u64 guid; /* login_owner_EUI_64 */
int node_id; /* login_owner_ID */
struct fw_card *card;
int generation;
int speed;
int reconnect_hold;
u64 reconnect_expires;
};
struct sbp_nacl {
/* Initiator EUI-64 */
u64 guid;
/* ASCII formatted GUID for SBP Initiator port */
char iport_name[SBP_NAMELEN];
/* Returned by sbp_make_nodeacl() */
struct se_node_acl se_node_acl;
};
struct sbp_tpg {
/* Target portal group tag for TCM */
u16 tport_tpgt;
/* Pointer back to sbp_tport */
struct sbp_tport *tport;
/* Returned by sbp_make_tpg() */
struct se_portal_group se_tpg;
};
struct sbp_tport {
/* Target Unit Identifier (EUI-64) */
u64 guid;
/* Target port name */
char tport_name[SBP_NAMELEN];
/* Returned by sbp_make_tport() */
struct se_wwn tport_wwn;
struct sbp_tpg *tpg;
/* FireWire unit directory */
struct fw_descriptor unit_directory;
/* SBP Management Agent */
struct sbp_management_agent *mgt_agt;
/* Parameters */
int enable;
s32 directory_id;
int mgt_orb_timeout;
int max_reconnect_timeout;
int max_logins_per_lun;
};
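/*
* An SBP-2 pointer packs a 48-bit FireWire address into two big-endian
* words: the low 16 bits of 'high' hold address bits 47:32 and 'low'
* holds bits 31:0; the two least significant bits are masked off below
* since ORBs are quadlet aligned.
*/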
static inline u64 sbp2_pointer_to_addr(const struct sbp2_pointer *ptr)
{
return (u64)(be32_to_cpu(ptr->high) & 0x0000ffff) << 32 |
(be32_to_cpu(ptr->low) & 0xfffffffc);
}
static inline void addr_to_sbp2_pointer(u64 addr, struct sbp2_pointer *ptr)
{
ptr->high = cpu_to_be32(addr >> 32);
ptr->low = cpu_to_be32(addr);
}
struct sbp_target_agent {
spinlock_t lock;
struct fw_address_handler handler;
struct sbp_login_descriptor *login;
int state;
struct work_struct work;
u64 orb_pointer;
bool doorbell;
};
struct sbp_target_request {
struct sbp_login_descriptor *login;
u64 orb_pointer;
struct sbp_command_block_orb orb;
struct sbp_status_block status;
struct work_struct work;
struct se_cmd se_cmd;
struct sbp_page_table_entry *pg_tbl;
void *cmd_buf;
unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
};
struct sbp_management_agent {
spinlock_t lock;
struct sbp_tport *tport;
struct fw_address_handler handler;
int state;
struct work_struct work;
u64 orb_offset;
struct sbp_management_request *request;
};
struct sbp_management_request {
struct sbp_management_orb orb;
struct sbp_status_block status;
struct fw_card *card;
int generation;
int node_addr;
int speed;
};
#endif

File diff suppressed because it is too large


@ -0,0 +1,161 @@
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
/*
* INQUIRY response data, TPGS Field
*
* from spc4r17 section 6.4.2 Table 135
*/
#define TPGS_NO_ALUA 0x00
#define TPGS_IMPLICIT_ALUA 0x10
#define TPGS_EXPLICIT_ALUA 0x20
/*
* ASYMMETRIC ACCESS STATE field
*
* from spc4r36j section 6.37 Table 307
*/
#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
* from spc4r36j section 6.37 Table 306
*/
#define ALUA_T_SUP 0x80
#define ALUA_O_SUP 0x40
#define ALUA_LBD_SUP 0x10
#define ALUA_U_SUP 0x08
#define ALUA_S_SUP 0x04
#define ALUA_AN_SUP 0x02
#define ALUA_AO_SUP 0x01
/*
* REPORT_TARGET_PORT_GROUP STATUS CODE
*
* from spc4r17 section 6.27 Table 246
*/
#define ALUA_STATUS_NONE 0x00
#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
*/
#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
#define ASCQ_04H_ALUA_OFFLINE 0x12
/*
* Used as the default for Active/NonOptimized delay (in milliseconds).
* This can also be changed via configfs on a per target port group basis.
*/
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
* Used for the implicit and explicit ALUA transition delay, which is
* disabled by default and is intended for debugging client-side ALUA code.
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
* Used for the recommended application client implicit transition timeout
* in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
*/
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
/*
* Used by core_alua_update_tpg_primary_metadata() and
* core_alua_update_tpg_secondary_metadata()
*/
#define ALUA_METADATA_PATH_LEN 512
/*
* Used by core_alua_update_tpg_secondary_metadata()
*/
#define ALUA_SECONDARY_METADATA_WWN_LEN 256
/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern struct kmem_cache *t10_alua_lba_map_cache;
extern struct kmem_cache *t10_alua_lba_map_mem_cache;
extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
struct list_head *, u64, u64);
extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
extern void core_alua_free_lba_map(struct list_head *);
extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
int, int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
extern void core_alua_free_lu_gp_mem(struct se_device *);
extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *);
extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
struct t10_alua_tg_pt_gp *);
extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
size_t);
extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
size_t);
extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
extern ssize_t core_alua_store_secondary_status(struct se_lun *,
const char *, size_t);
extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
extern int core_setup_alua(struct se_device *);
extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
#endif /* TARGET_CORE_ALUA_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,451 @@
/*******************************************************************************
* Filename: target_core_fabric_lib.c
*
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
* (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
/*
* Handlers for Serial Attached SCSI (SAS)
*/
u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
/*
* Return a SAS Serial SCSI Protocol identifier for loopback operations
* This is defined in section 7.5.1 Table 362 in spc4r17
*/
return 0x6;
}
EXPORT_SYMBOL(sas_get_fabric_proto_ident);
u32 sas_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
unsigned char *ptr;
int ret;
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
*/
buf[0] = 0x06;
/*
* From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
* over SAS Serial SCSI Protocol
*/
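/*
* e.g. a hypothetical initiatorname of "naa.6001405aabbccdd0" yields
* buf[4..11] = 60 01 40 5a ab bc cd d0.
*/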
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
ret = hex2bin(&buf[4], ptr, 8);
if (ret < 0)
pr_debug("sas transport_id: invalid hex string\n");
/*
* The SAS Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id);
u32 sas_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
*format_code = 0;
/*
* From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
* over SAS Serial SCSI Protocol
*
* The SAS Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id_len);
/*
* Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
* Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
*/
char *sas_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
/*
* Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
* for initiator ports using SCSI over SAS Serial SCSI Protocol
*
* The TransportID for a SAS Initiator Port has a fixed size of
* 24 bytes, and SAS does not contain an I_T nexus identifier,
* so we return the **port_nexus_ptr set to NULL.
*/
*port_nexus_ptr = NULL;
*out_tid_len = 24;
return (char *)&buf[4];
}
EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
/*
* Handlers for Fibre Channel Protocol (FCP)
*/
u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
}
EXPORT_SYMBOL(fc_get_fabric_proto_ident);
u32 fc_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
*format_code = 0;
/*
* The FC Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id_len);
u32 fc_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
unsigned char *ptr;
int i, ret;
u32 off = 8;
/*
* PROTOCOL IDENTIFIER is 0h for FCP-2
*
* From spc4r17, 7.5.4.2 TransportID for initiator ports using
* SCSI over Fibre Channel
*
* We convert the ASCII formatted N Port name into a binary
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
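/* Walk the colon-separated WWPN, packing each pair of hex digits into one binary byte starting at buf[8] */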
for (i = 0; i < 24; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
ret = hex2bin(&buf[off++], &ptr[i], 1);
if (ret < 0)
pr_debug("fc transport_id: invalid hex string\n");
i += 2;
}
/*
* The FC Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id);
char *fc_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
/*
* The TransportID for a FC N Port has a fixed size of
* 24 bytes, and FC does not contain an I_T nexus identifier,
* so we return the **port_nexus_ptr set to NULL.
*/
*port_nexus_ptr = NULL;
*out_tid_len = 24;
return (char *)&buf[8];
}
EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
/*
* Handlers for Internet Small Computer Systems Interface (iSCSI)
*/
u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
/*
* This value is defined for "Internet SCSI (iSCSI)"
* in spc4r17 section 7.5.1 Table 362
*/
return 0x5;
}
EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
u32 iscsi_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
u32 off = 4, padding = 0;
u16 len = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
* Set PROTOCOL IDENTIFIER to 5h for iSCSI
*/
buf[0] = 0x05;
/*
* From spc4r17 Section 7.5.4.6: TransportID for initiator
* ports using SCSI over iSCSI.
*
* The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
* shall contain the iSCSI name of an iSCSI initiator node (see
* RFC 3720). The first ISCSI NAME field byte containing an ASCII
* null character terminates the ISCSI NAME field without regard for
* the specified length of the iSCSI TransportID or the contents of
* the ADDITIONAL LENGTH field.
*/
len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
/*
* Add Extra byte for NULL terminator
*/
len++;
/*
* If there is an ISID present with the registration and *format_code == 1,
* use the iSCSI Initiator port TransportID format.
*
* Otherwise use the iSCSI Initiator device TransportID format, which
* does not contain the ASCII encoded iSCSI Initiator ISID value
* provided by the iSCSI Initiator during the iSCSI login process.
*/
if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
/*
* Set FORMAT CODE 01b for iSCSI Initiator port TransportID
* format.
*/
buf[0] |= 0x40;
/*
* From spc4r17 Section 7.5.4.6: TransportID for initiator
* ports using SCSI over iSCSI. Table 390
*
* The SEPARATOR field shall contain the five ASCII
* characters ",i,0x".
*
* The null-terminated, null-padded ISCSI INITIATOR SESSION ID
* field shall contain the iSCSI initiator session identifier
* (see RFC 3720) in the form of ASCII characters that are the
* hexadecimal digits converted from the binary iSCSI initiator
* session identifier value. The first ISCSI INITIATOR SESSION
* ID field byte containing an ASCII null character terminates
* the ISCSI INITIATOR SESSION ID field.
*/
buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
len += 5;
buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
buf[off+len] = '\0'; off++;
len += 7;
}
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
* in the TransportID. The additional length shall be at least 20 and
* shall be a multiple of four.
*/
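/* (-len) & 3 yields the number of pad bytes that round len up to a multiple of four */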
padding = ((-len) & 3);
if (padding != 0)
len += padding;
buf[2] = ((len >> 8) & 0xff);
buf[3] = (len & 0xff);
/*
* Increment value for total payload + header length for
* full status descriptor
*/
len += 4;
return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id);
u32 iscsi_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
u32 len = 0, padding = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
len = strlen(se_nacl->initiatorname);
/*
* Add extra byte for NULL terminator
*/
len++;
/*
* If there is ISID present with the registration, use format code:
* 01b: iSCSI Initiator port TransportID format
*
* If there is not an active iSCSI session, use format code:
* 00b: iSCSI Initiator device TransportID format
*/
if (pr_reg->isid_present_at_reg) {
len += 5; /* For ",i,0x" ASCII separator */
len += 7; /* For iSCSI Initiator Session ID + Null terminator */
*format_code = 1;
} else
*format_code = 0;
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
* in the TransportID. The additional length shall be at least 20 and
* shall be a multiple of four.
*/
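/* Same two's complement trick: pad len up to the next multiple of four */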
padding = ((-len) & 3);
if (padding != 0)
len += padding;
/*
* Increment value for total payload + header length for
* full status descriptor
*/
len += 4;
return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
char *iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
char *p;
u32 tid_len, padding;
int i;
u16 add_len;
u8 format_code = (buf[0] & 0xc0);
/*
* Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
*
* TransportID for initiator ports using SCSI over iSCSI,
* from Table 388 -- iSCSI TransportID formats.
*
* 00b Initiator port is identified using the world wide unique
* SCSI device name of the iSCSI initiator
* device containing the initiator port (see table 389).
* 01b Initiator port is identified using the world wide unique
* initiator port identifier (see table 390).
* 10b to 11b Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
/*
* If the caller wants the TransportID Length, we set that value for the
* entire iSCSI Transport ID now.
*/
if (out_tid_len) {
/* The shift works thanks to integer promotion rules */
add_len = (buf[2] << 8) | buf[3];
tid_len = strlen(&buf[4]);
tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
tid_len += 1; /* Add one byte for NULL terminator */
padding = ((-tid_len) & 3);
if (padding != 0)
tid_len += padding;
if ((add_len + 4) != tid_len) {
pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
} else
*out_tid_len = (add_len + 4);
}
/*
* Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
* Session ID as defined in Table 390 - iSCSI initiator port TransportID
* format.
*/
if (format_code == 0x40) {
p = strstr(&buf[4], ",i,0x");
if (!p) {
pr_err("Unable to locate \",i,0x\" separator"
" for Initiator port identifier: %s\n",
&buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */
p += 5; /* Skip over ",i,0x" separator */
*port_nexus_ptr = p;
/*
* Go ahead and do the lower case conversion of the received
* 12 ASCII characters representing the ISID in the TransportID
* for comparison against the running iSCSI session's ISID from
* iscsi_target.c:lio_sess_get_initiator_sid()
*/
for (i = 0; i < 12; i++) {
if (isdigit(*p)) {
p++;
continue;
}
*p = tolower(*p);
p++;
}
}
return (char *)&buf[4];
}
EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,54 @@
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
#define FD_MAX_DEV_PROT_NAME (FD_MAX_DEV_NAME + 16)
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
/*
* Limited by the number of iovecs (2048) per vfs_[writev,readv] call
*/
#define FD_MAX_BYTES 8388608
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
#define FDBD_FORMAT_UNIT_SIZE 2048
struct fd_prot {
unsigned char *prot_buf;
struct scatterlist *prot_sg;
u32 prot_sg_nents;
};
struct fd_dev {
struct se_device dev;
u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 fd_dev_id;
/* Number of SG tables in sg_table_array */
u32 fd_table_count;
u32 fd_queue_depth;
u32 fd_block_size;
unsigned long long fd_dev_size;
struct file *fd_file;
struct file *fd_prot_file;
/* FILEIO HBA device is connected to */
struct fd_host *fd_host;
} ____cacheline_aligned;
struct fd_host {
u32 fd_host_dev_id_count;
/* Unique FILEIO Host ID */
u32 fd_host_id;
} ____cacheline_aligned;
#endif /* TARGET_CORE_FILE_H */

View file

@ -0,0 +1,168 @@
/*******************************************************************************
* Filename: target_core_hba.c
*
* This file contains the TCM HBA Transport related functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
static u32 hba_id_counter;
static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
INIT_LIST_HEAD(&sub_api->sub_api_list);
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!strcmp(s->name, sub_api->name)) {
pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
return -EEXIST;
}
}
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
EXPORT_SYMBOL(transport_subsystem_register);
void transport_subsystem_release(struct se_subsystem_api *sub_api)
{
mutex_lock(&subsystem_mutex);
list_del(&sub_api->sub_api_list);
mutex_unlock(&subsystem_mutex);
}
EXPORT_SYMBOL(transport_subsystem_release);
static struct se_subsystem_api *core_get_backend(const char *sub_name)
{
struct se_subsystem_api *s;
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!strcmp(s->name, sub_name))
goto found;
}
mutex_unlock(&subsystem_mutex);
return NULL;
found:
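/* Pin the backend module before releasing subsystem_mutex */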
if (s->owner && !try_module_get(s->owner))
s = NULL;
mutex_unlock(&subsystem_mutex);
return s;
}
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
struct se_hba *hba;
int ret = 0;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
goto out_free_hba;
}
ret = hba->transport->attach_hba(hba, plugin_dep_id);
if (ret < 0)
goto out_module_put;
spin_lock(&hba_lock);
hba->hba_id = hba_id_counter++;
list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock);
pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
out_module_put:
if (hba->transport->owner)
module_put(hba->transport->owner);
hba->transport = NULL;
out_free_hba:
kfree(hba);
return ERR_PTR(ret);
}
int
core_delete_hba(struct se_hba *hba)
{
WARN_ON(hba->dev_count);
hba->transport->detach_hba(hba);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
spin_unlock(&hba_lock);
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)
module_put(hba->transport->owner);
hba->transport = NULL;
kfree(hba);
return 0;
}

View file

@ -0,0 +1,899 @@
/*******************************************************************************
* Filename: target_core_iblock.c
*
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
return container_of(dev, struct iblock_dev, dev);
}
static struct se_subsystem_api iblock_template;
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
*
*
*/
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
}
static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
struct iblock_dev *ib_dev = NULL;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
if (!ib_dev) {
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return &ib_dev->dev;
}
static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
struct block_device *bd = NULL;
struct blk_integrity *bi;
fmode_t mode;
int ret = -ENOMEM;
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
pr_err("Missing udev_path= parameters for IBLOCK\n");
return -EINVAL;
}
ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
if (!ib_dev->ibd_bio_set) {
pr_err("IBLOCK: Unable to create bioset\n");
goto out;
}
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
mode = FMODE_READ|FMODE_EXCL;
if (!ib_dev->ibd_readonly)
mode |= FMODE_WRITE;
bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto out_free_bioset;
}
ib_dev->ibd_bd = bd;
q = bdev_get_queue(bd);
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
dev->dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code.
*/
dev->dev_attrib.max_unmap_block_desc_count = 1;
dev->dev_attrib.unmap_granularity =
q->limits.discard_granularity >> 9;
dev->dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
/*
* Enable write same emulation for IBLOCK and use 0xFFFF, as
* the smaller WRITE_SAME(10) only has a two-byte block count.
*/
dev->dev_attrib.max_write_same_len = 0xFFFF;
if (blk_queue_nonrot(q))
dev->dev_attrib.is_nonrot = 1;
bi = bdev_get_integrity(bd);
if (bi) {
struct bio_set *bs = ib_dev->ibd_bio_set;
if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
!strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
pr_err("IBLOCK export of blk_integrity: %s not"
" supported\n", bi->name);
ret = -ENOSYS;
goto out_blkdev_put;
}
if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
}
if (dev->dev_attrib.pi_prot_type) {
if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
pr_err("Unable to allocate bioset for PI\n");
ret = -ENOMEM;
goto out_blkdev_put;
}
pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
bs->bio_integrity_pool);
}
dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
}
return 0;
out_blkdev_put:
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
bioset_free(ib_dev->ibd_bio_set);
ib_dev->ibd_bio_set = NULL;
out:
return ret;
}
static void iblock_free_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
if (ib_dev->ibd_bio_set != NULL)
bioset_free(ib_dev->ibd_bio_set);
kfree(ib_dev);
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
struct se_device *dev,
struct block_device *bd,
struct request_queue *q)
{
unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
if (block_size == dev->dev_attrib.block_size)
return blocks_long;
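/*
* Rescale the last-LBA count when the advertised block size differs
* from the device's logical block size; each halving of the block
* size doubles the number of exported blocks.
*/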
switch (block_size) {
case 4096:
switch (dev->dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
case 1024:
blocks_long <<= 2;
break;
case 512:
blocks_long <<= 3;
break;
default:
break;
}
break;
case 2048:
switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
case 1024:
blocks_long <<= 1;
break;
case 512:
blocks_long <<= 2;
break;
default:
break;
}
break;
case 1024:
switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
case 2048:
blocks_long >>= 1;
break;
case 512:
blocks_long <<= 1;
break;
default:
break;
}
break;
case 512:
switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
case 2048:
blocks_long >>= 2;
break;
case 1024:
blocks_long >>= 1;
break;
default:
break;
}
break;
default:
break;
}
return blocks_long;
}
static void iblock_complete_cmd(struct se_cmd *cmd)
{
struct iblock_req *ibr = cmd->priv;
u8 status;
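/* Runs once per reference on ibr->pending; only the final decrement reports status to the core */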
if (!atomic_dec_and_test(&ibr->pending))
return;
if (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION;
else
status = SAM_STAT_GOOD;
target_complete_cmd(cmd, status);
kfree(ibr);
}
static void iblock_bio_done(struct bio *bio, int err)
{
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
/*
* Set -EIO if !BIO_UPTODATE and the passed err is still 0
*/
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic();
}
bio_put(bio);
iblock_complete_cmd(cmd);
}
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
/*
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
if (sg_num > BIO_MAX_PAGES)
sg_num = BIO_MAX_PAGES;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
return bio;
}
static void iblock_submit_bios(struct bio_list *list, int rw)
{
struct blk_plug plug;
struct bio *bio;
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
submit_bio(rw, bio);
blk_finish_plug(&plug);
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
struct se_cmd *cmd = bio->bi_private;
if (err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
if (cmd) {
if (err)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
bio_put(bio);
}
/*
* Implement SYNCHRONIZE CACHE. Note that we can't handle LBA ranges and must
* always flush the whole cache.
*/
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
struct bio *bio;
/*
* If the Immediate bit is set, queue up the GOOD response
* for this SYNCHRONIZE_CACHE op.
*/
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
if (!immed)
bio->bi_private = cmd;
submit_bio(WRITE_FLUSH, bio);
return 0;
}
static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
sector_t lba, sector_t nolb)
{
struct block_device *bdev = priv;
int ret;
ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
return 0;
}
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}
static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
sector_t lba = cmd->t_task_lba;
sector_t nolb = sbc_get_write_same_sectors(cmd);
int ret;
ret = iblock_do_unmap(cmd, bdev, lba, nolb);
if (ret)
return ret;
target_complete_cmd(cmd, GOOD);
return 0;
}
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
struct iblock_req *ibr;
struct scatterlist *sg;
struct bio *bio;
struct bio_list list;
sector_t block_lba = cmd->t_task_lba;
sector_t sectors = sbc_get_write_same_sectors(cmd);
sg = &cmd->t_data_sg[0];
if (cmd->t_data_nents > 1 ||
sg->length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
" block_size: %u\n", cmd->t_data_nents, sg->length,
cmd->se_dev->dev_attrib.block_size);
return TCM_INVALID_CDB_FIELD;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
cmd->priv = ibr;
bio = iblock_get_bio(cmd, block_lba, 1);
if (!bio)
goto fail_free_ibr;
bio_list_init(&list);
bio_list_add(&list, bio);
atomic_set(&ibr->pending, 1);
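/* One reference per bio; the final bio completion finishes the command */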
while (sectors) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
bio = iblock_get_bio(cmd, block_lba, 1);
if (!bio)
goto fail_put_bios;
atomic_inc(&ibr->pending);
bio_list_add(&list, bio);
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sectors -= 1;
}
iblock_submit_bios(&list, WRITE);
return 0;
fail_put_bios:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
fail_free_ibr:
kfree(ibr);
fail:
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
enum {
Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
{Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"},
{Opt_err, NULL}
};
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
}
if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
SE_UDEV_PATH_LEN) == 0) {
ret = -EINVAL;
break;
}
pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_readonly:
arg_p = match_strdup(&args[0]);
if (!arg_p) {
ret = -ENOMEM;
break;
}
ret = kstrtoul(arg_p, 0, &tmp_readonly);
kfree(arg_p);
if (ret < 0) {
pr_err("kstrtoul() failed for"
" readonly=\n");
goto out;
}
ib_dev->ibd_readonly = tmp_readonly;
pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
break;
case Opt_force:
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s",
ib_dev->ibd_udev_path);
bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == ib_dev) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
return bl;
}
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
struct se_device *dev = cmd->se_dev;
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct scatterlist *sg;
int i, rc;
bi = bdev_get_integrity(ib_dev->ibd_bd);
if (!bi) {
pr_err("Unable to locate bio_integrity\n");
return -ENODEV;
}
bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
if (!bip) {
pr_err("Unable to allocate bio_integrity_payload\n");
return -ENOMEM;
}
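/* One protection tuple of dev->prot_length bytes guards each logical block of data */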
bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
dev->prot_length;
bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
(unsigned long long)bip->bip_iter.bi_sector);
for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
sg->offset);
if (rc != sg->length) {
pr_err("bio_integrity_add_page() failed; %d\n", rc);
return -ENOMEM;
}
pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
sg_page(sg), sg->length, sg->offset);
}
return 0;
}
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr;
struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
sector_t block_lba;
unsigned bio_cnt;
int rw = 0;
int i;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
* Force writethrough using WRITE_FUA if a volatile write cache
* is not enabled, or if the initiator set the Force Unit Access bit.
*/
if (q->flush_flags & REQ_FUA) {
if (cmd->se_cmd_flags & SCF_FUA)
rw = WRITE_FUA;
else if (!(q->flush_flags & REQ_FLUSH))
rw = WRITE_FUA;
else
rw = WRITE;
} else {
rw = WRITE;
}
} else {
rw = READ;
}
/*
* Convert the blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer.
*/
if (dev->dev_attrib.block_size == 4096)
block_lba = (cmd->t_task_lba << 3);
else if (dev->dev_attrib.block_size == 2048)
block_lba = (cmd->t_task_lba << 2);
else if (dev->dev_attrib.block_size == 1024)
block_lba = (cmd->t_task_lba << 1);
else if (dev->dev_attrib.block_size == 512)
block_lba = cmd->t_task_lba;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->dev_attrib.block_size);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
cmd->priv = ibr;
if (!sgl_nents) {
atomic_set(&ibr->pending, 1);
iblock_complete_cmd(cmd);
return 0;
}
bio = iblock_get_bio(cmd, block_lba, sgl_nents);
if (!bio)
goto fail_free_ibr;
bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
atomic_set(&ibr->pending, 2);
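/* Bias pending with an extra reference for the submission path; it is dropped by the iblock_complete_cmd() call after submit */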
bio_cnt = 1;
for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
* length of the S/G list entry this will cause an
* endless loop. Better hope no driver uses huge pages.
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
iblock_submit_bios(&list, rw);
bio_cnt = 0;
}
bio = iblock_get_bio(cmd, block_lba, sg_num);
if (!bio)
goto fail_put_bios;
atomic_inc(&ibr->pending);
bio_list_add(&list, bio);
bio_cnt++;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
}
if (cmd->prot_type) {
int rc = iblock_alloc_bip(cmd, bio_start);
if (rc)
goto fail_put_bios;
}
iblock_submit_bios(&list, rw);
iblock_complete_cmd(cmd);
return 0;
fail_put_bios:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
fail_free_ibr:
kfree(ibr);
fail:
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
int ret;
ret = bdev_alignment_offset(bd);
if (ret == -1)
return 0;
/* convert offset-bytes to offset-lbas */
return ret / bdev_logical_block_size(bd);
}
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
return ilog2(logs_per_phys);
}
static unsigned int iblock_get_io_min(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
return bdev_io_min(bd);
}
static unsigned int iblock_get_io_opt(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
return bdev_io_opt(bd);
}
static struct sbc_ops iblock_sbc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
.execute_write_same_unmap = iblock_execute_write_same_unmap,
.execute_unmap = iblock_execute_unmap,
};
static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
static bool iblock_get_write_cache(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
return q->flush_flags & REQ_FLUSH;
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
.inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.alloc_device = iblock_alloc_device,
.configure_device = iblock_configure_device,
.free_device = iblock_free_device,
.parse_cdb = iblock_parse_cdb,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
.get_lbppbe = iblock_get_lbppbe,
.get_io_min = iblock_get_io_min,
.get_io_opt = iblock_get_io_opt,
.get_write_cache = iblock_get_write_cache,
};
static int __init iblock_module_init(void)
{
return transport_subsystem_register(&iblock_template);
}
static void __exit iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(iblock_module_init);
module_exit(iblock_module_exit);

View file

@ -0,0 +1,25 @@
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
atomic_t pending;
atomic_t ib_bio_err_cnt;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
bool ibd_readonly;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */

View file

@ -0,0 +1,115 @@
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
void core_update_device_list_access(u32, u32, struct se_node_acl *);
int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
u32, u32, struct se_node_acl *, struct se_portal_group *);
int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
u32, u32, struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
int se_dev_set_emulate_write_cache(struct se_device *, int);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
int se_dev_set_pi_prot_type(struct se_device *, int);
int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_force_pr_aptpl(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
struct se_node_acl *, u32, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun *, struct se_lun_acl *);
void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void);
struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
int target_configure_device(struct se_device *dev);
void target_free_device(struct se_device *);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
int core_delete_hba(struct se_hba *);
/* target_core_tmr.c */
void core_tmr_abort_task(struct se_device *, struct se_tmr_req *,
struct se_session *);
int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
/* target_core_tpg.c */
extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
u32, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
/* target_core_transport.c */
extern struct kmem_cache *se_tmr_req_cache;
int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
unsigned long long, char *, int *);
void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
#endif /* TARGET_CORE_INTERNAL_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,73 @@
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
/*
* PERSISTENT_RESERVE_OUT service action codes
*
* spc4r17 section 6.14.2 Table 171
*/
#define PRO_REGISTER 0x00
#define PRO_RESERVE 0x01
#define PRO_RELEASE 0x02
#define PRO_CLEAR 0x03
#define PRO_PREEMPT 0x04
#define PRO_PREEMPT_AND_ABORT 0x05
#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06
#define PRO_REGISTER_AND_MOVE 0x07
/*
* PERSISTENT_RESERVE_IN service action codes
*
* spc4r17 section 6.13.1 Table 159
*/
#define PRI_READ_KEYS 0x00
#define PRI_READ_RESERVATION 0x01
#define PRI_REPORT_CAPABILITIES 0x02
#define PRI_READ_FULL_STATUS 0x03
/*
* PERSISTENT_RESERVE_* SCOPE field
*
* spc4r17 section 6.13.3.3 Table 163
*/
#define PR_SCOPE_LU_SCOPE 0x00
/*
* PERSISTENT_RESERVE_* TYPE field
*
* spc4r17 section 6.13.3.4 Table 164
*/
#define PR_TYPE_WRITE_EXCLUSIVE 0x01
#define PR_TYPE_EXCLUSIVE_ACCESS 0x03
#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07
#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08
#define PR_APTPL_MAX_IPORT_LEN 256
#define PR_APTPL_MAX_TPORT_LEN 256
/*
* Function defined in target_core_spc.c
*/
void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
extern struct kmem_cache *t10_pr_reg_cache;
extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
struct se_portal_group *, struct se_lun *,
struct se_node_acl *, u32);
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
extern sense_reason_t target_check_reservation(struct se_cmd *);
#endif /* TARGET_CORE_PR_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,61 @@
#ifndef TARGET_CORE_PSCSI_H
#define TARGET_CORE_PSCSI_H
#define PSCSI_VERSION "v4.0"
/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
#define INQUIRY_DATA_SIZE 0x24
#endif
/* used in pscsi_add_device_to_list() */
#define PSCSI_DEFAULT_QUEUEDEPTH 1
#define PS_RETRY 5
#define PS_TIMEOUT_DISK (15*HZ)
#define PS_TIMEOUT_OTHER (500*HZ)
#include <linux/device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_device.h>
#include <linux/kref.h>
#include <linux/kobject.h>
struct pscsi_plugin_task {
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01
#define PDF_HAS_TARGET_ID 0x02
#define PDF_HAS_LUN_ID 0x04
#define PDF_HAS_VPD_UNIT_SERIAL 0x08
#define PDF_HAS_VPD_DEV_IDENT 0x10
#define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt {
struct se_device dev;
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
} ____cacheline_aligned;
typedef enum phv_modes {
PHV_VIRTUAL_HOST_ID,
PHV_LLD_SCSI_HOST_NO
} phv_modes_t;
struct pscsi_hba_virt {
int phv_host_id;
phv_modes_t phv_mode;
struct Scsi_Host *phv_lld_host;
} ____cacheline_aligned;
#endif /*** TARGET_CORE_PSCSI_H ***/

View file

@ -0,0 +1,669 @@
/*******************************************************************************
* Filename: target_core_rd.c
*
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_rd.h"
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
return container_of(dev, struct rd_dev, dev);
}
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
*/
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
if (!rd_host) {
pr_err("Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}
rd_host->rd_host_id = host_id;
hba->hba_ptr = rd_host;
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
hba->hba_ptr = NULL;
}
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
u32 sg_table_count)
{
struct page *pg;
struct scatterlist *sg;
u32 i, j, page_count = 0, sg_per_table;
for (i = 0; i < sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
if (pg) {
__free_page(pg);
page_count++;
}
}
kfree(sg);
}
kfree(sg_table);
return page_count;
}
static void rd_release_device_space(struct rd_dev *rd_dev)
{
u32 page_count;
if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
return;
page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
rd_dev->sg_table_count);
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
/* rd_build_device_space():
*
*
*/
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
u32 total_sg_needed, unsigned char init_payload)
{
u32 i = 0, j, page_offset = 0, sg_per_table;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
struct page *pg;
struct scatterlist *sg;
unsigned char *p;
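/* Carve the allocation into tables of at most max_sg_per_table entries, backing each entry with one page memset to init_payload */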
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
p = kmap(pg);
memset(p, init_payload, PAGE_SIZE);
kunmap(pg);
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
return 0;
}
static int rd_build_device_space(struct rd_dev *rd_dev)
{
struct rd_dev_sg_table *sg_table;
u32 sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
int rc;
if (rd_dev->rd_page_count <= 0) {
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
/* Don't need backing pages for NULLIO */
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
total_sg_needed = rd_dev->rd_page_count;
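/* One scatterlist entry backs one page; entries are grouped into tables of at most max_sg_per_table */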
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
if (rc)
return rc;
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
}
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
u32 page_count;
if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
return;
page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
rd_dev->sg_prot_count);
pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
rd_dev->sg_prot_array = NULL;
rd_dev->sg_prot_count = 0;
}
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
struct rd_dev_sg_table *sg_table;
u32 total_sg_needed, sg_tables;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
int rc;
if (rd_dev->rd_flags & RDF_NULLIO)
return 0;
/*
* prot_length is the per-block DIF payload (8 bytes).
* Pages of protection data needed:
* rd_page_count * (PAGE_SIZE / block_size) data blocks, each needing
* prot_length bytes, divided by PAGE_SIZE bytes per page; the
* PAGE_SIZE factors cancel, leaving
* rd_page_count * prot_length / block_size (+ 1 page of padding).
*/
total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk protection"
" scatterlist tables\n");
return -ENOMEM;
}
rd_dev->sg_prot_array = sg_table;
rd_dev->sg_prot_count = sg_tables;
rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
if (rc)
return rc;
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
return 0;
}
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
if (!rd_dev) {
pr_err("Unable to allocate memory for struct rd_dev\n");
return NULL;
}
rd_dev->rd_host = rd_host;
return &rd_dev->dev;
}
static int rd_configure_device(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
struct rd_host *rd_host = dev->se_hba->hba_ptr;
int ret;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
return 0;
fail:
rd_release_device_space(rd_dev);
return ret;
}
static void rd_free_device(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
kfree(rd_dev);
}
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
struct rd_dev_sg_table *sg_table;
u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
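/* Each table covers sg_per_table pages of backing store, so the table index is page / sg_per_table */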
i = page / sg_per_table;
if (i < rd_dev->sg_table_count) {
sg_table = &rd_dev->sg_table_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
return sg_table;
}
pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
}
static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
struct rd_dev_sg_table *sg_table;
u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
i = page / sg_per_table;
if (i < rd_dev->sg_prot_count) {
sg_table = &rd_dev->sg_prot_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
return sg_table;
}
pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
page);
return NULL;
}
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
u32 rd_offset;
u32 rd_size;
u32 rd_page;
u32 src_len;
u64 tmp;
sense_reason_t rc;
if (dev->rd_flags & RDF_NULLIO) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
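/* do_div() stores the quotient back in tmp and returns the remainder: tmp becomes the backing page index, rd_offset the byte offset within that page */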
tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
table = rd_get_sg_table(dev, rd_page);
if (!table)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
dev->rd_dev_id,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
struct rd_dev_sg_table *prot_table;
struct scatterlist *prot_sg;
u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
u32 prot_offset, prot_page;
tmp = cmd->t_task_lba * se_dev->prot_length;
prot_offset = do_div(tmp, PAGE_SIZE);
prot_page = tmp;
prot_table = rd_get_prot_table(dev, prot_page);
if (!prot_table)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
prot_sg, prot_offset);
if (rc)
return rc;
}
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, sgl, sgl_nents,
data_direction == DMA_FROM_DEVICE ?
SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (rd_size) {
u32 len;
void *rd_addr;
sg_miter_next(&m);
if (!(u32)m.length) {
pr_debug("RD[%u]: invalid sgl %p len %zu\n",
dev->rd_dev_id, m.addr, m.length);
sg_miter_stop(&m);
return TCM_INCORRECT_AMOUNT_OF_DATA;
}
len = min((u32)m.length, src_len);
if (len > rd_size) {
pr_debug("RD[%u]: size underrun page %d offset %d "
"size %d\n", dev->rd_dev_id,
rd_page, rd_offset, rd_size);
len = rd_size;
}
m.consumed = len;
rd_addr = sg_virt(rd_sg) + rd_offset;
if (data_direction == DMA_FROM_DEVICE)
memcpy(m.addr, rd_addr, len);
else
memcpy(rd_addr, m.addr, len);
rd_size -= len;
if (!rd_size)
continue;
src_len -= len;
if (src_len) {
rd_offset += len;
continue;
}
/* rd page completed, next one please */
rd_page++;
rd_offset = 0;
src_len = PAGE_SIZE;
if (rd_page <= table->page_end_offset) {
rd_sg++;
continue;
}
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
rd_sg = table->sg_table;
}
sg_miter_stop(&m);
if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
struct rd_dev_sg_table *prot_table;
struct scatterlist *prot_sg;
u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
u32 prot_offset, prot_page;
tmp = cmd->t_task_lba * se_dev->prot_length;
prot_offset = do_div(tmp, PAGE_SIZE);
prot_page = tmp;
prot_table = rd_get_prot_table(dev, prot_page);
if (!prot_table)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
prot_sg, prot_offset);
if (rc)
return rc;
}
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
enum {
Opt_rd_pages, Opt_rd_nullio, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
{Opt_rd_nullio, "rd_nullio=%d"},
{Opt_err, NULL}
};
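/*
 * Accepts comma- or newline-separated options written through configfs,
 * e.g. a control string like "rd_pages=32768,rd_nullio=1" (illustrative
 * values).
 */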
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
pr_debug("RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
case Opt_rd_nullio:
match_int(args, &arg);
if (arg != 1)
break;
pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
rd_dev->rd_flags |= RDF_NULLIO;
break;
default:
break;
}
}
kfree(orig);
return (!ret) ? count : ret;
}
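/*
 * Example of the text read back through configfs (illustrative values):
 *   TCM RamDisk ID: 0 RamDisk Makeup: rd_mcp
 *    PAGES/PAGE_SIZE: 32768*4096 SG_table_count: 16 nullio: 0
 */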
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct rd_dev *rd_dev = RD_DEV(dev);
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count,
!!(rd_dev->rd_flags & RDF_NULLIO));
return bl;
}
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
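/*
 * Note: despite the name, this returns the last addressable LBA
 * (block count - 1), as reported by READ CAPACITY; e.g. 4096 pages of
 * 4096 bytes with 512-byte blocks yields 32768 blocks, so 32767.
 */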
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
dev->dev_attrib.block_size) - 1;
return blocks_long;
}
static int rd_init_prot(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
if (!dev->dev_attrib.pi_prot_type)
return 0;
return rd_build_prot_space(rd_dev, dev->prot_length,
dev->dev_attrib.block_size);
}
static void rd_free_prot(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_prot_space(rd_dev);
}
static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.alloc_device = rd_alloc_device,
.configure_device = rd_configure_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
.init_prot = rd_init_prot,
.free_prot = rd_free_prot,
};
int __init rd_module_init(void)
{
int ret;
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
return ret;
}
return 0;
}
void rd_module_exit(void)
{
transport_subsystem_release(&rd_mcp_template);
}

View file

@ -0,0 +1,51 @@
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
#define RD_HBA_VERSION "v4.0"
#define RD_MCP_VERSION "4.0"
/* Upper bound on each per-table scatterlist allocation via kmalloc() */
#define RD_MAX_ALLOCATION_SIZE 65536
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);
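/*
 * Each table covers a contiguous run of backing pages;
 * [page_start_offset, page_end_offset] are inclusive page indices into
 * the whole ramdisk, used by rd_get_sg_table() for lookup.
 */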
struct rd_dev_sg_table {
u32 page_start_offset;
u32 page_end_offset;
u32 rd_sg_count;
struct scatterlist *sg_table;
} ____cacheline_aligned;
#define RDF_HAS_PAGE_COUNT 0x01
#define RDF_NULLIO 0x02
struct rd_dev {
struct se_device dev;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
/* Total page count for ramdisk device */
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
/* Number of SG tables in sg_prot_array */
u32 sg_prot_count;
/* Array of rd_dev_sg_table_t containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
/* Array of rd_dev_sg_table containing protection scatterlists */
struct rd_dev_sg_table *sg_prot_array;
/* Ramdisk HBA device is connected to */
struct rd_host *rd_host;
} ____cacheline_aligned;
struct rd_host {
u32 rd_host_dev_id_count;
u32 rd_host_id; /* Unique Ramdisk Host ID */
} ____cacheline_aligned;
#endif /* TARGET_CORE_RD_H */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,389 @@
/*******************************************************************************
* Filename: target_core_tmr.c
*
* This file contains SPC-3 task management infrastructure
*
* (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
int core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
u8 function,
gfp_t gfp_flags)
{
struct se_tmr_req *tmr;
tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
if (!tmr) {
pr_err("Unable to allocate struct se_tmr_req\n");
return -ENOMEM;
}
se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
se_cmd->se_tmr_req = tmr;
tmr->task_cmd = se_cmd;
tmr->fabric_tmr_ptr = fabric_tmr_ptr;
tmr->function = function;
INIT_LIST_HEAD(&tmr->tmr_list);
return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
void core_tmr_release_req(struct se_tmr_req *tmr)
{
struct se_device *dev = tmr->tmr_dev;
unsigned long flags;
if (dev) {
spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_del(&tmr->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
kfree(tmr);
}
static void core_tmr_handle_tas_abort(
struct se_node_acl *tmr_nacl,
struct se_cmd *cmd,
int tas)
{
bool remove = true;
/*
 * TASK ABORTED status (TAS) bit support: if the command arrived on an
 * I_T nexus other than the one issuing the TMR and TAS is enabled,
 * send TASK ABORTED status back and let the fabric complete the
 * descriptor instead of removing it here.
 */
if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
remove = false;
transport_send_task_abort(cmd);
}
transport_cmd_finish_abort(cmd, remove);
}
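/*
 * Returns 0 when @cmd should be processed (no preempt-and-abort list was
 * given, or its reservation key matches an entry being preempted);
 * non-zero tells the caller to skip it.
 */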
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
struct t10_pr_registration *reg;
if (!list)
return 0;
list_for_each_entry(reg, list, pr_reg_abort_list) {
if (reg->pr_res_key == cmd->pr_res_key)
return 0;
}
return 1;
}
void core_tmr_abort_task(
struct se_device *dev,
struct se_tmr_req *tmr,
struct se_session *se_sess)
{
struct se_cmd *se_cmd;
unsigned long flags;
int ref_tag;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
if (dev != se_cmd->se_dev)
continue;
/* skip se_cmd associated with tmr */
if (tmr->task_cmd == se_cmd)
continue;
ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
if (tmr->ref_task_tag != ref_tag)
continue;
printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
spin_lock(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_COMPLETE) {
printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
spin_unlock(&se_cmd->t_state_lock);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
goto out;
}
se_cmd->transport_state |= CMD_T_ABORTED;
spin_unlock(&se_cmd->t_state_lock);
list_del_init(&se_cmd->se_cmd_list);
kref_get(&se_cmd->cmd_kref);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
target_put_sess_cmd(se_sess, se_cmd);
transport_cmd_finish_abort(se_cmd, true);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %d\n", ref_tag);
tmr->response = TMR_FUNCTION_COMPLETE;
return;
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
out:
printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n",
tmr->ref_task_tag);
tmr->response = TMR_TASK_DOES_NOT_EXIST;
}
static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_tmr_list);
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_cmd *cmd;
unsigned long flags;
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET TMR..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
*/
if (tmr_p == tmr)
continue;
cmd = tmr_p->task_cmd;
if (!cmd) {
pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
* If this function was called with a valid pr_res_key
* parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
* skip TMRs whose registration key does not match.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
spin_lock(&cmd->t_state_lock);
if (!(cmd->transport_state & CMD_T_ACTIVE)) {
spin_unlock(&cmd->t_state_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
spin_unlock(&cmd->t_state_lock);
continue;
}
spin_unlock(&cmd->t_state_lock);
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
list_del_init(&tmr_p->tmr_list);
cmd = tmr_p->task_cmd;
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
transport_cmd_finish_abort(cmd, 1);
}
}
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_node_acl *tmr_nacl,
int tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
struct se_cmd *cmd, *next;
unsigned long flags;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
*
* This is following sam4r17, section 5.6 Aborting commands, Table 38
* for TMR LUN_RESET:
*
* a) "Yes" indicates that each command that is aborted on an I_T nexus
* other than the one that caused the SCSI device condition is
* completed with TASK ABORTED status, if the TAS bit is set to one in
* the Control mode page (see SPC-4). "No" indicates that no status is
* returned for aborted commands.
*
* d) If the logical unit reset is caused by a particular I_T nexus
* (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
* (TASK_ABORTED status) applies.
*
* Otherwise (e.g., if triggered by a hard reset), "no"
* (no TASK_ABORTED SAM status) applies.
*
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
while (!list_empty(&drain_task_list)) {
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
list_del(&cmd->state_list);
pr_debug("LUN_RESET: %s cmd: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
"cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd,
cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->t_task_cdb[0]);
pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" -- CMD_T_ACTIVE: %d"
" CMD_T_STOP: %d CMD_T_SENT: %d\n",
cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
(cmd->transport_state & CMD_T_SENT) != 0);
/*
* If the command may be queued onto a workqueue cancel it now.
*
* This is equivalent to removal from the execute queue in the
* loop above, but we do it down here given that
* cancel_work_sync may block.
*/
if (cmd->t_state == TRANSPORT_COMPLETE)
cancel_work_sync(&cmd->work);
spin_lock_irqsave(&cmd->t_state_lock, flags);
target_stop_cmd(cmd, &flags);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
}
}
int core_tmr_lun_reset(
struct se_device *dev,
struct se_tmr_req *tmr,
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
int tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
*
* A task aborted status (TAS) bit set to zero specifies that aborted
* tasks shall be terminated by the device server without any response
* to the application client. A TAS bit set to one specifies that tasks
* aborted by the actions of an I_T nexus other than the I_T nexus on
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
tas = dev->dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
tmr_tpg->se_tpg_tfo->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
if (!preempt_and_abort_list &&
(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
atomic_long_inc(&dev->num_resets);
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name);
return 0;
}

View file

@ -0,0 +1,856 @@
/*******************************************************************************
* Filename: target_core_tpg.c
*
* This file contains generic Target Portal Group related functions.
*
* (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
extern struct se_device *g_lun0_dev;
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/* core_clear_initiator_node_from_tpg():
*
*
*/
static void core_clear_initiator_node_from_tpg(
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
int i;
struct se_dev_entry *deve;
struct se_lun *lun;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
if (!deve->se_lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->get_fabric_name());
continue;
}
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
spin_unlock_irq(&nacl->device_list_lock);
}
/* __core_tpg_get_initiator_node_acl():
*
* spin_lock_bh(&tpg->acl_node_lock); must be held when calling
*/
struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
const char *initiatorname)
{
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!strcmp(acl->initiatorname, initiatorname))
return acl;
}
return NULL;
}
/* core_tpg_get_initiator_node_acl():
*
*
*/
struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
{
struct se_node_acl *acl;
spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
spin_unlock_irq(&tpg->acl_node_lock);
return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
/* core_tpg_add_node_to_devs():
*
*
*/
void core_tpg_add_node_to_devs(
struct se_node_acl *acl,
struct se_portal_group *tpg)
{
int i = 0;
u32 lun_access = 0;
struct se_lun *lun;
struct se_device *dev;
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = tpg->tpg_lun_list[i];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
continue;
spin_unlock(&tpg->tpg_lun_lock);
dev = lun->lun_se_dev;
/*
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
/*
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
if (dev->transport->get_device_type(dev) == TYPE_DISK)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
" access for LUN in Demo Mode\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
lun_access, acl, tpg);
/*
* Check to see if there are any existing persistent reservation
* APTPL pre-registrations that need to be enabled for this dynamic
* LUN ACL now..
*/
core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
lun->unpacked_lun);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
}
/* core_set_queue_depth_for_node():
*
*
*/
static int core_set_queue_depth_for_node(
struct se_portal_group *tpg,
struct se_node_acl *acl)
{
if (!acl->queue_depth) {
pr_err("Queue depth for %s Initiator Node: %s is 0,"
"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
return 0;
}
void array_free(void *array, int n)
{
void **a = array;
int i;
for (i = 0; i < n; i++)
kfree(a[i]);
kfree(a);
}
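/*
 * Allocate @n elements as individual kzalloc()s behind a pointer array,
 * avoiding one large contiguous allocation for big TPG LUN/ACL tables.
 */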
static void *array_zalloc(int n, size_t size, gfp_t flags)
{
void **a;
int i;
a = kcalloc(n, sizeof(void *), flags);
if (!a)
return NULL;
for (i = 0; i < n; i++) {
a[i] = kzalloc(size, flags);
if (!a[i]) {
array_free(a, n);
return NULL;
}
}
return a;
}
/* core_create_device_list_for_node():
*
*
*/
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
int i;
nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
sizeof(struct se_dev_entry), GFP_KERNEL);
if (!nacl->device_list) {
pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = nacl->device_list[i];
atomic_set(&deve->ua_count, 0);
atomic_set(&deve->pr_ref_count, 0);
spin_lock_init(&deve->ua_lock);
INIT_LIST_HEAD(&deve->alua_port_list);
INIT_LIST_HEAD(&deve->ua_list);
}
return 0;
}
/* core_tpg_check_initiator_node_acl()
*
*
*/
struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
{
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl)
return acl;
if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
if (!acl)
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
kref_init(&acl->acl_kref);
init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
acl->dynamic_node_acl = 1;
tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
/*
* Here we only create demo-mode MappedLUNs from the active
* TPG LUNs if the fabric is not explicitly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
spin_unlock_irq(&tpg->acl_node_lock);
pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
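/*
 * Busy-wait (cpu_relax()) until all outstanding persistent reservation
 * references to this node ACL have been dropped before teardown proceeds.
 */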
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
while (atomic_read(&nacl->acl_pr_ref_count) != 0)
cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
int i;
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = tpg->tpg_lun_list[i];
if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
(lun->lun_se_dev == NULL))
continue;
spin_unlock(&tpg->tpg_lun_lock);
core_dev_del_lun(tpg, lun);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/* core_tpg_add_initiator_node_acl():
*
*
*/
struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *tpg,
struct se_node_acl *se_nacl,
const char *initiatorname,
u32 queue_depth)
{
struct se_node_acl *acl = NULL;
spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
spin_unlock_irq(&tpg->acl_node_lock);
/*
* Release the locally allocated struct se_node_acl
* because core_tpg_add_initiator_node_acl() returned
* a pointer to an existing demo mode node ACL.
*/
if (se_nacl)
tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
se_nacl);
goto done;
}
pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST);
}
spin_unlock_irq(&tpg->acl_node_lock);
if (!se_nacl) {
pr_err("struct se_node_acl pointer is NULL\n");
return ERR_PTR(-EINVAL);
}
/*
* For v4.x logic the se_node_acl_s is hanging off a fabric
* dependent structure allocated via
* struct target_core_fabric_ops->fabric_make_nodeacl()
*/
acl = se_nacl;
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
kref_init(&acl->acl_kref);
init_completion(&acl->acl_free_comp);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
acl->queue_depth = queue_depth;
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-ENOMEM);
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-EINVAL);
}
spin_lock_irq(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
spin_unlock_irq(&tpg->acl_node_lock);
done:
pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
/* core_tpg_del_initiator_node_acl():
*
*
*/
int core_tpg_del_initiator_node_acl(
struct se_portal_group *tpg,
struct se_node_acl *acl,
int force)
{
LIST_HEAD(sess_list);
struct se_session *sess, *sess_tmp;
unsigned long flags;
int rc;
spin_lock_irq(&tpg->acl_node_lock);
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
acl->acl_stop = 1;
list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
sess_acl_list) {
if (sess->sess_tearing_down != 0)
continue;
target_get_session(sess);
list_move(&sess->sess_acl_list, &sess_list);
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
list_del(&sess->sess_acl_list);
rc = tpg->se_tpg_tfo->shutdown_session(sess);
target_put_session(sess);
if (!rc)
continue;
target_put_session(sess);
}
target_put_nacl(acl);
/*
* Wait for last target_put_nacl() to complete in target_complete_nacl()
* for active fabric session transport_deregister_session() callbacks.
*/
wait_for_completion(&acl->acl_free_comp);
core_tpg_wait_for_nacl_pr_ref(acl);
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/* core_tpg_set_initiator_node_queue_depth():
*
*
*/
int core_tpg_set_initiator_node_queue_depth(
struct se_portal_group *tpg,
unsigned char *initiatorname,
u32 queue_depth,
int force)
{
struct se_session *sess, *init_sess = NULL;
struct se_node_acl *acl;
unsigned long flags;
int dynamic_acl = 0;
spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!acl) {
pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&tpg->acl_node_lock);
return -ENODEV;
}
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_irqsave(&tpg->session_lock, flags);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
if (sess->se_node_acl != acl)
continue;
if (!force) {
pr_err("Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_irqrestore(&tpg->session_lock, flags);
spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
spin_unlock_irq(&tpg->acl_node_lock);
return -EEXIST;
}
/*
* Determine if the session needs to be closed by our context.
*/
if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
init_sess = sess;
break;
}
/*
* User has requested to change the queue depth for an Initiator Node.
* Change the value in the Node's struct se_node_acl, and call
* core_set_queue_depth_for_node() to add the requested queue depth.
*
* Finally call tpg->se_tpg_tfo->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
*/
acl->queue_depth = queue_depth;
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
* the $FABRIC_MOD has already set the session reinstatement
* bit from tpg->se_tpg_tfo->shutdown_session() called above.
*/
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
spin_unlock_irq(&tpg->acl_node_lock);
return -EINVAL;
}
spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
spin_unlock_irq(&tpg->acl_node_lock);
return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/* core_tpg_set_initiator_node_tag():
*
* Initiator nodeacl tags are not used internally, but may be used by
* userspace to emulate aliases or groups.
* Returns length of newly-set tag or -EINVAL.
*/
int core_tpg_set_initiator_node_tag(
struct se_portal_group *tpg,
struct se_node_acl *acl,
const char *new_tag)
{
if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
return -EINVAL;
if (!strncmp("NULL", new_tag, 4)) {
acl->acl_tag[0] = '\0';
return 0;
}
return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
complete(&lun->lun_ref_comp);
}
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
struct se_device *dev = g_lun0_dev;
struct se_lun *lun = &se_tpg->tpg_virt_lun0;
u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
int ret;
lun->unpacked_lun = 0;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
spin_lock_init(&lun->lun_acl_lock);
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);
ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
if (ret < 0)
return ret;
return 0;
}
int core_tpg_register(
struct target_core_fabric_ops *tfo,
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
void *tpg_fabric_ptr,
int se_tpg_type)
{
struct se_lun *lun;
u32 i;
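/*
 * Preallocate the full TRANSPORT_MAX_LUNS_PER_TPG se_lun array up
 * front; individual LUNs transition to ACTIVE in core_tpg_add_lun().
 */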
se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
sizeof(struct se_lun), GFP_KERNEL);
if (!se_tpg->tpg_lun_list) {
pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
lun->lun_link_magic = SE_LUN_LINK_MAGIC;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
spin_lock_init(&lun->lun_acl_lock);
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);
}
se_tpg->se_tpg_type = se_tpg_type;
se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
se_tpg->se_tpg_tfo = tfo;
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
spin_lock_init(&se_tpg->tpg_lun_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
array_free(se_tpg->tpg_lun_list,
TRANSPORT_MAX_LUNS_PER_TPG);
return -ENOMEM;
}
}
spin_lock_bh(&tpg_lock);
list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
spin_unlock_bh(&tpg_lock);
pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
return 0;
}
EXPORT_SYMBOL(core_tpg_register);
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
struct se_node_acl *nacl, *nacl_tmp;
pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
spin_lock_bh(&tpg_lock);
list_del(&se_tpg->se_tpg_node);
spin_unlock_bh(&tpg_lock);
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
/*
* Release any remaining demo-mode generated se_node_acl that have
* not been released because of TFO->tpg_check_demo_mode_cache() == 1
* in transport_deregister_session().
*/
spin_lock_irq(&se_tpg->acl_node_lock);
list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
acl_list) {
list_del(&nacl->acl_list);
se_tpg->num_node_acls--;
spin_unlock_irq(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
spin_lock_irq(&se_tpg->acl_node_lock);
}
spin_unlock_irq(&se_tpg->acl_node_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
se_tpg->se_tpg_fabric_ptr = NULL;
array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
tpg->se_tpg_tfo->get_fabric_name(),
unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-EINVAL);
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
u32 lun_access,
struct se_device *dev)
{
int ret;
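/*
 * The per-LUN percpu reference gates I/O submission; it is dropped and
 * drained via transport_clear_lun_ref() in core_tpg_remove_lun().
 */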
ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
GFP_KERNEL);
if (ret < 0)
return ret;
ret = core_dev_export(dev, tpg, lun);
if (ret < 0) {
percpu_ref_exit(&lun->lun_ref);
return ret;
}
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
spin_unlock(&tpg->tpg_lun_lock);
return 0;
}
void core_tpg_remove_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
core_clear_lun_from_tpg(lun, tpg);
transport_clear_lun_ref(lun);
core_dev_unexport(lun->lun_se_dev, tpg, lun);
spin_lock(&tpg->tpg_lun_lock);
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
spin_unlock(&tpg->tpg_lun_lock);
percpu_ref_exit(&lun->lun_ref);
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,319 @@
/*******************************************************************************
* Filename: target_core_ua.c
*
* This file contains logic for SPC-3 Unit Attention emulation
*
* (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
sense_reason_t
target_scsi3_ua_check(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
if (!sess)
return 0;
nacl = sess->se_node_acl;
if (!nacl)
return 0;
deve = nacl->device_list[cmd->orig_fe_lun];
if (!atomic_read(&deve->ua_count))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
*
* a) if an INQUIRY command enters the enabled command state, the
* device server shall process the INQUIRY command and shall neither
* report nor clear any unit attention condition;
* b) if a REPORT LUNS command enters the enabled command state, the
* device server shall process the REPORT LUNS command and shall not
* report any unit attention condition;
* e) if a REQUEST SENSE command enters the enabled command state while
* a unit attention condition exists for the SCSI initiator port
* associated with the I_T nexus on which the REQUEST SENSE command
* was received, then the device server shall process the command
* and either:
*/
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
return TCM_CHECK_CONDITION_UNIT_ATTENTION;
}
}
int core_scsi3_ua_allocate(
struct se_node_acl *nacl,
u32 unpacked_lun,
u8 asc,
u8 ascq)
{
struct se_dev_entry *deve;
struct se_ua *ua, *ua_p, *ua_tmp;
/*
* PASSTHROUGH OPS
*/
if (!nacl)
return -EINVAL;
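/*
 * GFP_ATOMIC: unit attention conditions may be raised from atomic
 * context (e.g. while reservation or device-list locks are held).
 */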
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!ua) {
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_nacl_list);
ua->ua_nacl = nacl;
ua->ua_asc = asc;
ua->ua_ascq = ascq;
spin_lock_irq(&nacl->device_list_lock);
deve = nacl->device_list[unpacked_lun];
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
/*
* Do not report the same UNIT ATTENTION twice..
*/
if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
kmem_cache_free(se_ua_cache, ua);
return 0;
}
/*
* Attach the highest priority Unit Attention to
* the head of the list following sam4r14,
* Section 5.14 Unit Attention Condition:
*
* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
* POWER ON OCCURRED or
* DEVICE INTERNAL RESET
* SCSI BUS RESET OCCURRED or
* MICROCODE HAS BEEN CHANGED or
* protocol specific
* BUS DEVICE RESET FUNCTION OCCURRED
* I_T NEXUS LOSS OCCURRED
* COMMANDS CLEARED BY POWER LOSS NOTIFICATION
* all others Lowest
*
* Each of the ASCQ codes listed above are defined in
* the 29h ASC family, see spc4r17 Table D.1
*/
if (ua_p->ua_asc == 0x29) {
if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
list_add(&ua->ua_nacl_list,
&deve->ua_list);
else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
} else if (ua_p->ua_asc == 0x2a) {
/*
* Incoming Family 29h ASCQ codes will override
* Family 2Ah ASCQ codes for Unit Attention condition.
*/
if ((asc == 0x29) || (ascq > ua_p->ua_asc))
list_add(&ua->ua_nacl_list,
&deve->ua_list);
else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
} else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
atomic_inc_mb(&deve->ua_count);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);
atomic_inc_mb(&deve->ua_count);
return 0;
}
void core_scsi3_ua_release_all(
struct se_dev_entry *deve)
{
struct se_ua *ua, *ua_p;
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
}
void core_scsi3_ua_for_check_condition(
struct se_cmd *cmd,
u8 *asc,
u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!sess)
return;
nacl = sess->se_node_acl;
if (!nacl)
return;
spin_lock_irq(&nacl->device_list_lock);
deve = nacl->device_list[cmd->orig_fe_lun];
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
* sense data for the received CDB.
*/
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
/*
* For ua_intlck_ctrl code not equal to 00b, only report the
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
}
/*
* Otherwise for the default 00b, release the UNIT ATTENTION
* condition. Return the ASC/ASCQ of the highest priority UA
* (head of the list) in the outgoing CHECK_CONDITION + sense.
*/
if (head) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
head = 0;
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
int core_scsi3_ua_clear_for_request_sense(
struct se_cmd *cmd,
u8 *asc,
u8 *ascq)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!sess)
return -EINVAL;
nacl = sess->se_node_acl;
if (!nacl)
return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
deve = nacl->device_list[cmd->orig_fe_lun];
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list. The First (and hence highest priority)
* ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
* matching struct se_lun.
*
* Once the returning ASC/ASCQ values are set, we go ahead and
* release all of the Unit Attention conditions for the associated
* struct se_lun.
*/
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
if (head) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
head = 0;
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
return (head) ? -EPERM : 0;
}

View file

@ -0,0 +1,37 @@
#ifndef TARGET_CORE_UA_H
#define TARGET_CORE_UA_H
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
*/
#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00
#define ASCQ_29H_POWER_ON_OCCURRED 0x01
#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02
#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03
#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04
#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05
#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06
#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07
#define ASCQ_2AH_PARAMETERS_CHANGED 0x00
#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01
#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02
#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
extern struct kmem_cache *se_ua_cache;
extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
#endif /* TARGET_CORE_UA_H */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,62 @@
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
#define XCOPY_MAX_SECTORS 1024
enum xcopy_origin_list {
XCOL_SOURCE_RECV_OP = 0x01,
XCOL_DEST_RECV_OP = 0x02,
};
struct xcopy_pt_cmd;
struct xcopy_op {
int op_origin;
struct se_cmd *xop_se_cmd;
struct se_device *src_dev;
unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
sector_t src_lba;
sector_t dst_lba;
unsigned short stdi;
unsigned short dtdi;
unsigned short nolb;
unsigned int dbl;
struct xcopy_pt_cmd *src_pt_cmd;
struct xcopy_pt_cmd *dst_pt_cmd;
u32 xop_data_nents;
struct scatterlist *xop_data_sg;
struct work_struct xop_work;
};
/*
* Receive Copy Results Service Actions
*/
#define RCR_SA_COPY_STATUS 0x00
#define RCR_SA_RECEIVE_DATA 0x01
#define RCR_SA_OPERATING_PARAMETERS 0x03
#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04
/*
* Receive Copy Results defs for Operating Parameters
*/
#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2
#define RCR_OP_MAX_SG_DESC_COUNT 0x1
#define RCR_OP_MAX_DESC_LIST_LEN 1024
#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */
#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */
#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */
#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */
#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
extern int target_xcopy_setup_pt(void);
extern void target_xcopy_release_pt(void);
extern sense_reason_t target_do_xcopy(struct se_cmd *);
extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);

View file

@ -0,0 +1,5 @@
config TCM_FC
tristate "TCM_FC fabric Plugin"
depends on LIBFC
help
Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM.

View file

@ -0,0 +1,6 @@
tcm_fc-y += tfc_cmd.o \
tfc_conf.o \
tfc_io.o \
tfc_sess.o
obj-$(CONFIG_TCM_FC) += tcm_fc.o

View file

@ -0,0 +1,183 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
#define FT_VERSION "0.4"
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
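/*
 * On-the-wire FC transport ID: format byte, reserved pad, 8-byte WWPN,
 * trailing reserved bytes (24 bytes total, packed).
 */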
struct ft_transport_id {
__u8 format;
__u8 __resvd1[7];
__u8 wwpn[8];
__u8 __resvd2[8];
} __attribute__((__packed__));
/*
* Session (remote port).
*/
struct ft_sess {
u32 port_id; /* for hash lookup use only */
u32 params;
u16 max_frame; /* maximum frame size */
u64 port_name; /* port name for transport ID */
struct ft_tport *tport;
struct se_session *se_sess;
struct hlist_node hash; /* linkage in ft_sess_hash table */
struct rcu_head rcu;
struct kref kref; /* ref for hash and outstanding I/Os */
};
/*
* Hash table of sessions per local port.
* Hash lookup by remote port FC_ID.
*/
#define FT_SESS_HASH_BITS 6
#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
/*
* Per local port data.
* This is created only after a TPG exists that allows target function
* for the local port. If the TPG exists, this is allocated when
* we're notified that the local port has been created, or when
* the first PRLI provider callback is received.
*/
struct ft_tport {
struct fc_lport *lport;
struct ft_tpg *tpg; /* NULL if TPG deleted before tport */
u32 sess_count; /* number of sessions in hash */
struct rcu_head rcu;
struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
};
/*
* Node ID and authentication.
*/
struct ft_node_auth {
u64 port_name;
u64 node_name;
};
/*
* Node ACL for FC remote port session.
*/
struct ft_node_acl {
struct ft_node_auth node_auth;
struct se_node_acl se_node_acl;
};
struct ft_lun {
u32 index;
char name[FT_LUN_NAMELEN];
};
/*
* Target portal group (local port).
*/
struct ft_tpg {
u32 index;
struct ft_lport_wwn *lport_wwn;
struct ft_tport *tport; /* active tport or NULL */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
struct workqueue_struct *workqueue;
};
struct ft_lport_wwn {
u64 wwpn;
char name[FT_NAMELEN];
struct list_head ft_wwn_node;
struct ft_tpg *tpg;
struct se_wwn se_wwn;
};
/*
* Commands
*/
struct ft_cmd {
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
struct se_cmd se_cmd; /* Local TCM I/O descriptor */
struct fc_frame *req_frame;
u32 write_data_len; /* data received on writes */
struct work_struct work;
/* Local sense buffer */
unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
u32 was_ddp_setup:1; /* Set only if ddp is setup */
u32 aborted:1; /* Set if aborted by reset or timeout */
struct scatterlist *sg; /* Set only if DDP is setup */
u32 sg_cnt; /* No. of items in scatterlist */
};
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;
/*
* Fabric methods.
*/
/*
* Session ops.
*/
void ft_sess_put(struct ft_sess *);
int ft_sess_shutdown(struct se_session *);
void ft_sess_close(struct se_session *);
u32 ft_sess_get_index(struct se_session *);
u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
void ft_lport_add(struct fc_lport *, void *);
void ft_lport_del(struct fc_lport *, void *);
int ft_lport_notify(struct notifier_block *, unsigned long, void *);
/*
* IO methods.
*/
int ft_check_stop_free(struct se_cmd *);
void ft_release_cmd(struct se_cmd *);
int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
void ft_aborted_task(struct se_cmd *);
/*
* other internal functions.
*/
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
void ft_dump_cmd(struct ft_cmd *, const char *caller);
ssize_t ft_format_wwn(char *, size_t, u64);
/*
* Underlying HW specific helper function
*/
void ft_invl_hw_context(struct ft_cmd *);
#endif /* __TCM_FC_H__ */

View file

@ -0,0 +1,585 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
/*
* Dump cmd state for debugging.
*/
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
struct fc_exch *ep;
struct fc_seq *sp;
struct se_cmd *se_cmd;
struct scatterlist *sg;
int count;
se_cmd = &cmd->se_cmd;
pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->sess, cmd->seq, se_cmd);
pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
caller, cmd, se_cmd->t_data_nents,
se_cmd->data_length, se_cmd->se_cmd_flags);
for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
pr_debug("%s: cmd %p sg %p page %p "
"len 0x%x off 0x%x\n",
caller, cmd, sg,
sg_page(sg), sg->length, sg->offset);
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
}
}
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
if (unlikely(ft_debug_logging))
_ft_dump_cmd(cmd, caller);
}
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct fc_lport *lport;
struct ft_sess *sess;
if (!cmd)
return;
sess = cmd->sess;
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
lport->tt.seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(sess); /* undo get from lookup at recv */
}
void ft_release_cmd(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
ft_free_cmd(cmd);
}
int ft_check_stop_free(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0);
return 1;
}
/*
* Send response.
*/
int ft_queue_status(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct fc_frame *fp;
struct fcp_resp_with_ext *fcp;
struct fc_lport *lport;
struct fc_exch *ep;
size_t len;
int rc;
if (cmd->aborted)
return 0;
ft_dump_cmd(cmd, __func__);
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
len = sizeof(*fcp) + se_cmd->scsi_sense_length;
fp = fc_frame_alloc(lport, len);
if (!fp) {
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = se_cmd->scsi_status;
len = se_cmd->scsi_sense_length;
if (len) {
fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
fcp->ext.fr_sns_len = htonl(len);
memcpy((fcp + 1), se_cmd->sense_buffer, len);
}
/*
* Test underflow and overflow with one mask. Usually both are off.
* Bidirectional commands are not handled yet.
*/
if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
fcp->resp.fr_flags |= FCP_RESID_OVER;
else
fcp->resp.fr_flags |= FCP_RESID_UNDER;
fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
}
/*
* Send response.
*/
cmd->seq = lport->tt.seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
rc = lport->tt.seq_send(lport, cmd->seq, fp);
if (rc) {
pr_info_ratelimited("%s: Failed to send response frame %p, "
"xid <0x%x>\n", __func__, fp, ep->xid);
/*
* Generate a TASK_SET_FULL status to notify the initiator
* to reduce its queue_depth after the se_cmd response has
* been re-queued by target-core.
*/
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
return -ENOMEM;
}
lport->tt.exch_done(cmd->seq);
return 0;
}
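/*
 * Worked example (illustrative only): for a READ that requested 8192
 * bytes of which the backend returned only 4096, target-core sets
 * SCF_UNDERFLOW_BIT and residual_count = 4096, so the response built
 * above carries FCP_RESID_UNDER with fr_resid = cpu_to_be32(4096).
 */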
int ft_write_pending_status(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
return cmd->write_data_len != se_cmd->data_length;
}
/*
* Send TX_RDY (transfer ready).
*/
int ft_write_pending(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct fc_frame *fp;
struct fcp_txrdy *txrdy;
struct fc_lport *lport;
struct fc_exch *ep;
struct fc_frame_header *fh;
u32 f_ctl;
ft_dump_cmd(cmd, __func__);
if (cmd->aborted)
return 0;
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
cmd->seq = lport->tt.seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
fh = fc_frame_header_get(fp);
f_ctl = ntoh24(fh->fh_f_ctl);
/* Only if it is 'Exchange Responder' */
if (f_ctl & FC_FC_EX_CTX) {
/* Target is the 'exchange responder' and is sending
 * XFER_READY to the 'exchange initiator'
 */
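/*
 * XIDs at or below lro_xid fall in the DDP-capable range; for a
 * data CDB, set up direct data placement so the incoming write
 * data lands in the command's scatterlist without a copy.
 */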
if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
lport->tt.ddp_target(lport, ep->xid,
se_cmd->t_data_sg,
se_cmd->t_data_nents))
cmd->was_ddp_setup = 1;
}
}
lport->tt.seq_send(lport, cmd->seq, fp);
return 0;
}
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
if (cmd->aborted)
return ~0;
return fc_seq_exch(cmd->seq)->rxid;
}
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
return 0;
}
/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
struct ft_cmd *cmd = arg;
struct fc_frame_header *fh;
if (unlikely(IS_ERR(fp))) {
/* XXX need to find cmd if queued */
cmd->seq = NULL;
cmd->aborted = true;
return;
}
fh = fc_frame_header_get(fp);
switch (fh->fh_r_ctl) {
case FC_RCTL_DD_SOL_DATA: /* write data */
ft_recv_write_data(cmd, fp);
break;
case FC_RCTL_DD_UNSOL_CTL: /* command */
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
ft_invl_hw_context(cmd);
fc_frame_free(fp);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
}
/*
* Send a FCP response including SCSI status and optional FCP rsp_code.
* status is SAM_STAT_GOOD (zero) iff code is valid.
* This is used in error cases, such as allocation failures.
*/
static void ft_send_resp_status(struct fc_lport *lport,
const struct fc_frame *rx_fp,
u32 status, enum fcp_resp_rsp_codes code)
{
struct fc_frame *fp;
struct fc_seq *sp;
const struct fc_frame_header *fh;
size_t len;
struct fcp_resp_with_ext *fcp;
struct fcp_resp_rsp_info *info;
fh = fc_frame_header_get(rx_fp);
pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
len += sizeof(*info);
fp = fc_frame_alloc(lport, len);
if (!fp)
return;
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = status;
if (status == SAM_STAT_GOOD) {
fcp->ext.fr_rsp_len = htonl(sizeof(*info));
fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
info = (struct fcp_resp_rsp_info *)(fcp + 1);
info->rsp_code = code;
}
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
lport->tt.seq_send(lport, sp, fp);
lport->tt.exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
}
/*
* Send error or task management response.
*/
static void ft_send_resp_code(struct ft_cmd *cmd,
enum fcp_resp_rsp_codes code)
{
ft_send_resp_status(cmd->sess->tport->lport,
cmd->req_frame, SAM_STAT_GOOD, code);
}
/*
* Send error or task management response.
* Always frees the cmd and associated state.
*/
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
enum fcp_resp_rsp_codes code)
{
ft_send_resp_code(cmd, code);
ft_free_cmd(cmd);
}
/*
* Handle Task Management Request.
*/
static void ft_send_tm(struct ft_cmd *cmd)
{
struct fcp_cmnd *fcp;
int rc;
u8 tm_func;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
tm_func = TMR_LUN_RESET;
break;
case FCP_TMF_TGT_RESET:
tm_func = TMR_TARGET_WARM_RESET;
break;
case FCP_TMF_CLR_TASK_SET:
tm_func = TMR_CLEAR_TASK_SET;
break;
case FCP_TMF_ABT_TASK_SET:
tm_func = TMR_ABORT_TASK_SET;
break;
case FCP_TMF_CLR_ACA:
tm_func = TMR_CLEAR_ACA;
break;
default:
/*
* FCP4r01 indicates having a combination of
* tm_flags set is invalid.
*/
pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
/* FIXME: Add referenced task tag for ABORT_TASK */
rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
cmd, tm_func, GFP_KERNEL, 0, 0);
if (rc < 0)
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
/*
* Send status from completed task management request.
*/
void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct se_tmr_req *tmr = se_cmd->se_tmr_req;
enum fcp_resp_rsp_codes code;
if (cmd->aborted)
return;
switch (tmr->response) {
case TMR_FUNCTION_COMPLETE:
code = FCP_TMF_CMPL;
break;
case TMR_LUN_DOES_NOT_EXIST:
code = FCP_TMF_INVALID_LUN;
break;
case TMR_FUNCTION_REJECTED:
code = FCP_TMF_REJECTED;
break;
case TMR_TASK_DOES_NOT_EXIST:
case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
default:
code = FCP_TMF_FAILED;
break;
}
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
}
void ft_aborted_task(struct se_cmd *se_cmd)
{
return;
}
static void ft_send_work(struct work_struct *work);
/*
* Handle incoming FCP command.
*/
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
int tag;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
goto busy;
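/*
 * Commands are pre-allocated in sess_cmd_map when the session is
 * set up (see transport_init_session_tags() in ft_sess_create());
 * the tag is simply an index into that array.
 */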
cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct ft_cmd));
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
cmd->seq = lport->tt.seq_assign(lport, fp);
if (!cmd->seq) {
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
INIT_WORK(&cmd->work, ft_send_work);
queue_work(sess->tport->tpg->workqueue, &cmd->work);
return;
busy:
pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
}
/*
* Handle incoming FCP frame.
* Caller has verified that the frame is type FCP.
*/
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (fh->fh_r_ctl) {
case FC_RCTL_DD_UNSOL_CMD: /* command */
ft_recv_cmd(sess, fp);
break;
case FC_RCTL_DD_SOL_DATA: /* write data */
case FC_RCTL_DD_UNSOL_CTL:
case FC_RCTL_DD_SOL_CTL:
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
break;
}
}
/*
* Send new command to target.
*/
static void ft_send_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct fcp_cmnd *fcp;
int data_dir = 0;
int task_attr;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
if (!fcp)
goto err;
if (fcp->fc_flags & FCP_CFL_LEN_MASK)
goto err; /* not handling longer CDBs yet */
/*
* Check for FCP task management flags
*/
if (fcp->fc_tm_flags) {
ft_send_tm(cmd);
return;
}
switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
case 0:
data_dir = DMA_NONE;
break;
case FCP_CFL_RDDATA:
data_dir = DMA_FROM_DEVICE;
break;
case FCP_CFL_WRDATA:
data_dir = DMA_TO_DEVICE;
break;
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
/*
* Locate the SAM Task Attr from fc_pri_ta
*/
switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
case FCP_PTA_HEADQ:
task_attr = MSG_HEAD_TAG;
break;
case FCP_PTA_ORDERED:
task_attr = MSG_ORDERED_TAG;
break;
case FCP_PTA_ACA:
task_attr = MSG_ACA_TAG;
break;
case FCP_PTA_SIMPLE: /* Fallthrough */
default:
task_attr = MSG_SIMPLE_TAG;
}
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir, 0))
goto err;
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
return;
err:
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}


@ -0,0 +1,642 @@
/*******************************************************************************
* Filename: tcm_fc.c
*
* This file contains the configfs implementation for TCM_fc fabric node.
* Based on tcm_loop_configfs.c
*
* Copyright (c) 2010 Cisco Systems, Inc.
* Copyright (c) 2009,2010 Rising Tide, Inc.
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
struct target_fabric_configfs *ft_configfs;
static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
unsigned int ft_debug_logging;
module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
/*
* Parse WWN.
* If strict, we require lower-case hex and colon separators to be sure
* the name is the same as what would be generated by ft_format_wwn()
* so the name and wwn are mapped one-to-one.
*/
static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
{
const char *cp;
char c;
u32 byte = 0;
u32 pos = 0;
u32 err;
int val;
*wwn = 0;
for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
c = *cp;
if (c == '\n' && cp[1] == '\0')
continue;
if (strict && pos++ == 2 && byte++ < 7) {
pos = 0;
if (c == ':')
continue;
err = 1;
goto fail;
}
if (c == '\0') {
err = 2;
if (strict && byte != 8)
goto fail;
return cp - name;
}
err = 3;
val = hex_to_bin(c);
if (val < 0 || (strict && isupper(c)))
goto fail;
*wwn = (*wwn << 4) | val;
}
err = 4;
fail:
pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
{
u8 b[8];
put_unaligned_be64(wwn, b);
return snprintf(buf, len,
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
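/*
 * Illustrative sketch (not part of the driver, not compiled): round-
 * tripping a WWN through ft_format_wwn() and ft_parse_wwn(). A WWPN
 * such as 0x2000000573de9a8e formats as "20:00:00:05:73:de:9a:8e",
 * and parsing that string back in strict mode yields the same value.
 */
#if 0
static void example_wwn_roundtrip(void)
{
	char buf[FT_NAMELEN];
	u64 wwn = 0x2000000573de9a8eULL;	/* example WWPN */
	u64 parsed;

	ft_format_wwn(buf, sizeof(buf), wwn);
	if (ft_parse_wwn(buf, &parsed, 1) > 0 && parsed == wwn)
		pr_debug("wwn round-trip ok: %s\n", buf);
}
#endif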
static ssize_t ft_wwn_show(void *arg, char *buf)
{
u64 *wwn = arg;
ssize_t len;
len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
buf[len++] = '\n';
return len;
}
static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
{
ssize_t ret;
u64 wwn;
ret = ft_parse_wwn(buf, &wwn, 0);
if (ret > 0)
*(u64 *)arg = wwn;
return ret;
}
/*
* ACL auth ops.
*/
static ssize_t ft_nacl_show_port_name(
struct se_node_acl *se_nacl,
char *page)
{
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_show(&acl->node_auth.port_name, page);
}
static ssize_t ft_nacl_store_port_name(
struct se_node_acl *se_nacl,
const char *page,
size_t count)
{
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_store(&acl->node_auth.port_name, page, count);
}
TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
static ssize_t ft_nacl_show_node_name(
struct se_node_acl *se_nacl,
char *page)
{
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_show(&acl->node_auth.node_name, page);
}
static ssize_t ft_nacl_store_node_name(
struct se_node_acl *se_nacl,
const char *page,
size_t count)
{
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_store(&acl->node_auth.node_name, page, count);
}
TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
static struct configfs_attribute *ft_nacl_base_attrs[] = {
&ft_nacl_port_name.attr,
&ft_nacl_node_name.attr,
NULL,
};
/*
* ACL ops.
*/
/*
* Add ACL for an initiator. The ACL is named arbitrarily.
* The port_name and/or node_name are attributes.
*/
static struct se_node_acl *ft_add_acl(
struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
{
struct ft_node_acl *acl;
struct ft_tpg *tpg;
u64 wwpn;
u32 q_depth;
pr_debug("add acl %s\n", name);
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL);
acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
if (!acl)
return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;
q_depth = 32; /* XXX bogus default - get from tpg? */
return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
&acl->se_node_acl, name, q_depth);
}
static void ft_del_acl(struct se_node_acl *se_acl)
{
struct se_portal_group *se_tpg = se_acl->se_tpg;
struct ft_tpg *tpg;
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
pr_debug("del acl %s\n",
config_item_name(&se_acl->acl_group.cg_item));
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
acl, se_acl, tpg, &tpg->se_tpg);
core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
kfree(acl);
}
struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
{
struct ft_node_acl *found = NULL;
struct ft_node_acl *acl;
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_node_acl *se_acl;
spin_lock_irq(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
break;
}
}
spin_unlock_irq(&se_tpg->acl_node_lock);
return found;
}
static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
struct ft_node_acl *acl;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
if (!acl) {
pr_err("Unable to allocate struct ft_node_acl\n");
return NULL;
}
pr_debug("acl %p\n", acl);
return &acl->se_node_acl;
}
static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
struct se_node_acl *se_acl)
{
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
pr_debug("acl %p\n", acl);
kfree(acl);
}
/*
* local_port port_group (tpg) ops.
*/
static struct se_portal_group *ft_add_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
struct workqueue_struct *wq;
unsigned long index;
int ret;
pr_debug("tcm_fc: add tpg %s\n", name);
/*
* Name must be "tpgt_" followed by the index.
*/
if (strstr(name, "tpgt_") != name)
return NULL;
ret = kstrtoul(name + 5, 10, &index);
if (ret)
return NULL;
if (index > UINT_MAX)
return NULL;
if (index != 1) {
pr_err("Error, a single TPG=1 is used for HW port mappings\n");
return ERR_PTR(-ENOSYS);
}
ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg)
return NULL;
tpg->index = index;
tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
wq = alloc_workqueue("tcm_fc", 0, 1);
if (!wq) {
kfree(tpg);
return NULL;
}
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
destroy_workqueue(wq);
kfree(tpg);
return NULL;
}
tpg->workqueue = wq;
mutex_lock(&ft_lport_lock);
ft_wwn->tpg = tpg;
mutex_unlock(&ft_lport_lock);
return &tpg->se_tpg;
}
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
destroy_workqueue(tpg->workqueue);
/* Wait for sessions to be freed thru RCU, for BUG_ON below */
synchronize_rcu();
mutex_lock(&ft_lport_lock);
ft_wwn->tpg = NULL;
if (tpg->tport) {
tpg->tport->tpg = NULL;
tpg->tport = NULL;
}
mutex_unlock(&ft_lport_lock);
core_tpg_deregister(se_tpg);
kfree(tpg);
}
/*
* Verify that an lport is configured to use the tcm_fc module, and return
* the target port group that should be used.
*
* The caller holds ft_lport_lock.
*/
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
{
struct ft_lport_wwn *ft_wwn;
list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
if (ft_wwn->wwpn == lport->wwpn)
return ft_wwn->tpg;
}
return NULL;
}
/*
* target config instance ops.
*/
/*
* Add lport to allowed config.
* The name is the WWPN in lower-case ASCII, colon-separated bytes.
*/
static struct se_wwn *ft_add_wwn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_lport_wwn *old_ft_wwn;
u64 wwpn;
pr_debug("add wwn %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
if (!ft_wwn)
return NULL;
ft_wwn->wwpn = wwpn;
mutex_lock(&ft_lport_lock);
list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
if (old_ft_wwn->wwpn == wwpn) {
mutex_unlock(&ft_lport_lock);
kfree(ft_wwn);
return NULL;
}
}
list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
mutex_unlock(&ft_lport_lock);
return &ft_wwn->se_wwn;
}
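/*
 * Usage sketch (assumed paths, standard configfs mount point): with
 * the fabric registered as "fc" below, an lport is exposed by creating
 * its WWPN directory and the single allowed TPG, e.g.:
 *
 *	mkdir /sys/kernel/config/target/fc/20:00:00:05:73:de:9a:8e
 *	mkdir /sys/kernel/config/target/fc/20:00:00:05:73:de:9a:8e/tpgt_1
 *
 * The WWPN must be lower-case, colon-separated hex so it parses in
 * strict mode and matches ft_format_wwn() output one-to-one.
 */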
static void ft_del_wwn(struct se_wwn *wwn)
{
struct ft_lport_wwn *ft_wwn = container_of(wwn,
struct ft_lport_wwn, se_wwn);
pr_debug("del wwn %s\n", ft_wwn->name);
mutex_lock(&ft_lport_lock);
list_del(&ft_wwn->ft_wwn_node);
mutex_unlock(&ft_lport_lock);
kfree(ft_wwn);
}
static ssize_t ft_wwn_show_attr_version(
struct target_fabric_configfs *tf,
char *page)
{
return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
}
TF_WWN_ATTR_RO(ft, version);
static struct configfs_attribute *ft_wwn_attrs[] = {
&ft_wwn_version.attr,
NULL,
};
static char *ft_get_fabric_name(void)
{
return "fc";
}
static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
return tpg->lport_wwn->name;
}
static u16 ft_get_tag(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
return tpg->index;
}
static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
{
return 1;
}
static int ft_check_false(struct se_portal_group *se_tpg)
{
return 0;
}
static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
{
}
static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
return tpg->index;
}
static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
.tpg_get_wwn = ft_get_fabric_wwn,
.tpg_get_tag = ft_get_tag,
.tpg_get_default_depth = ft_get_default_depth,
.tpg_get_pr_transport_id = fc_get_pr_transport_id,
.tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
.tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
.tpg_check_demo_mode = ft_check_false,
.tpg_check_demo_mode_cache = ft_check_false,
.tpg_check_demo_mode_write_protect = ft_check_false,
.tpg_check_prod_mode_write_protect = ft_check_false,
.tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
.tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd = ft_release_cmd,
.shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = ft_write_pending,
.write_pending_status = ft_write_pending_status,
.set_default_node_attributes = ft_set_default_node_attr,
.get_task_tag = ft_get_task_tag,
.get_cmd_state = ft_get_cmd_state,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
.aborted_task = ft_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
.fabric_make_wwn = &ft_add_wwn,
.fabric_drop_wwn = &ft_del_wwn,
.fabric_make_tpg = &ft_add_tpg,
.fabric_drop_tpg = &ft_del_tpg,
.fabric_post_link = NULL,
.fabric_pre_unlink = NULL,
.fabric_make_np = NULL,
.fabric_drop_np = NULL,
.fabric_make_nodeacl = &ft_add_acl,
.fabric_drop_nodeacl = &ft_del_acl,
};
static int ft_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
/*
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
if (IS_ERR(fabric)) {
pr_err("%s: target_fabric_configfs_init() failed!\n",
__func__);
return PTR_ERR(fabric);
}
fabric->tf_ops = ft_fabric_ops;
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
ft_nacl_base_attrs;
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* register the fabric for use within TCM
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
pr_debug("target_fabric_configfs_register() for"
" FC Target failed!\n");
target_fabric_configfs_free(fabric);
return -1;
}
/*
* Setup our local pointer to *fabric.
*/
ft_configfs = fabric;
return 0;
}
static void ft_deregister_configfs(void)
{
if (!ft_configfs)
return;
target_fabric_configfs_deregister(ft_configfs);
ft_configfs = NULL;
}
static struct notifier_block ft_notifier = {
.notifier_call = ft_lport_notify
};
static int __init ft_init(void)
{
if (ft_register_configfs())
return -1;
if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
ft_deregister_configfs();
return -1;
}
blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
fc_lport_iterate(ft_lport_add, NULL);
return 0;
}
static void __exit ft_exit(void)
{
blocking_notifier_chain_unregister(&fc_lport_notifier_head,
&ft_notifier);
fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
fc_lport_iterate(ft_lport_del, NULL);
ft_deregister_configfs();
synchronize_rcu();
}
MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
MODULE_LICENSE("GPL");
module_init(ft_init);
module_exit(ft_exit);


@ -0,0 +1,380 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
*
* Copyright (c) 2007 Intel Corporation. All rights reserved.
* Copyright (c) 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2008 Mike Christie
* Copyright (c) 2009 Rising Tide, Inc.
* Copyright (c) 2009 Linux-iSCSI.org
* Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
/*
* Deliver read data back to initiator.
* XXX TBD handle resource problems later.
*/
int ft_queue_data_in(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
struct fc_frame *fp = NULL;
struct fc_exch *ep;
struct fc_lport *lport;
struct scatterlist *sg = NULL;
size_t remaining;
u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
u32 mem_off = 0;
u32 fh_off = 0;
u32 frame_off = 0;
size_t frame_len = 0;
size_t mem_len = 0;
size_t tlen;
size_t off_in_page;
struct page *page = NULL;
int use_sg;
int error;
void *page_addr;
void *from;
void *to = NULL;
if (cmd->aborted)
return 0;
if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
goto queue_status;
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
/*
* Setup to use first mem list entry, unless no data.
*/
BUG_ON(remaining && !se_cmd->t_data_sg);
if (remaining) {
sg = se_cmd->t_data_sg;
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
/* no scatter/gather in skb for odd word length due to fc_seq_send() */
use_sg = !(remaining % 4);
while (remaining) {
struct fc_seq *seq = cmd->seq;
if (!seq) {
pr_debug("%s: Command aborted, xid 0x%x\n",
__func__, ep->xid);
break;
}
if (!mem_len) {
sg = sg_next(sg);
mem_len = min((size_t)sg->length, remaining);
mem_off = sg->offset;
page = sg_page(sg);
}
if (!frame_len) {
/*
 * If the lport has the Large Send Offload (LSO)
 * capability, allow 'frame_len' to be as big as
 * 'lso_max'; otherwise cap it at the session's
 * max_frame size.
 */
frame_len = (lport->seq_offload) ? lport->lso_max :
cmd->sess->max_frame;
frame_len = min(frame_len, remaining);
fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
if (!fp)
return -ENOMEM;
to = fc_frame_payload_get(fp, 0);
fh_off = frame_off;
frame_off += frame_len;
/*
 * Set up the frame's max payload, which the base
 * driver uses to tell the HW the maximum frame size,
 * so that the HW can fragment appropriately based on
 * the "gso_max_size" of the underlying netdev.
 */
fr_max_payload(fp) = cmd->sess->max_frame;
}
tlen = min(mem_len, frame_len);
if (use_sg) {
off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
skb_shinfo(fp_skb(fp))->nr_frags,
page, off_in_page, tlen);
fr_len(fp) += tlen;
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize +=
PAGE_SIZE << compound_order(page);
} else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = from;
from += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
to += tlen;
}
mem_off += tlen;
mem_len -= tlen;
frame_len -= tlen;
remaining -= tlen;
if (frame_len &&
(skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
continue;
if (!remaining)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
error = lport->tt.seq_send(lport, seq, fp);
if (error) {
pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
/*
* Go ahead and set TASK_SET_FULL status ignoring the
* rest of the DataIN, and immediately attempt to
* send the response via ft_queue_status() in order
* to notify the initiator that it should reduce its
* per LUN queue_depth.
*/
se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
break;
}
}
queue_status:
return ft_queue_status(se_cmd);
}
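/*
 * Minimal sketch (illustrative, not compiled) of how the loop above
 * sizes each DataIN frame: with sequence offload the frame may be as
 * large as lso_max, otherwise it is capped at the session's negotiated
 * max_frame, and in both cases at the bytes still remaining.
 */
#if 0
static size_t example_frame_len(struct fc_lport *lport, struct ft_sess *sess,
				size_t remaining)
{
	size_t len = lport->seq_offload ? lport->lso_max : sess->max_frame;

	return min(len, remaining);
}
#endif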
static void ft_execute_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
target_execute_cmd(&cmd->se_cmd);
}
/*
* Receive write data frame.
*/
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct fc_seq *seq = cmd->seq;
struct fc_exch *ep;
struct fc_lport *lport;
struct fc_frame_header *fh;
struct scatterlist *sg = NULL;
u32 mem_off = 0;
u32 rel_off;
size_t frame_len;
size_t mem_len = 0;
size_t tlen;
struct page *page = NULL;
void *page_addr;
void *from;
void *to;
u32 f_ctl;
void *buf;
fh = fc_frame_header_get(fp);
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
f_ctl = ntoh24(fh->fh_f_ctl);
ep = fc_seq_exch(seq);
lport = ep->lp;
if (cmd->was_ddp_setup) {
BUG_ON(!ep);
BUG_ON(!lport);
/*
* Since DDP (Large Rx offload) was setup for this request,
* payload is expected to be copied directly to user buffers.
*/
buf = fc_frame_payload_get(fp, 1);
if (buf)
pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
"cmd->sg_cnt 0x%x. DDP was setup"
" hence not expected to receive frame with "
"payload, Frame will be dropped if"
"'Sequence Initiative' bit in f_ctl is"
"not set\n", __func__, ep->xid, f_ctl,
se_cmd->t_data_sg, se_cmd->t_data_nents);
/*
 * Invalidate the HW DDP context if one was set up for
 * this command. Invalidation is required in both the
 * success and the error case.
 */
ft_invl_hw_context(cmd);
/*
 * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the
 * last write data frame was received successfully: the payload
 * was posted directly to the user buffers and only the last
 * frame's header was posted to the receive queue.
 *
 * If the TSI bit is not set, there was a DDP error, so drop
 * the packet and let explicit ABORTS from the other end of the
 * exchange trigger the recovery.
 */
if (f_ctl & FC_FC_SEQ_INIT)
goto last_frame;
else
goto drop;
}
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
if (frame_len <= sizeof(*fh))
goto drop;
frame_len -= sizeof(*fh);
from = fc_frame_payload_get(fp, 0);
if (rel_off >= se_cmd->data_length)
goto drop;
if (frame_len + rel_off > se_cmd->data_length)
frame_len = se_cmd->data_length - rel_off;
/*
* Setup to use first mem list entry, unless no data.
*/
BUG_ON(frame_len && !se_cmd->t_data_sg);
if (frame_len) {
sg = se_cmd->t_data_sg;
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
while (frame_len) {
if (!mem_len) {
sg = sg_next(sg);
mem_len = sg->length;
mem_off = sg->offset;
page = sg_page(sg);
}
if (rel_off >= mem_len) {
rel_off -= mem_len;
mem_len = 0;
continue;
}
mem_off += rel_off;
mem_len -= rel_off;
rel_off = 0;
tlen = min(mem_len, frame_len);
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = to;
to += mem_off & ~PAGE_MASK;
tlen = min(tlen, (size_t)(PAGE_SIZE -
(mem_off & ~PAGE_MASK)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
from += tlen;
frame_len -= tlen;
mem_off += tlen;
mem_len -= tlen;
cmd->write_data_len += tlen;
}
last_frame:
if (cmd->write_data_len == se_cmd->data_length) {
INIT_WORK(&cmd->work, ft_execute_work);
queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
}
drop:
fc_frame_free(fp);
}
/*
 * Handle and clean up any HW-specific resources when
 * ABORTS, errors, or timeouts are received.
 */
void ft_invl_hw_context(struct ft_cmd *cmd)
{
struct fc_seq *seq;
struct fc_exch *ep = NULL;
struct fc_lport *lport = NULL;
BUG_ON(!cmd);
seq = cmd->seq;
/* Cleanup the DDP context in HW if DDP was setup */
if (cmd->was_ddp_setup && seq) {
ep = fc_seq_exch(seq);
if (ep) {
lport = ep->lp;
if (lport && (ep->xid <= lport->lro_xid)) {
/*
 * "ddp_done" triggers invalidation of the
 * HW-specific DDP context
 */
cmd->write_data_len = lport->tt.ddp_done(lport,
ep->xid);
/*
 * Reset the flag to record that the HW DDP
 * context has been invalidated, avoiding
 * re-invalidation of the same context (the
 * context is identified by ep->xid)
 */
cmd->was_ddp_setup = 0;
}
}
}
}


@ -0,0 +1,504 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kref.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
static void ft_sess_delete_all(struct ft_tport *);
/*
* Lookup or allocate target local port.
* Caller holds ft_lport_lock.
*/
static struct ft_tport *ft_tport_get(struct fc_lport *lport)
{
struct ft_tpg *tpg;
struct ft_tport *tport;
int i;
tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
lockdep_is_held(&ft_lport_lock));
if (tport && tport->tpg)
return tport;
tpg = ft_lport_find_tpg(lport);
if (!tpg)
return NULL;
if (tport) {
tport->tpg = tpg;
tpg->tport = tport;
return tport;
}
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport)
return NULL;
tport->lport = lport;
tport->tpg = tpg;
tpg->tport = tport;
for (i = 0; i < FT_SESS_HASH_SIZE; i++)
INIT_HLIST_HEAD(&tport->hash[i]);
rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
return tport;
}
/*
* Delete a target local port.
* Caller holds ft_lport_lock.
*/
static void ft_tport_delete(struct ft_tport *tport)
{
struct fc_lport *lport;
struct ft_tpg *tpg;
ft_sess_delete_all(tport);
lport = tport->lport;
BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
tpg = tport->tpg;
if (tpg) {
tpg->tport = NULL;
tport->tpg = NULL;
}
kfree_rcu(tport, rcu);
}
/*
* Add local port.
* Called thru fc_lport_iterate().
*/
void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
ft_tport_get(lport);
mutex_unlock(&ft_lport_lock);
}
/*
* Delete local port.
* Called thru fc_lport_iterate().
*/
void ft_lport_del(struct fc_lport *lport, void *arg)
{
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
tport = lport->prov[FC_TYPE_FCP];
if (tport)
ft_tport_delete(tport);
mutex_unlock(&ft_lport_lock);
}
/*
* Notification of local port change from libfc.
* Create or delete local port and associated tport.
*/
int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
{
struct fc_lport *lport = arg;
switch (event) {
case FC_LPORT_EV_ADD:
ft_lport_add(lport, NULL);
break;
case FC_LPORT_EV_DEL:
ft_lport_del(lport, NULL);
break;
}
return NOTIFY_DONE;
}
/*
* Hash function for FC_IDs.
*/
static u32 ft_sess_hash(u32 port_id)
{
return hash_32(port_id, FT_SESS_HASH_BITS);
}
/*
* Find session in local port.
* Sessions and hash lists are RCU-protected.
* A reference is taken which must be eventually freed.
*/
static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
if (!tport)
goto out;
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
pr_debug("port_id %x found %p\n", port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
pr_debug("port_id %x not found\n", port_id);
return NULL;
}
/*
* Allocate session and enter it in the hash for the local port.
* Caller holds ft_lport_lock.
*/
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
struct ft_node_acl *acl)
{
struct ft_sess *sess;
struct hlist_head *head;
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash)
if (sess->port_id == port_id)
return sess;
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
return NULL;
sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}
sess->se_sess->se_node_acl = &acl->se_node_acl;
sess->tport = tport;
sess->port_id = port_id;
kref_init(&sess->kref); /* ref for table entry */
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
pr_debug("port_id %x sess %p\n", port_id, sess);
transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
sess->se_sess, sess);
return sess;
}
/*
* Unhash the session.
* Caller holds ft_lport_lock.
*/
static void ft_sess_unhash(struct ft_sess *sess)
{
struct ft_tport *tport = sess->tport;
hlist_del_rcu(&sess->hash);
BUG_ON(!tport->sess_count);
tport->sess_count--;
sess->port_id = -1;
sess->params = 0;
}
/*
* Delete session from hash.
* Caller holds ft_lport_lock.
*/
static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
{
struct hlist_head *head;
struct ft_sess *sess;
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
ft_sess_unhash(sess);
return sess;
}
}
return NULL;
}
/*
* Delete all sessions from tport.
* Caller holds ft_lport_lock.
*/
static void ft_sess_delete_all(struct ft_tport *tport)
{
struct hlist_head *head;
struct ft_sess *sess;
for (head = tport->hash;
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
}
}
}
/*
* TCM ops for sessions.
*/
/*
* Determine whether the session is allowed to be shut down in the current
* context. Returns non-zero if the session should be shut down.
*/
int ft_sess_shutdown(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
pr_debug("port_id %x\n", sess->port_id);
return 1;
}
/*
* Remove session and send PRLO.
* This is called when the ACL is being deleted or queue depth is changing.
*/
void ft_sess_close(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
u32 port_id;
mutex_lock(&ft_lport_lock);
port_id = sess->port_id;
if (port_id == -1) {
mutex_unlock(&ft_lport_lock);
return;
}
pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
ft_sess_put(sess);
/* XXX Send LOGO or PRLO */
synchronize_rcu(); /* let transport deregister happen */
}
u32 ft_sess_get_index(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
return sess->port_id; /* XXX TBD probably not what is needed */
}
u32 ft_sess_get_port_name(struct se_session *se_sess,
unsigned char *buf, u32 len)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
return ft_format_wwn(buf, len, sess->port_name);
}
/*
* libfc ops involving sessions.
*/
static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
struct ft_tport *tport;
struct ft_sess *sess;
struct ft_node_acl *acl;
u32 fcp_parm;
tport = ft_tport_get(rdata->local_port);
if (!tport)
goto not_target; /* not a target for this local port */
acl = ft_acl_get(tport->tpg, rdata);
if (!acl)
goto not_target; /* no target for this remote */
if (!rspp)
goto fill;
if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
return FC_SPP_RESP_NO_PA;
/*
* If both target and initiator bits are off, the SPP is invalid.
*/
fcp_parm = ntohl(rspp->spp_params);
if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
return FC_SPP_RESP_INVL;
/*
* Create session (image pair) only if requested by
* EST_IMG_PAIR flag and if the requestor is an initiator.
*/
if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, acl);
if (!sess)
return FC_SPP_RESP_RES;
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
sess->port_name = rdata->ids.port_name;
sess->max_frame = rdata->maxframe_size;
/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
/*
* OR our service parameters in with those of the other provider (the initiator), if any.
*/
fill:
fcp_parm = ntohl(spp->spp_params);
fcp_parm &= ~FCP_SPPF_RETRY;
spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
return FC_SPP_RESP_ACK;
not_target:
fcp_parm = ntohl(spp->spp_params);
fcp_parm &= ~FCP_SPPF_TARG_FCN;
spp->spp_params = htonl(fcp_parm);
return 0;
}
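/*
 * Worked example (illustrative): an initiator PRLI whose service
 * parameter page has FCP_SPPF_INIT_FCN set and FC_SPP_EST_IMG_PAIR
 * requested creates a session and is answered FC_SPP_RESP_ACK with
 * FCP_SPPF_TARG_FCN OR'd into the response parameters; a page with
 * neither the initiator nor the target function bit set is rejected
 * with FC_SPP_RESP_INVL.
 */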
/**
* ft_prli() - Handle incoming or outgoing PRLI for the FCP target
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page (NULL for outgoing PRLI)
* @spp: response service parameter page
*
* Returns spp response code.
*/
static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
int ret;
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
pr_debug("port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
transport_deregister_session(sess->se_sess);
kfree_rcu(sess, rcu);
}
void ft_sess_put(struct ft_sess *sess)
{
int sess_held = atomic_read(&sess->kref.refcount);
BUG_ON(!sess_held);
kref_put(&sess->kref, ft_sess_free);
}
static void ft_prlo(struct fc_rport_priv *rdata)
{
struct ft_sess *sess;
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
lockdep_is_held(&ft_lport_lock));
if (!tport) {
mutex_unlock(&ft_lport_lock);
return;
}
sess = ft_sess_delete(tport, rdata->ids.port_id);
if (!sess) {
mutex_unlock(&ft_lport_lock);
return;
}
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(sess->se_sess);
ft_sess_put(sess); /* release from table */
rdata->prli_count--;
/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
/*
* Handle incoming FCP request.
* Caller has verified that the frame is type FCP.
*/
static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
pr_debug("sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
pr_debug("sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
}
ft_recv_req(sess, fp); /* must do ft_sess_put() */
}
/*
* Provider ops for libfc.
*/
struct fc4_prov ft_prov = {
.prli = ft_prli,
.prlo = ft_prlo,
.recv = ft_recv,
.module = THIS_MODULE,
};