Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

drivers/s390/scsi/Makefile Normal file

@@ -0,0 +1,9 @@
#
# Makefile for the S/390 specific device drivers
#
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
zfcp_unit.o
obj-$(CONFIG_ZFCP) += zfcp.o

drivers/s390/scsi/zfcp_aux.c Normal file

@@ -0,0 +1,579 @@
/*
* zfcp device driver
*
* Module interface and handling of zfcp data structures.
*
* Copyright IBM Corp. 2002, 2013
*/
/*
* Driver authors:
* Martin Peschke (originator of the driver)
* Raimund Schroeder
* Aron Zeh
* Wolfgang Taphorn
* Stefan Bader
* Heiko Carstens (kernel 2.6 port of the driver)
* Andreas Herrmann
* Maxim Shchetynin
* Volker Sameske
* Ralph Wuerthner
* Michael Loehr
* Swen Schillig
* Christof Schmitt
* Martin Petermann
* Sven Schuetz
* Steffen Maier
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
#define ZFCP_BUS_ID_SIZE 20
MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
MODULE_DESCRIPTION("FCP HBA driver");
MODULE_LICENSE("GPL");
static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");
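/*
* The "device" parameter is parsed by zfcp_init_device_setup() below as
* "<busid>,<wwpn>,<fcp_lun>". Illustrative example (values are made up,
* not taken from this commit):
*
*	zfcp.device=0.0.4000,0x5005076300c213e9,0x5022000000000000
*/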
static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
unsigned long size)
{
return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
struct ccw_device *cdev;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
if (!cdev)
return;
if (ccw_device_set_online(cdev))
goto out_ccw_device;
adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
goto out_ccw_device;
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (!port)
goto out_port;
flush_work(&port->rport_work);
zfcp_unit_add(port, lun);
put_device(&port->dev);
out_port:
zfcp_ccw_adapter_put(adapter);
out_ccw_device:
put_device(&cdev->dev);
return;
}
static void __init zfcp_init_device_setup(char *devstr)
{
char *token;
char *str, *str_saved;
char busid[ZFCP_BUS_ID_SIZE];
u64 wwpn, lun;
/* duplicate devstr and keep the original for sysfs presentation */
str_saved = kstrdup(devstr, GFP_KERNEL);
str = str_saved;
if (!str)
return;
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
strncpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
goto err_out;
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &lun))
goto err_out;
kfree(str_saved);
zfcp_init_device_configure(busid, wwpn, lun);
return;
err_out:
kfree(str_saved);
pr_err("%s is not a valid SCSI device\n", devstr);
}
static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
sizeof(struct fsf_qtcb));
if (!zfcp_fsf_qtcb_cache)
goto out_qtcb_cache;
zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
sizeof(struct zfcp_fc_req));
if (!zfcp_fc_req_cache)
goto out_fc_cache;
zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
if (!zfcp_scsi_transport_template)
goto out_transport;
scsi_transport_reserve_device(zfcp_scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
retval = ccw_driver_register(&zfcp_ccw_driver);
if (retval) {
pr_err("The zfcp device driver could not register with "
"the common I/O layer\n");
goto out_ccw_register;
}
if (init_device)
zfcp_init_device_setup(init_device);
return 0;
out_ccw_register:
fc_release_transport(zfcp_scsi_transport_template);
out_transport:
kmem_cache_destroy(zfcp_fc_req_cache);
out_fc_cache:
kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
return retval;
}
module_init(zfcp_module_init);
static void __exit zfcp_module_exit(void)
{
ccw_driver_unregister(&zfcp_ccw_driver);
fc_release_transport(zfcp_scsi_transport_template);
kmem_cache_destroy(zfcp_fc_req_cache);
kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
module_exit(zfcp_module_exit);
/**
* zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
* @adapter: pointer to adapter to search for port
* @wwpn: wwpn to search for
*
* Returns: pointer to zfcp_port or NULL
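*
* The returned port carries a device reference taken via get_device();
* the caller must release it with put_device(&port->dev).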
*/
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
u64 wwpn)
{
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
	if (port->wwpn == wwpn) {
		if (!get_device(&port->dev))
			port = NULL;
		read_unlock_irqrestore(&adapter->port_list_lock, flags);
		return port;
	}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return NULL;
}
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
adapter->pool.erp_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.erp_req)
return -ENOMEM;
adapter->pool.gid_pn_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.gid_pn_req)
return -ENOMEM;
adapter->pool.scsi_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.scsi_req)
return -ENOMEM;
adapter->pool.scsi_abort =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.scsi_abort)
return -ENOMEM;
adapter->pool.status_read_req =
mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
sizeof(struct zfcp_fsf_req));
if (!adapter->pool.status_read_req)
return -ENOMEM;
adapter->pool.qtcb_pool =
mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
adapter->pool.sr_data =
mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
if (!adapter->pool.sr_data)
return -ENOMEM;
adapter->pool.gid_pn =
mempool_create_slab_pool(1, zfcp_fc_req_cache);
if (!adapter->pool.gid_pn)
return -ENOMEM;
return 0;
}
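/*
* A failed allocation above may leave earlier pools in place; this is
* safe because zfcp_free_low_mem_buffers() checks each pool pointer
* before destroying it.
*/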
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
if (adapter->pool.erp_req)
mempool_destroy(adapter->pool.erp_req);
if (adapter->pool.scsi_req)
mempool_destroy(adapter->pool.scsi_req);
if (adapter->pool.scsi_abort)
mempool_destroy(adapter->pool.scsi_abort);
if (adapter->pool.qtcb_pool)
mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req);
if (adapter->pool.sr_data)
mempool_destroy(adapter->pool.sr_data);
if (adapter->pool.gid_pn)
mempool_destroy(adapter->pool.gid_pn);
}
/**
* zfcp_status_read_refill - refill the long running status_read_requests
* @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
*
* Returns: 0 on success, 1 otherwise
*
* If there are 16 or more status_read requests missing, an adapter reopen
* is triggered.
*/
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
	if (zfcp_fsf_status_read(adapter->qdio)) {
		if (atomic_read(&adapter->stat_miss) >=
		    adapter->stat_read_buf_num) {
			zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
			return 1;
		}
		break;
	} else
		atomic_dec(&adapter->stat_miss);
return 0;
}
static void _zfcp_status_read_scheduler(struct work_struct *work)
{
zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
stat_work));
}
static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
{
struct zfcp_adapter *adapter =
container_of(sl, struct zfcp_adapter, service_level);
seq_printf(m, "zfcp: %s microcode level %x\n",
dev_name(&adapter->ccw_device->dev),
adapter->fsf_lic_version);
}
static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
char name[TASK_COMM_LEN];
snprintf(name, sizeof(name), "zfcp_q_%s",
dev_name(&adapter->ccw_device->dev));
adapter->work_queue = create_singlethread_workqueue(name);
if (adapter->work_queue)
return 0;
return -ENOMEM;
}
static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
{
if (adapter->work_queue)
destroy_workqueue(adapter->work_queue);
adapter->work_queue = NULL;
}
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct ccw_device
*
* Returns: struct zfcp_adapter*
* Enqueues an adapter at the end of the adapter list in the driver data.
* All adapter internal structures are set up.
* Proc-fs entries are also created.
*/
struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
if (!get_device(&ccw_device->dev))
return ERR_PTR(-ENODEV);
adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
if (!adapter) {
put_device(&ccw_device->dev);
return ERR_PTR(-ENOMEM);
}
kref_init(&adapter->ref);
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
if (zfcp_qdio_setup(adapter))
goto failed;
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed;
adapter->req_list = zfcp_reqlist_alloc();
if (!adapter->req_list)
goto failed;
if (zfcp_dbf_adapter_register(adapter))
goto failed;
if (zfcp_setup_adapter_work_queue(adapter))
goto failed;
if (zfcp_fc_gs_setup(adapter))
goto failed;
rwlock_init(&adapter->port_list_lock);
INIT_LIST_HEAD(&adapter->port_list);
INIT_LIST_HEAD(&adapter->events.list);
INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
spin_lock_init(&adapter->events.list_lock);
init_waitqueue_head(&adapter->erp_ready_wq);
init_waitqueue_head(&adapter->erp_done_wqh);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
if (zfcp_erp_thread_setup(adapter))
goto failed;
adapter->service_level.seq_print = zfcp_print_sl;
dev_set_drvdata(&ccw_device->dev, adapter);
if (sysfs_create_group(&ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs))
goto failed;
/* report size limit per scatter-gather segment */
adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
if (!zfcp_scsi_adapter_register(adapter))
return adapter;
failed:
zfcp_adapter_unregister(adapter);
return ERR_PTR(-ENOMEM);
}
void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
{
struct ccw_device *cdev = adapter->ccw_device;
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
cancel_work_sync(&adapter->ns_up_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
zfcp_dbf_adapter_unregister(adapter);
zfcp_qdio_destroy(adapter->qdio);
zfcp_ccw_adapter_put(adapter); /* final put to release */
}
/**
* zfcp_adapter_release - remove the adapter from the resource list
* @ref: pointer to struct kref
* locks: adapter list write lock is assumed to be held by caller
*/
void zfcp_adapter_release(struct kref *ref)
{
struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
ref);
struct ccw_device *cdev = adapter->ccw_device;
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
kfree(adapter);
put_device(&cdev->dev);
}
static void zfcp_port_release(struct device *dev)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
zfcp_ccw_adapter_put(port->adapter);
kfree(port);
}
/**
* zfcp_port_enqueue - enqueue port to port list of adapter
* @adapter: adapter where remote port is added
* @wwpn: WWPN of the remote port to be enqueued
* @status: initial status for the port
* @d_id: destination id of the remote port to be enqueued
* Returns: pointer to enqueued port on success, ERR_PTR on error
*
* All port internal structures are set up and the sysfs entry is generated.
* d_id is used to enqueue ports with a well known address like the Directory
* Service for nameserver lookup.
*/
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
int retval = -ENOMEM;
kref_get(&adapter->ref);
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port) {
put_device(&port->dev);
retval = -EEXIST;
goto err_out;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
goto err_out;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
atomic_set(&port->units, 0);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
port->adapter = adapter;
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
port->dev.parent = &adapter->ccw_device->dev;
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
}
retval = -EINVAL;
if (device_register(&port->dev)) {
put_device(&port->dev);
goto err_out;
}
write_lock_irq(&adapter->port_list_lock);
list_add_tail(&port->list, &adapter->port_list);
write_unlock_irq(&adapter->port_list_lock);
atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
return port;
err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
}
/**
* zfcp_sg_free_table - free memory used by scatterlists
* @sg: pointer to scatterlist
* @count: number of scatterlist entries to be freed
*
* The scatterlist entries are expected to always reference pages.
*/
void zfcp_sg_free_table(struct scatterlist *sg, int count)
{
int i;
for (i = 0; i < count; i++, sg++)
if (sg)
free_page((unsigned long) sg_virt(sg));
else
break;
}
/**
* zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
* @sg: pointer to struct scatterlist
* @count: number of scatterlist entries to assign page-sized buffers to
*
* Returns: 0 on success, -ENOMEM otherwise
*/
int zfcp_sg_setup_table(struct scatterlist *sg, int count)
{
void *addr;
int i;
sg_init_table(sg, count);
for (i = 0; i < count; i++, sg++) {
addr = (void *) get_zeroed_page(GFP_KERNEL);
if (!addr) {
zfcp_sg_free_table(sg, i);
return -ENOMEM;
}
sg_set_buf(sg, addr, PAGE_SIZE);
}
return 0;
}
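/*
* Usage sketch (illustrative only, not part of the driver):
*
*	struct scatterlist sg[4];
*
*	if (zfcp_sg_setup_table(sg, 4))
*		return -ENOMEM;
*	... use the page-backed scatterlist ...
*	zfcp_sg_free_table(sg, 4);
*
* On failure, zfcp_sg_setup_table() has already freed the pages it
* assigned, so the caller needs no extra cleanup.
*/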

drivers/s390/scsi/zfcp_ccw.c Normal file

@@ -0,0 +1,318 @@
/*
* zfcp device driver
*
* Registration and callback for the s390 common I/O layer.
*
* Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
#define ZFCP_MODEL_PRIV 0x4
static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
unsigned long flags;
spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
adapter = dev_get_drvdata(&cdev->dev);
if (adapter)
kref_get(&adapter->ref);
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
return adapter;
}
void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
{
unsigned long flags;
spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
kref_put(&adapter->ref, zfcp_adapter_release);
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
}
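/*
* Every adapter reference obtained via zfcp_ccw_adapter_by_cdev() must
* be balanced by a zfcp_ccw_adapter_put(); the callbacks below follow
* this pattern.
*/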
/**
* zfcp_ccw_activate - activate adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
* @clear: Status flags to clear.
* @tag: s390dbf trace record tag
*/
static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_clear_adapter_status(adapter, clear);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
tag);
zfcp_erp_wait(adapter);
flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
zfcp_ccw_adapter_put(adapter);
return 0;
}
static struct ccw_device_id zfcp_ccw_device_id[] = {
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
{ CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
{},
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
/**
* zfcp_ccw_probe - probe function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer for each FCP
* device found on the current system. This is only a stub to make cio
* work: To only allocate adapter resources for devices actually used,
* the allocation is deferred to the first call to ccw_set_online.
*/
static int zfcp_ccw_probe(struct ccw_device *cdev)
{
return 0;
}
/**
* zfcp_ccw_remove - remove function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and removes an adapter
* from the system. Its task is to remove all units and ports that belong
* to this adapter; in addition, all of the adapter's resources are freed.
*/
static void zfcp_ccw_remove(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
struct zfcp_unit *unit, *u;
LIST_HEAD(unit_remove_lh);
LIST_HEAD(port_remove_lh);
ccw_device_set_offline(cdev);
adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return;
write_lock_irq(&adapter->port_list_lock);
list_for_each_entry_safe(port, p, &adapter->port_list, list) {
write_lock(&port->unit_list_lock);
list_for_each_entry_safe(unit, u, &port->unit_list, list)
list_move(&unit->list, &unit_remove_lh);
write_unlock(&port->unit_list_lock);
list_move(&port->list, &port_remove_lh);
}
write_unlock_irq(&adapter->port_list_lock);
zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
device_unregister(&unit->dev);
list_for_each_entry_safe(port, p, &port_remove_lh, list)
device_unregister(&port->dev);
zfcp_adapter_unregister(adapter);
}
/**
* zfcp_ccw_set_online - set_online function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an
* adapter into state online. The first call will allocate all
* adapter resources that will be retained until the device is removed
* via zfcp_ccw_remove.
*
* Setting an fcp device online means that it will be registered with
* the SCSI stack, that the QDIO queues will be set up and that the
* adapter will be opened.
*/
static int zfcp_ccw_set_online(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter) {
adapter = zfcp_adapter_enqueue(cdev);
if (IS_ERR(adapter)) {
dev_err(&cdev->dev,
"Setting up data structures for the "
"FCP adapter failed\n");
return PTR_ERR(adapter);
}
kref_get(&adapter->ref);
}
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_ccw_activate(cdev, 0, "ccsonl1");
/* scan for remote ports
either at the end of any successful adapter recovery
or only after the adapter recovery for setting a device online */
zfcp_fc_inverse_conditional_port_scan(adapter);
flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
* zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
* @set: Status flags to set.
* @tag: s390dbf trace record tag
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_set_adapter_status(adapter, set);
zfcp_erp_adapter_shutdown(adapter, 0, tag);
zfcp_erp_wait(adapter);
zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
* zfcp_ccw_set_offline - set_offline function of zfcp driver
* @cdev: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
static int zfcp_ccw_set_offline(struct ccw_device *cdev)
{
return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
}
/**
* zfcp_ccw_notify - ccw notify function
* @cdev: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
*
* This function gets called by the common i/o layer if an adapter has gone
* or reappeared.
*/
static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 1;
switch (event) {
case CIO_GONE:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
zfcp_dbf_hba_basic("ccnigo1", adapter);
break;
}
dev_warn(&cdev->dev, "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
break;
case CIO_NO_PATH:
dev_warn(&cdev->dev,
"The CHPID for the FCP device is offline\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
break;
case CIO_OPER:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
zfcp_dbf_hba_basic("ccniop1", adapter);
break;
}
dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccnoti4");
break;
case CIO_BOXED:
dev_warn(&cdev->dev, "The FCP device did not respond within "
"the specified time\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
break;
}
zfcp_ccw_adapter_put(adapter);
return 1;
}
/**
* zfcp_ccw_shutdown - handle shutdown from cio
* @cdev: device for adapter to shutdown.
*/
static void zfcp_ccw_shutdown(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return;
zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
zfcp_erp_wait(adapter);
zfcp_erp_thread_kill(adapter);
zfcp_ccw_adapter_put(adapter);
}
static int zfcp_ccw_suspend(struct ccw_device *cdev)
{
zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
return 0;
}
static int zfcp_ccw_thaw(struct ccw_device *cdev)
{
/* trace records for thaw and final shutdown during suspend
can only be found in system dump until the end of suspend
but not after resume because it's based on the memory image
right after the very first suspend (freeze) callback */
zfcp_ccw_activate(cdev, 0, "ccthaw1");
return 0;
}
static int zfcp_ccw_resume(struct ccw_device *cdev)
{
zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
return 0;
}
struct ccw_driver zfcp_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "zfcp",
},
.ids = zfcp_ccw_device_id,
.probe = zfcp_ccw_probe,
.remove = zfcp_ccw_remove,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
.freeze = zfcp_ccw_suspend,
.thaw = zfcp_ccw_thaw,
.restore = zfcp_ccw_resume,
};

drivers/s390/scsi/zfcp_dbf.c Normal file

@@ -0,0 +1,545 @@
/*
* zfcp device driver
*
* Debug traces for zfcp.
*
* Copyright IBM Corp. 2002, 2013
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"
static u32 dbfsize = 4;
module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
"number of pages for each debug feature area (default 4)");
static u32 dbflevel = 3;
module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
"log level for each debug feature area "
"(default 3, range 0..6)");
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
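/*
* zfcp_dbf_pl_write() splits a payload into records of at most
* ZFCP_DBF_PAY_MAX_REC (0x100) bytes. A 600-byte payload, for example,
* is traced as three records of 256, 256 and 88 bytes; each record is
* trimmed to its used size via zfcp_dbf_plen() and numbered through
* pl->counter.
*/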
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
u64 req_id)
{
struct zfcp_dbf_pay *pl = &dbf->pay_buf;
u16 offset = 0, rec_length;
spin_lock(&dbf->pay_lock);
memset(pl, 0, sizeof(*pl));
pl->fsf_req_id = req_id;
memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
while (offset < length) {
rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
(u16) (length - offset));
memcpy(pl->data, data + offset, rec_length);
debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
offset += rec_length;
pl->counter++;
}
spin_unlock(&dbf->pay_lock);
}
/**
* zfcp_dbf_hba_fsf_res - trace event for fsf responses
* @tag: identifier for event
* @req: request for which a response was received
*/
void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
struct fsf_qtcb_header *q_head = &req->qtcb->header;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_RES;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = req->fsf_command;
rec->fsf_seq_no = req->seq_no;
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
FSF_PROT_STATUS_QUAL_SIZE);
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
if (req->fsf_command != FSF_QTCB_FCP_CMND) {
rec->pl_len = q_head->log_length;
zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
rec->pl_len, "fsf_res", req->req_id);
}
debug_event(dbf->hba, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request providing the unsolicited status
*/
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_status_read_buffer *srb = req->data;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_USS;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = req->fsf_command;
if (!srb)
goto log;
rec->u.uss.status_type = srb->status_type;
rec->u.uss.status_subtype = srb->status_subtype;
rec->u.uss.d_id = ntoh24(srb->d_id);
rec->u.uss.lun = srb->fcp_lun;
memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
sizeof(rec->u.uss.queue_designator));
/* status read buffer payload length */
rec->pl_len = (!srb->length) ? 0 : srb->length -
offsetof(struct fsf_status_read_buffer, payload);
if (rec->pl_len)
zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
"fsf_uss", req->req_id);
log:
debug_event(dbf->hba, 2, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_bit_err - trace event for bit error conditions
* @tag: identifier for event
* @req: request which caused the bit_error condition
*/
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
struct fsf_status_read_buffer *sr_buf = req->data;
unsigned long flags;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_BIT;
rec->fsf_req_id = req->req_id;
rec->fsf_req_status = req->status;
rec->fsf_cmd = req->fsf_command;
memcpy(&rec->u.be, &sr_buf->payload.bit_error,
sizeof(struct fsf_bit_error_payload));
debug_event(dbf->hba, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
/**
* zfcp_dbf_hba_def_err - trace event for deferred error messages
* @adapter: pointer to struct zfcp_adapter
* @req_id: request id which caused the deferred error message
* @scount: number of sbals incl. the signaling sbal
* @pl: array of all involved sbals
*/
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
void **pl)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_pay *payload = &dbf->pay_buf;
unsigned long flags;
u16 length;
if (!pl)
return;
spin_lock_irqsave(&dbf->pay_lock, flags);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, "def_err", 7);
payload->fsf_req_id = req_id;
payload->counter = 0;
length = min((u16)sizeof(struct qdio_buffer),
(u16)ZFCP_DBF_PAY_MAX_REC);
while (payload->counter < scount && (char *)pl[payload->counter]) {
memcpy(payload->data, (char *)pl[payload->counter], length);
debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
payload->counter++;
}
spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
/**
* zfcp_dbf_hba_basic - trace event for basic adapter events
* @tag: identifier for event
* @adapter: pointer to struct zfcp_adapter
*/
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_BASIC;
debug_event(dbf->hba, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
{
rec->adapter_status = atomic_read(&adapter->status);
if (port) {
rec->port_status = atomic_read(&port->status);
rec->wwpn = port->wwpn;
rec->d_id = port->d_id;
}
if (sdev) {
rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
rec->lun = zfcp_scsi_dev_lun(sdev);
}
}
/**
* zfcp_dbf_rec_trig - trace event related to triggered recovery
* @tag: identifier for event
* @adapter: adapter on which the erp_action should run
* @port: remote port involved in the erp_action
* @sdev: scsi device involved in the erp_action
* @want: wanted erp_action
* @need: required erp_action
*
* The adapter->erp_lock has to be held.
*/
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
struct zfcp_port *port, struct scsi_device *sdev,
u8 want, u8 need)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
struct list_head *entry;
unsigned long flags;
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = ZFCP_DBF_REC_TRIG;
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
zfcp_dbf_set_common(rec, adapter, port, sdev);
list_for_each(entry, &adapter->erp_ready_head)
rec->u.trig.ready++;
list_for_each(entry, &adapter->erp_running_head)
rec->u.trig.running++;
rec->u.trig.want = want;
rec->u.trig.need = need;
debug_event(dbf->rec, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/**
* zfcp_dbf_rec_run - trace event related to running recovery
* @tag: identifier for event
* @erp: erp_action running
*/
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
struct zfcp_dbf *dbf = erp->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
unsigned long flags;
spin_lock_irqsave(&dbf->rec_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = ZFCP_DBF_REC_RUN;
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
rec->u.run.fsf_req_id = erp->fsf_req_id;
rec->u.run.rec_status = erp->status;
rec->u.run.rec_step = erp->step;
rec->u.run.rec_action = erp->action;
if (erp->sdev)
rec->u.run.rec_count =
atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
else if (erp->port)
rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
else
rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
debug_event(dbf->rec, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
u64 req_id, u32 d_id)
{
struct zfcp_dbf_san *rec = &dbf->san_buf;
u16 rec_len;
unsigned long flags;
spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = id;
rec->fsf_req_id = req_id;
rec->d_id = d_id;
rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
memcpy(rec->payload, data, rec_len);
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
debug_event(dbf->san, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
/**
* zfcp_dbf_san_req - trace event for issued SAN request
* @tag: identifier for event
* @fsf: request containing issued CT data
* @d_id: destination ID
*/
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
struct zfcp_dbf *dbf = fsf->adapter->dbf;
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
fsf->req_id, d_id);
}
/**
* zfcp_dbf_san_res - trace event for received SAN response
* @tag: identifier for event
* @fsf: request containing received CT data
*/
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
struct zfcp_dbf *dbf = fsf->adapter->dbf;
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
fsf->req_id, 0);
}
/**
* zfcp_dbf_san_in_els - trace event for incoming ELS
* @tag: identifier for event
* @fsf: request containing the received ELS
*/
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
struct zfcp_dbf *dbf = fsf->adapter->dbf;
struct fsf_status_read_buffer *srb =
(struct fsf_status_read_buffer *) fsf->data;
u16 length;
length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
fsf->req_id, ntoh24(srb->d_id));
}
/**
* zfcp_dbf_scsi - trace event for scsi commands
* @tag: identifier for event
* @sc: pointer to struct scsi_cmnd
* @fsf: pointer to struct zfcp_fsf_req
*/
void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sc->device->host->hostdata[0];
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
struct fcp_resp_with_ext *fcp_rsp;
struct fcp_resp_rsp_info *fcp_rsp_info;
unsigned long flags;
spin_lock_irqsave(&dbf->scsi_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_SCSI_CMND;
rec->scsi_result = sc->result;
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
if (fsf) {
rec->fsf_req_id = fsf->req_id;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
(u16)ZFCP_DBF_PAY_MAX_REC);
zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
"fcp_sns", fsf->req_id);
}
}
debug_event(dbf->scsi, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
struct debug_info *d;
d = debug_register(name, size, 1, rec_size);
if (!d)
return NULL;
debug_register_view(d, &debug_hex_ascii_view);
debug_set_level(d, dbflevel);
return d;
}
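/*
* debug_register(name, size, 1, rec_size) allocates "size" pages for a
* single debug area holding fixed-size records of rec_size bytes; the
* hex/ASCII view exposes them under
* /sys/kernel/debug/s390dbf/<name>/hex_ascii.
*/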
static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
if (!dbf)
return;
debug_unregister(dbf->scsi);
debug_unregister(dbf->san);
debug_unregister(dbf->hba);
debug_unregister(dbf->pay);
debug_unregister(dbf->rec);
kfree(dbf);
}
/**
* zfcp_dbf_adapter_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* Returns: -ENOMEM on error, 0 otherwise
*/
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
char name[DEBUG_MAX_NAME_LEN];
struct zfcp_dbf *dbf;
dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
if (!dbf)
return -ENOMEM;
spin_lock_init(&dbf->pay_lock);
spin_lock_init(&dbf->hba_lock);
spin_lock_init(&dbf->san_lock);
spin_lock_init(&dbf->scsi_lock);
spin_lock_init(&dbf->rec_lock);
/* debug feature area which records recovery activity */
sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
if (!dbf->rec)
goto err_out;
/* debug feature area which records HBA (FSF and QDIO) conditions */
sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
if (!dbf->hba)
goto err_out;
/* debug feature area which records payload info */
sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
if (!dbf->pay)
goto err_out;
/* debug feature area which records SAN command failures and recovery */
sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
if (!dbf->san)
goto err_out;
/* debug feature area which records SCSI command failures and recovery */
sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
if (!dbf->scsi)
goto err_out;
adapter->dbf = dbf;
return 0;
err_out:
zfcp_dbf_unregister(dbf);
return -ENOMEM;
}
/**
* zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
struct zfcp_dbf *dbf = adapter->dbf;
adapter->dbf = NULL;
zfcp_dbf_unregister(dbf);
}

drivers/s390/scsi/zfcp_dbf.h Normal file

@@ -0,0 +1,383 @@
/*
* zfcp device driver
* debug feature declarations
*
* Copyright IBM Corp. 2008, 2010
*/
#ifndef ZFCP_DBF_H
#define ZFCP_DBF_H
#include <scsi/fc/fc_fcp.h>
#include "zfcp_ext.h"
#include "zfcp_fsf.h"
#include "zfcp_def.h"
#define ZFCP_DBF_TAG_LEN 7
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
/**
* struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
* @ready: number of ready recovery actions
* @running: number of running recovery actions
* @want: wanted recovery action
* @need: needed recovery action
*/
struct zfcp_dbf_rec_trigger {
u32 ready;
u32 running;
u8 want;
u8 need;
} __packed;
/**
* struct zfcp_dbf_rec_running - trace record for running recovery
* @fsf_req_id: request id for fsf requests
* @rec_status: status of the fsf request
* @rec_step: current step of the recovery action
* @rec_action: current recovery action
* @rec_count: recovery counter
*/
struct zfcp_dbf_rec_running {
u64 fsf_req_id;
u32 rec_status;
u16 rec_step;
u8 rec_action;
u8 rec_count;
} __packed;
/**
* enum zfcp_dbf_rec_id - recovery trace record id
* @ZFCP_DBF_REC_TRIG: triggered recovery identifier
* @ZFCP_DBF_REC_RUN: running recovery identifier
*/
enum zfcp_dbf_rec_id {
ZFCP_DBF_REC_TRIG = 1,
ZFCP_DBF_REC_RUN = 2,
};
/**
* struct zfcp_dbf_rec - trace record for error recovery actions
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @lun: logical unit number
* @wwpn: world wide port number
* @d_id: destination ID
* @adapter_status: current status of the adapter
* @port_status: current status of the port
* @lun_status: current status of the lun
* @u.trig: structure zfcp_dbf_rec_trigger
* @u.run: structure zfcp_dbf_rec_running
*/
struct zfcp_dbf_rec {
u8 id;
char tag[ZFCP_DBF_TAG_LEN];
u64 lun;
u64 wwpn;
u32 d_id;
u32 adapter_status;
u32 port_status;
u32 lun_status;
union {
struct zfcp_dbf_rec_trigger trig;
struct zfcp_dbf_rec_running run;
} u;
} __packed;
/**
* enum zfcp_dbf_san_id - SAN trace record identifier
* @ZFCP_DBF_SAN_REQ: request trace record id
* @ZFCP_DBF_SAN_RES: response trace record id
* @ZFCP_DBF_SAN_ELS: extended link service record id
*/
enum zfcp_dbf_san_id {
ZFCP_DBF_SAN_REQ = 1,
ZFCP_DBF_SAN_RES = 2,
ZFCP_DBF_SAN_ELS = 3,
};
/**
* struct zfcp_dbf_san - trace record for SAN requests and responses
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @fsf_req_id: request id for fsf requests
* @payload: unformatted information related to request/response
* @d_id: destination id
*/
struct zfcp_dbf_san {
u8 id;
char tag[ZFCP_DBF_TAG_LEN];
u64 fsf_req_id;
u32 d_id;
#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
} __packed;
/**
* struct zfcp_dbf_hba_res - trace record for hba responses
* @req_issued: timestamp when request was issued
* @prot_status: protocol status
* @prot_status_qual: protocol status qualifier
* @fsf_status: fsf status
* @fsf_status_qual: fsf status qualifier
*/
struct zfcp_dbf_hba_res {
u64 req_issued;
u32 prot_status;
u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
} __packed;
/**
* struct zfcp_dbf_hba_uss - trace record for unsolicited status
* @status_type: type of unsolicited status
* @status_subtype: subtype of unsolicited status
* @d_id: destination ID
* @lun: logical unit number
* @queue_designator: queue designator
*/
struct zfcp_dbf_hba_uss {
u32 status_type;
u32 status_subtype;
u32 d_id;
u64 lun;
u64 queue_designator;
} __packed;
/**
* enum zfcp_dbf_hba_id - HBA trace record identifier
* @ZFCP_DBF_HBA_RES: response trace record
* @ZFCP_DBF_HBA_USS: unsolicited status trace record
* @ZFCP_DBF_HBA_BIT: bit error trace record
*/
enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
ZFCP_DBF_HBA_USS = 2,
ZFCP_DBF_HBA_BIT = 3,
ZFCP_DBF_HBA_BASIC = 4,
};
/**
* struct zfcp_dbf_hba - common trace record for HBA records
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @fsf_req_id: request id for fsf requests
* @fsf_req_status: status of fsf request
* @fsf_cmd: fsf command
* @fsf_seq_no: fsf sequence number
* @pl_len: length of payload stored as zfcp_dbf_pay
* @u: record type specific data
*/
struct zfcp_dbf_hba {
u8 id;
char tag[ZFCP_DBF_TAG_LEN];
u64 fsf_req_id;
u32 fsf_req_status;
u32 fsf_cmd;
u32 fsf_seq_no;
u16 pl_len;
union {
struct zfcp_dbf_hba_res res;
struct zfcp_dbf_hba_uss uss;
struct fsf_bit_error_payload be;
} u;
} __packed;
/**
* enum zfcp_dbf_scsi_id - scsi trace record identifier
* @ZFCP_DBF_SCSI_CMND: scsi command trace record
*/
enum zfcp_dbf_scsi_id {
ZFCP_DBF_SCSI_CMND = 1,
};
/**
* struct zfcp_dbf_scsi - common trace record for SCSI records
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
* @scsi_lun: scsi device logical unit number
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
* @fcp_rsp_info: FCP response info
* @scsi_opcode: scsi opcode
* @fsf_req_id: request id of fsf request
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of payload stored as zfcp_dbf_pay
* @fcp_rsp: response for FCP request
*/
struct zfcp_dbf_scsi {
u8 id;
char tag[ZFCP_DBF_TAG_LEN];
u32 scsi_id;
u32 scsi_lun;
u32 scsi_result;
u8 scsi_retries;
u8 scsi_allowed;
u8 fcp_rsp_info;
#define ZFCP_DBF_SCSI_OPCODE 16
u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
u64 fsf_req_id;
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
} __packed;
/**
* struct zfcp_dbf_pay - trace record for unformatted payload information
* @area: area this record is originated from
* @counter: ascending record number
* @fsf_req_id: request id of fsf request
* @data: unformatted data
*/
struct zfcp_dbf_pay {
u8 counter;
char area[ZFCP_DBF_TAG_LEN];
u64 fsf_req_id;
#define ZFCP_DBF_PAY_MAX_REC 0x100
char data[ZFCP_DBF_PAY_MAX_REC];
} __packed;
/**
* struct zfcp_dbf - main dbf trace structure
* @pay: reference to payload trace area
* @rec: reference to recovery trace area
* @hba: reference to hba trace area
* @san: reference to san trace area
* @scsi: reference to scsi trace area
* @pay_lock: lock protecting payload trace buffer
* @rec_lock: lock protecting recovery trace buffer
* @hba_lock: lock protecting hba trace buffer
* @san_lock: lock protecting san trace buffer
* @scsi_lock: lock protecting scsi trace buffer
* @pay_buf: pre-allocated buffer for payload
* @rec_buf: pre-allocated buffer for recovery
* @hba_buf: pre-allocated buffer for hba
* @san_buf: pre-allocated buffer for san
* @scsi_buf: pre-allocated buffer for scsi
*/
struct zfcp_dbf {
debug_info_t *pay;
debug_info_t *rec;
debug_info_t *hba;
debug_info_t *san;
debug_info_t *scsi;
spinlock_t pay_lock;
spinlock_t rec_lock;
spinlock_t hba_lock;
spinlock_t san_lock;
spinlock_t scsi_lock;
struct zfcp_dbf_pay pay_buf;
struct zfcp_dbf_rec rec_buf;
struct zfcp_dbf_hba hba_buf;
struct zfcp_dbf_san san_buf;
struct zfcp_dbf_scsi scsi_buf;
};
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
if (debug_level_enabled(req->adapter->dbf->hba, level))
zfcp_dbf_hba_fsf_res(tag, req);
}
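/*
* debug_level_enabled() compares the requested level with the area's
* current level (module parameter dbflevel, default 3): with the
* default, the level-1 error tags below are always traced, while
* "fs_open" (4), "fs_qtcb" (5) and "fs_norm" (6) require a raised
* dbflevel.
*/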
/**
* zfcp_dbf_hba_fsf_response - trace event for request completion
* @req: request that has been completed
*/
static inline
void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
} else if (qtcb->header.fsf_status != FSF_GOOD) {
zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
(req->fsf_command == FSF_QTCB_OPEN_LUN)) {
zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
} else if (qtcb->header.log_length) {
zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
} else {
zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
}
}
static inline
void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *)
scmd->device->host->hostdata[0];
if (debug_level_enabled(adapter->dbf->scsi, level))
zfcp_dbf_scsi(tag, scmd, req);
}
/**
* zfcp_dbf_scsi_result - trace event for SCSI command completion
* @scmd: SCSI command pointer
* @req: FSF request used to issue SCSI command
*/
static inline
void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
{
if (scmd->result != 0)
_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
else if (scmd->retries > 0)
_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
else
_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
}
/**
* zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
* @scmd: SCSI command pointer
*/
static inline
void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
{
_zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
}
/**
* zfcp_dbf_scsi_abort - trace event for SCSI command abort
* @tag: tag indicating success or failure of abort operation
* @scmd: SCSI command to be aborted
* @fsf_req: request containing abort (might be NULL)
*/
static inline
void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
struct zfcp_fsf_req *fsf_req)
{
_zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
}
/**
* zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
* @tag: tag indicating success or failure of reset operation
* @scmnd: SCSI command which caused this error recovery
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
if (flag == FCP_TMF_TGT_RESET)
memcpy(tmp_tag, "tr_", 3);
else
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
}
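/*
* Example (illustrative): a target reset completed with tag "okay" is
* traced as "tr_okay", a logical unit reset as "lr_okay".
*/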
#endif /* ZFCP_DBF_H */

drivers/s390/scsi/zfcp_def.h Normal file

@@ -0,0 +1,322 @@
/*
* zfcp device driver
*
* Global definitions for the zfcp device driver.
*
* Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_DEF_H
#define ZFCP_DEF_H
/*************************** INCLUDES *****************************************/
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/syscalls.h>
#include <linux/scatterlist.h>
#include <linux/ioctl.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "zfcp_fsf.h"
#include "zfcp_fc.h"
#include "zfcp_qdio.h"
struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
/* timeout value for "default timer" for fsf requests */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
/*
* Note, the leftmost status byte is common among adapter, port
* and unit
*/
#define ZFCP_COMMON_FLAGS 0xfff00000
/* common status bits */
#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
#define ZFCP_STATUS_COMMON_OPEN 0x04000000
#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
#define ZFCP_STATUS_COMMON_NOESC 0x00200000
/* adapter status */
#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
/* FSF request status (this does not have a common part) */
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
/************************* STRUCTURE DEFINITIONS *****************************/
struct zfcp_fsf_req;
/* holds various memory pools of an adapter */
struct zfcp_adapter_mempool {
mempool_t *erp_req;
mempool_t *gid_pn_req;
mempool_t *scsi_req;
mempool_t *scsi_abort;
mempool_t *status_read_req;
mempool_t *sr_data;
mempool_t *gid_pn;
mempool_t *qtcb_pool;
};
struct zfcp_erp_action {
struct list_head list;
int action; /* requested action code */
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
struct scsi_device *sdev;
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
unsigned long fsf_req_id;
struct timer_list timer;
};
struct fsf_latency_record {
u32 min;
u32 max;
u64 sum;
};
struct latency_cont {
struct fsf_latency_record channel;
struct fsf_latency_record fabric;
u64 counter;
};
struct zfcp_latencies {
struct latency_cont read;
struct latency_cont write;
struct latency_cont cmd;
spinlock_t lock;
};
struct zfcp_adapter {
struct kref ref;
u64 peer_wwnn; /* P2P peer WWNN */
u64 peer_wwpn; /* P2P peer WWPN */
u32 peer_d_id; /* P2P peer D_ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
struct zfcp_qdio *qdio;
u32 hydra_version; /* Hydra version */
u32 fsf_lic_version;
u32 adapter_features; /* FCP channel features */
u32 connection_features; /* host connection features */
u32 hardware_version; /* of FCP channel */
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
struct list_head port_list; /* remote port list */
rwlock_t port_list_lock; /* port list lock */
unsigned long req_no; /* unique FSF req number */
struct zfcp_reqlist *req_list;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
completion races */
atomic_t stat_miss; /* # missing status reads*/
unsigned int stat_read_buf_num;
struct work_struct stat_work;
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
adapter/devices */
wait_queue_head_t erp_ready_wq;
struct list_head erp_running_head;
rwlock_t erp_lock;
wait_queue_head_t erp_done_wqh;
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
u32 erp_total_count; /* total nr of enqueued erp
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct task_struct *erp_thread;
struct zfcp_fc_wka_ports *gs; /* generic services */
struct zfcp_dbf *dbf; /* debug traces */
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
struct work_struct ns_up_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
struct zfcp_fc_events events;
};
struct zfcp_port {
struct device dev;
struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
struct zfcp_adapter *adapter; /* adapter used to access port */
struct list_head unit_list; /* head of logical unit list */
rwlock_t unit_list_lock; /* unit list lock */
atomic_t units; /* zfcp_unit count */
atomic_t status; /* status of this remote port */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */
u32 d_id; /* D_ID */
u32 handle; /* handle assigned by FSF */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
u32 maxframe_size;
u32 supported_classes;
struct work_struct gid_pn_work;
struct work_struct test_link_work;
struct work_struct rport_work;
enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
unsigned int starget_id;
};
/**
* struct zfcp_unit - LUN configured via zfcp sysfs
* @dev: struct device for sysfs representation and reference counting
* @list: entry in LUN/unit list per zfcp_port
* @port: reference to zfcp_port where this LUN is configured
* @fcp_lun: 64 bit LUN value
* @scsi_work: for running scsi_scan_target
*
* This is the representation of a LUN that has been configured for
* usage. The main data here is the 64 bit LUN value, data for
* running I/O and recovery is in struct zfcp_scsi_dev.
*/
struct zfcp_unit {
struct device dev;
struct list_head list;
struct zfcp_port *port;
u64 fcp_lun;
struct work_struct scsi_work;
};
/**
* struct zfcp_scsi_dev - zfcp data per SCSI device
* @status: zfcp internal status flags
* @lun_handle: handle from "open lun" for issuing FSF requests
* @erp_action: zfcp erp data for opening and recovering this LUN
* @erp_counter: zfcp erp counter for this LUN
* @latencies: FSF channel and fabric latencies
* @port: zfcp_port where this LUN belongs to
*/
struct zfcp_scsi_dev {
atomic_t status;
u32 lun_handle;
struct zfcp_erp_action erp_action;
atomic_t erp_counter;
struct zfcp_latencies latencies;
struct zfcp_port *port;
};
/**
* sdev_to_zfcp - Access zfcp LUN data for SCSI device
* @sdev: scsi_device where to get the zfcp_scsi_dev pointer
*/
static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
{
return scsi_transport_device_data(sdev);
}
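/*
* The per-device storage returned here is reserved at module load via
* scsi_transport_reserve_device(zfcp_scsi_transport_template,
* sizeof(struct zfcp_scsi_dev)) in zfcp_aux.c.
*/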
/**
* zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
* @sdev: SCSI device where to get the LUN from
*/
static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
{
u64 fcp_lun;
int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
return fcp_lun;
}
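/*
* int_to_scsilun() stores the LUN in SCSI wire format (big endian), so
* on s390 a midlayer LUN of 1 reads back as the 64-bit FCP LUN
* 0x0001000000000000 (illustrative).
*/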
/**
* struct zfcp_fsf_req - basic FSF request structure
* @list: list of FSF requests
* @req_id: unique request ID
* @adapter: adapter this request belongs to
* @qdio_req: qdio queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
* @fsf_command: FSF command issued
* @qtcb: associated QTCB
* @seq_no: sequence number of this request
* @data: private data
* @timer: timer data of this request
* @erp_action: reference to erp action if request issued on behalf of ERP
* @pool: reference to memory pool if used for this request
* @issued: time when request was sent (STCK)
* @handler: handler which should be called to process response
*/
struct zfcp_fsf_req {
struct list_head list;
unsigned long req_id;
struct zfcp_adapter *adapter;
struct zfcp_qdio_req qdio_req;
struct completion completion;
u32 status;
u32 fsf_command;
struct fsf_qtcb *qtcb;
u32 seq_no;
void *data;
struct timer_list timer;
struct zfcp_erp_action *erp_action;
mempool_t *pool;
unsigned long long issued;
void (*handler)(struct zfcp_fsf_req *);
};
static inline
int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
{
return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
}
#endif /* ZFCP_DEF_H */

1573
drivers/s390/scsi/zfcp_erp.c Normal file

File diff suppressed because it is too large

159
drivers/s390/scsi/zfcp_ext.h Normal file
View file

@ -0,0 +1,159 @@
/*
* zfcp device driver
*
* External function declarations.
*
* Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_EXT_H
#define ZFCP_EXT_H
#include <linux/types.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_def.h"
#include "zfcp_fc.h"
/* zfcp_aux.c */
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_adapter_release(struct kref *);
extern void zfcp_adapter_unregister(struct zfcp_adapter *);
/* zfcp_ccw.c */
extern struct ccw_driver zfcp_ccw_driver;
extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
/* zfcp_dbf.c */
extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
enum fc_host_event_code event_code, u32);
extern void zfcp_fc_post_event(struct work_struct *);
extern void zfcp_fc_scan_ports(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_fc_port_did_lookup(struct work_struct *);
extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
extern void zfcp_fc_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
/* zfcp_fsf.c */
extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_config *);
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
mempool_t *, unsigned int);
extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
/* zfcp_scsi.c */
extern struct scsi_transport_template *zfcp_scsi_transport_template;
extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
/* zfcp_sysfs.c */
extern const struct attribute_group *zfcp_unit_attr_groups[];
extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
/* zfcp_unit.c */
extern int zfcp_unit_add(struct zfcp_port *, u64);
extern int zfcp_unit_remove(struct zfcp_port *, u64);
extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
#endif /* ZFCP_EXT_H */

991
drivers/s390/scsi/zfcp_fc.c Normal file
View file

@ -0,0 +1,991 @@
/*
* zfcp device driver
*
* Fibre Channel related functions for the zfcp device driver.
*
* Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
struct kmem_cache *zfcp_fc_req_cache;
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
[ELS_ADDR_FMT_DOM] = 0xFF0000,
[ELS_ADDR_FMT_FAB] = 0x000000,
};
static bool no_auto_port_rescan;
module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
"no automatic port_rescan (default off)");
void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (no_auto_port_rescan)
return;
queue_work(adapter->work_queue, &adapter->scan_work);
}
void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (!no_auto_port_rescan)
return;
queue_work(adapter->work_queue, &adapter->scan_work);
}
/**
* zfcp_fc_post_event - post event to userspace via fc_transport
* @work: work struct with enqueued events
*/
void zfcp_fc_post_event(struct work_struct *work)
{
struct zfcp_fc_event *event = NULL, *tmp = NULL;
LIST_HEAD(tmp_lh);
struct zfcp_fc_events *events = container_of(work,
struct zfcp_fc_events, work);
struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
events);
spin_lock_bh(&events->list_lock);
list_splice_init(&events->list, &tmp_lh);
spin_unlock_bh(&events->list_lock);
list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
event->code, event->data);
list_del(&event->list);
kfree(event);
}
}
/**
* zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
* @adapter: The adapter where to enqueue the event
* @event_code: The event code (as defined in fc_host_event_code in
* scsi_transport_fc.h)
* @event_data: The event data (e.g. n_port page in case of els)
*/
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
enum fc_host_event_code event_code, u32 event_data)
{
struct zfcp_fc_event *event;
event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
if (!event)
return;
event->code = event_code;
event->data = event_data;
spin_lock(&adapter->events.list_lock);
list_add_tail(&event->list, &adapter->events.list);
spin_unlock(&adapter->events.list_lock);
queue_work(adapter->work_queue, &adapter->events.work);
}
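/*
 * Usage sketch (hypothetical caller, not part of this file): posting a
 * link-up event from the FSF status-read path. Only the GFP_ATOMIC
 * allocation and the spinlock-protected list append happen in the
 * calling context; the potentially blocking fc_host_post_event() runs
 * later via zfcp_fc_post_event() on the adapter workqueue.
 *
 *	zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
 */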
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port))
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
}
mutex_unlock(&wka_port->mutex);
wait_event(wka_port->completion_wq,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
return 0;
}
return -EIO;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct zfcp_fc_wka_port *wka_port =
container_of(dw, struct zfcp_fc_wka_port, work);
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
(wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
goto out;
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
}
out:
mutex_unlock(&wka_port->mutex);
}
static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
schedule_delayed_work(&wka_port->work, HZ / 100);
}
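/*
 * Sketch of the WKA port get/put protocol (hypothetical caller): the
 * port is opened lazily on first use and kept online between requests;
 * the put side does not close immediately but schedules
 * zfcp_fc_wka_port_offline() after HZ / 100 (about 10ms), so
 * back-to-back nameserver requests reuse the open port handle.
 *
 *	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
 *		return;		(open failed or wait was interrupted)
 *	... issue CT request to the directory service ...
 *	zfcp_fc_wka_port_put(&adapter->gs->ds);
 */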
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
init_waitqueue_head(&wka_port->completion_wq);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
mutex_init(&wka_port->mutex);
INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
cancel_delayed_work_sync(&wka->work);
mutex_lock(&wka->mutex);
wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
mutex_unlock(&wka->mutex);
}
void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
if (!gs)
return;
zfcp_fc_wka_port_force_offline(&gs->ms);
zfcp_fc_wka_port_force_offline(&gs->ts);
zfcp_fc_wka_port_force_offline(&gs->ds);
zfcp_fc_wka_port_force_offline(&gs->as);
}
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fc_els_rscn_page *page)
{
unsigned long flags;
struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
if (!port->d_id)
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
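/*
 * Worked example for the range check above: an RSCN page with address
 * format ELS_ADDR_FMT_AREA and FID 0x012300 selects the mask 0xFFFF00.
 * A port with d_id 0x0123EF matches (0x0123EF & 0xFFFF00 ==
 * 0x012300 & 0xFFFF00) and gets a link test, while d_id 0x0124EF does
 * not. ELS_ADDR_FMT_FAB masks everything to 0x000000, so a
 * fabric-format RSCN hits every configured port.
 */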
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
u16 no_entries;
unsigned int afmt;
head = (struct fc_els_rscn *) status_buffer->payload.data;
page = (struct fc_els_rscn_page *) head;
/* see FC-FS */
no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
page);
zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
*(u32 *)page);
}
zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
unsigned long flags;
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->wwpn == wwpn) {
zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
break;
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer;
struct fc_els_flogi *plogi;
status_buffer = (struct fsf_status_read_buffer *) req->data;
plogi = (struct fc_els_flogi *) status_buffer->payload.data;
zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *)req->data;
struct fc_els_logo *logo =
(struct fc_els_logo *) status_buffer->payload.data;
zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}
/**
* zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
*/
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer =
(struct fsf_status_read_buffer *) fsf_req->data;
unsigned int els_type = status_buffer->payload.data[0];
zfcp_dbf_san_in_els("fciels1", fsf_req);
if (els_type == ELS_PLOGI)
zfcp_fc_incoming_plogi(fsf_req);
else if (els_type == ELS_LOGO)
zfcp_fc_incoming_logo(fsf_req);
else if (els_type == ELS_RSCN)
zfcp_fc_incoming_rscn(fsf_req);
}
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
if (ct_els->status)
return;
if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
return;
/* looks like a valid d_id */
ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
static void zfcp_fc_complete(void *data)
{
complete(data);
}
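/*
 * This handler enables the synchronous request pattern used throughout
 * this file (sketch, using the names defined below): the caller passes
 * an on-stack completion as handler_data and blocks until the response
 * path invokes zfcp_fc_complete().
 *
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *	fc_req->ct_els.handler = zfcp_fc_complete;
 *	fc_req->ct_els.handler_data = &completion;
 *	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els, NULL,
 *			       ZFCP_FC_CTELS_TMO);
 *	if (!ret)
 *		wait_for_completion(&completion);
 */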
static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
ct_hdr->ct_rev = FC_CT_REV;
ct_hdr->ct_fs_type = FC_FST_DIR;
ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
ct_hdr->ct_cmd = cmd;
ct_hdr->ct_mr_size = mr_size / 4;
}
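/*
 * Note on the division by 4: the ct_mr_size field of the CT_IU preamble
 * counts maximum/residual size in 4-byte words per FC-GS. For example,
 * a single-page response buffer of ZFCP_FC_CT_SIZE_PAGE bytes
 * (4096 - 16 = 4080 with 4K pages) is announced as 4080 / 4 = 1020
 * words.
 */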
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
struct zfcp_fc_req *fc_req)
{
struct zfcp_adapter *adapter = port->adapter;
DECLARE_COMPLETION_ONSTACK(completion);
struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
fc_req->ct_els.port = port;
fc_req->ct_els.handler = zfcp_fc_complete;
fc_req->ct_els.handler_data = &completion;
fc_req->ct_els.req = &fc_req->sg_req;
fc_req->ct_els.resp = &fc_req->sg_rsp;
sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
zfcp_fc_ns_gid_pn_eval(fc_req);
}
return ret;
}
/**
* zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
if (!fc_req)
return -ENOMEM;
memset(fc_req, 0, sizeof(*fc_req));
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
int ret;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
goto out;
}
if (!port->d_id) {
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
goto out;
}
zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
put_device(&port->dev);
}
/**
* zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
* @port: The zfcp_port to lookup the d_id for.
*/
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
put_device(&port->dev);
}
/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
*/
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
if (plogi->fl_wwpn != port->wwpn) {
port->d_id = 0;
dev_warn(&port->adapter->ccw_device->dev,
"A port opened with WWPN 0x%016Lx returned data that "
"identifies it as WWPN 0x%016Lx\n",
(unsigned long long) port->wwpn,
(unsigned long long) plogi->fl_wwpn);
return;
}
port->wwnn = plogi->fl_wwnn;
port->maxframe_size = plogi->fl_csp.sp_bb_data;
if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS1;
if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS2;
if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS3;
if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS4;
}
static void zfcp_fc_adisc_handler(void *data)
{
struct zfcp_fc_req *fc_req = data;
struct zfcp_port *port = fc_req->ct_els.port;
struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1");
goto out;
}
if (!port->wwnn)
port->wwnn = adisc_resp->adisc_wwnn;
if ((port->wwpn != adisc_resp->adisc_wwpn) ||
!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_2");
goto out;
}
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
int ret;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
if (!fc_req)
return -ENOMEM;
fc_req->ct_els.port = port;
fc_req->ct_els.req = &fc_req->sg_req;
fc_req->ct_els.resp = &fc_req->sg_rsp;
sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
sizeof(struct fc_els_adisc));
sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
sizeof(struct fc_els_adisc));
fc_req->ct_els.handler = zfcp_fc_adisc_handler;
fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_fc_req_cache, fc_req);
return ret;
}
void zfcp_fc_link_test_work(struct work_struct *work)
{
struct zfcp_port *port =
container_of(work, struct zfcp_port, test_link_work);
int retval;
get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
/* only issue one test command at one time per port */
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
goto out;
atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
retval = zfcp_fc_adisc(port);
if (retval == 0)
return;
/* send of ADISC was not possible */
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
out:
put_device(&port->dev);
}
/**
* zfcp_fc_test_link - lightweight link test procedure
* @port: port to be tested
*
* Test status of a link to a remote port using the ELS command ADISC.
* If there is a problem with the remote port, error recovery steps
* will be triggered.
*/
void zfcp_fc_test_link(struct zfcp_port *port)
{
get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
put_device(&port->dev);
}
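/*
 * Both trigger functions above use the same reference pattern: take a
 * device reference before queueing the work item, and drop it again
 * when queue_work() returns false because an already-queued instance
 * still owns its reference. The work functions balance the successful
 * case with their final put_device(&port->dev).
 */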
static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
struct zfcp_fc_req *fc_req;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
if (!fc_req)
return NULL;
if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
kmem_cache_free(zfcp_fc_req_cache, fc_req);
return NULL;
}
sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
sizeof(struct zfcp_fc_gpn_ft_req));
return fc_req;
}
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_bytes)
{
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
ct_els->handler = zfcp_fc_complete;
ct_els->handler_data = &completion;
ct_els->req = &fc_req->sg_req;
ct_els->resp = &fc_req->sg_rsp;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
return ret;
}
static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return;
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) ||
!list_empty(&port->unit_list))
return;
list_move_tail(&port->list, lh);
}
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_entries)
{
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct scatterlist *sg = &fc_req->sg_rsp;
struct fc_ct_hdr *hdr = sg_virt(sg);
struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
unsigned long flags;
LIST_HEAD(remove_lh);
u32 d_id;
int ret = 0, x, last = 0;
if (ct_els->status)
return -EIO;
if (hdr->ct_cmd != FC_FS_ACC) {
if (hdr->ct_reason == FC_BA_RJT_UNABLE)
return -EAGAIN; /* might be a temporary condition */
return -EIO;
}
if (hdr->ct_mr_size) {
dev_warn(&adapter->ccw_device->dev,
"The name server reported %d words residual data\n",
hdr->ct_mr_size);
return -E2BIG;
}
/* first entry is the header */
for (x = 1; x < max_entries && !last; x++) {
if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
acc++;
else
acc = sg_virt(++sg);
last = acc->fp_flags & FC_NS_FID_LAST;
d_id = ntoh24(acc->fp_fid);
/* don't attach ports with a well known address */
if (d_id >= FC_FID_WELL_KNOWN_BASE)
continue;
/* skip the adapter's port and known remote ports */
if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
continue;
port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
ZFCP_STATUS_COMMON_NOESC, d_id);
if (!IS_ERR(port))
zfcp_erp_port_reopen(port, 0, "fcegpf1");
else if (PTR_ERR(port) != -EEXIST)
ret = PTR_ERR(port);
}
zfcp_erp_wait(adapter);
write_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
zfcp_fc_validate_port(port, &remove_lh);
write_unlock_irqrestore(&adapter->port_list_lock, flags);
list_for_each_entry_safe(port, tmp, &remove_lh, list) {
zfcp_erp_port_shutdown(port, 0, "fcegpf2");
device_unregister(&port->dev);
}
return ret;
}
/**
* zfcp_fc_scan_ports - scan remote ports and attach new ports
* @work: reference to scheduled work
*/
void zfcp_fc_scan_ports(struct work_struct *work)
{
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
scan_work);
int ret, i;
struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return;
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
fc_req = zfcp_alloc_sg_env(buf_num);
if (!fc_req)
goto out;
for (i = 0; i < 3; i++) {
ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
}
static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
struct zfcp_fc_req *fc_req)
{
DECLARE_COMPLETION_ONSTACK(completion);
char devno[] = "DEVNO:";
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
int ret;
zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
FC_SYMBOLIC_NAME_SIZE);
hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
ct_els->handler = zfcp_fc_complete;
ct_els->handler_data = &completion;
ct_els->req = &fc_req->sg_req;
ct_els->resp = &fc_req->sg_rsp;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (ret)
return ret;
wait_for_completion(&completion);
if (ct_els->status)
return ct_els->status;
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
!(strstr(gspn_rsp->gspn.fp_name, devno)))
snprintf(fc_host_symbolic_name(adapter->scsi_host),
FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
gspn_rsp->gspn.fp_name, devno,
dev_name(&adapter->ccw_device->dev),
init_utsname()->nodename);
else
strlcpy(fc_host_symbolic_name(adapter->scsi_host),
gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
return 0;
}
static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
struct zfcp_fc_req *fc_req)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct Scsi_Host *shost = adapter->scsi_host;
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
int ret, len;
zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
FC_SYMBOLIC_NAME_SIZE);
hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
FC_SYMBOLIC_NAME_SIZE);
rspn_req->rspn.fr_name_len = len;
sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
ct_els->handler = zfcp_fc_complete;
ct_els->handler_data = &completion;
ct_els->req = &fc_req->sg_req;
ct_els->resp = &fc_req->sg_rsp;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
}
/**
* zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
* @work: ns_up_work of the adapter where to update the symbolic port name
*
* Retrieve the current symbolic port name that may have been set by
* the hardware using the GSPN request and update the fc_host
* symbolic_name sysfs attribute. When running in NPIV mode (and hence
* the port name is unique for this system), update the symbolic port
* name to add Linux specific information and update the FC nameserver
* using the RSPN request.
*/
void zfcp_fc_sym_name_update(struct work_struct *work)
{
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
ns_up_work);
int ret;
struct zfcp_fc_req *fc_req;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
if (!fc_req)
return;
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out_free;
ret = zfcp_fc_gspn(adapter, fc_req);
if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
goto out_ds_put;
memset(fc_req, 0, sizeof(*fc_req));
zfcp_fc_rspn(adapter, fc_req);
out_ds_put:
zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
struct fc_bsg_reply *jr = job->reply;
jr->reply_payload_rcv_len = job->reply_payload.payload_len;
jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
jr->result = zfcp_ct_els->status ? -EIO : 0;
job->job_done(job);
}
static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
u32 preamble_word1;
u8 gs_type;
struct zfcp_adapter *adapter;
preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
switch (gs_type) {
case FC_FST_ALIAS:
return &adapter->gs->as;
case FC_FST_MGMT:
return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
default:
return NULL;
}
}
static void zfcp_fc_ct_job_handler(void *data)
{
struct fc_bsg_job *job = data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
zfcp_fc_wka_port_put(wka_port);
zfcp_fc_ct_els_job_handler(data);
}
static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
struct fc_rport *rport = job->rport;
struct zfcp_port *port;
u32 d_id;
if (rport) {
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -EINVAL;
d_id = port->d_id;
put_device(&port->dev);
} else
d_id = ntoh24(job->request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
struct zfcp_fsf_ct_els *ct = job->dd_data;
struct zfcp_fc_wka_port *wka_port;
wka_port = zfcp_fc_job_wka_port(job);
if (!wka_port)
return -EINVAL;
ret = zfcp_fc_wka_port_get(wka_port);
if (ret)
return ret;
ct->handler = zfcp_fc_ct_job_handler;
ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
if (ret)
zfcp_fc_wka_port_put(wka_port);
return ret;
}
int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
{
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_fsf_ct_els *ct_els = job->dd_data;
shost = job->rport ? rport_to_shost(job->rport) : job->shost;
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
return -EINVAL;
ct_els->req = job->request_payload.sg_list;
ct_els->resp = job->reply_payload.sg_list;
ct_els->handler_data = job;
switch (job->request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_exec_els_job(job, adapter);
case FC_BSG_RPT_CT:
case FC_BSG_HST_CT:
return zfcp_fc_exec_ct_job(job, adapter);
default:
return -EINVAL;
}
}
int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
{
/* hardware tracks timeout, reset bsg timeout to not interfere */
return -EAGAIN;
}
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
struct zfcp_fc_wka_ports *wka_ports;
wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
if (!wka_ports)
return -ENOMEM;
adapter->gs = wka_ports;
zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
return 0;
}
void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
kfree(adapter->gs);
adapter->gs = NULL;
}

297
drivers/s390/scsi/zfcp_fc.h Normal file
View file

@ -0,0 +1,297 @@
/*
* zfcp device driver
*
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
* Copyright IBM Corp. 2009
*/
#ifndef ZFCP_FC_H
#define ZFCP_FC_H
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/fc/fc_ns.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "zfcp_fsf.h"
#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \
/ sizeof(struct fc_gpn_ft_resp))
#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */
#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
- sizeof(struct fc_ct_hdr))
#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
(ZFCP_FC_GPN_FT_ENT_PAGE + 1))
#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
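/*
 * Worked sizing example (assuming 4K pages): ZFCP_FC_CT_SIZE_PAGE is
 * 4096 - 16 = 4080 payload bytes per page; with a 16-byte
 * fc_gpn_ft_resp, ZFCP_FC_GPN_FT_ENT_PAGE is 4080 / 16 = 255 entries
 * per page; ZFCP_FC_GPN_FT_MAX_SIZE is 4 * 4096 - 16 = 16368 bytes;
 * and ZFCP_FC_GPN_FT_MAX_ENT is 4 * (255 + 1) = 1024, where the "+ 1"
 * accounts for the CT header occupying the first entry-sized slot.
 * With libfc's default R_A_TOV of 10 seconds, ZFCP_FC_CTELS_TMO works
 * out to 2 * 10000 / 1000 = 20 seconds.
 */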
/**
* struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
* @code: Event code
* @data: Event data
* @list: list_head for zfcp_fc_events list
*/
struct zfcp_fc_event {
enum fc_host_event_code code;
u32 data;
struct list_head list;
};
/**
* struct zfcp_fc_events - Infrastructure for posting FC events from irq context
* @list: List for queueing of events from irq context to workqueue
* @list_lock: Lock for event list
* @work: work_struct for forwarding events in workqueue
*/
struct zfcp_fc_events {
struct list_head list;
spinlock_t list_lock;
struct work_struct work;
};
/**
* struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN request
*/
struct zfcp_fc_gid_pn_req {
struct fc_ct_hdr ct_hdr;
struct fc_ns_gid_pn gid_pn;
} __packed;
/**
* struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN response
*/
struct zfcp_fc_gid_pn_rsp {
struct fc_ct_hdr ct_hdr;
struct fc_gid_pn_resp gid_pn;
} __packed;
/**
 * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
* @ct_hdr: FC GS common transport header
* @gpn_ft: GPN_FT request
*/
struct zfcp_fc_gpn_ft_req {
struct fc_ct_hdr ct_hdr;
struct fc_ns_gid_ft gpn_ft;
} __packed;
/**
* struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
* @ct_hdr: FC GS common transport header
* @gspn: GSPN_ID request
*/
struct zfcp_fc_gspn_req {
struct fc_ct_hdr ct_hdr;
struct fc_gid_pn_resp gspn;
} __packed;
/**
* struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
* @ct_hdr: FC GS common transport header
* @gspn: GSPN_ID response
* @name: The name string of the GSPN_ID response
*/
struct zfcp_fc_gspn_rsp {
struct fc_ct_hdr ct_hdr;
struct fc_gspn_resp gspn;
char name[FC_SYMBOLIC_NAME_SIZE];
} __packed;
/**
* struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
* @ct_hdr: FC GS common transport header
* @rspn: RSPN_ID request
* @name: The name string of the RSPN_ID request
*/
struct zfcp_fc_rspn_req {
struct fc_ct_hdr ct_hdr;
struct fc_ns_rspn rspn;
char name[FC_SYMBOLIC_NAME_SIZE];
} __packed;
/**
* struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
* @ct_els: data required for issuing fsf command
* @sg_req: scatterlist entry for request data
* @sg_rsp: scatterlist entry for response data
* @u: request specific data
*/
struct zfcp_fc_req {
struct zfcp_fsf_ct_els ct_els;
struct scatterlist sg_req;
struct scatterlist sg_rsp;
union {
struct {
struct fc_els_adisc req;
struct fc_els_adisc rsp;
} adisc;
struct {
struct zfcp_fc_gid_pn_req req;
struct zfcp_fc_gid_pn_rsp rsp;
} gid_pn;
struct {
struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
struct zfcp_fc_gpn_ft_req req;
} gpn_ft;
struct {
struct zfcp_fc_gspn_req req;
struct zfcp_fc_gspn_rsp rsp;
} gspn;
struct {
struct zfcp_fc_rspn_req req;
struct fc_ct_hdr rsp;
} rspn;
} u;
};
/**
* enum zfcp_fc_wka_status - FC WKA port status in zfcp
* @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
* @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
* @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
* @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
*/
enum zfcp_fc_wka_status {
ZFCP_FC_WKA_PORT_OFFLINE,
ZFCP_FC_WKA_PORT_CLOSING,
ZFCP_FC_WKA_PORT_OPENING,
ZFCP_FC_WKA_PORT_ONLINE,
};
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
* @completion_wq: Wait for completion of open/close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
* @handle: FSF handle for the open WKA port
* @mutex: Mutex used during opening/closing state changes
* @work: For delaying the closing of the WKA port
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
wait_queue_head_t completion_wq;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
u32 handle;
struct mutex mutex;
struct delayed_work work;
};
/**
* struct zfcp_fc_wka_ports - Data structures for FC generic services
* @ms: FC Management service
* @ts: FC time service
* @ds: FC directory service
* @as: FC alias service
*/
struct zfcp_fc_wka_ports {
struct zfcp_fc_wka_port ms;
struct zfcp_fc_wka_port ts;
struct zfcp_fc_wka_port ds;
struct zfcp_fc_wka_port as;
};
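/*
 * For reference, the well-known fabric addresses wired up by
 * zfcp_fc_gs_setup() (from include/scsi/fc/fc_fs.h): directory/name
 * server 0xFFFFFC, time service 0xFFFFFB, management service 0xFFFFFA
 * and alias service 0xFFFFF8.
 */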
/**
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
 * @tm_flags: task management flags to set up a task management command
*/
static inline
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
u8 tm_flags)
{
char tag[2];
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
if (unlikely(tm_flags)) {
fcp->fc_tm_flags = tm_flags;
return;
}
if (scsi_populate_tag_msg(scsi, tag)) {
switch (tag[0]) {
case MSG_ORDERED_TAG:
fcp->fc_pri_ta |= FCP_PTA_ORDERED;
break;
case MSG_SIMPLE_TAG:
fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
break;
		}
} else
fcp->fc_pri_ta = FCP_PTA_SIMPLE;
if (scsi->sc_data_direction == DMA_FROM_DEVICE)
fcp->fc_flags |= FCP_CFL_RDDATA;
if (scsi->sc_data_direction == DMA_TO_DEVICE)
fcp->fc_flags |= FCP_CFL_WRDATA;
memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
fcp->fc_dl = scsi_bufflen(scsi);
if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
}
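/*
 * Worked example for the DIF adjustment above: a 32768-byte read from a
 * device with 512-byte logical blocks spans 64 blocks; with
 * SCSI_PROT_DIF_TYPE1 each block carries an 8-byte protection tuple,
 * so fc_dl grows by 32768 / 512 * 8 = 512 bytes to 33280.
 */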
/**
 * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate
* @scsi: SCSI command where to update status and sense buffer
*/
static inline
void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
struct scsi_cmnd *scsi)
{
struct fcp_resp_rsp_info *rsp_info;
char *sense;
u32 sense_len, resid;
u8 rsp_flags;
set_msg_byte(scsi, COMMAND_COMPLETE);
scsi->result |= fcp_rsp->resp.fr_status;
rsp_flags = fcp_rsp->resp.fr_flags;
if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
if (rsp_info->rsp_code == FCP_TMF_CMPL)
set_host_byte(scsi, DID_OK);
else {
set_host_byte(scsi, DID_ERROR);
return;
}
}
if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
sense = (char *) &fcp_rsp[1];
if (rsp_flags & FCP_RSP_LEN_VAL)
sense += fcp_rsp->ext.fr_rsp_len;
sense_len = min(fcp_rsp->ext.fr_sns_len,
(u32) SCSI_SENSE_BUFFERSIZE);
memcpy(scsi->sense_buffer, sense, sense_len);
}
if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
resid = fcp_rsp->ext.fr_resid;
scsi_set_resid(scsi, resid);
if (scsi_bufflen(scsi) - resid < scsi->underflow &&
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
}
}
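/*
 * Example for the underrun check above: a command with
 * scsi_bufflen() == 4096, underflow == 4096 and fr_resid == 512
 * transferred only 3584 bytes; with no sense data and SAM_STAT_GOOD
 * that would be silent data loss, so the host byte is forced to
 * DID_ERROR instead of completing the command as good.
 */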
#endif

2390
drivers/s390/scsi/zfcp_fsf.c Normal file

File diff suppressed because it is too large

449
drivers/s390/scsi/zfcp_fsf.h Normal file
View file

@ -0,0 +1,449 @@
/*
* zfcp device driver
*
* Interface to the FSF support functions.
*
* Copyright IBM Corp. 2002, 2010
*/
#ifndef FSF_H
#define FSF_H
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <scsi/libfc.h>
#define FSF_QTCB_CURRENT_VERSION 0x00000001
/* FSF commands */
#define FSF_QTCB_FCP_CMND 0x00000001
#define FSF_QTCB_ABORT_FCP_CMND 0x00000002
#define FSF_QTCB_OPEN_PORT_WITH_DID 0x00000005
#define FSF_QTCB_OPEN_LUN 0x00000006
#define FSF_QTCB_CLOSE_LUN 0x00000007
#define FSF_QTCB_CLOSE_PORT 0x00000008
#define FSF_QTCB_CLOSE_PHYSICAL_PORT 0x00000009
#define FSF_QTCB_SEND_ELS 0x0000000B
#define FSF_QTCB_SEND_GENERIC 0x0000000C
#define FSF_QTCB_EXCHANGE_CONFIG_DATA 0x0000000D
#define FSF_QTCB_EXCHANGE_PORT_DATA 0x0000000E
#define FSF_QTCB_DOWNLOAD_CONTROL_FILE 0x00000012
#define FSF_QTCB_UPLOAD_CONTROL_FILE 0x00000013
/* FSF QTCB types */
#define FSF_IO_COMMAND 0x00000001
#define FSF_SUPPORT_COMMAND 0x00000002
#define FSF_CONFIG_COMMAND 0x00000003
#define FSF_PORT_COMMAND 0x00000004
/* FSF protocol states */
#define FSF_PROT_GOOD 0x00000001
#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
#define FSF_PROT_SEQ_NUMB_ERROR 0x00000020
#define FSF_PROT_UNSUPP_QTCB_TYPE 0x00000040
#define FSF_PROT_HOST_CONNECTION_INITIALIZING 0x00000080
#define FSF_PROT_FSF_STATUS_PRESENTED 0x00000100
#define FSF_PROT_DUPLICATE_REQUEST_ID 0x00000200
#define FSF_PROT_LINK_DOWN 0x00000400
#define FSF_PROT_REEST_QUEUE 0x00000800
#define FSF_PROT_ERROR_STATE 0x01000000
/* FSF states */
#define FSF_GOOD 0x00000000
#define FSF_PORT_ALREADY_OPEN 0x00000001
#define FSF_LUN_ALREADY_OPEN 0x00000002
#define FSF_PORT_HANDLE_NOT_VALID 0x00000003
#define FSF_LUN_HANDLE_NOT_VALID 0x00000004
#define FSF_HANDLE_MISMATCH 0x00000005
#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
#define FSF_FCPLUN_NOT_VALID 0x00000009
#define FSF_LUN_SHARING_VIOLATION 0x00000012
#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
#define FSF_CMND_LENGTH_NOT_VALID 0x00000033
#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED 0x00000040
#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
#define FSF_ELS_COMMAND_REJECTED 0x00000050
#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
#define FSF_PORT_BOXED 0x00000059
#define FSF_LUN_BOXED 0x0000005A
#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
#define FSF_PAYLOAD_SIZE_MISMATCH 0x00000060
#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
#define FSF_SBAL_MISMATCH 0x00000063
#define FSF_INCONSISTENT_PROT_DATA 0x00000070
#define FSF_INVALID_PROT_PARM 0x00000071
#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
#define FSF_PROT_STATUS_QUAL_SIZE 16
#define FSF_STATUS_QUALIFIER_SIZE 16
/* FSF status qualifier, recommendations */
#define FSF_SQ_NO_RECOM 0x00
#define FSF_SQ_FCP_RSP_AVAILABLE 0x01
#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
#define FSF_SQ_COMMAND_ABORTED 0x06
#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
/* FSF status qualifier (most significant 4 bytes), local link down */
#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
#define FSF_PSQ_LINK_NO_FCP 0x00000010
#define FSF_PSQ_LINK_FIRMWARE_UPDATE 0x00000020
#define FSF_PSQ_LINK_INVALID_WWPN 0x00000100
#define FSF_PSQ_LINK_NO_NPIV_SUPPORT 0x00000200
#define FSF_PSQ_LINK_NO_FCP_RESOURCES 0x00000400
#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES 0x00000800
#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE 0x00001000
#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED 0x00002000
#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
/* number of status read buffers that should be sent by ULP */
#define FSF_STATUS_READS_RECOM 16
/* status types in status read buffer */
#define FSF_STATUS_READ_PORT_CLOSED 0x00000001
#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
#define FSF_STATUS_READ_LINK_DOWN 0x00000005
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
/* status subtypes for link down */
#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE 0x00000002
/* status subtypes for unsolicited status notification lost */
#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
/* topology that is detected by the adapter */
#define FSF_TOPO_P2P 0x00000001
#define FSF_TOPO_FABRIC 0x00000002
#define FSF_TOPO_AL 0x00000003
/* data direction for FCP commands */
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
#define FSF_DATADIR_CMND 0x00000004
#define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009
#define FSF_DATADIR_DIF_READ_STRIP 0x0000000a
#define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b
#define FSF_DATADIR_DIF_READ_CONVERT 0x0000000c
/* data protection control flags */
#define FSF_APP_TAG_CHECK_ENABLE 0x10
/* fc service class */
#define FSF_CLASS_3 0x00000003
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
/* channel features */
#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
struct fsf_queue_designator {
u8 cssid;
u8 chpid;
u8 hla;
u8 ua;
u32 res1;
} __attribute__ ((packed));
struct fsf_bit_error_payload {
u32 res1;
u32 link_failure_error_count;
u32 loss_of_sync_error_count;
u32 loss_of_signal_error_count;
u32 primitive_sequence_error_count;
u32 invalid_transmission_word_error_count;
u32 crc_error_count;
u32 primitive_sequence_event_timeout_count;
u32 elastic_buffer_overrun_error_count;
u32 fcal_arbitration_timeout_count;
u32 advertised_receive_b2b_credit;
u32 current_receive_b2b_credit;
u32 advertised_transmit_b2b_credit;
u32 current_transmit_b2b_credit;
} __attribute__ ((packed));
struct fsf_link_down_info {
u32 error_code;
u32 res1;
u8 res2[2];
u8 primary_status;
u8 ioerr_code;
u8 action_code;
u8 reason_code;
u8 explanation_code;
u8 vendor_specific_code;
} __attribute__ ((packed));
struct fsf_status_read_buffer {
u32 status_type;
u32 status_subtype;
u32 length;
u32 res1;
struct fsf_queue_designator queue_designator;
u8 res2;
u8 d_id[3];
u32 class;
u64 fcp_lun;
u8 res3[24];
union {
u8 data[FSF_STATUS_READ_PAYLOAD_SIZE];
u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
struct fsf_link_down_info link_down_info;
struct fsf_bit_error_payload bit_error;
} payload;
} __attribute__ ((packed));
struct fsf_qual_version_error {
u32 fsf_version;
u32 res1[3];
} __attribute__ ((packed));
struct fsf_qual_sequence_error {
u32 exp_req_seq_no;
u32 res1[3];
} __attribute__ ((packed));
struct fsf_qual_latency_info {
u32 channel_lat;
u32 fabric_lat;
u8 res1[8];
} __attribute__ ((packed));
union fsf_prot_status_qual {
u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
struct fsf_qual_version_error version_error;
struct fsf_qual_sequence_error sequence_error;
struct fsf_link_down_info link_down_info;
struct fsf_qual_latency_info latency_info;
} __attribute__ ((packed));
struct fsf_qtcb_prefix {
u64 req_id;
u32 qtcb_version;
u32 ulp_info;
u32 qtcb_type;
u32 req_seq_no;
u32 prot_status;
union fsf_prot_status_qual prot_status_qual;
u8 res1[20];
} __attribute__ ((packed));
struct fsf_statistics_info {
u64 input_req;
u64 output_req;
u64 control_req;
u64 input_mb;
u64 output_mb;
u64 seconds_act;
} __attribute__ ((packed));
union fsf_status_qual {
u8 byte[FSF_STATUS_QUALIFIER_SIZE];
u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
struct fsf_queue_designator fsf_queue_designator;
struct fsf_link_down_info link_down_info;
} __attribute__ ((packed));
struct fsf_qtcb_header {
u64 req_handle;
u32 fsf_command;
u32 res1;
u32 port_handle;
u32 lun_handle;
u32 res2;
u32 fsf_status;
union fsf_status_qual fsf_status_qual;
u8 res3[28];
u16 log_start;
u16 log_length;
u8 res4[16];
} __attribute__ ((packed));
#define FSF_PLOGI_MIN_LEN 112
#define FSF_FCP_CMND_SIZE 288
#define FSF_FCP_RSP_SIZE 128
struct fsf_qtcb_bottom_io {
u32 data_direction;
u32 service_class;
u8 res1;
u8 data_prot_flags;
u16 app_tag_value;
u32 ref_tag_value;
u32 fcp_cmnd_length;
u32 data_block_length;
u32 prot_data_length;
u8 res2[4];
u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
u8 fcp_rsp[FSF_FCP_RSP_SIZE];
u8 res3[64];
} __attribute__ ((packed));
struct fsf_qtcb_bottom_support {
u32 operation_subtype;
u8 res1[13];
u8 d_id[3];
u32 option;
u64 fcp_lun;
u64 res2;
u64 req_handle;
u32 service_class;
u8 res3[3];
u8 timeout;
u32 lun_access_info;
u8 res4[180];
u32 els1_length;
u32 els2_length;
u32 req_buf_length;
u32 resp_buf_length;
u8 els[256];
} __attribute__ ((packed));
#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF
struct fsf_qtcb_bottom_config {
u32 lic_version;
u32 feature_selection;
u32 high_qtcb_version;
u32 low_qtcb_version;
u32 max_qtcb_size;
u32 max_data_transfer_size;
u32 adapter_features;
u32 connection_features;
u32 fc_topology;
u32 fc_link_speed;
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
u16 status_read_buf_num;
u16 timer_interval;
u8 res2[9];
u8 s_id[3];
u8 nport_serv_param[128];
u8 res3[8];
u32 adapter_ports;
u32 hardware_version;
u8 serial_number[32];
u8 plogi_payload[112];
struct fsf_statistics_info stat_info;
u8 res4[112];
} __attribute__ ((packed));
struct fsf_qtcb_bottom_port {
u64 wwpn;
u32 fc_port_id;
u32 port_type;
u32 port_state;
u32 class_of_service; /* should be 0x00000006 for class 2 and 3 */
u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
u8 active_fc4_types[32];
u32 supported_speed; /* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
u32 maximum_frame_size; /* fixed value of 2112 */
u64 seconds_since_last_reset;
u64 tx_frames;
u64 tx_words;
u64 rx_frames;
u64 rx_words;
u64 lip; /* 0 */
u64 nos; /* currently 0 */
u64 error_frames; /* currently 0 */
u64 dumped_frames; /* currently 0 */
u64 link_failure;
u64 loss_of_sync;
u64 loss_of_signal;
u64 psp_error_counts;
u64 invalid_tx_words;
u64 invalid_crcs;
u64 input_requests;
u64 output_requests;
u64 control_requests;
	u64 input_mb;	/* where 1 MByte == 1,000,000 Bytes */
	u64 output_mb;	/* where 1 MByte == 1,000,000 Bytes */
u8 cp_util;
u8 cb_util;
u8 a_util;
u8 res2[253];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
struct fsf_qtcb_bottom_io io;
struct fsf_qtcb_bottom_support support;
struct fsf_qtcb_bottom_config config;
struct fsf_qtcb_bottom_port port;
};
struct fsf_qtcb {
struct fsf_qtcb_prefix prefix;
struct fsf_qtcb_header header;
union fsf_qtcb_bottom bottom;
u8 log[FSF_QTCB_LOG_SIZE];
} __attribute__ ((packed));
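/*
 * Layout note: every FSF request carries exactly one QTCB. Prefix and
 * header are common to all commands; which member of the bottom union
 * is valid follows from header.fsf_command (e.g. FSF_QTCB_FCP_CMND
 * uses bottom.io, FSF_QTCB_EXCHANGE_CONFIG_DATA uses bottom.config).
 * The trailing log area receives hardware debug data delimited by
 * header.log_start and header.log_length.
 */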
struct zfcp_blk_drv_data {
#define ZFCP_BLK_DRV_DATA_MAGIC 0x1
u32 magic;
#define ZFCP_BLK_LAT_VALID 0x1
#define ZFCP_BLK_REQ_ERROR 0x2
u16 flags;
u8 inb_usage;
u8 outb_usage;
u64 channel_lat;
u64 fabric_lat;
} __attribute__ ((packed));
/**
* struct zfcp_fsf_ct_els - zfcp data for ct or els request
* @req: scatter-gather list for request
* @resp: scatter-gather list for response
* @handler: handler function (called for response to the request)
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
* @status: used to pass error status to calling function
*/
struct zfcp_fsf_ct_els {
struct scatterlist *req;
struct scatterlist *resp;
void (*handler)(void *);
void *handler_data;
struct zfcp_port *port;
int status;
};
#endif /* FSF_H */

504
drivers/s390/scsi/zfcp_qdio.c Normal file
View file

@ -0,0 +1,504 @@
/*
* zfcp device driver
*
* Setup and helper functions to access QDIO.
*
* Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
static bool enable_multibuffer = 1;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
if (qdio_err & QDIO_ERROR_SLSB_STATE) {
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_shutdown(adapter, 0, id);
return;
}
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
int i, sbal_idx;
for (i = first; i < first + cnt; i++) {
sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
}
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
unsigned long long now, span;
int used;
now = get_tod_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
}
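/*
 * On the arithmetic above (s390 TOD clock format): bit 51 of the TOD
 * value ticks once per microsecond, so ">> 12" converts the interval
 * to microseconds. req_q_util thus accumulates "used SBALs times
 * microseconds", which the sysfs utilization attribute can relate to
 * elapsed time.
 */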
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
if (unlikely(qdio_err)) {
zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
return;
}
/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
spin_lock_irq(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock_irq(&qdio->stat_lock);
atomic_add(count, &qdio->req_q_free);
wake_up(&qdio->req_q_wq);
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_adapter *adapter = qdio->adapter;
int sbal_no, sbal_idx;
if (unlikely(qdio_err)) {
if (zfcp_adapter_multi_buffer_active(adapter)) {
void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
struct qdio_buffer_element *sbale;
u64 req_id;
u8 scount;
memset(pl, 0, sizeof(pl));
sbale = qdio->res_q[idx]->element;
req_id = (u64) sbale->addr;
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
for (sbal_no = 0; sbal_no < scount; sbal_no++) {
sbal_idx = (idx + sbal_no) %
QDIO_MAX_BUFFERS_PER_Q;
pl[sbal_no] = qdio->res_q[sbal_idx];
}
zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
}
zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}
/*
* go through all SBALs from input queue currently
* returned by QDIO layer
*/
for (sbal_no = 0; sbal_no < count; sbal_no++) {
sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/*
* put SBALs back to response queue
*/
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
/* don't exceed last allowed SBAL */
if (q_req->sbal_last == q_req->sbal_limit)
return NULL;
/* set chaining flag in first SBALE of current SBAL */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
/* calculate index of next SBAL */
q_req->sbal_last++;
q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
/* keep this request's number of SBALs up to date */
q_req->sbal_number++;
BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
/* start at first SBALE of new SBAL */
q_req->sbale_curr = 0;
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->sflags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* Returns: zero or -EINVAL on error
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
sbale = zfcp_qdio_sbale_next(qdio, q_req);
if (!sbale) {
atomic_inc(&qdio->req_q_full);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
q_req->sbal_number);
return -EINVAL;
}
sbale->addr = sg_virt(sg);
sbale->length = sg->length;
}
return 0;
}
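/*
 * Caller sketch (illustrative only; assumes an initialized q_req and
 * the req_q_lock held): map a scatter-gather list and terminate the
 * request, bailing out if the SG list does not fit into the granted
 * SBALs.
 *
 *	if (zfcp_qdio_sbals_from_sg(qdio, q_req, sg))
 *		return -EIO;
 *	zfcp_qdio_set_sbale_last(qdio, q_req);
 */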
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
return 0;
}
/**
* zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
* @qdio: pointer to struct zfcp_qdio
*
* The req_q_lock must be held by the caller of this function, and
* this function may only be called from process context; it will
* sleep when waiting for a free sbal.
*
* Returns: 0 on success, -EIO if there is no free sbal after waiting.
*/
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
if (ret > 0)
return 0;
if (!ret) {
atomic_inc(&qdio->req_q_full);
/* assume hanging outbound queue, try queue recovery */
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
return -EIO;
}
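/*
 * Locking sketch (illustrative, mirrors the rule stated above): the
 * caller takes req_q_lock, waits for a free SBAL and keeps the lock
 * across request setup and zfcp_qdio_send.
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio) == 0) {
 *		... build the request and call zfcp_qdio_send() ...
 *	}
 *	spin_unlock_irq(&qdio->req_q_lock);
 */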
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
int retval;
u8 sbal_number = q_req->sbal_number;
spin_lock(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock(&qdio->stat_lock);
retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
q_req->sbal_first, sbal_number);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
sbal_number);
return retval;
}
/* account for transferred buffers */
atomic_sub(sbal_number, &qdio->req_q_free);
qdio->req_q_idx += sbal_number;
qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
return 0;
}
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
struct zfcp_qdio *qdio)
{
memset(id, 0, sizeof(*id));
id->cdev = qdio->adapter->ccw_device;
id->q_format = QDIO_ZFCP_QFMT;
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
if (enable_multibuffer)
id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
id->no_input_qs = 1;
id->no_output_qs = 1;
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
id->input_sbal_addr_array = (void **) (qdio->res_q);
id->output_sbal_addr_array = (void **) (qdio->req_q);
id->scan_threshold =
QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @qdio: pointer to struct zfcp_qdio
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
struct qdio_initialize init_data;
int ret;
ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
if (ret)
return -ENOMEM;
ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
if (ret)
goto free_req_q;
zfcp_qdio_setup_init_data(&init_data, qdio);
init_waitqueue_head(&qdio->req_q_wq);
ret = qdio_allocate(&init_data);
if (ret)
goto free_res_q;
return 0;
free_res_q:
qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
return ret;
}
/**
* zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to struct zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
struct zfcp_adapter *adapter = qdio->adapter;
int idx, count;
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
spin_lock_irq(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&qdio->req_q_free);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
}
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, 0);
}
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, otherwise -EIO
*/
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer_element *sbale;
struct qdio_initialize init_data;
struct zfcp_adapter *adapter = qdio->adapter;
struct ccw_device *cdev = adapter->ccw_device;
struct qdio_ssqd_desc ssqd;
int cc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);
zfcp_qdio_setup_init_data(&init_data, qdio);
if (qdio_establish(&init_data))
goto failed_establish;
if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status);
if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
} else {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
}
qdio->max_sbale_per_req =
ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
- 2;
if (qdio_activate(cdev))
goto failed_qdio;
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
sbale->sflags = 0;
sbale->addr = NULL;
}
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
/* set index of first available SBAL and number of available SBALs */
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
if (adapter->scsi_host) {
adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
}
return 0;
failed_qdio:
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
dev_err(&cdev->dev,
"Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
if (!qdio)
return;
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
kfree(qdio);
}
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
struct zfcp_qdio *qdio;
qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
if (!qdio)
return -ENOMEM;
qdio->adapter = adapter;
if (zfcp_qdio_allocate(qdio)) {
kfree(qdio);
return -ENOMEM;
}
spin_lock_init(&qdio->req_q_lock);
spin_lock_init(&qdio->stat_lock);
adapter->qdio = qdio;
return 0;
}
/**
* zfcp_qdio_siosl - Trigger logging in FCP channel
* @adapter: The zfcp_adapter where to trigger logging
*
* Call the cio siosl function to trigger hardware logging. This
* wrapper function sets a flag to ensure hardware logging is only
* triggered once before going through qdio shutdown.
*
* The triggers are always run from qdio tasklet context, so no
* additional synchronization is necessary.
*/
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
int rc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
return;
rc = ccw_device_siosl(adapter->ccw_device);
if (!rc)
atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&adapter->status);
}

View file

@ -0,0 +1,271 @@
/*
* zfcp device driver
*
* Header file for zfcp qdio interface
*
* Copyright IBM Corp. 2010
*/
#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H
#include <asm/qdio.h>
#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
/* Max SBALs for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
/**
* struct zfcp_qdio - basic qdio data structure
* @res_q: response queue
* @req_q: request queue
* @req_q_idx: index of next free buffer
* @req_q_free: number of free buffers in queue
* @stat_lock: lock to protect req_q_util and req_q_time
* @req_q_lock: lock to serialize access to request queue
* @req_q_time: time of last fill level change
* @req_q_util: used for accounting
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
* @adapter: adapter used in conjunction with this qdio structure
* @max_sbale_per_sbal: maximum number of SBALEs usable per SBAL
* @max_sbale_per_req: maximum number of data SBALEs usable per request
*/
struct zfcp_qdio {
struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
u8 req_q_idx;
atomic_t req_q_free;
spinlock_t stat_lock;
spinlock_t req_q_lock;
unsigned long long req_q_time;
u64 req_q_util;
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
struct zfcp_adapter *adapter;
u16 max_sbale_per_sbal;
u16 max_sbale_per_req;
};
/**
* struct zfcp_qdio_req - qdio queue related values for a request
* @sbtype: sbal type flags for sbale 0
* @sbal_number: number of sbals used by this request
* @sbal_first: first sbal for this request
* @sbal_last: last sbal for this request
* @sbal_limit: last possible sbal for this request
* @sbale_curr: index of the current sbale within the current sbal
* @sbal_response: sbal used in interrupt
* @qdio_outb_usage: usage of outbound queue
*/
struct zfcp_qdio_req {
u8 sbtype;
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
u8 sbal_limit;
u8 sbale_curr;
u8 sbal_response;
u16 qdio_outb_usage;
};
/**
* zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
return &qdio->req_q[q_req->sbal_last]->element[0];
}
/**
* zfcp_qdio_sbale_curr - return current sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}
/**
* zfcp_qdio_req_init - initialize qdio request
* @qdio: request queue where to start putting the request
* @q_req: the qdio request to start
* @req_id: The request id
* @sbtype: type flags to set for all sbals
* @data: First data block
* @len: Length of first data block
*
* This is the start of putting the request into the queue, the last
* step is passing the request to zfcp_qdio_send. The request queue
* lock must be held during the whole process from init to send.
*/
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long req_id, u8 sbtype, void *data, u32 len)
{
struct qdio_buffer_element *sbale;
int count = min(atomic_read(&qdio->req_q_free),
ZFCP_QDIO_MAX_SBALS_PER_REQ);
q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
q_req->sbal_number = 1;
q_req->sbtype = sbtype;
q_req->sbale_curr = 1;
q_req->sbal_limit = (q_req->sbal_first + count - 1)
% QDIO_MAX_BUFFERS_PER_Q;
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->addr = (void *) req_id;
sbale->eflags = 0;
sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
if (unlikely(!data))
return;
sbale++;
sbale->addr = data;
sbale->length = len;
}
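/*
 * Illustrative sequence (assumptions: "my_req_id", "buf1", "buf2" and
 * the lengths are hypothetical, and the req_q_lock is held): build a
 * single-SBAL write request carrying two data blocks and send it.
 *
 *	struct zfcp_qdio_req q_req;
 *
 *	zfcp_qdio_req_init(qdio, &q_req, my_req_id,
 *			   SBAL_SFLAGS0_TYPE_WRITE, buf1, len1);
 *	zfcp_qdio_fill_next(qdio, &q_req, buf2, len2);
 *	zfcp_qdio_set_sbale_last(qdio, &q_req);
 *	retval = zfcp_qdio_send(qdio, &q_req);
 */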
/**
* zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* @data: pointer to the data block to map into the next sbale
* @len: length of the data block
*
* This is only required for single sbal requests, calling it when
* wrapping around to the next sbal is a bug.
*/
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
void *data, u32 len)
{
struct qdio_buffer_element *sbale;
BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
q_req->sbale_curr++;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->addr = data;
sbale->length = len;
}
/**
* zfcp_qdio_set_sbale_last - set last entry flag in current sbale
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
*/
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}
/**
* zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
* @sg: The scatterlist where to check the data size
*
* Returns: 1 when one sbale is enough for the data in the scatterlist,
* 0 if not.
*/
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}
/**
* zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
* @qdio: pointer to struct zfcp_qdio
* @q_req: The current zfcp_qdio_req
*/
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req)
{
q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
}
/**
* zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
* @qdio: pointer to struct zfcp_qdio
* @q_req: The current zfcp_qdio_req
* @max_sbals: maximum number of SBALs allowed
*/
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req, int max_sbals)
{
int count = min(atomic_read(&qdio->req_q_free), max_sbals);
q_req->sbal_limit = (q_req->sbal_first + count - 1) %
QDIO_MAX_BUFFERS_PER_Q;
}
/**
* zfcp_qdio_set_data_div - set data division count
* @qdio: pointer to struct zfcp_qdio
* @q_req: The current zfcp_qdio_req
* @count: The data division count
*/
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req, u32 count)
{
struct qdio_buffer_element *sbale;
sbale = qdio->req_q[q_req->sbal_first]->element;
sbale->length = count;
}
/**
* zfcp_qdio_sbale_count - count sbale used
* @sg: pointer to struct scatterlist
*/
static inline
unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
{
unsigned int count = 0;
for (; sg; sg = sg_next(sg))
count++;
return count;
}
/**
* zfcp_qdio_real_bytes - count bytes used
* @sg: pointer to struct scatterlist
*/
static inline
unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
{
unsigned int real_bytes = 0;
for (; sg; sg = sg_next(sg))
real_bytes += sg->length;
return real_bytes;
}
/**
* zfcp_qdio_set_scount - set SBAL count value
* @qdio: pointer to struct zfcp_qdio
* @q_req: The current zfcp_qdio_req
*/
static inline
void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
sbale = qdio->req_q[q_req->sbal_first]->element;
sbale->scount = q_req->sbal_number - 1;
}
#endif /* ZFCP_QDIO_H */

View file

@ -0,0 +1,183 @@
/*
* zfcp device driver
*
* Data structure and helper functions for tracking pending FSF
* requests.
*
* Copyright IBM Corp. 2009
*/
#ifndef ZFCP_REQLIST_H
#define ZFCP_REQLIST_H
/* number of hash buckets */
#define ZFCP_REQ_LIST_BUCKETS 128
/**
* struct zfcp_reqlist - Container for request list (reqlist)
* @lock: Spinlock for protecting the hash list
* @buckets: Array of hash buckets, each is a list of requests in this bucket
*/
struct zfcp_reqlist {
spinlock_t lock;
struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
};
static inline int zfcp_reqlist_hash(unsigned long req_id)
{
return req_id % ZFCP_REQ_LIST_BUCKETS;
}
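/*
 * Example (arithmetic only): with 128 buckets, request id 130 maps to
 * bucket 130 % 128 = 2; since request ids increase monotonically,
 * consecutive requests spread evenly across the buckets.
 */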
/**
* zfcp_reqlist_alloc - Allocate and initialize reqlist
*
* Returns pointer to allocated reqlist on success, or NULL on
* allocation failure.
*/
static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
{
unsigned int i;
struct zfcp_reqlist *rl;
rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
if (!rl)
return NULL;
spin_lock_init(&rl->lock);
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
INIT_LIST_HEAD(&rl->buckets[i]);
return rl;
}
/**
* zfcp_reqlist_isempty - Check whether the request list is empty
* @rl: pointer to reqlist
*
* Returns: 1 if list is empty, 0 if not
*/
static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
{
unsigned int i;
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
if (!list_empty(&rl->buckets[i]))
return 0;
return 1;
}
/**
* zfcp_reqlist_free - Free allocated memory for reqlist
* @rl: The reqlist where to free memory
*/
static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
{
/* sanity check */
BUG_ON(!zfcp_reqlist_isempty(rl));
kfree(rl);
}
static inline struct zfcp_fsf_req *
_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
struct zfcp_fsf_req *req;
unsigned int i;
i = zfcp_reqlist_hash(req_id);
list_for_each_entry(req, &rl->buckets[i], list)
if (req->req_id == req_id)
return req;
return NULL;
}
/**
* zfcp_reqlist_find - Lookup FSF request by its request id
* @rl: The reqlist where to lookup the FSF request
* @req_id: The request id to look for
*
* Returns a pointer to the FSF request with the specified request id
* or NULL if there is no known FSF request with this id.
*/
static inline struct zfcp_fsf_req *
zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
unsigned long flags;
struct zfcp_fsf_req *req;
spin_lock_irqsave(&rl->lock, flags);
req = _zfcp_reqlist_find(rl, req_id);
spin_unlock_irqrestore(&rl->lock, flags);
return req;
}
/**
* zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
* @rl: reqlist where to search and remove entry
* @req_id: The request id of the request to look for
*
* This function tries to find the FSF request with the specified
* id and then removes it from the reqlist. The reqlist lock is held
* during both steps of the operation.
*
* Returns: Pointer to the FSF request if the request has been found,
* NULL if it has not been found.
*/
static inline struct zfcp_fsf_req *
zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
{
unsigned long flags;
struct zfcp_fsf_req *req;
spin_lock_irqsave(&rl->lock, flags);
req = _zfcp_reqlist_find(rl, req_id);
if (req)
list_del(&req->list);
spin_unlock_irqrestore(&rl->lock, flags);
return req;
}
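/*
 * Illustrative completion-path usage (not driver code): resolving the
 * request id reported by the hardware and unlinking the request is one
 * locked step, so a concurrent lookup cannot return a request that is
 * already being completed.
 *
 *	req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
 *	if (!req) {
 *		... unknown request id: trigger recovery ...
 *	}
 */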
/**
* zfcp_reqlist_add - Add entry to reqlist
* @rl: reqlist where to add the entry
* @req: The entry to add
*
* The request id always increases. As an optimization new requests
* are added here with list_add_tail at the end of the bucket lists
* while old requests are looked up starting at the beginning of the
* lists.
*/
static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
struct zfcp_fsf_req *req)
{
unsigned int i;
unsigned long flags;
i = zfcp_reqlist_hash(req->req_id);
spin_lock_irqsave(&rl->lock, flags);
list_add_tail(&req->list, &rl->buckets[i]);
spin_unlock_irqrestore(&rl->lock, flags);
}
/**
* zfcp_reqlist_move - Move all entries from reqlist to simple list
* @rl: The zfcp_reqlist where to remove all entries
* @list: The list where to move all entries
*/
static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
struct list_head *list)
{
unsigned int i;
unsigned long flags;
spin_lock_irqsave(&rl->lock, flags);
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
list_splice_init(&rl->buckets[i], list);
spin_unlock_irqrestore(&rl->lock, flags);
}
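/*
 * Illustrative drain sketch (assumes "req" and "tmp" iterator
 * variables): during adapter recovery all pending requests can be
 * moved to a caller-owned list in one step and then completed outside
 * the reqlist lock.
 *
 *	LIST_HEAD(remove_queue);
 *
 *	zfcp_reqlist_move(adapter->req_list, &remove_queue);
 *	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
 *		list_del(&req->list);
 *		... dismiss req ...
 *	}
 */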
#endif /* ZFCP_REQLIST_H */

View file

@ -0,0 +1,733 @@
/*
* zfcp device driver
*
* Interface to Linux SCSI midlayer.
*
* Copyright IBM Corp. 2002, 2013
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
#include <linux/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
static bool enable_dif;
module_param_named(dif, enable_dif, bool, 0400);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
static bool allow_lun_scan = true;
module_param(allow_lun_scan, bool, 0600);
MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
int reason)
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
default:
return -EOPNOTSUPP;
}
return sdev->queue_depth;
}
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
/* if previous slave_alloc returned early, there is nothing to do */
if (!zfcp_sdev->port)
return;
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
if (sdp->tagged_supported)
scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
else
scsi_adjust_queue_depth(sdp, 0, 1);
return 0;
}
static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(scpnt, result);
zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
}
static
int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
int status, scsi_result, ret;
/* reset the status for this request */
scpnt->result = 0;
scpnt->host_scribble = NULL;
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
return 0;
}
status = atomic_read(&zfcp_sdev->status);
if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
!(atomic_read(&zfcp_sdev->port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED)) {
/* only LUN access is denied, but the port is good;
* this case is not covered by the FC transport, so fail here */
zfcp_scsi_command_fail(scpnt, DID_ERROR);
return 0;
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
/* This could be either
* open LUN pending: this is temporary, will result in
* open LUN or ERP_FAILED, so retry command
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
return 0;
}
ret = zfcp_fsf_fcp_cmnd(scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0))
return SCSI_MLQUEUE_HOST_BUSY;
return ret;
}
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port;
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
if (!unit && !(allow_lun_scan && npiv)) {
put_device(&port->dev);
return -ENXIO;
}
zfcp_sdev->port = port;
zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
spin_lock_init(&zfcp_sdev->latencies.lock);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
zfcp_erp_wait(port->adapter);
return 0;
}
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
{
struct Scsi_Host *scsi_host = scpnt->device->host;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
return FAILED; /* completion could be in progress */
}
old_req->data = NULL;
/* don't access old fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
while (retry--) {
abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
if (abrt_req)
break;
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
return SUCCESS;
}
}
if (!abrt_req) {
zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
return FAILED;
}
wait_for_completion(&abrt_req->completion);
if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
dbf_tag = "abrt_ok";
else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
dbf_tag = "abrt_nn";
else {
dbf_tag = "abrt_fa";
retval = FAILED;
}
zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
zfcp_fsf_req_free(abrt_req);
return retval;
}
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
if (fsf_req)
break;
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
return ret;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
return SUCCESS;
}
}
if (!fsf_req)
return FAILED;
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
} else
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
zfcp_fsf_req_free(fsf_req);
return retval;
}
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
return ret;
return SUCCESS;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
static struct scsi_host_template zfcp_scsi_host_template = {
.module = THIS_MODULE,
.name = "zfcp",
.queuecommand = zfcp_scsi_queuecommand,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
.change_queue_depth = zfcp_scsi_change_queue_depth,
.proc_name = "zfcp",
.can_queue = 4096,
.this_id = -1,
.sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
/* GCD, adjusted later */
.max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.cmd_per_lun = 1,
.use_clustering = 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
};
/**
* zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
return 0;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
"Registering the FCP device with the "
"SCSI stack failed\n");
return -EIO;
}
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
adapter->scsi_host->max_lun = 0xFFFFFFFF;
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
return -EIO;
}
return 0;
}
/**
* zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
* @adapter: The zfcp adapter to unregister.
*/
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
shost = adapter->scsi_host;
if (!shost)
return;
read_lock_irq(&adapter->port_list_lock);
list_for_each_entry(port, &adapter->port_list, list)
port->rport = NULL;
read_unlock_irq(&adapter->port_list_lock);
fc_remove_host(shost);
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
}
static struct fc_host_statistics *
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
struct fc_host_statistics *fc_stats;
if (!adapter->fc_stats) {
fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
if (!fc_stats)
return NULL;
adapter->fc_stats = fc_stats; /* freed in adapter_release */
}
memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
return adapter->fc_stats;
}
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data,
struct fsf_qtcb_bottom_port *old)
{
fc_stats->seconds_since_last_reset =
data->seconds_since_last_reset - old->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames - old->tx_frames;
fc_stats->tx_words = data->tx_words - old->tx_words;
fc_stats->rx_frames = data->rx_frames - old->rx_frames;
fc_stats->rx_words = data->rx_words - old->rx_words;
fc_stats->lip_count = data->lip - old->lip;
fc_stats->nos_count = data->nos - old->nos;
fc_stats->error_frames = data->error_frames - old->error_frames;
fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
fc_stats->link_failure_count = data->link_failure - old->link_failure;
fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
fc_stats->loss_of_signal_count =
data->loss_of_signal - old->loss_of_signal;
fc_stats->prim_seq_protocol_err_count =
data->psp_error_counts - old->psp_error_counts;
fc_stats->invalid_tx_word_count =
data->invalid_tx_words - old->invalid_tx_words;
fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
fc_stats->fcp_input_requests =
data->input_requests - old->input_requests;
fc_stats->fcp_output_requests =
data->output_requests - old->output_requests;
fc_stats->fcp_control_requests =
data->control_requests - old->control_requests;
fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}
static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
struct fsf_qtcb_bottom_port *data)
{
fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
fc_stats->tx_frames = data->tx_frames;
fc_stats->tx_words = data->tx_words;
fc_stats->rx_frames = data->rx_frames;
fc_stats->rx_words = data->rx_words;
fc_stats->lip_count = data->lip;
fc_stats->nos_count = data->nos;
fc_stats->error_frames = data->error_frames;
fc_stats->dumped_frames = data->dumped_frames;
fc_stats->link_failure_count = data->link_failure;
fc_stats->loss_of_sync_count = data->loss_of_sync;
fc_stats->loss_of_signal_count = data->loss_of_signal;
fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
fc_stats->invalid_tx_word_count = data->invalid_tx_words;
fc_stats->invalid_crc_count = data->invalid_crcs;
fc_stats->fcp_input_requests = data->input_requests;
fc_stats->fcp_output_requests = data->output_requests;
fc_stats->fcp_control_requests = data->control_requests;
fc_stats->fcp_input_megabytes = data->input_mb;
fc_stats->fcp_output_megabytes = data->output_mb;
}
static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
{
struct zfcp_adapter *adapter;
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *data;
int ret;
adapter = (struct zfcp_adapter *)host->hostdata[0];
fc_stats = zfcp_init_fc_host_stats(adapter);
if (!fc_stats)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret) {
kfree(data);
return NULL;
}
if (adapter->stats_reset &&
((jiffies/HZ - adapter->stats_reset) <
data->seconds_since_last_reset))
zfcp_adjust_fc_host_stats(fc_stats, data,
adapter->stats_reset_data);
else
zfcp_set_fc_host_stats(fc_stats, data);
kfree(data);
return fc_stats;
}
static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter;
struct fsf_qtcb_bottom_port *data;
int ret;
adapter = (struct zfcp_adapter *)shost->hostdata[0];
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
kfree(adapter->stats_reset_data);
adapter->stats_reset_data = data; /* finally freed in
adapter_release */
}
}
static void zfcp_get_host_port_state(struct Scsi_Host *shost)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
int status = atomic_read(&adapter->status);
if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
!(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
else
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
}
static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
rport->dev_loss_tmo = timeout;
}
/**
* zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
* @rport: The FC rport where to terminate I/O
*
* Abort all pending SCSI commands for a port by closing the
* port. Using a reopen avoids a conflict with a shutdown
* overwriting a reopen. The "forced" ensures that a disappeared port
* is not opened again as valid due to the cached PLOGI data in
* non-NPIV mode.
*/
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
struct zfcp_port *port;
struct Scsi_Host *shost = rport_to_shost(rport);
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
}
}
static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
struct fc_rport_identifiers ids;
struct fc_rport *rport;
if (port->rport)
return;
ids.node_name = port->wwnn;
ids.port_name = port->wwpn;
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
"Registering port 0x%016Lx failed\n",
(unsigned long long)port->wwpn);
return;
}
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
port->starget_id = rport->scsi_target_id;
zfcp_unit_queue_scsi_scan(port);
}
static void zfcp_scsi_rport_block(struct zfcp_port *port)
{
struct fc_rport *rport = port->rport;
if (rport) {
fc_remote_port_delete(rport);
port->rport = NULL;
}
}
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
get_device(&port->dev);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
get_device(&port->dev);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
{
unsigned long flags;
struct zfcp_port *port;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
zfcp_scsi_schedule_rport_block(port);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
void zfcp_scsi_rport_work(struct work_struct *work)
{
struct zfcp_port *port = container_of(work, struct zfcp_port,
rport_work);
while (port->rport_task) {
if (port->rport_task == RPORT_ADD) {
port->rport_task = RPORT_NONE;
zfcp_scsi_rport_register(port);
} else {
port->rport_task = RPORT_NONE;
zfcp_scsi_rport_block(port);
}
}
put_device(&port->dev);
}
/**
* zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
* @adapter: The adapter where to configure DIF/DIX for the SCSI host
*/
void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
unsigned int mask = 0;
unsigned int data_div;
struct Scsi_Host *shost = adapter->scsi_host;
data_div = atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
if (enable_dif &&
adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
mask |= SHOST_DIF_TYPE1_PROTECTION;
if (enable_dif && data_div &&
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
shost->max_sectors = shost->sg_tablesize * 8;
}
scsi_host_set_prot(shost, mask);
}
/**
* zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
* @scmd: The SCSI command to report the error for
* @ascq: The ASCQ to put in the sense buffer
*
* See the error handling in sd_done for the sense codes used here.
* Set DID_SOFT_ERROR to retry the request, if possible.
*/
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
scsi_build_sense_buffer(1, scmd->sense_buffer,
ILLEGAL_REQUEST, 0x10, ascq);
set_driver_byte(scmd, DRIVER_SENSE);
scmd->result |= SAM_STAT_CHECK_CONDITION;
set_host_byte(scmd, DID_SOFT_ERROR);
}
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
.show_starget_node_name = 1,
.show_rport_supported_classes = 1,
.show_rport_maxframe_size = 1,
.show_rport_dev_loss_tmo = 1,
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_permanent_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
.get_fc_host_stats = zfcp_get_fc_host_stats,
.reset_fc_host_stats = zfcp_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_get_host_port_state,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.bsg_request = zfcp_fc_exec_bsg_job,
.bsg_timeout = zfcp_fc_timeout_bsg_job,
/* no functions registered for the following dynamic attributes,
which are set directly by the LLDD */
.show_host_port_type = 1,
.show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};

View file

@ -0,0 +1,577 @@
/*
* zfcp device driver
*
* sysfs attributes.
*
* Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
_show, _store)
#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
return sprintf(buf, _format, _value); \
} \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct ccw_device *cdev = to_ccwdev(dev); \
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
int i; \
\
if (!adapter) \
return -ENODEV; \
\
i = sprintf(buf, _format, _value); \
zfcp_ccw_adapter_put(adapter); \
return i; \
} \
static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
(unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
(unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
atomic_read(&port->status));
ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
(atomic_read(&port->status) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return sprintf(buf, "1\n");
return sprintf(buf, "0\n");
}
static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
unsigned long val;
if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
zfcp_erp_wait(port->adapter);
return count;
}
static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_port_failed_show,
zfcp_sysfs_port_failed_store);
static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
struct scsi_device *sdev;
unsigned int status, failed = 1;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
status = atomic_read(&sdev_to_zfcp(sdev)->status);
failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
scsi_device_put(sdev);
}
return sprintf(buf, "%d\n", failed);
}
static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
unsigned long val;
struct scsi_device *sdev;
if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
"syufai2");
zfcp_erp_wait(unit->port->adapter);
} else
zfcp_unit_scsi_scan(unit);
return count;
}
static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_unit_failed_show,
zfcp_sysfs_unit_failed_store);
static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
int i;
if (!adapter)
return -ENODEV;
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
i = sprintf(buf, "1\n");
else
i = sprintf(buf, "0\n");
zfcp_ccw_adapter_put(adapter);
return i;
}
static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
unsigned long val;
int retval = 0;
if (!adapter)
return -ENODEV;
if (kstrtoul(buf, 0, &val) || val != 0) {
retval = -EINVAL;
goto out;
}
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"syafai2");
zfcp_erp_wait(adapter);
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_adapter_failed_show,
zfcp_sysfs_adapter_failed_store);
static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return -ENODEV;
/* sync the user-space invocation with the kernel invocation of scan_work */
queue_work(adapter->work_queue, &adapter->scan_work);
flush_work(&adapter->scan_work);
zfcp_ccw_adapter_put(adapter);
return (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
struct zfcp_port *port;
u64 wwpn;
int retval = -EINVAL;
if (!adapter)
return -ENODEV;
if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
goto out;
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (!port)
goto out;
else
retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
if (atomic_read(&port->units) > 0) {
retval = -EBUSY;
mutex_unlock(&zfcp_sysfs_port_units_mutex);
goto out;
}
/* port is about to be removed, so no more unit_add */
atomic_set(&port->units, -1);
mutex_unlock(&zfcp_sysfs_port_units_mutex);
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
put_device(&port->dev);
zfcp_erp_port_shutdown(port, 0, "syprs_1");
device_unregister(&port->dev);
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
&dev_attr_adapter_port_remove.attr,
&dev_attr_adapter_port_rescan.attr,
&dev_attr_adapter_peer_wwnn.attr,
&dev_attr_adapter_peer_wwpn.attr,
&dev_attr_adapter_peer_d_id.attr,
&dev_attr_adapter_card_version.attr,
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
NULL
};
struct attribute_group zfcp_sysfs_adapter_attrs = {
.attrs = zfcp_adapter_attrs,
};
static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
int retval;
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
retval = zfcp_unit_add(port, fcp_lun);
if (retval)
return retval;
return count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
if (zfcp_unit_remove(port, fcp_lun))
return -EINVAL;
return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
&dev_attr_port_failed.attr,
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
NULL
};
static struct attribute_group zfcp_port_attr_group = {
.attrs = zfcp_port_attrs,
};
const struct attribute_group *zfcp_port_attr_groups[] = {
&zfcp_port_attr_group,
NULL,
};
static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_failed.attr,
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
&dev_attr_unit_access_shared.attr,
&dev_attr_unit_access_readonly.attr,
NULL
};
static struct attribute_group zfcp_unit_attr_group = {
.attrs = zfcp_unit_attrs,
};
const struct attribute_group *zfcp_unit_attr_groups[] = {
&zfcp_unit_attr_group,
NULL,
};
#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) { \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
spin_lock_bh(&lat->lock); \
fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
fmin = lat->_name.fabric.min * adapter->timer_ticks; \
fmax = lat->_name.fabric.max * adapter->timer_ticks; \
csum = lat->_name.channel.sum * adapter->timer_ticks; \
cmin = lat->_name.channel.min * adapter->timer_ticks; \
cmax = lat->_name.channel.max * adapter->timer_ticks; \
cc = lat->_name.counter; \
spin_unlock_bh(&lat->lock); \
\
do_div(fsum, 1000); \
do_div(fmin, 1000); \
do_div(fmax, 1000); \
do_div(csum, 1000); \
do_div(cmin, 1000); \
do_div(cmax, 1000); \
\
return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
fmin, fmax, fsum, cmin, cmax, csum, cc); \
} \
static ssize_t \
zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
unsigned long flags; \
\
spin_lock_irqsave(&lat->lock, flags); \
lat->_name.fabric.sum = 0; \
lat->_name.fabric.min = 0xFFFFFFFF; \
lat->_name.fabric.max = 0; \
lat->_name.channel.sum = 0; \
lat->_name.channel.min = 0xFFFFFFFF; \
lat->_name.channel.max = 0; \
lat->_name.counter = 0; \
spin_unlock_irqrestore(&lat->lock, flags); \
\
return (ssize_t) count; \
} \
static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
zfcp_sysfs_unit_##_name##_latency_show, \
zfcp_sysfs_unit_##_name##_latency_store);
ZFCP_DEFINE_LATENCY_ATTR(read);
ZFCP_DEFINE_LATENCY_ATTR(write);
ZFCP_DEFINE_LATENCY_ATTR(cmd);
#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_port *port = zfcp_sdev->port; \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
dev_name(&port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
(unsigned long long) port->wwpn);
static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
&dev_attr_wwpn,
&dev_attr_hba_id,
&dev_attr_read_latency,
&dev_attr_write_latency,
&dev_attr_cmd_latency,
NULL
};
static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *scsi_host = dev_to_shost(dev);
struct fsf_qtcb_bottom_port *qtcb_port;
struct zfcp_adapter *adapter;
int retval;
adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
return -EOPNOTSUPP;
qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
if (!qtcb_port)
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
if (!retval)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
return retval;
}
static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);

static int zfcp_sysfs_adapter_ex_config(struct device *dev,
					struct fsf_statistics_info *stat_inf)
{
	struct Scsi_Host *scsi_host = dev_to_shost(dev);
	struct fsf_qtcb_bottom_config *qtcb_config;
	struct zfcp_adapter *adapter;
	int retval;

	adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
	if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
		return -EOPNOTSUPP;

	qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
			      GFP_KERNEL);
	if (!qtcb_config)
		return -ENOMEM;

	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
	if (!retval)
		*stat_inf = qtcb_config->stat_info;

	kfree(qtcb_config);
	return retval;
}
#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct fsf_statistics_info stat_info; \
int retval; \
\
retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
if (retval) \
return retval; \
\
return sprintf(buf, _format, ## _arg); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
(unsigned long long) stat_info.input_req,
(unsigned long long) stat_info.output_req,
(unsigned long long) stat_info.control_req);
ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
(unsigned long long) stat_info.input_mb,
(unsigned long long) stat_info.output_mb);
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
(unsigned long long) stat_info.seconds_act);
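
/*
 * Editor's note (descriptive, not part of the original source): the
 * requests, megabytes and seconds_active attributes above all fetch
 * their numbers through zfcp_sysfs_adapter_ex_config, i.e. via a
 * synchronous exchange-config-data request; reading "requests", for
 * instance, yields the input, output and control request counters as
 * three numbers on one line.
 */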

static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct Scsi_Host *scsi_host = class_to_shost(dev);
	struct zfcp_qdio *qdio =
		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
	u64 util;

	spin_lock_bh(&qdio->stat_lock);
	util = qdio->req_q_util;
	spin_unlock_bh(&qdio->stat_lock);

	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
		       (unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization,
&dev_attr_requests,
&dev_attr_megabytes,
&dev_attr_seconds_active,
&dev_attr_queue_full,
NULL
};
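
/*
 * Editor's note (assumption, not visible in this hunk): the
 * zfcp_sysfs_sdev_attrs and zfcp_sysfs_shost_attrs arrays are
 * presumably wired into the driver's scsi_host_template through its
 * sdev_attrs and shost_attrs fields, which is how the attributes end
 * up under each SCSI device and Scsi_Host in sysfs.
 */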

View file

@ -0,0 +1,255 @@
/*
* zfcp device driver
*
* Tracking of manually configured LUNs and helper functions to
* register the LUNs with the SCSI midlayer.
*
* Copyright IBM Corp. 2010
*/
#include "zfcp_def.h"
#include "zfcp_ext.h"

/**
 * zfcp_unit_scsi_scan - Register LUN with the SCSI midlayer
 * @unit: The zfcp LUN/unit to register
 *
 * When the SCSI midlayer is not allowed to scan and attach SCSI
 * devices automatically, zfcp has to register each device
 * individually with the SCSI midlayer.
 */
void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
{
	struct fc_rport *rport = unit->port->rport;
	u64 lun;

	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
}
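
/*
 * Editor's note (descriptive, not part of the original source): the
 * scan above addresses the device as channel 0 on the rport's SCSI
 * target id, with the integer LUN derived from the 64 bit FCP LUN;
 * the final argument 1 requests a rescan even if the target was
 * scanned before.
 */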

static void zfcp_unit_scsi_scan_work(struct work_struct *work)
{
	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
					      scsi_work);

	zfcp_unit_scsi_scan(unit);
	put_device(&unit->dev);
}

/**
 * zfcp_unit_queue_scsi_scan - Register configured units on port
 * @port: The zfcp_port whose units are to be registered
 *
 * After opening a port, all units configured on this port have to be
 * registered with the SCSI midlayer. This function should be called
 * after fc_remote_port_add, so that the fc_rport is already ONLINE
 * and scsi_scan_target is invoked the same way as it is from the FC
 * transport class.
 */
void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
{
	struct zfcp_unit *unit;

	read_lock_irq(&port->unit_list_lock);
	list_for_each_entry(unit, &port->unit_list, list) {
		get_device(&unit->dev);
		if (scsi_queue_work(port->adapter->scsi_host,
				    &unit->scsi_work) <= 0)
			put_device(&unit->dev);
	}
	read_unlock_irq(&port->unit_list_lock);
}
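
/*
 * Editor's note (descriptive, not part of the original source): each
 * queued work item holds a device reference so the unit cannot be
 * released while its scan is pending; zfcp_unit_scsi_scan_work drops
 * that reference, and the put_device in the loop above covers the
 * case where scsi_queue_work did not accept the work item.
 */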

static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;

	list_for_each_entry(unit, &port->unit_list, list)
		if (unit->fcp_lun == fcp_lun) {
			get_device(&unit->dev);
			return unit;
		}

	return NULL;
}

/**
 * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
 * @port: zfcp_port in which to look for the unit
 * @fcp_lun: 64 bit FCP LUN used to identify the zfcp_unit
 *
 * If the zfcp_unit is found, a reference is acquired that has to be
 * released later.
 *
 * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
 * with the specified FCP LUN.
 */
struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;

	read_lock_irq(&port->unit_list_lock);
	unit = _zfcp_unit_find(port, fcp_lun);
	read_unlock_irq(&port->unit_list_lock);

	return unit;
}
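
/*
 * Illustrative usage sketch (editor's addition, not part of the
 * original source); callers must drop the reference taken by
 * zfcp_unit_find once they are done with the unit:
 *
 *	struct zfcp_unit *unit = zfcp_unit_find(port, fcp_lun);
 *
 *	if (unit) {
 *		use(unit);
 *		put_device(&unit->dev);
 *	}
 *
 * where use() stands for any hypothetical consumer of the unit.
 */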

/**
 * zfcp_unit_release - Decrement the port's unit counter and free the zfcp_unit
 * @dev: pointer to the device embedded in the zfcp_unit
 */
static void zfcp_unit_release(struct device *dev)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);

	atomic_dec(&unit->port->units);
	kfree(unit);
}

/**
 * zfcp_unit_add - Add unit to the unit list of a port
 * @port: pointer to the port where the unit is added
 * @fcp_lun: FCP LUN of the unit to be added
 *
 * Sets up some unit internal structures and creates a sysfs entry.
 *
 * Returns: 0 on success, -ENODEV if the port is already gone, -EEXIST
 * if a unit with this FCP LUN already exists, or -ENOMEM on
 * allocation failure.
 */
int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;
	int retval = 0;

	mutex_lock(&zfcp_sysfs_port_units_mutex);
	if (atomic_read(&port->units) == -1) {
		/* port is already gone */
		retval = -ENODEV;
		goto out;
	}

	unit = zfcp_unit_find(port, fcp_lun);
	if (unit) {
		put_device(&unit->dev);
		retval = -EEXIST;
		goto out;
	}

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit) {
		retval = -ENOMEM;
		goto out;
	}

	unit->port = port;
	unit->fcp_lun = fcp_lun;
	unit->dev.parent = &port->dev;
	unit->dev.release = zfcp_unit_release;
	unit->dev.groups = zfcp_unit_attr_groups;
	INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);

	if (dev_set_name(&unit->dev, "0x%016llx",
			 (unsigned long long) fcp_lun)) {
		kfree(unit);
		retval = -ENOMEM;
		goto out;
	}

	if (device_register(&unit->dev)) {
		put_device(&unit->dev);
		retval = -ENOMEM;
		goto out;
	}

	atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */

	write_lock_irq(&port->unit_list_lock);
	list_add_tail(&unit->list, &port->unit_list);
	write_unlock_irq(&port->unit_list_lock);

	zfcp_unit_scsi_scan(unit);

out:
	mutex_unlock(&zfcp_sysfs_port_units_mutex);
	return retval;
}
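
/*
 * Editor's note (descriptive, not part of the original source): the
 * error paths above differ deliberately. Before device_register the
 * driver core holds no reference to the device, so a plain kfree is
 * correct; once device_register has been called, even when it fails,
 * the memory must be released through put_device, which ends up in
 * zfcp_unit_release.
 */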

/**
 * zfcp_unit_sdev - Return the SCSI device for a zfcp_unit
 * @unit: The zfcp_unit to get the SCSI device for
 *
 * On success, the caller also holds a reference to the SCSI device
 * that must be released with scsi_device_put.
 *
 * Returns: scsi_device pointer on success, NULL if there is no SCSI
 * device for this zfcp_unit.
 */
struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;
	u64 lun;

	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
	port = unit->port;
	shost = port->adapter->scsi_host;
	return scsi_device_lookup(shost, 0, port->starget_id, lun);
}

/**
 * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
 * @unit: The unit to look up the SCSI device for
 *
 * Returns: the zfcp LUN status field of the SCSI device if the SCSI
 * device for the zfcp_unit exists, 0 otherwise.
 */
unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
{
	unsigned int status = 0;
	struct scsi_device *sdev;
	struct zfcp_scsi_dev *zfcp_sdev;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		zfcp_sdev = sdev_to_zfcp(sdev);
		status = atomic_read(&zfcp_sdev->status);
		scsi_device_put(sdev);
	}

	return status;
}

/**
 * zfcp_unit_remove - Remove entry from the list of configured units
 * @port: The port from which to remove the unit
 * @fcp_lun: The 64 bit LUN of the unit to remove
 *
 * Returns: -EINVAL if a unit with the specified LUN does not exist,
 * 0 on success.
 */
int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;
	struct scsi_device *sdev;

	write_lock_irq(&port->unit_list_lock);
	unit = _zfcp_unit_find(port, fcp_lun);
	if (unit)
		list_del(&unit->list);
	write_unlock_irq(&port->unit_list_lock);

	if (!unit)
		return -EINVAL;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	put_device(&unit->dev);

	device_unregister(&unit->dev);

	return 0;
}
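
/*
 * Editor's note (descriptive, not part of the original source): the
 * put_device above drops the reference taken by _zfcp_unit_find, and
 * device_unregister releases the initial reference acquired by
 * device_register in zfcp_unit_add; once the last reference is gone,
 * zfcp_unit_release frees the unit and decrements the port's unit
 * counter.
 */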