mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-09 01:28:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
90
drivers/s390/block/Kconfig
Normal file
90
drivers/s390/block/Kconfig
Normal file
|
@ -0,0 +1,90 @@
|
|||
# Configuration options for the S/390 block device drivers
# (XPRAM, DCSS, DASD and Storage Class Memory).
comment "S/390 block device drivers"
	depends on S390 && BLOCK

config BLK_DEV_XPRAM
	def_tristate m
	prompt "XPRAM disk support"
	depends on S390 && BLOCK
	help
	  Select this option if you want to use your expanded storage on S/390
	  or zSeries as a disk. This is useful as a _fast_ swap device if you
	  want to access more than 2G of memory when running in 31 bit mode.
	  This option is also available as a module which will be called
	  xpram. If unsure, say "N".

config DCSSBLK
	def_tristate m
	prompt "DCSSBLK support"
	depends on S390 && BLOCK
	help
	  Support for dcss block device

config DASD
	def_tristate y
	prompt "Support for DASD devices"
	depends on CCW && BLOCK
	select IOSCHED_DEADLINE
	help
	  Enable this option if you want to access DASDs directly utilizing
	  S/390s channel subsystem commands. This is necessary for running
	  natively on a single image or an LPAR.

config DASD_PROFILE
	def_bool y
	prompt "Profiling support for dasd devices"
	depends on DASD
	help
	  Enable this option if you want to see profiling information
	  in /proc/dasd/statistics.

config DASD_ECKD
	def_tristate y
	prompt "Support for ECKD Disks"
	depends on DASD
	help
	  ECKD devices are the most commonly used devices. You should enable
	  this option unless you are very sure to have no ECKD device.

config DASD_FBA
	def_tristate y
	prompt "Support for FBA Disks"
	depends on DASD
	help
	  Select this option to be able to access FBA devices. It is safe to
	  say "Y".

config DASD_DIAG
	def_tristate y
	prompt "Support for DIAG access to Disks"
	depends on DASD
	help
	  Select this option if you want to use Diagnose250 command to access
	  Disks under VM. If you are not running under VM or unsure what it is,
	  say "N".

config DASD_EER
	def_bool y
	prompt "Extended error reporting (EER)"
	depends on DASD
	help
	  This driver provides a character device interface to the
	  DASD extended error reporting. This is only needed if you want to
	  use applications written for the EER facility.

config SCM_BLOCK
	def_tristate m
	prompt "Support for Storage Class Memory"
	depends on S390 && BLOCK && EADM_SCH && SCM_BUS
	help
	  Block device driver for Storage Class Memory (SCM). This driver
	  provides a block device interface for each available SCM increment.

	  To compile this driver as a module, choose M here: the
	  module will be called scm_block.

config SCM_BLOCK_CLUSTER_WRITE
	def_bool y
	prompt "SCM force cluster writes"
	depends on SCM_BLOCK
	help
	  Force writes to Storage Class Memory (SCM) to be in done in clusters.
25
drivers/s390/block/Makefile
Normal file
25
drivers/s390/block/Makefile
Normal file
|
@ -0,0 +1,25 @@
|
|||
#
# S/390 block devices
#

# Object lists for the DASD discipline modules and the core DASD module.
dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
dasd_fba_mod-objs := dasd_fba.o
dasd_diag_mod-objs := dasd_diag.o
dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
			dasd_genhd.o dasd_erp.o
# Extended error reporting is compiled into the core module only when enabled.
ifdef CONFIG_DASD_EER
dasd_mod-objs += dasd_eer.o
endif

obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o

# Storage Class Memory block driver; cluster-write support is optional.
scm_block-objs := scm_drv.o scm_blk.o
ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
scm_block-objs += scm_blk_cluster.o
endif
obj-$(CONFIG_SCM_BLOCK) += scm_block.o
3931
drivers/s390/block/dasd.c
Normal file
3931
drivers/s390/block/dasd.c
Normal file
File diff suppressed because it is too large
Load diff
2771
drivers/s390/block/dasd_3990_erp.c
Normal file
2771
drivers/s390/block/dasd_3990_erp.c
Normal file
File diff suppressed because it is too large
Load diff
990
drivers/s390/block/dasd_alias.c
Normal file
990
drivers/s390/block/dasd_alias.c
Normal file
|
@ -0,0 +1,990 @@
|
|||
/*
|
||||
* PAV alias management for the DASD ECKD discipline
|
||||
*
|
||||
* Copyright IBM Corp. 2007
|
||||
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd-eckd"
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include "dasd_int.h"
|
||||
#include "dasd_eckd.h"
|
||||
|
||||
#ifdef PRINTK_HEADER
|
||||
#undef PRINTK_HEADER
|
||||
#endif /* PRINTK_HEADER */
|
||||
#define PRINTK_HEADER "dasd(eckd):"
|
||||
|
||||
|
||||
/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest has just
 *   to be correct.
 */
|
||||
|
||||
|
||||
static void summary_unit_check_handling_work(struct work_struct *);
|
||||
static void lcu_update_work(struct work_struct *);
|
||||
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
|
||||
|
||||
static struct alias_root aliastree = {
|
||||
.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
|
||||
.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
|
||||
};
|
||||
|
||||
static struct alias_server *_find_server(struct dasd_uid *uid)
|
||||
{
|
||||
struct alias_server *pos;
|
||||
list_for_each_entry(pos, &aliastree.serverlist, server) {
|
||||
if (!strncmp(pos->uid.vendor, uid->vendor,
|
||||
sizeof(uid->vendor))
|
||||
&& !strncmp(pos->uid.serial, uid->serial,
|
||||
sizeof(uid->serial)))
|
||||
return pos;
|
||||
};
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct alias_lcu *_find_lcu(struct alias_server *server,
|
||||
struct dasd_uid *uid)
|
||||
{
|
||||
struct alias_lcu *pos;
|
||||
list_for_each_entry(pos, &server->lculist, lcu) {
|
||||
if (pos->uid.ssid == uid->ssid)
|
||||
return pos;
|
||||
};
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
|
||||
struct dasd_uid *uid)
|
||||
{
|
||||
struct alias_pav_group *pos;
|
||||
__u8 search_unit_addr;
|
||||
|
||||
/* for hyper pav there is only one group */
|
||||
if (lcu->pav == HYPER_PAV) {
|
||||
if (list_empty(&lcu->grouplist))
|
||||
return NULL;
|
||||
else
|
||||
return list_first_entry(&lcu->grouplist,
|
||||
struct alias_pav_group, group);
|
||||
}
|
||||
|
||||
/* for base pav we have to find the group that matches the base */
|
||||
if (uid->type == UA_BASE_DEVICE)
|
||||
search_unit_addr = uid->real_unit_addr;
|
||||
else
|
||||
search_unit_addr = uid->base_unit_addr;
|
||||
list_for_each_entry(pos, &lcu->grouplist, group) {
|
||||
if (pos->uid.base_unit_addr == search_unit_addr &&
|
||||
!strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
|
||||
return pos;
|
||||
};
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct alias_server *_allocate_server(struct dasd_uid *uid)
|
||||
{
|
||||
struct alias_server *server;
|
||||
|
||||
server = kzalloc(sizeof(*server), GFP_KERNEL);
|
||||
if (!server)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
|
||||
memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
|
||||
INIT_LIST_HEAD(&server->server);
|
||||
INIT_LIST_HEAD(&server->lculist);
|
||||
return server;
|
||||
}
|
||||
|
||||
/* Release an alias_server allocated by _allocate_server. */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
|
||||
|
||||
/*
 * Allocate and initialize a new alias_lcu for subsystem id @uid->ssid.
 * Besides the lcu itself this pre-allocates the unit address
 * configuration buffer and a complete "reset summary unit check" CQR
 * (request struct, one CCW, 16 bytes of data) so that the summary unit
 * check handler never has to allocate memory. DMA-capable memory is
 * used for everything the channel subsystem touches (GFP_DMA).
 * Returns the lcu or ERR_PTR(-ENOMEM); may sleep.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a freshly created lcu needs its unit address data read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

	/* unwind partial allocations in reverse order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
|
||||
|
||||
/*
 * Release an alias_lcu and all buffers pre-allocated by _allocate_lcu
 * (rsu CQR data, CCW area, request struct, and the uac buffer).
 */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
|
||||
|
||||
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before, if it
 * is 0, this is a new lcu.
 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
 *
 * NOTE(review): the code below only ever returns 0 or a negative error;
 * the "returns 1" case in the comment above does not match the
 * implementation — confirm against callers before relying on it.
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/*
		 * Allocation may sleep, so drop the lock, allocate, and
		 * re-check under the lock in case another CPU raced us.
		 */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same allocate-then-recheck pattern as for the server */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	/*
	 * New devices go on the inactive list until dasd_alias_add_device
	 * marks them ready for service.
	 */
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
|
||||
|
||||
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 * Also frees the lcu (and its server) once the last device is gone.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep and the worker takes lcu->lock,
		 * so drop the lock around the cancel and re-check afterwards.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same unlock/cancel/relock dance for the ruac worker */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* lock order: aliastree.lock before lcu->lock */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		/* this was the last device on the lcu: tear it down */
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* we cancelled a pending update above; reschedule it */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
|
||||
|
||||
/*
|
||||
* This function assumes that the unit address configuration stored
|
||||
* in the lcu is up to date and will update the device uid before
|
||||
* adding it to a pav group.
|
||||
*/
|
||||
|
||||
static int _add_device_to_lcu(struct alias_lcu *lcu,
|
||||
struct dasd_device *device,
|
||||
struct dasd_device *pos)
|
||||
{
|
||||
|
||||
struct dasd_eckd_private *private;
|
||||
struct alias_pav_group *group;
|
||||
struct dasd_uid uid;
|
||||
unsigned long flags;
|
||||
|
||||
private = (struct dasd_eckd_private *) device->private;
|
||||
|
||||
/* only lock if not already locked */
|
||||
if (device != pos)
|
||||
spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
|
||||
CDEV_NESTED_SECOND);
|
||||
private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
|
||||
private->uid.base_unit_addr =
|
||||
lcu->uac->unit[private->uid.real_unit_addr].base_ua;
|
||||
uid = private->uid;
|
||||
|
||||
if (device != pos)
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
|
||||
|
||||
/* if we have no PAV anyway, we don't need to bother with PAV groups */
|
||||
if (lcu->pav == NO_PAV) {
|
||||
list_move(&device->alias_list, &lcu->active_devices);
|
||||
return 0;
|
||||
}
|
||||
|
||||
group = _find_group(lcu, &uid);
|
||||
if (!group) {
|
||||
group = kzalloc(sizeof(*group), GFP_ATOMIC);
|
||||
if (!group)
|
||||
return -ENOMEM;
|
||||
memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
|
||||
memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
|
||||
group->uid.ssid = uid.ssid;
|
||||
if (uid.type == UA_BASE_DEVICE)
|
||||
group->uid.base_unit_addr = uid.real_unit_addr;
|
||||
else
|
||||
group->uid.base_unit_addr = uid.base_unit_addr;
|
||||
memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
|
||||
INIT_LIST_HEAD(&group->group);
|
||||
INIT_LIST_HEAD(&group->baselist);
|
||||
INIT_LIST_HEAD(&group->aliaslist);
|
||||
list_add(&group->group, &lcu->grouplist);
|
||||
}
|
||||
if (uid.type == UA_BASE_DEVICE)
|
||||
list_move(&device->alias_list, &group->baselist);
|
||||
else
|
||||
list_move(&device->alias_list, &group->aliaslist);
|
||||
private->pavgroup = group;
|
||||
return 0;
|
||||
};
|
||||
|
||||
static void _remove_device_from_lcu(struct alias_lcu *lcu,
|
||||
struct dasd_device *device)
|
||||
{
|
||||
struct dasd_eckd_private *private;
|
||||
struct alias_pav_group *group;
|
||||
|
||||
private = (struct dasd_eckd_private *) device->private;
|
||||
list_move(&device->alias_list, &lcu->inactive_devices);
|
||||
group = private->pavgroup;
|
||||
if (!group)
|
||||
return;
|
||||
private->pavgroup = NULL;
|
||||
if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
|
||||
list_del(&group->group);
|
||||
kfree(group);
|
||||
return;
|
||||
}
|
||||
if (group->next == device)
|
||||
group->next = NULL;
|
||||
};
|
||||
|
||||
static int
|
||||
suborder_not_supported(struct dasd_ccw_req *cqr)
|
||||
{
|
||||
char *sense;
|
||||
char reason;
|
||||
char msg_format;
|
||||
char msg_no;
|
||||
|
||||
sense = dasd_get_sense(&cqr->irb);
|
||||
if (!sense)
|
||||
return 0;
|
||||
|
||||
reason = sense[0];
|
||||
msg_format = (sense[7] & 0xF0);
|
||||
msg_no = (sense[7] & 0x0F);
|
||||
|
||||
/* command reject, Format 0 MSG 4 - invalid parameter */
|
||||
if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Read the lcu's unit address configuration into lcu->uac by issuing a
 * PSF (Perform Subsystem Function) order followed by an RSSD (Read
 * Subsystem Data) CCW on @device. Clears NEED_UAC_UPDATE before the
 * I/O starts so that a concurrent summary unit check re-setting the
 * flag is detected; on failure the flag is set again.
 * Returns 0 on success, -EOPNOTSUPP when the storage server rejects
 * the suborder, or another negative error code. May sleep.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	/* first CCW: the PSF order, command-chained to the RSSD */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	/* second CCW: read the data directly into lcu->uac */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
		if (rc && suborder_not_supported(cqr))
			return -EOPNOTSUPP;
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* I/O failed: mark the uac data as stale again */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
|
||||
|
||||
/*
 * Rebuild the lcu's PAV state: dissolve all existing PAV groups (moving
 * their devices to the active list), read a fresh unit address
 * configuration via @refdev, derive the lcu's PAV mode from it, and
 * re-sort every active device into its PAV group.
 * Returns 0 on success or the error from
 * read_unit_address_configuration. May sleep.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* step 1: dissolve all groups; their devices go to active_devices */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* step 2: read the new unit address configuration (sleeps) */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
				 CDEV_NESTED_FIRST);
	spin_lock(&lcu->lock);
	/* step 3: the first base/hyper PAV alias found sets the lcu mode */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	/*
	 * step 4: regroup all active devices.
	 * NOTE(review): the return value of _add_device_to_lcu (-ENOMEM on
	 * group allocation failure) is ignored here — confirm whether a
	 * failed device should trigger another update.
	 */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
	return 0;
}
|
||||
|
||||
/*
 * Delayed-work handler that refreshes an lcu's alias data via
 * _lcu_update. On failure (other than -EOPNOTSUPP) or when another
 * update was requested meanwhile, the work reschedules itself after
 * 30 seconds; otherwise it clears UPDATE_PENDING and releases the
 * device reference held in ruac_data.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
|
||||
|
||||
/*
 * Mark the lcu as needing a uac update and schedule lcu_update_work,
 * picking a device to issue the I/O on: prefer @device (if still
 * connected), then any device from the first PAV group, then any
 * device from the active list. Returns 0 when the work was scheduled
 * or is already pending, -EINVAL when no usable device was found (the
 * next device set active will trigger the update instead).
 * Caller must hold lcu->lock.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}
|
||||
|
||||
/*
 * Mark @device 'ready for service': sort it into its PAV group when
 * the lcu data is current, otherwise park it on the active list and
 * schedule an lcu update. Returns 0 or a negative error from
 * _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	spin_lock(&lcu->lock);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		/* group allocation failed: fall back to a full update */
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
|
||||
|
||||
int dasd_alias_update_add_device(struct dasd_device *device)
|
||||
{
|
||||
struct dasd_eckd_private *private;
|
||||
private = (struct dasd_eckd_private *) device->private;
|
||||
private->lcu->flags |= UPDATE_PENDING;
|
||||
return dasd_alias_add_device(device);
|
||||
}
|
||||
|
||||
int dasd_alias_remove_device(struct dasd_device *device)
|
||||
{
|
||||
struct dasd_eckd_private *private;
|
||||
struct alias_lcu *lcu;
|
||||
unsigned long flags;
|
||||
|
||||
private = (struct dasd_eckd_private *) device->private;
|
||||
lcu = private->lcu;
|
||||
/* nothing to do if already removed */
|
||||
if (!lcu)
|
||||
return 0;
|
||||
spin_lock_irqsave(&lcu->lock, flags);
|
||||
_remove_device_from_lcu(lcu, device);
|
||||
spin_unlock_irqrestore(&lcu->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Per-I/O load balancer: pick an alias device from @base_device's PAV
 * group to start the request on, rotating group->next round-robin
 * through the group's alias list. Returns the chosen alias only when
 * it is less loaded than the base device (alias count < base count)
 * and not stopped; returns NULL whenever the base device should be
 * used instead (no PAV, update pending, prefix not supported, no
 * aliases, alias busier than base).
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin cursor, wrapping at the end of the list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
|
||||
|
||||
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */

/*
 * Build and synchronously issue the lcu's pre-allocated "reset summary
 * unit check" request (RSCK CCW, 16 data bytes, first byte = @reason)
 * on @device. Uses the CQR pre-allocated in _allocate_lcu so no memory
 * is needed here. Returns the result of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0 ;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
|
||||
|
||||
/*
 * Kick the block and device bottom halves of every base device on the
 * lcu (active list, inactive list, and all PAV-group base lists) so
 * queued requests are restarted after summary unit check recovery.
 * Alias devices on the active/inactive lists are skipped; the uid type
 * check is done under the cdev lock.
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	unsigned long flags;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (private->uid.type != UA_BASE_DEVICE) {
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (private->uid.type != UA_BASE_DEVICE) {
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	/* baselist entries are base devices by construction: no type check */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
|
||||
|
||||
/*
 * Flush the request queues of all alias devices on the lcu and move
 * them onto the lcu's active_devices list, as part of summary unit
 * check recovery. May sleep in dasd_flush_device_queue.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	/* collect aliases from the active list ... */
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	/* ... and from every PAV group's alias list */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* drop the lock for the (possibly sleeping) flush */
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
|
||||
|
||||
static void __stop_device_on_lcu(struct dasd_device *device,
|
||||
struct dasd_device *pos)
|
||||
{
|
||||
/* If pos == device then device is already locked! */
|
||||
if (pos == device) {
|
||||
dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
|
||||
return;
|
||||
}
|
||||
spin_lock(get_ccwdev_lock(pos->cdev));
|
||||
dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
|
||||
spin_unlock(get_ccwdev_lock(pos->cdev));
|
||||
}
|
||||
|
||||
/*
 * Stop all devices on an LCU by setting the DASD_STOPPED_SU bit on every
 * base and alias device (active, inactive, and grouped lists).
 *
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	/* __stop_device_on_lcu() skips re-locking when pos == device */
	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}
|
||||
|
||||
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
|
||||
{
|
||||
struct alias_pav_group *pavgroup;
|
||||
struct dasd_device *device;
|
||||
unsigned long flags;
|
||||
|
||||
list_for_each_entry(device, &lcu->active_devices, alias_list) {
|
||||
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
|
||||
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
|
||||
}
|
||||
|
||||
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
|
||||
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
|
||||
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
|
||||
}
|
||||
|
||||
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
|
||||
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
|
||||
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
|
||||
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
|
||||
flags);
|
||||
}
|
||||
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
|
||||
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
|
||||
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
|
||||
flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Work queue function that performs the deferred part of summary unit
 * check handling: flush the alias queues, reset the summary unit check
 * on the storage server, restart the base devices, and trigger a re-read
 * of the alias configuration.
 *
 * Scheduled by dasd_alias_handle_summary_unit_check(); suc_data.device
 * is cleared at the end so a new summary unit check can be scheduled.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* unstop this device first so the reset request can be started */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* allow the next summary unit check to be scheduled */
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}
|
||||
|
||||
/*
|
||||
* note: this will be called from int handler context (cdev locked)
|
||||
*/
|
||||
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
|
||||
struct irb *irb)
|
||||
{
|
||||
struct alias_lcu *lcu;
|
||||
char reason;
|
||||
struct dasd_eckd_private *private;
|
||||
char *sense;
|
||||
|
||||
private = (struct dasd_eckd_private *) device->private;
|
||||
|
||||
sense = dasd_get_sense(irb);
|
||||
if (sense) {
|
||||
reason = sense[8];
|
||||
DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
|
||||
"eckd handle summary unit check: reason", reason);
|
||||
} else {
|
||||
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
|
||||
"eckd handle summary unit check:"
|
||||
" no reason code available");
|
||||
return;
|
||||
}
|
||||
|
||||
lcu = private->lcu;
|
||||
if (!lcu) {
|
||||
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
|
||||
"device not ready to handle summary"
|
||||
" unit check (no lcu structure)");
|
||||
return;
|
||||
}
|
||||
spin_lock(&lcu->lock);
|
||||
_stop_all_devices_on_lcu(lcu, device);
|
||||
/* prepare for lcu_update */
|
||||
private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
|
||||
/* If this device is about to be removed just return and wait for
|
||||
* the next interrupt on a different device
|
||||
*/
|
||||
if (list_empty(&device->alias_list)) {
|
||||
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
|
||||
"device is in offline processing,"
|
||||
" don't do summary unit check handling");
|
||||
spin_unlock(&lcu->lock);
|
||||
return;
|
||||
}
|
||||
if (lcu->suc_data.device) {
|
||||
/* already scheduled or running */
|
||||
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
|
||||
"previous instance of summary unit check worker"
|
||||
" still pending");
|
||||
spin_unlock(&lcu->lock);
|
||||
return ;
|
||||
}
|
||||
lcu->suc_data.reason = reason;
|
||||
lcu->suc_data.device = device;
|
||||
spin_unlock(&lcu->lock);
|
||||
schedule_work(&lcu->suc_data.worker);
|
||||
};
|
1553
drivers/s390/block/dasd_devmap.c
Normal file
1553
drivers/s390/block/dasd_devmap.c
Normal file
File diff suppressed because it is too large
Load diff
663
drivers/s390/block/dasd_diag.c
Normal file
663
drivers/s390/block/dasd_diag.c
Normal file
|
@ -0,0 +1,663 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Based on.......: linux/drivers/s390/block/mdisk.c
|
||||
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2000
|
||||
*
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd"
|
||||
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/jiffies.h>
|
||||
|
||||
#include <asm/dasd.h>
|
||||
#include <asm/debug.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/vtoc.h>
|
||||
#include <asm/diag.h>
|
||||
|
||||
#include "dasd_int.h"
|
||||
#include "dasd_diag.h"
|
||||
|
||||
#define PRINTK_HEADER "dasd(diag):"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/* The maximum number of blocks per request (max_blocks) is dependent on the
|
||||
* amount of storage that is available in the static I/O buffer for each
|
||||
* device. Currently each device gets 2 pages. We want to fit two requests
|
||||
* into the available memory so that we can immediately start the next if one
|
||||
* finishes. */
|
||||
#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
|
||||
sizeof(struct dasd_diag_req)) / \
|
||||
sizeof(struct dasd_diag_bio)) / 2)
|
||||
#define DIAG_MAX_RETRIES 32
|
||||
#define DIAG_TIMEOUT 50
|
||||
|
||||
static struct dasd_discipline dasd_diag_discipline;
|
||||
|
||||
struct dasd_diag_private {
|
||||
struct dasd_diag_characteristics rdc_data;
|
||||
struct dasd_diag_rw_io iob;
|
||||
struct dasd_diag_init_io iib;
|
||||
blocknum_t pt_block;
|
||||
struct ccw_dev_id dev_id;
|
||||
};
|
||||
|
||||
struct dasd_diag_req {
|
||||
unsigned int block_count;
|
||||
struct dasd_diag_bio bio[0];
|
||||
};
|
||||
|
||||
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
|
||||
|
||||
/* Perform DIAG250 call with block I/O parameter list iob (input and output)
 * and function code cmd.
 * In case of an exception return 3. Otherwise return result of bitwise OR of
 * resulting condition code and DIAG return code. */
static inline int dia250(void *iob, int cmd)
{
	/* parameter block address must be passed in register 2 */
	register unsigned long reg2 asm ("2") = (unsigned long) iob;
	/* union of both parameter block layouts so the "m" constraints
	 * cover the whole object regardless of which call this is */
	typedef union {
		struct dasd_diag_init_io init_io;
		struct dasd_diag_rw_io rw_io;
	} addr_type;
	int rc;

	/* rc stays 3 if the diagnose raises an exception (EX_TABLE path) */
	rc = 3;
	asm volatile(
		"	diag	2,%2,0x250\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"	or	%0,3\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc), "=m" (*(addr_type *) iob)
		: "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
		: "3", "cc");
	return rc;
}
|
||||
|
||||
/* Initialize block I/O to DIAG device using the specified blocksize and
 * block offset. On success, return zero and set end_block to contain the
 * number of blocks on the device minus the specified offset. Return non-zero
 * otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
	     blocknum_t offset, blocknum_t *end_block)
{
	struct dasd_diag_private *private;
	struct dasd_diag_init_io *iib;
	int rc;

	private = (struct dasd_diag_private *) device->private;
	iib = &private->iib;
	memset(iib, 0, sizeof (struct dasd_diag_init_io));

	iib->dev_nr = private->dev_id.devno;
	iib->block_size = blocksize;
	iib->offset = offset;
	iib->flaga = DASD_DIAG_FLAGA_DEFAULT;

	rc = dia250(iib, INIT_BIO);

	/* condition code bits 0/1 clear means the INIT_BIO succeeded */
	if ((rc & 3) == 0 && end_block)
		*end_block = iib->end_block;

	return rc;
}
|
||||
|
||||
/* Remove block I/O environment for device. Return zero on success, non-zero
|
||||
* otherwise. */
|
||||
static inline int
|
||||
mdsk_term_io(struct dasd_device * device)
|
||||
{
|
||||
struct dasd_diag_private *private;
|
||||
struct dasd_diag_init_io *iib;
|
||||
int rc;
|
||||
|
||||
private = (struct dasd_diag_private *) device->private;
|
||||
iib = &private->iib;
|
||||
memset(iib, 0, sizeof (struct dasd_diag_init_io));
|
||||
iib->dev_nr = private->dev_id.devno;
|
||||
rc = dia250(iib, TERM_BIO);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Error recovery for failed DIAG requests - try to reestablish the DIAG
 * environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
	int rc;

	mdsk_term_io(device);
	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
	/* rc == 4 means the device came back read-only; treat as success
	 * but record the new access mode (warn only on first transition) */
	if (rc == 4) {
		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
			pr_warning("%s: The access mode of a DIAG device "
				   "changed to read-only\n",
				   dev_name(&device->cdev->dev));
		rc = 0;
	}
	if (rc)
		pr_warning("%s: DIAG ERP failed with "
			    "rc=%d\n", dev_name(&device->cdev->dev), rc);
}
|
||||
|
||||
/* Start a given request at the device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	struct dasd_diag_private *private;
	struct dasd_diag_req *dreq;
	int rc;

	device = cqr->startdev;
	/* retry budget exhausted: fail the request instead of starting it */
	if (cqr->retries < 0) {
		DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
			    "- no retry left)", cqr);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	private = (struct dasd_diag_private *) device->private;
	dreq = (struct dasd_diag_req *) cqr->data;

	/* fill the per-device DIAG250 r/w parameter block; the cqr pointer
	 * is passed as interrupt parameter so the ext handler can find it */
	private->iob.dev_nr = private->dev_id.devno;
	private->iob.key = 0;
	private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
	private->iob.block_count = dreq->block_count;
	private->iob.interrupt_params = (addr_t) cqr;
	private->iob.bio_list = dreq->bio;
	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;

	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;

	rc = dia250(&private->iob, RW_BIO);
	switch (rc) {
	case 0: /* Synchronous I/O finished successfully */
		cqr->stopclk = get_tod_clock();
		cqr->status = DASD_CQR_SUCCESS;
		/* Indicate to calling function that only a dasd_schedule_bh()
		   and no timer is needed */
		rc = -EACCES;
		break;
	case 8: /* Asynchronous I/O was started */
		cqr->status = DASD_CQR_IN_IO;
		rc = 0;
		break;
	default: /* Error condition */
		cqr->status = DASD_CQR_QUEUED;
		DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
		dasd_diag_erp(device);
		rc = -EIO;
		break;
	}
	cqr->intrc = rc;
	return rc;
}
|
||||
|
||||
/* Terminate given request at the device.
 *
 * The DIAG environment is torn down and immediately re-established,
 * which cancels all outstanding I/O; the request is marked clear-pending
 * and the device bottom half is scheduled to finish the cleanup.
 */
static int
dasd_diag_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;

	device = cqr->startdev;
	mdsk_term_io(device);
	mdsk_init_io(device, device->block->bp_block, 0, NULL);
	cqr->status = DASD_CQR_CLEAR_PENDING;
	cqr->stopclk = get_tod_clock();
	dasd_schedule_device_bh(device);
	return 0;
}
|
||||
|
||||
/* Handle external interruption.
 *
 * DIAG250 completion is signalled via an external interrupt; the
 * interrupt parameter carries the address of the dasd_ccw_req that
 * completed.  Updates the request status and starts the next queued
 * request (fast path) where possible.
 */
static void dasd_ext_handler(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long expires;
	unsigned long flags;
	addr_t ip;
	int rc;

	/* upper byte of the subcode selects 31- vs 64-bit parameter */
	switch (ext_code.subcode >> 8) {
	case DASD_DIAG_CODE_31BIT:
		ip = (addr_t) param32;
		break;
	case DASD_DIAG_CODE_64BIT:
		ip = (addr_t) param64;
		break;
	default:
		return;
	}
	inc_irq_stat(IRQEXT_DSD);
	if (!ip) {		/* no intparm: unsolicited interrupt */
		DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
			      "interrupt");
		return;
	}
	cqr = (struct dasd_ccw_req *) ip;
	device = (struct dasd_device *) cqr->startdev;
	/* sanity check: the intparm must point at one of our requests */
	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			    " magic number of dasd_ccw_req 0x%08X doesn't"
			    " match discipline 0x%08X",
			    cqr->magic, *(int *) (&device->discipline->name));
		return;
	}

	/* get irq lock to modify request queue */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);

	/* Check for a pending clear operation */
	if (cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return;
	}

	cqr->stopclk = get_tod_clock();

	expires = 0;
	/* low byte of the subcode is the DIAG I/O completion status */
	if ((ext_code.subcode & 0xff) == 0) {
		cqr->status = DASD_CQR_SUCCESS;
		/* Start first request on queue if possible -> fast_io. */
		if (!list_empty(&device->ccw_queue)) {
			next = list_entry(device->ccw_queue.next,
					  struct dasd_ccw_req, devlist);
			if (next->status == DASD_CQR_QUEUED) {
				rc = dasd_start_diag(next);
				if (rc == 0)
					expires = next->expires;
			}
		}
	} else {
		/* completion with error: requeue and run error recovery */
		cqr->status = DASD_CQR_QUEUED;
		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
			      "request %p was %d (%d retries left)", cqr,
			      ext_code.subcode & 0xff, cqr->retries);
		dasd_diag_erp(device);
	}

	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);

	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
|
||||
|
||||
/* Check whether device can be controlled by DIAG discipline. Return zero on
 * success, non-zero otherwise.
 *
 * Allocates the discipline-private data and the dasd_block structure,
 * reads the device characteristics via DIAG210, probes the formatted
 * block size by test-reading the label block, and finally initializes
 * the DIAG block I/O environment.  On any failure all allocations made
 * here are rolled back.
 */
static int
dasd_diag_check_device(struct dasd_device *device)
{
	struct dasd_block *block;
	struct dasd_diag_private *private;
	struct dasd_diag_characteristics *rdc_data;
	struct dasd_diag_bio bio;
	struct vtoc_cms_label *label;
	blocknum_t end_block;
	unsigned int sb, bsize;
	int rc;

	private = (struct dasd_diag_private *) device->private;
	if (private == NULL) {
		private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
		if (private == NULL) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				"Allocating memory for private DASD data "
				      "failed\n");
			return -ENOMEM;
		}
		ccw_device_get_id(device->cdev, &private->dev_id);
		device->private = (void *) private;
	}
	block = dasd_alloc_block();
	if (IS_ERR(block)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "could not allocate dasd block structure");
		device->private = NULL;
		kfree(private);
		return PTR_ERR(block);
	}
	device->block = block;
	block->base = device;

	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	rdc_data->dev_nr = private->dev_id.devno;
	rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);

	rc = diag210((struct diag210 *) rdc_data);
	if (rc) {
		DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
			    "information (rc=%d)", rc);
		rc = -EOPNOTSUPP;
		goto out;
	}

	device->default_expires = DIAG_TIMEOUT;
	device->default_retries = DIAG_MAX_RETRIES;

	/* Figure out position of label block */
	switch (private->rdc_data.vdev_class) {
	case DEV_CLASS_FBA:
		private->pt_block = 1;
		break;
	case DEV_CLASS_ECKD:
		private->pt_block = 2;
		break;
	default:
		pr_warning("%s: Device type %d is not supported "
			   "in DIAG mode\n", dev_name(&device->cdev->dev),
			   private->rdc_data.vdev_class);
		rc = -EOPNOTSUPP;
		goto out;
	}

	DBF_DEV_EVENT(DBF_INFO, device,
		      "%04X: %04X on real %04X/%02X",
		      rdc_data->dev_nr,
		      rdc_data->vdev_type,
		      rdc_data->rdev_type, rdc_data->rdev_model);

	/* terminate all outstanding operations */
	mdsk_term_io(device);

	/* figure out blocksize of device */
	label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
	if (label == NULL)  {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "No memory to allocate initialization request");
		rc = -ENOMEM;
		goto out;
	}
	rc = 0;
	end_block = 0;
	/* try all sizes - needed for ECKD devices */
	for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
		mdsk_init_io(device, bsize, 0, &end_block);
		memset(&bio, 0, sizeof (struct dasd_diag_bio));
		bio.type = MDSK_READ_REQ;
		/* block numbers in the DIAG interface are 1-based */
		bio.block_number = private->pt_block + 1;
		bio.buffer = label;
		memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
		private->iob.dev_nr = rdc_data->dev_nr;
		private->iob.key = 0;
		private->iob.flags = 0;	/* do synchronous io */
		private->iob.block_count = 1;
		private->iob.interrupt_params = 0;
		private->iob.bio_list = &bio;
		private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
		rc = dia250(&private->iob, RW_BIO);
		if (rc == 3) {
			pr_warning("%s: A 64-bit DIAG call failed\n",
				   dev_name(&device->cdev->dev));
			rc = -EOPNOTSUPP;
			goto out_label;
		}
		mdsk_term_io(device);
		if (rc == 0)
			break;
	}
	/* no block size worked: the device is not usable in DIAG mode */
	if (bsize > PAGE_SIZE) {
		pr_warning("%s: Accessing the DASD failed because of an "
			   "incorrect format (rc=%d)\n",
			   dev_name(&device->cdev->dev), rc);
		rc = -EIO;
		goto out_label;
	}
	/* check for label block */
	if (memcmp(label->label_id, DASD_DIAG_CMS1,
		  sizeof(DASD_DIAG_CMS1)) == 0) {
		/* get formatted blocksize from label block */
		bsize = (unsigned int) label->block_size;
		block->blocks = (unsigned long) label->block_count;
	} else
		block->blocks = end_block;
	block->bp_block = bsize;
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < bsize; sb = sb << 1)
		block->s2b_shift++;
	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
	if (rc && (rc != 4)) {
		pr_warning("%s: DIAG initialization failed with rc=%d\n",
			   dev_name(&device->cdev->dev), rc);
		rc = -EIO;
	} else {
		/* rc == 4: device is accessible but read-only */
		if (rc == 4)
			set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
		pr_info("%s: New DASD with %ld byte/block, total size %ld "
			"KB%s\n", dev_name(&device->cdev->dev),
			(unsigned long) block->bp_block,
			(unsigned long) (block->blocks <<
					 block->s2b_shift) >> 1,
			(rc == 4) ? ", read-only device" : "");
		rc = 0;
	}
out_label:
	free_page((long) label);
out:
	if (rc) {
		device->block = NULL;
		dasd_free_block(block);
		device->private = NULL;
		kfree(private);
	}
	return rc;
}
|
||||
|
||||
/* Fill in virtual disk geometry for device. Return zero on success, non-zero
|
||||
* otherwise. */
|
||||
static int
|
||||
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
|
||||
{
|
||||
if (dasd_check_blocksize(block->bp_block) != 0)
|
||||
return -EINVAL;
|
||||
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
|
||||
geo->heads = 16;
|
||||
geo->sectors = 128 >> block->s2b_shift;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Select the error recovery action for a failed request.  DIAG has no
 * discipline-specific recovery, so the generic default action is used.
 */
static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}
|
||||
|
||||
/*
 * Select the post-recovery action; again only the generic default
 * applies for DIAG.
 */
static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
|
||||
|
||||
/* Create DASD request from block device request. Return pointer to new
 * request on success, ERR_PTR otherwise.
 *
 * Translates each full block of every bio segment into one
 * dasd_diag_bio entry; partial blocks are rejected with -EINVAL.
 */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
					       struct dasd_block *block,
					       struct request *req)
{
	struct dasd_ccw_req *cqr;
	struct dasd_diag_req *dreq;
	struct dasd_diag_bio *dbio;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int count, datasize;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char rw_cmd;

	if (rq_data_dir(req) == READ)
		rw_cmd = MDSK_READ_REQ;
	else if (rq_data_dir(req) == WRITE)
		rw_cmd = MDSK_WRITE_REQ;
	else
		return ERR_PTR(-EINVAL);
	blksize = block->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = blk_rq_pos(req) >> block->s2b_shift;
	last_rec =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Fba can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* Build the request */
	datasize = sizeof(struct dasd_diag_req) +
		count*sizeof(struct dasd_diag_bio);
	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
	if (IS_ERR(cqr))
		return cqr;

	dreq = (struct dasd_diag_req *) cqr->data;
	dreq->block_count = count;
	dbio = dreq->bio;
	recid = first_rec;
	/* one dasd_diag_bio per block, pointing straight into page data */
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			memset(dbio, 0, sizeof (struct dasd_diag_bio));
			dbio->type = rw_cmd;
			/* DIAG block numbers are 1-based */
			dbio->block_number = recid + 1;
			dbio->buffer = dst;
			dbio++;
			dst += blksize;
			recid++;
		}
	}
	cqr->retries = memdev->default_retries;
	cqr->buildclk = get_tod_clock();
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = memdev->default_expires * HZ;
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
|
||||
|
||||
/* Release DASD request. Return non-zero if request was successful, zero
|
||||
* otherwise. */
|
||||
static int
|
||||
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
|
||||
{
|
||||
int status;
|
||||
|
||||
status = cqr->status == DASD_CQR_DONE;
|
||||
dasd_sfree_request(cqr, cqr->memdev);
|
||||
return status;
|
||||
}
|
||||
|
||||
static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
|
||||
{
|
||||
if (cqr->retries < 0)
|
||||
cqr->status = DASD_CQR_FAILED;
|
||||
else
|
||||
cqr->status = DASD_CQR_FILLED;
|
||||
};
|
||||
|
||||
/* Fill in IOCTL data for device. */
|
||||
static int
|
||||
dasd_diag_fill_info(struct dasd_device * device,
|
||||
struct dasd_information2_t * info)
|
||||
{
|
||||
struct dasd_diag_private *private;
|
||||
|
||||
private = (struct dasd_diag_private *) device->private;
|
||||
info->label_block = (unsigned int) private->pt_block;
|
||||
info->FBA_layout = 1;
|
||||
info->format = DASD_FORMAT_LDL;
|
||||
info->characteristics_size = sizeof (struct dasd_diag_characteristics);
|
||||
memcpy(info->characteristics,
|
||||
&((struct dasd_diag_private *) device->private)->rdc_data,
|
||||
sizeof (struct dasd_diag_characteristics));
|
||||
info->confdata_size = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Dump sense data for a failed request.  The DIAG interface delivers no
 * sense information, so only a debug-feature note is emitted.
 */
static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		     struct irb *stat)
{
	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
		    "dump sense not available for DIAG data");
}
|
||||
|
||||
/*
 * Discipline descriptor hooking the DIAG implementation into the
 * generic DASD driver core.  ebcname is converted to EBCDIC at module
 * init and is used to validate request ownership in the ext handler.
 */
static struct dasd_discipline dasd_diag_discipline = {
	.owner = THIS_MODULE,
	.name = "DIAG",
	.ebcname = "DIAG",
	.max_blocks = DIAG_MAX_BLOCKS,
	.check_device = dasd_diag_check_device,
	.verify_path = dasd_generic_verify_path,
	.fill_geometry = dasd_diag_fill_geometry,
	.start_IO = dasd_start_diag,
	.term_IO = dasd_diag_term_IO,
	.handle_terminated_request = dasd_diag_handle_terminated_request,
	.erp_action = dasd_diag_erp_action,
	.erp_postaction = dasd_diag_erp_postaction,
	.build_cp = dasd_diag_build_cp,
	.free_cp = dasd_diag_free_cp,
	.dump_sense = dasd_diag_dump_sense,
	.fill_info = dasd_diag_fill_info,
};
|
||||
|
||||
/*
 * Module init: DIAG devices only exist under z/VM, so bail out early on
 * other hypervisors/LPAR.  Registers the external-interrupt handler for
 * DIAG250 completions and publishes the discipline.
 */
static int __init
dasd_diag_init(void)
{
	if (!MACHINE_IS_VM) {
		pr_info("Discipline %s cannot be used without z/VM\n",
			dasd_diag_discipline.name);
		return -ENODEV;
	}
	/* the ext handler compares this name in EBCDIC against cqr->magic */
	ASCEBC(dasd_diag_discipline.ebcname, 4);

	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
	dasd_diag_discipline_pointer = &dasd_diag_discipline;
	return 0;
}
|
||||
|
||||
/*
 * Module exit: undo everything dasd_diag_init() registered, in reverse
 * order.
 */
static void __exit
dasd_diag_cleanup(void)
{
	unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
	dasd_diag_discipline_pointer = NULL;
}
|
||||
|
||||
module_init(dasd_diag_init);
|
||||
module_exit(dasd_diag_cleanup);
|
122
drivers/s390/block/dasd_diag.h
Normal file
122
drivers/s390/block/dasd_diag.h
Normal file
|
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Based on.......: linux/drivers/s390/block/mdisk.h
|
||||
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2000
|
||||
*
|
||||
*/
|
||||
|
||||
/* DIAG250 request types (dasd_diag_bio.type) */
#define MDSK_WRITE_REQ 0x01
#define MDSK_READ_REQ 0x02

/* DIAG250 function codes (cmd argument of dia250()) */
#define INIT_BIO 0x00
#define RW_BIO 0x01
#define TERM_BIO 0x02

/* virtual device classes reported by DIAG210 */
#define DEV_CLASS_FBA 0x01
#define DEV_CLASS_ECKD 0x04

/* external-interrupt subcode selectors for 31-/64-bit intparm delivery */
#define DASD_DIAG_CODE_31BIT 0x03
#define DASD_DIAG_CODE_64BIT 0x07

/* dasd_diag_rw_io.flags bits */
#define DASD_DIAG_RWFLAG_ASYNC 0x02
#define DASD_DIAG_RWFLAG_NOCACHE 0x01

/* flaga bit: request 64-bit parameter block format */
#define DASD_DIAG_FLAGA_FORMAT_64BIT 0x80

/*
 * Device characteristics as returned by DIAG210; the layout is fixed
 * by the hypervisor interface (packed, 4-byte aligned).
 */
struct dasd_diag_characteristics {
	u16 dev_nr;		/* virtual device number (input) */
	u16 rdc_len;		/* length of this structure (input) */
	u8 vdev_class;		/* virtual device class (DEV_CLASS_*) */
	u8 vdev_type;
	u8 vdev_status;
	u8 vdev_flags;
	u8 rdev_class;		/* underlying real device description */
	u8 rdev_type;
	u8 rdev_model;
	u8 rdev_features;
} __attribute__ ((packed, aligned(4)));
|
||||
|
||||
|
||||
#ifdef CONFIG_64BIT
/* 64-bit kernels always request the 64-bit parameter block format */
#define DASD_DIAG_FLAGA_DEFAULT		DASD_DIAG_FLAGA_FORMAT_64BIT

typedef u64 blocknum_t;
typedef s64 sblocknum_t;

/* one block I/O entry of a DIAG250 RW_BIO request (64-bit format) */
struct dasd_diag_bio {
	u8 type;		/* MDSK_READ_REQ or MDSK_WRITE_REQ */
	u8 status;		/* per-block completion status (output) */
	u8 spare1[2];
	u32 alet;
	blocknum_t block_number;	/* 1-based block number */
	void *buffer;		/* data buffer, one block in size */
} __attribute__ ((packed, aligned(8)));

/* parameter block for INIT_BIO/TERM_BIO (64-bit format) */
struct dasd_diag_init_io {
	u16 dev_nr;
	u8 flaga;
	u8 spare1[21];
	u32 block_size;
	u8 spare2[4];
	blocknum_t offset;
	sblocknum_t start_block;
	blocknum_t end_block;	/* device size in blocks (output) */
	u8 spare3[8];
} __attribute__ ((packed, aligned(8)));

/* parameter block for RW_BIO (64-bit format) */
struct dasd_diag_rw_io {
	u16 dev_nr;
	u8 flaga;
	u8 spare1[21];
	u8 key;
	u8 flags;		/* DASD_DIAG_RWFLAG_* */
	u8 spare2[2];
	u32 block_count;
	u32 alet;
	u8 spare3[4];
	u64 interrupt_params;	/* returned in the completion interrupt */
	struct dasd_diag_bio *bio_list;
	u8 spare4[8];
} __attribute__ ((packed, aligned(8)));
#else /* CONFIG_64BIT */
#define DASD_DIAG_FLAGA_DEFAULT		0x0

typedef u32 blocknum_t;
typedef s32 sblocknum_t;

/* one block I/O entry of a DIAG250 RW_BIO request (31-bit format) */
struct dasd_diag_bio {
	u8 type;		/* MDSK_READ_REQ or MDSK_WRITE_REQ */
	u8 status;		/* per-block completion status (output) */
	u16 spare1;
	blocknum_t block_number;	/* 1-based block number */
	u32 alet;
	void *buffer;		/* data buffer, one block in size */
} __attribute__ ((packed, aligned(8)));

/* parameter block for INIT_BIO/TERM_BIO (31-bit format) */
struct dasd_diag_init_io {
	u16 dev_nr;
	u8 flaga;
	u8 spare1[21];
	u32 block_size;
	blocknum_t offset;
	sblocknum_t start_block;
	blocknum_t end_block;	/* device size in blocks (output) */
	u8 spare2[24];
} __attribute__ ((packed, aligned(8)));

/* parameter block for RW_BIO (31-bit format) */
struct dasd_diag_rw_io {
	u16 dev_nr;
	u8 flaga;
	u8 spare1[21];
	u8 key;
	u8 flags;		/* DASD_DIAG_RWFLAG_* */
	u8 spare2[2];
	u32 block_count;
	u32 alet;
	struct dasd_diag_bio *bio_list;
	u32 interrupt_params;	/* returned in the completion interrupt */
	u8 spare3[20];
} __attribute__ ((packed, aligned(8)));
#endif /* CONFIG_64BIT */
|
4955
drivers/s390/block/dasd_eckd.c
Normal file
4955
drivers/s390/block/dasd_eckd.c
Normal file
File diff suppressed because it is too large
Load diff
533
drivers/s390/block/dasd_eckd.h
Normal file
533
drivers/s390/block/dasd_eckd.h
Normal file
|
@ -0,0 +1,533 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Horst Hummel <Horst.Hummel@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2000
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef DASD_ECKD_H
|
||||
#define DASD_ECKD_H
|
||||
|
||||
/*****************************************************************************
|
||||
* SECTION: CCW Definitions
|
||||
****************************************************************************/
|
||||
#define DASD_ECKD_CCW_WRITE 0x05
|
||||
#define DASD_ECKD_CCW_READ 0x06
|
||||
#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
|
||||
#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
|
||||
#define DASD_ECKD_CCW_WRITE_KD 0x0d
|
||||
#define DASD_ECKD_CCW_READ_KD 0x0e
|
||||
#define DASD_ECKD_CCW_ERASE 0x11
|
||||
#define DASD_ECKD_CCW_READ_COUNT 0x12
|
||||
#define DASD_ECKD_CCW_SLCK 0x14
|
||||
#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
|
||||
#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
|
||||
#define DASD_ECKD_CCW_WRITE_CKD 0x1d
|
||||
#define DASD_ECKD_CCW_READ_CKD 0x1e
|
||||
#define DASD_ECKD_CCW_PSF 0x27
|
||||
#define DASD_ECKD_CCW_SNID 0x34
|
||||
#define DASD_ECKD_CCW_RSSD 0x3e
|
||||
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
|
||||
#define DASD_ECKD_CCW_SNSS 0x54
|
||||
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
|
||||
#define DASD_ECKD_CCW_WRITE_MT 0x85
|
||||
#define DASD_ECKD_CCW_READ_MT 0x86
|
||||
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
|
||||
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
|
||||
#define DASD_ECKD_CCW_RELEASE 0x94
|
||||
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
|
||||
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
|
||||
#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
|
||||
#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
|
||||
#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
|
||||
#define DASD_ECKD_CCW_RESERVE 0xB4
|
||||
#define DASD_ECKD_CCW_READ_TRACK 0xDE
|
||||
#define DASD_ECKD_CCW_PFX 0xE7
|
||||
#define DASD_ECKD_CCW_PFX_READ 0xEA
|
||||
#define DASD_ECKD_CCW_RSCK 0xF9
|
||||
#define DASD_ECKD_CCW_RCD 0xFA
|
||||
|
||||
/*
|
||||
* Perform Subsystem Function / Sub-Orders
|
||||
*/
|
||||
#define PSF_ORDER_PRSSD 0x18
|
||||
#define PSF_ORDER_CUIR_RESPONSE 0x1A
|
||||
#define PSF_ORDER_SSC 0x1D
|
||||
|
||||
/*
|
||||
* CUIR response condition codes
|
||||
*/
|
||||
#define PSF_CUIR_INVALID 0x00
|
||||
#define PSF_CUIR_COMPLETED 0x01
|
||||
#define PSF_CUIR_NOT_SUPPORTED 0x02
|
||||
#define PSF_CUIR_ERROR_IN_REQ 0x03
|
||||
#define PSF_CUIR_DENIED 0x04
|
||||
#define PSF_CUIR_LAST_PATH 0x05
|
||||
#define PSF_CUIR_DEVICE_ONLINE 0x06
|
||||
#define PSF_CUIR_VARY_FAILURE 0x07
|
||||
#define PSF_CUIR_SOFTWARE_FAILURE 0x08
|
||||
#define PSF_CUIR_NOT_RECOGNIZED 0x09
|
||||
|
||||
/*
|
||||
* CUIR codes
|
||||
*/
|
||||
#define CUIR_QUIESCE 0x01
|
||||
#define CUIR_RESUME 0x02
|
||||
|
||||
/*
|
||||
* attention message definitions
|
||||
*/
|
||||
#define ATTENTION_LENGTH_CUIR 0x0e
|
||||
#define ATTENTION_FORMAT_CUIR 0x01
|
||||
|
||||
/*
|
||||
 * Size that is reported for large volumes in the old 16-bit no_cyl field
|
||||
*/
|
||||
#define LV_COMPAT_CYL 0xFFFE
|
||||
|
||||
|
||||
#define FCX_MAX_DATA_FACTOR 65536
|
||||
#define DASD_ECKD_RCD_DATA_SIZE 256
|
||||
|
||||
|
||||
/*****************************************************************************
|
||||
* SECTION: Type Definitions
|
||||
****************************************************************************/
|
||||
|
||||
struct eckd_count {
|
||||
__u16 cyl;
|
||||
__u16 head;
|
||||
__u8 record;
|
||||
__u8 kl;
|
||||
__u16 dl;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct ch_t {
|
||||
__u16 cyl;
|
||||
__u16 head;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct chs_t {
|
||||
__u16 cyl;
|
||||
__u16 head;
|
||||
__u32 sector;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct chr_t {
|
||||
__u16 cyl;
|
||||
__u16 head;
|
||||
__u8 record;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct geom_t {
|
||||
__u16 cyl;
|
||||
__u16 head;
|
||||
__u32 sector;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct eckd_home {
|
||||
__u8 skip_control[14];
|
||||
__u16 cell_number;
|
||||
__u8 physical_addr[3];
|
||||
__u8 flag;
|
||||
struct ch_t track_addr;
|
||||
__u8 reserved;
|
||||
__u8 key_length;
|
||||
__u8 reserved2[2];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct DE_eckd_data {
|
||||
struct {
|
||||
unsigned char perm:2; /* Permissions on this extent */
|
||||
unsigned char reserved:1;
|
||||
unsigned char seek:2; /* Seek control */
|
||||
unsigned char auth:2; /* Access authorization */
|
||||
unsigned char pci:1; /* PCI Fetch mode */
|
||||
} __attribute__ ((packed)) mask;
|
||||
struct {
|
||||
unsigned char mode:2; /* Architecture mode */
|
||||
unsigned char ckd:1; /* CKD Conversion */
|
||||
unsigned char operation:3; /* Operation mode */
|
||||
unsigned char cfw:1; /* Cache fast write */
|
||||
unsigned char dfw:1; /* DASD fast write */
|
||||
} __attribute__ ((packed)) attributes;
|
||||
__u16 blk_size; /* Blocksize */
|
||||
__u16 fast_write_id;
|
||||
__u8 ga_additional; /* Global Attributes Additional */
|
||||
__u8 ga_extended; /* Global Attributes Extended */
|
||||
struct ch_t beg_ext;
|
||||
struct ch_t end_ext;
|
||||
unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
|
||||
__u8 ep_format; /* Extended Parameter format byte */
|
||||
__u8 ep_prio; /* Extended Parameter priority I/O byte */
|
||||
__u8 ep_reserved1; /* Extended Parameter Reserved */
|
||||
__u8 ep_rec_per_track; /* Number of records on a track */
|
||||
__u8 ep_reserved[4]; /* Extended Parameter Reserved */
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct LO_eckd_data {
|
||||
struct {
|
||||
unsigned char orientation:2;
|
||||
unsigned char operation:6;
|
||||
} __attribute__ ((packed)) operation;
|
||||
struct {
|
||||
unsigned char last_bytes_used:1;
|
||||
unsigned char reserved:6;
|
||||
unsigned char read_count_suffix:1;
|
||||
} __attribute__ ((packed)) auxiliary;
|
||||
__u8 unused;
|
||||
__u8 count;
|
||||
struct ch_t seek_addr;
|
||||
struct chr_t search_arg;
|
||||
__u8 sector;
|
||||
__u16 length;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct LRE_eckd_data {
|
||||
struct {
|
||||
unsigned char orientation:2;
|
||||
unsigned char operation:6;
|
||||
} __attribute__ ((packed)) operation;
|
||||
struct {
|
||||
unsigned char length_valid:1;
|
||||
unsigned char length_scope:1;
|
||||
unsigned char imbedded_ccw_valid:1;
|
||||
unsigned char check_bytes:2;
|
||||
unsigned char imbedded_count_valid:1;
|
||||
unsigned char reserved:1;
|
||||
unsigned char read_count_suffix:1;
|
||||
} __attribute__ ((packed)) auxiliary;
|
||||
__u8 imbedded_ccw;
|
||||
__u8 count;
|
||||
struct ch_t seek_addr;
|
||||
struct chr_t search_arg;
|
||||
__u8 sector;
|
||||
__u16 length;
|
||||
__u8 imbedded_count;
|
||||
__u8 extended_operation;
|
||||
__u16 extended_parameter_length;
|
||||
__u8 extended_parameter[0];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/* Prefix data for format 0x00 and 0x01 */
|
||||
struct PFX_eckd_data {
|
||||
unsigned char format;
|
||||
struct {
|
||||
unsigned char define_extent:1;
|
||||
unsigned char time_stamp:1;
|
||||
unsigned char verify_base:1;
|
||||
unsigned char hyper_pav:1;
|
||||
unsigned char reserved:4;
|
||||
} __attribute__ ((packed)) validity;
|
||||
__u8 base_address;
|
||||
__u8 aux;
|
||||
__u8 base_lss;
|
||||
__u8 reserved[7];
|
||||
struct DE_eckd_data define_extent;
|
||||
struct LRE_eckd_data locate_record;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct dasd_eckd_characteristics {
|
||||
__u16 cu_type;
|
||||
struct {
|
||||
unsigned char support:2;
|
||||
unsigned char async:1;
|
||||
unsigned char reserved:1;
|
||||
unsigned char cache_info:1;
|
||||
unsigned char model:3;
|
||||
} __attribute__ ((packed)) cu_model;
|
||||
__u16 dev_type;
|
||||
__u8 dev_model;
|
||||
struct {
|
||||
unsigned char mult_burst:1;
|
||||
unsigned char RT_in_LR:1;
|
||||
unsigned char reserved1:1;
|
||||
unsigned char RD_IN_LR:1;
|
||||
unsigned char reserved2:4;
|
||||
unsigned char reserved3:8;
|
||||
unsigned char defect_wr:1;
|
||||
unsigned char XRC_supported:1;
|
||||
unsigned char reserved4:1;
|
||||
unsigned char striping:1;
|
||||
unsigned char reserved5:4;
|
||||
unsigned char cfw:1;
|
||||
unsigned char reserved6:2;
|
||||
unsigned char cache:1;
|
||||
unsigned char dual_copy:1;
|
||||
unsigned char dfw:1;
|
||||
unsigned char reset_alleg:1;
|
||||
unsigned char sense_down:1;
|
||||
} __attribute__ ((packed)) facilities;
|
||||
__u8 dev_class;
|
||||
__u8 unit_type;
|
||||
__u16 no_cyl;
|
||||
__u16 trk_per_cyl;
|
||||
__u8 sec_per_trk;
|
||||
__u8 byte_per_track[3];
|
||||
__u16 home_bytes;
|
||||
__u8 formula;
|
||||
union {
|
||||
struct {
|
||||
__u8 f1;
|
||||
__u16 f2;
|
||||
__u16 f3;
|
||||
} __attribute__ ((packed)) f_0x01;
|
||||
struct {
|
||||
__u8 f1;
|
||||
__u8 f2;
|
||||
__u8 f3;
|
||||
__u8 f4;
|
||||
__u8 f5;
|
||||
} __attribute__ ((packed)) f_0x02;
|
||||
} __attribute__ ((packed)) factors;
|
||||
__u16 first_alt_trk;
|
||||
__u16 no_alt_trk;
|
||||
__u16 first_dia_trk;
|
||||
__u16 no_dia_trk;
|
||||
__u16 first_sup_trk;
|
||||
__u16 no_sup_trk;
|
||||
__u8 MDR_ID;
|
||||
__u8 OBR_ID;
|
||||
__u8 director;
|
||||
__u8 rd_trk_set;
|
||||
__u16 max_rec_zero;
|
||||
__u8 reserved1;
|
||||
__u8 RWANY_in_LR;
|
||||
__u8 factor6;
|
||||
__u8 factor7;
|
||||
__u8 factor8;
|
||||
__u8 reserved2[3];
|
||||
__u8 reserved3[6];
|
||||
__u32 long_no_cyl;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/* elements of the configuration data */
|
||||
struct dasd_ned {
|
||||
struct {
|
||||
__u8 identifier:2;
|
||||
__u8 token_id:1;
|
||||
__u8 sno_valid:1;
|
||||
__u8 subst_sno:1;
|
||||
__u8 recNED:1;
|
||||
__u8 emuNED:1;
|
||||
__u8 reserved:1;
|
||||
} __attribute__ ((packed)) flags;
|
||||
__u8 descriptor;
|
||||
__u8 dev_class;
|
||||
__u8 reserved;
|
||||
__u8 dev_type[6];
|
||||
__u8 dev_model[3];
|
||||
__u8 HDA_manufacturer[3];
|
||||
__u8 HDA_location[2];
|
||||
__u8 HDA_seqno[12];
|
||||
__u8 ID;
|
||||
__u8 unit_addr;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct dasd_sneq {
|
||||
struct {
|
||||
__u8 identifier:2;
|
||||
__u8 reserved:6;
|
||||
} __attribute__ ((packed)) flags;
|
||||
__u8 res1;
|
||||
__u16 format;
|
||||
__u8 res2[4]; /* byte 4- 7 */
|
||||
__u8 sua_flags; /* byte 8 */
|
||||
__u8 base_unit_addr; /* byte 9 */
|
||||
__u8 res3[22]; /* byte 10-31 */
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct vd_sneq {
|
||||
struct {
|
||||
__u8 identifier:2;
|
||||
__u8 reserved:6;
|
||||
} __attribute__ ((packed)) flags;
|
||||
__u8 res1;
|
||||
__u16 format;
|
||||
__u8 res2[4]; /* byte 4- 7 */
|
||||
__u8 uit[16]; /* byte 8-23 */
|
||||
__u8 res3[8]; /* byte 24-31 */
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct dasd_gneq {
|
||||
struct {
|
||||
__u8 identifier:2;
|
||||
__u8 reserved:6;
|
||||
} __attribute__ ((packed)) flags;
|
||||
__u8 reserved[5];
|
||||
struct {
|
||||
__u8 value:2;
|
||||
__u8 number:6;
|
||||
} __attribute__ ((packed)) timeout;
|
||||
__u8 reserved3;
|
||||
__u16 subsystemID;
|
||||
__u8 reserved2[22];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct dasd_rssd_features {
|
||||
char feature[256];
|
||||
} __attribute__((packed));
|
||||
|
||||
struct dasd_rssd_messages {
|
||||
__u16 length;
|
||||
__u8 format;
|
||||
__u8 code;
|
||||
__u32 message_id;
|
||||
__u8 flags;
|
||||
char messages[4087];
|
||||
} __packed;
|
||||
|
||||
struct dasd_cuir_message {
|
||||
__u16 length;
|
||||
__u8 format;
|
||||
__u8 code;
|
||||
__u32 message_id;
|
||||
__u8 flags;
|
||||
__u8 neq_map[3];
|
||||
__u8 ned_map;
|
||||
__u8 record_selector;
|
||||
} __packed;
|
||||
|
||||
struct dasd_psf_cuir_response {
|
||||
__u8 order;
|
||||
__u8 flags;
|
||||
__u8 cc;
|
||||
__u8 chpid;
|
||||
__u16 device_nr;
|
||||
__u16 reserved;
|
||||
__u32 message_id;
|
||||
__u64 system_id;
|
||||
__u8 cssid;
|
||||
__u8 ssid;
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* Perform Subsystem Function - Prepare for Read Subsystem Data
|
||||
*/
|
||||
/*
 * Perform Subsystem Function - Prepare for Read Subsystem Data
 */
struct dasd_psf_prssd_data {
	unsigned char order;
	unsigned char flags;
	unsigned char reserved[4];
	unsigned char suborder;
	unsigned char varies[5];
} __attribute__ ((packed));

/*
 * Perform Subsystem Function - Set Subsystem Characteristics
 */
struct dasd_psf_ssc_data {
	unsigned char order;
	unsigned char flags;
	unsigned char cu_type[4];
	unsigned char suborder;
	unsigned char reserved[59];
} __attribute__((packed));


/*
 * some structures and definitions for alias handling
 */
struct dasd_unit_address_configuration {
	struct {
		char ua_type;
		char base_ua;
	} unit[256];
} __attribute__((packed));
|
||||
|
||||
|
||||
#define MAX_DEVICES_PER_LCU 256
|
||||
|
||||
/* flags on the LCU */
|
||||
#define NEED_UAC_UPDATE 0x01
|
||||
#define UPDATE_PENDING 0x02
|
||||
|
||||
enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
|
||||
|
||||
|
||||
struct alias_root {
|
||||
struct list_head serverlist;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct alias_server {
|
||||
struct list_head server;
|
||||
struct dasd_uid uid;
|
||||
struct list_head lculist;
|
||||
};
|
||||
|
||||
struct summary_unit_check_work_data {
|
||||
char reason;
|
||||
struct dasd_device *device;
|
||||
struct work_struct worker;
|
||||
};
|
||||
|
||||
struct read_uac_work_data {
|
||||
struct dasd_device *device;
|
||||
struct delayed_work dwork;
|
||||
};
|
||||
|
||||
struct alias_lcu {
|
||||
struct list_head lcu;
|
||||
struct dasd_uid uid;
|
||||
enum pavtype pav;
|
||||
char flags;
|
||||
spinlock_t lock;
|
||||
struct list_head grouplist;
|
||||
struct list_head active_devices;
|
||||
struct list_head inactive_devices;
|
||||
struct dasd_unit_address_configuration *uac;
|
||||
struct summary_unit_check_work_data suc_data;
|
||||
struct read_uac_work_data ruac_data;
|
||||
struct dasd_ccw_req *rsu_cqr;
|
||||
struct completion lcu_setup;
|
||||
};
|
||||
|
||||
struct alias_pav_group {
|
||||
struct list_head group;
|
||||
struct dasd_uid uid;
|
||||
struct alias_lcu *lcu;
|
||||
struct list_head baselist;
|
||||
struct list_head aliaslist;
|
||||
struct dasd_device *next;
|
||||
};
|
||||
|
||||
struct dasd_eckd_private {
|
||||
struct dasd_eckd_characteristics rdc_data;
|
||||
u8 *conf_data;
|
||||
int conf_len;
|
||||
/* pointers to specific parts in the conf_data */
|
||||
struct dasd_ned *ned;
|
||||
struct dasd_sneq *sneq;
|
||||
struct vd_sneq *vdsneq;
|
||||
struct dasd_gneq *gneq;
|
||||
|
||||
struct eckd_count count_area[5];
|
||||
int init_cqr_status;
|
||||
int uses_cdl;
|
||||
struct attrib_data_t attrib; /* e.g. cache operations */
|
||||
struct dasd_rssd_features features;
|
||||
u32 real_cyl;
|
||||
|
||||
/* alias managemnet */
|
||||
struct dasd_uid uid;
|
||||
struct alias_pav_group *pavgroup;
|
||||
struct alias_lcu *lcu;
|
||||
int count;
|
||||
|
||||
u32 fcx_max_data;
|
||||
};
|
||||
|
||||
|
||||
|
||||
int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
|
||||
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
|
||||
int dasd_alias_add_device(struct dasd_device *);
|
||||
int dasd_alias_remove_device(struct dasd_device *);
|
||||
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
|
||||
void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
|
||||
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
|
||||
void dasd_alias_lcu_setup_complete(struct dasd_device *);
|
||||
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
|
||||
int dasd_alias_update_add_device(struct dasd_device *);
|
||||
#endif /* DASD_ECKD_H */
|
709
drivers/s390/block/dasd_eer.c
Normal file
709
drivers/s390/block/dasd_eer.c
Normal file
|
@ -0,0 +1,709 @@
|
|||
/*
|
||||
* Character device driver for extended error reporting.
|
||||
*
|
||||
* Copyright IBM Corp. 2005
|
||||
* extended error reporting for DASD ECKD devices
|
||||
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd-eckd"
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/ebcdic.h>
|
||||
|
||||
#include "dasd_int.h"
|
||||
#include "dasd_eckd.h"
|
||||
|
||||
#ifdef PRINTK_HEADER
|
||||
#undef PRINTK_HEADER
|
||||
#endif /* PRINTK_HEADER */
|
||||
#define PRINTK_HEADER "dasd(eer):"
|
||||
|
||||
/*
|
||||
* SECTION: the internal buffer
|
||||
*/
|
||||
|
||||
/*
|
||||
 * The internal buffer is meant to store opaque blobs of data, so it does
|
||||
* not know of higher level concepts like triggers.
|
||||
* It consists of a number of pages that are used as a ringbuffer. Each data
|
||||
* blob is stored in a simple record that consists of an integer, which
|
||||
 * contains the size of the following data, and the data bytes themselves.
|
||||
*
|
||||
* To allow for multiple independent readers we create one internal buffer
|
||||
* each time the device is opened and destroy the buffer when the file is
|
||||
* closed again. The number of pages used for this buffer is determined by
|
||||
 * the module parameter eer_pages.
|
||||
*
|
||||
* One record can be written to a buffer by using the functions
|
||||
* - dasd_eer_start_record (one time per record to write the size to the
|
||||
* buffer and reserve the space for the data)
|
||||
* - dasd_eer_write_buffer (one or more times per record to write the data)
|
||||
* The data can be written in several steps but you will have to compute
|
||||
* the total size up front for the invocation of dasd_eer_start_record.
|
||||
* If the ringbuffer is full, dasd_eer_start_record will remove the required
|
||||
* number of old records.
|
||||
*
|
||||
* A record is typically read in two steps, first read the integer that
|
||||
* specifies the size of the following data, then read the data.
|
||||
* Both can be done by
|
||||
* - dasd_eer_read_buffer
|
||||
*
|
||||
* For all mentioned functions you need to get the bufferlock first and keep
|
||||
* it until a complete record is written or read.
|
||||
*
|
||||
* All information necessary to keep track of an internal buffer is kept in
|
||||
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
|
||||
* the private_data field of that file. To be able to write data to all
|
||||
* existing buffers, each buffer is also added to the bufferlist.
|
||||
* If the user does not want to read a complete record in one go, we have to
|
||||
* keep track of the rest of the record. residual stores the number of bytes
|
||||
* that are still to deliver. If the rest of the record is invalidated between
|
||||
* two reads then residual will be set to -1 so that the next read will fail.
|
||||
* All entries in the eerbuffer structure are protected with the bufferlock.
|
||||
* To avoid races between writing to a buffer on the one side and creating
|
||||
* and destroying buffers on the other side, the bufferlock must also be used
|
||||
* to protect the bufferlist.
|
||||
*/
|
||||
|
||||
static int eer_pages = 5;
|
||||
module_param(eer_pages, int, S_IRUGO|S_IWUSR);
|
||||
|
||||
struct eerbuffer {
|
||||
struct list_head list;
|
||||
char **buffer;
|
||||
int buffersize;
|
||||
int buffer_page_count;
|
||||
int head;
|
||||
int tail;
|
||||
int residual;
|
||||
};
|
||||
|
||||
static LIST_HEAD(bufferlist);
|
||||
static DEFINE_SPINLOCK(bufferlock);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
|
||||
|
||||
/*
|
||||
* How many free bytes are available on the buffer.
|
||||
* Needs to be called with bufferlock held.
|
||||
*/
|
||||
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
|
||||
{
|
||||
if (eerb->head < eerb->tail)
|
||||
return eerb->tail - eerb->head - 1;
|
||||
return eerb->buffersize - eerb->head + eerb->tail -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* How many bytes of buffer space are used.
|
||||
* Needs to be called with bufferlock held.
|
||||
*/
|
||||
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
|
||||
{
|
||||
|
||||
if (eerb->head >= eerb->tail)
|
||||
return eerb->head - eerb->tail;
|
||||
return eerb->buffersize - eerb->tail + eerb->head;
|
||||
}
|
||||
|
||||
/*
|
||||
* The dasd_eer_write_buffer function just copies count bytes of data
|
||||
* to the buffer. Make sure to call dasd_eer_start_record first, to
|
||||
* make sure that enough free space is available.
|
||||
* Needs to be called with bufferlock held.
|
||||
*/
|
||||
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
|
||||
char *data, int count)
|
||||
{
|
||||
|
||||
unsigned long headindex,localhead;
|
||||
unsigned long rest, len;
|
||||
char *nextdata;
|
||||
|
||||
nextdata = data;
|
||||
rest = count;
|
||||
while (rest > 0) {
|
||||
headindex = eerb->head / PAGE_SIZE;
|
||||
localhead = eerb->head % PAGE_SIZE;
|
||||
len = min(rest, PAGE_SIZE - localhead);
|
||||
memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
|
||||
nextdata += len;
|
||||
rest -= len;
|
||||
eerb->head += len;
|
||||
if (eerb->head == eerb->buffersize)
|
||||
eerb->head = 0; /* wrap around */
|
||||
BUG_ON(eerb->head > eerb->buffersize);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Needs to be called with bufferlock held.
|
||||
*/
|
||||
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
|
||||
{
|
||||
|
||||
unsigned long tailindex,localtail;
|
||||
unsigned long rest, len, finalcount;
|
||||
char *nextdata;
|
||||
|
||||
finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
|
||||
nextdata = data;
|
||||
rest = finalcount;
|
||||
while (rest > 0) {
|
||||
tailindex = eerb->tail / PAGE_SIZE;
|
||||
localtail = eerb->tail % PAGE_SIZE;
|
||||
len = min(rest, PAGE_SIZE - localtail);
|
||||
memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
|
||||
nextdata += len;
|
||||
rest -= len;
|
||||
eerb->tail += len;
|
||||
if (eerb->tail == eerb->buffersize)
|
||||
eerb->tail = 0; /* wrap around */
|
||||
BUG_ON(eerb->tail > eerb->buffersize);
|
||||
}
|
||||
return finalcount;
|
||||
}
|
||||
|
||||
/*
|
||||
* Whenever you want to write a blob of data to the internal buffer you
|
||||
* have to start by using this function first. It will write the number
|
||||
* of bytes that will be written to the buffer. If necessary it will remove
|
||||
* old records to make room for the new one.
|
||||
* Needs to be called with bufferlock held.
|
||||
*/
|
||||
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
|
||||
{
|
||||
int tailcount;
|
||||
|
||||
if (count + sizeof(count) > eerb->buffersize)
|
||||
return -ENOMEM;
|
||||
while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
|
||||
if (eerb->residual > 0) {
|
||||
eerb->tail += eerb->residual;
|
||||
if (eerb->tail >= eerb->buffersize)
|
||||
eerb->tail -= eerb->buffersize;
|
||||
eerb->residual = -1;
|
||||
}
|
||||
dasd_eer_read_buffer(eerb, (char *) &tailcount,
|
||||
sizeof(tailcount));
|
||||
eerb->tail += tailcount;
|
||||
if (eerb->tail >= eerb->buffersize)
|
||||
eerb->tail -= eerb->buffersize;
|
||||
}
|
||||
dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count));
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
/*
|
||||
* Release pages that are not used anymore.
|
||||
*/
|
||||
/*
 * Release the pages backing an internal buffer.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int idx;

	for (idx = 0; idx < no_pages; idx++)
		free_page((unsigned long) buf[idx]);
}
|
||||
|
||||
/*
|
||||
* Allocate a new set of memory pages.
|
||||
*/
|
||||
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < no_pages; i++) {
|
||||
buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
|
||||
if (!buf[i]) {
|
||||
dasd_eer_free_buffer_pages(buf, i);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* SECTION: The extended error reporting functionality
|
||||
*/
|
||||
|
||||
/*
|
||||
* When a DASD device driver wants to report an error, it calls the
|
||||
* function dasd_eer_write and gives the respective trigger ID as
|
||||
* parameter. Currently there are four kinds of triggers:
|
||||
*
|
||||
* DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
|
||||
* DASD_EER_PPRCSUSPEND: PPRC was suspended
|
||||
* DASD_EER_NOPATH: There is no path to the device left.
|
||||
* DASD_EER_STATECHANGE: The state of the device has changed.
|
||||
*
|
||||
* For the first three triggers all required information can be supplied by
|
||||
* the caller. For these triggers a record is written by the function
|
||||
* dasd_eer_write_standard_trigger.
|
||||
*
|
||||
* The DASD_EER_STATECHANGE trigger is special since a sense subsystem
|
||||
* status ccw need to be executed to gather the necessary sense data first.
|
||||
* The dasd_eer_snss function will queue the SNSS request and the request
|
||||
* callback will then call dasd_eer_write with the DASD_EER_STATCHANGE
|
||||
* trigger.
|
||||
*
|
||||
* To avoid memory allocations at runtime, the necessary memory is allocated
|
||||
* when the extended error reporting is enabled for a device (by
|
||||
* dasd_eer_probe). There is one sense subsystem status request for each
|
||||
* eer enabled DASD device. The presence of the cqr in device->eer_cqr
|
||||
* indicates that eer is enable for the device. The use of the snss request
|
||||
* is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
|
||||
* that the cqr is currently in use, dasd_eer_snss cannot start a second
|
||||
* request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
|
||||
* the SNSS request will check the bit and call dasd_eer_snss again.
|
||||
*/
|
||||
|
||||
#define SNSS_DATA_SIZE 44
|
||||
|
||||
#define DASD_EER_BUSID_SIZE 10
|
||||
struct dasd_eer_header {
|
||||
__u32 total_size;
|
||||
__u32 trigger;
|
||||
__u64 tv_sec;
|
||||
__u64 tv_usec;
|
||||
char busid[DASD_EER_BUSID_SIZE];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/*
|
||||
* The following function can be used for those triggers that have
|
||||
* all necessary data available when the function is called.
|
||||
* If the parameter cqr is not NULL, the chain of requests will be searched
|
||||
* for valid sense data, and all valid sense data sets will be added to
|
||||
* the triggers data.
|
||||
*/
|
||||
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
|
||||
struct dasd_ccw_req *cqr,
|
||||
int trigger)
|
||||
{
|
||||
struct dasd_ccw_req *temp_cqr;
|
||||
int data_size;
|
||||
struct timeval tv;
|
||||
struct dasd_eer_header header;
|
||||
unsigned long flags;
|
||||
struct eerbuffer *eerb;
|
||||
char *sense;
|
||||
|
||||
/* go through cqr chain and count the valid sense data sets */
|
||||
data_size = 0;
|
||||
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
|
||||
if (dasd_get_sense(&temp_cqr->irb))
|
||||
data_size += 32;
|
||||
|
||||
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
|
||||
header.trigger = trigger;
|
||||
do_gettimeofday(&tv);
|
||||
header.tv_sec = tv.tv_sec;
|
||||
header.tv_usec = tv.tv_usec;
|
||||
strncpy(header.busid, dev_name(&device->cdev->dev),
|
||||
DASD_EER_BUSID_SIZE);
|
||||
|
||||
spin_lock_irqsave(&bufferlock, flags);
|
||||
list_for_each_entry(eerb, &bufferlist, list) {
|
||||
dasd_eer_start_record(eerb, header.total_size);
|
||||
dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
|
||||
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
|
||||
sense = dasd_get_sense(&temp_cqr->irb);
|
||||
if (sense)
|
||||
dasd_eer_write_buffer(eerb, sense, 32);
|
||||
}
|
||||
dasd_eer_write_buffer(eerb, "EOR", 4);
|
||||
}
|
||||
spin_unlock_irqrestore(&bufferlock, flags);
|
||||
wake_up_interruptible(&dasd_eer_read_wait_queue);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function writes a DASD_EER_STATECHANGE trigger.
|
||||
*/
|
||||
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
|
||||
struct dasd_ccw_req *cqr,
|
||||
int trigger)
|
||||
{
|
||||
int data_size;
|
||||
int snss_rc;
|
||||
struct timeval tv;
|
||||
struct dasd_eer_header header;
|
||||
unsigned long flags;
|
||||
struct eerbuffer *eerb;
|
||||
|
||||
snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
|
||||
if (snss_rc)
|
||||
data_size = 0;
|
||||
else
|
||||
data_size = SNSS_DATA_SIZE;
|
||||
|
||||
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
|
||||
header.trigger = DASD_EER_STATECHANGE;
|
||||
do_gettimeofday(&tv);
|
||||
header.tv_sec = tv.tv_sec;
|
||||
header.tv_usec = tv.tv_usec;
|
||||
strncpy(header.busid, dev_name(&device->cdev->dev),
|
||||
DASD_EER_BUSID_SIZE);
|
||||
|
||||
spin_lock_irqsave(&bufferlock, flags);
|
||||
list_for_each_entry(eerb, &bufferlist, list) {
|
||||
dasd_eer_start_record(eerb, header.total_size);
|
||||
dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header));
|
||||
if (!snss_rc)
|
||||
dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
|
||||
dasd_eer_write_buffer(eerb, "EOR", 4);
|
||||
}
|
||||
spin_unlock_irqrestore(&bufferlock, flags);
|
||||
wake_up_interruptible(&dasd_eer_read_wait_queue);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called for all triggers. It calls the appropriate
|
||||
* function that writes the actual trigger records.
|
||||
*/
|
||||
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
|
||||
unsigned int id)
|
||||
{
|
||||
if (!device->eer_cqr)
|
||||
return;
|
||||
switch (id) {
|
||||
case DASD_EER_FATALERROR:
|
||||
case DASD_EER_PPRCSUSPEND:
|
||||
dasd_eer_write_standard_trigger(device, cqr, id);
|
||||
break;
|
||||
case DASD_EER_NOPATH:
|
||||
dasd_eer_write_standard_trigger(device, NULL, id);
|
||||
break;
|
||||
case DASD_EER_STATECHANGE:
|
||||
dasd_eer_write_snss_trigger(device, cqr, id);
|
||||
break;
|
||||
default: /* unknown trigger, so we write it without any sense data */
|
||||
dasd_eer_write_standard_trigger(device, NULL, id);
|
||||
break;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(dasd_eer_write);
|
||||
|
||||
/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr) /* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request in use. */
		/* Remember that another SNSS is wanted; the completion
		 * callback (dasd_eer_snss_cb) will restart it. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* cdev is already locked, can't use dasd_add_request_head */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}
|
||||
|
||||
/*
 * Callback function for use with sense subsystem status request.
 * Writes the STATECHANGE trigger record, releases the in-use flag and,
 * if a further SNSS was requested meanwhile, restarts the request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		/* Still the active eer request: keep it for reuse. */
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error recovery has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_kfree_request(cqr, device);
}
|
||||
|
||||
/*
 * Enable error reporting on a given device.
 * Builds a reusable SNSS ccw request and publishes it in device->eer_cqr
 * under the ccwdev lock.  Returns 0 on success (also when already
 * enabled), -EPERM for non-ECKD devices, -ENOMEM on allocation failure.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	struct ccw1 *ccw;

	/* unlocked fast path; the locked re-check below decides for real */
	if (device->eer_cqr)
		return 0;

	/* extended error reporting is only defined for ECKD devices */
	if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
		return -EPERM;	/* FIXME: -EMEDIUMTYPE ? */

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device);
	if (IS_ERR(cqr))
		return -ENOMEM;

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

	/* single-CCW channel program: sense subsystem status */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	/* publish atomically; lose the race gracefully if someone else
	 * enabled eer in the meantime */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		dasd_kfree_request(cqr, device);
	return 0;
}
|
||||
|
||||
/*
 * Disable error reporting on a given device.
 * Detaches the SNSS request under the ccwdev lock.  If the request is
 * currently executing (EER_IN_USE set) it is NOT freed here; the
 * completion callback will notice eer_cqr changed and free it.
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr && !in_use)
		dasd_kfree_request(cqr, device);
}
|
||||
|
||||
/*
 * SECTION: the device operations
 */

/*
 * On the one side we need a lock to access our internal buffer, on the
 * other side a copy_to_user can sleep. So we need to copy the data we have
 * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
 */
/* Bounce buffer shared by all readers; serialized by readbuffer_mutex. */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);
|
||||
|
||||
/*
 * Open /dev/dasd_eer: allocate a per-reader ring buffer sized by the
 * eer_pages module parameter and register it on the global bufferlist
 * so that trigger records are fanned out to this reader as well.
 * Returns 0 on success, -EINVAL for an out-of-range eer_pages value,
 * -ENOMEM on allocation failure.
 */
static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->buffer_page_count = eer_pages;
	/* guard against nonsensical module parameter values */
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		DBF_EVENT(DBF_WARNING, "can't open device since module "
			  "parameter eer_pages is smaller than 1 or"
			  " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	/* array of page pointers; the pages themselves follow below */
	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
			       GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp,filp);
}
|
||||
|
||||
static int dasd_eer_close(struct inode *inp, struct file *filp)
|
||||
{
|
||||
struct eerbuffer *eerb;
|
||||
unsigned long flags;
|
||||
|
||||
eerb = (struct eerbuffer *) filp->private_data;
|
||||
spin_lock_irqsave(&bufferlock, flags);
|
||||
list_del(&eerb->list);
|
||||
spin_unlock_irqrestore(&bufferlock, flags);
|
||||
dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
|
||||
kfree(eerb->buffer);
|
||||
kfree(eerb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Read one chunk of a trigger record from the caller's ring buffer.
 * Records are stored as a length ("tailcount") followed by the payload;
 * a record larger than the user buffer is delivered piecewise via
 * eerb->residual.  readbuffer_mutex serializes use of the shared bounce
 * buffer (copy_to_user may sleep), bufferlock protects the ring itself.
 * NOTE(review): a single chunk is staged in readbuffer[PAGE_SIZE] —
 * assumes effective_count never exceeds PAGE_SIZE, i.e. records written
 * by the trigger functions stay small; confirm against the writers.
 */
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc,rc;
	int tailcount,effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) { /* the remainder of this record */
		/* has been deleted */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				/* sleep until a writer signals new data,
				 * then re-take both locks and retry */
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount,(int)count);
		eerb->residual = tailcount - effective_count;
	}

	/* stage the chunk in the bounce buffer while holding bufferlock */
	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	/* copy_to_user may sleep, so only readbuffer_mutex is held here */
	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}
|
||||
|
||||
/*
 * poll() support: report the buffer as readable whenever the ring's
 * head and tail differ (i.e. at least one record byte is queued).
 */
static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	struct eerbuffer *buf = filp->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (buf->head != buf->tail)
		mask = POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}
|
||||
|
||||
static const struct file_operations dasd_eer_fops = {
|
||||
.open = &dasd_eer_open,
|
||||
.release = &dasd_eer_close,
|
||||
.read = &dasd_eer_read,
|
||||
.poll = &dasd_eer_poll,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = noop_llseek,
|
||||
};
|
||||
|
||||
static struct miscdevice *dasd_eer_dev = NULL;
|
||||
|
||||
/*
 * Module init: allocate and register the dasd_eer misc device with a
 * dynamically assigned minor number.  Returns 0 on success or a
 * negative errno; on failure no resources remain allocated.
 */
int __init dasd_eer_init(void)
{
	struct miscdevice *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->minor = MISC_DYNAMIC_MINOR;
	dev->name = "dasd_eer";
	dev->fops = &dasd_eer_fops;
	dasd_eer_dev = dev;

	rc = misc_register(dasd_eer_dev);
	if (!rc)
		return 0;

	/* registration failed: roll back the allocation */
	kfree(dasd_eer_dev);
	dasd_eer_dev = NULL;
	DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
	       "register misc device");
	return rc;
}
|
||||
|
||||
void dasd_eer_exit(void)
|
||||
{
|
||||
if (dasd_eer_dev) {
|
||||
misc_deregister(dasd_eer_dev);
|
||||
kfree(dasd_eer_dev);
|
||||
dasd_eer_dev = NULL;
|
||||
}
|
||||
}
|
202
drivers/s390/block/dasd_erp.c
Normal file
202
drivers/s390/block/dasd_erp.c
Normal file
|
@ -0,0 +1,202 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Horst Hummel <Horst.Hummel@de.ibm.com>
|
||||
* Carsten Otte <Cotte@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2001
|
||||
*
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd"
|
||||
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/debug.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* This is ugly... */
|
||||
#define PRINTK_HEADER "dasd_erp:"
|
||||
|
||||
#include "dasd_int.h"
|
||||
|
||||
/*
 * Allocate an ERP ccw request from the device's dedicated erp_chunks
 * pool (usable even when normal memory is tight).  The cqr header, the
 * channel program (cplength ccws) and the data area (datasize bytes)
 * are carved out of one 8-byte-aligned chunk.  Takes a device reference
 * that dasd_free_erp_request drops again.
 * Returns the request or ERR_PTR(-ENOMEM).
 */
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
		       struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	/* round the header up to an 8-byte boundary */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->erp_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	INIT_LIST_HEAD(&cqr->devlist);
	INIT_LIST_HEAD(&cqr->blocklist);
	/* channel program and data area follow the aligned header */
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	/* magic is a 4-character tag stored in EBCDIC */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
|
||||
|
||||
/*
 * Return an ERP request to the device's erp_chunks pool and drop the
 * device reference taken in dasd_alloc_erp_request.
 * NOTE(review): drops ref_count directly with atomic_dec instead of a
 * dasd_put_device helper — presumably to avoid its side effects in this
 * context; confirm against dasd_int.h.
 */
void
dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->erp_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	atomic_dec(&device->ref_count);
}
|
||||
|
||||
|
||||
/*
|
||||
* dasd_default_erp_action just retries the current cqr
|
||||
*/
|
||||
struct dasd_ccw_req *
|
||||
dasd_default_erp_action(struct dasd_ccw_req *cqr)
|
||||
{
|
||||
struct dasd_device *device;
|
||||
|
||||
device = cqr->startdev;
|
||||
|
||||
/* just retry - there is nothing to save ... I got no sense data.... */
|
||||
if (cqr->retries > 0) {
|
||||
DBF_DEV_EVENT(DBF_DEBUG, device,
|
||||
"default ERP called (%i retries left)",
|
||||
cqr->retries);
|
||||
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
|
||||
cqr->lpm = device->path_data.opm;
|
||||
cqr->status = DASD_CQR_FILLED;
|
||||
} else {
|
||||
pr_err("%s: default ERP has run out of retries and failed\n",
|
||||
dev_name(&device->cdev->dev));
|
||||
cqr->status = DASD_CQR_FAILED;
|
||||
cqr->stopclk = get_tod_clock();
|
||||
}
|
||||
return cqr;
|
||||
} /* end dasd_default_erp_action */
|
||||
|
||||
/*
 * DESCRIPTION
 *   Frees all ERPs of the current ERP Chain and set the status
 *   of the original CQR either to DASD_CQR_DONE if ERP was successful
 *   or to DASD_CQR_FAILED if ERP was NOT successful.
 *   NOTE: This function is only called if no discipline postaction
 *   is available
 *
 * PARAMETER
 *   erp		current erp_head
 *
 * RETURN VALUES
 *   cqr		pointer to the original CQR
 */
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
	int success;
	unsigned long long startclk, stopclk;
	struct dasd_device *startdev;

	BUG_ON(cqr->refers == NULL || cqr->function == NULL);

	/* remember outcome and timing of the ERP head before freeing it */
	success = cqr->status == DASD_CQR_DONE;
	startclk = cqr->startclk;
	stopclk = cqr->stopclk;
	startdev = cqr->startdev;

	/* free all ERPs - but NOT the original cqr */
	while (cqr->refers != NULL) {
		struct dasd_ccw_req *refers;

		refers = cqr->refers;
		/* remove the request from the block queue */
		list_del(&cqr->blocklist);
		/* free the finished erp request */
		dasd_free_erp_request(cqr, cqr->memdev);
		cqr = refers;
	}

	/* set corresponding status to original cqr */
	cqr->startclk = startclk;
	cqr->stopclk = stopclk;
	cqr->startdev = startdev;
	if (success)
		cqr->status = DASD_CQR_DONE;
	else {
		cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_tod_clock();
	}

	return cqr;

} /* end default_erp_postaction */
|
||||
|
||||
void
|
||||
dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
|
||||
{
|
||||
struct dasd_device *device;
|
||||
|
||||
device = cqr->startdev;
|
||||
if (cqr->intrc == -ETIMEDOUT) {
|
||||
dev_err(&device->cdev->dev,
|
||||
"A timeout error occurred for cqr %p", cqr);
|
||||
return;
|
||||
}
|
||||
if (cqr->intrc == -ENOLINK) {
|
||||
dev_err(&device->cdev->dev,
|
||||
"A transport error occurred for cqr %p", cqr);
|
||||
return;
|
||||
}
|
||||
/* dump sense data */
|
||||
if (device->discipline && device->discipline->dump_sense)
|
||||
device->discipline->dump_sense(device, cqr, irb);
|
||||
}
|
||||
|
||||
void
|
||||
dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
|
||||
{
|
||||
struct dasd_device *device;
|
||||
|
||||
device = cqr->startdev;
|
||||
/* dump sense data to s390 debugfeature*/
|
||||
if (device->discipline && device->discipline->dump_sense_dbf)
|
||||
device->discipline->dump_sense_dbf(device, irb, "log");
|
||||
}
|
||||
EXPORT_SYMBOL(dasd_log_sense_dbf);
|
||||
|
||||
/* ERP helpers exported for use by the discipline modules (ECKD/FBA). */
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);
|
||||
|
635
drivers/s390/block/dasd_fba.c
Normal file
635
drivers/s390/block/dasd_fba.c
Normal file
|
@ -0,0 +1,635 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2009
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd-fba"
|
||||
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/debug.h>
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/hdreg.h> /* HDIO_GETGEO */
|
||||
#include <linux/bio.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/idals.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/ccwdev.h>
|
||||
|
||||
#include "dasd_int.h"
|
||||
#include "dasd_fba.h"
|
||||
|
||||
#ifdef PRINTK_HEADER
|
||||
#undef PRINTK_HEADER
|
||||
#endif /* PRINTK_HEADER */
|
||||
#define PRINTK_HEADER "dasd(fba):"
|
||||
|
||||
#define FBA_DEFAULT_RETRIES 32
|
||||
|
||||
#define DASD_FBA_CCW_WRITE 0x41
|
||||
#define DASD_FBA_CCW_READ 0x42
|
||||
#define DASD_FBA_CCW_LOCATE 0x43
|
||||
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/* Forward declaration; the discipline is defined at the end of the file. */
static struct dasd_discipline dasd_fba_discipline;

/* Per-device private data: cached Read Device Characteristics record. */
struct dasd_fba_private {
	struct dasd_fba_characteristics rdc_data;
};

/* CCW device types handled by this driver (9336 and 3370 FBA devices). */
static struct ccw_device_id dasd_fba_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
|
||||
|
||||
static struct ccw_driver dasd_fba_driver; /* see below */
/* ccw_driver probe callback: attach the FBA discipline to a new device. */
static int
dasd_fba_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &dasd_fba_discipline);
}
|
||||
|
||||
/* ccw_driver set_online callback: bring the device online as FBA. */
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}
|
||||
|
||||
/* ccw bus glue; most callbacks are provided by the generic DASD core. */
static struct ccw_driver dasd_fba_driver = {
	.driver = {
		.name	= "dasd-fba",
		.owner	= THIS_MODULE,
	},
	.ids         = dasd_fba_ids,
	.probe       = dasd_fba_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_fba_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.freeze      = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
	.int_class   = IRQIO_DAS,
};
|
||||
|
||||
static void
|
||||
define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
|
||||
int blksize, int beg, int nr)
|
||||
{
|
||||
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
|
||||
ccw->flags = 0;
|
||||
ccw->count = 16;
|
||||
ccw->cda = (__u32) __pa(data);
|
||||
memset(data, 0, sizeof (struct DE_fba_data));
|
||||
if (rw == WRITE)
|
||||
(data->mask).perm = 0x0;
|
||||
else if (rw == READ)
|
||||
(data->mask).perm = 0x1;
|
||||
else
|
||||
data->mask.perm = 0x2;
|
||||
data->blk_size = blksize;
|
||||
data->ext_loc = beg;
|
||||
data->ext_end = nr - 1;
|
||||
}
|
||||
|
||||
static void
|
||||
locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
|
||||
int block_nr, int block_ct)
|
||||
{
|
||||
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
|
||||
ccw->flags = 0;
|
||||
ccw->count = 8;
|
||||
ccw->cda = (__u32) __pa(data);
|
||||
memset(data, 0, sizeof (struct LO_fba_data));
|
||||
if (rw == WRITE)
|
||||
data->operation.cmd = 0x5;
|
||||
else if (rw == READ)
|
||||
data->operation.cmd = 0x6;
|
||||
else
|
||||
data->operation.cmd = 0x8;
|
||||
data->blk_nr = block_nr;
|
||||
data->blk_ct = block_ct;
|
||||
}
|
||||
|
||||
/*
 * Discipline check_device callback: allocate the private data and the
 * block structure, read the device characteristics and announce the
 * new device.  On any failure all partially set up state is torn down
 * again.  Returns 0 or a negative errno.
 */
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
	struct dasd_block *block;
	struct dasd_fba_private *private;
	struct ccw_device *cdev = device->cdev;
	int rc;
	int readonly;

	private = (struct dasd_fba_private *) device->private;
	if (!private) {
		/* GFP_DMA: rdc_data is used as an I/O buffer below 2G */
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD "
				 "data failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	block = dasd_alloc_block();
	if (IS_ERR(block)) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
				"dasd block structure");
		device->private = NULL;
		kfree(private);
		return PTR_ERR(block);
	}
	device->block = block;
	block->base = device;

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
					 &private->rdc_data, 32);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
				"characteristics returned error %d", rc);
		/* undo the allocations made above */
		device->block = NULL;
		dasd_free_block(block);
		device->private = NULL;
		kfree(private);
		return rc;
	}

	device->default_expires = DASD_EXPIRES;
	device->default_retries = FBA_DEFAULT_RETRIES;
	device->path_data.opm = LPM_ANYPATH;

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev,
		 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
		 "and %d B/blk%s\n",
		 cdev->id.dev_type,
		 cdev->id.dev_model,
		 cdev->id.cu_type,
		 cdev->id.cu_model,
		 ((private->rdc_data.blk_bdsa *
		   (private->rdc_data.blk_size >> 9)) >> 11),
		 private->rdc_data.blk_size,
		 readonly ? ", read-only device" : "");
	return 0;
}
|
||||
|
||||
static int dasd_fba_do_analysis(struct dasd_block *block)
|
||||
{
|
||||
struct dasd_fba_private *private;
|
||||
int sb, rc;
|
||||
|
||||
private = (struct dasd_fba_private *) block->base->private;
|
||||
rc = dasd_check_blocksize(private->rdc_data.blk_size);
|
||||
if (rc) {
|
||||
DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
|
||||
private->rdc_data.blk_size);
|
||||
return rc;
|
||||
}
|
||||
block->blocks = private->rdc_data.blk_bdsa;
|
||||
block->bp_block = private->rdc_data.blk_size;
|
||||
block->s2b_shift = 0; /* bits to shift 512 to get a block */
|
||||
for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
|
||||
block->s2b_shift++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dasd_fba_fill_geometry(struct dasd_block *block,
|
||||
struct hd_geometry *geo)
|
||||
{
|
||||
if (dasd_check_blocksize(block->bp_block) != 0)
|
||||
return -EINVAL;
|
||||
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
|
||||
geo->heads = 16;
|
||||
geo->sectors = 128 >> block->s2b_shift;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* FBA has no specialized recovery; always use the default retry ERP. */
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}
|
||||
|
||||
static dasd_erp_fn_t
|
||||
dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
|
||||
{
|
||||
if (cqr->function == dasd_default_erp_action)
|
||||
return dasd_default_erp_postaction;
|
||||
|
||||
DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
|
||||
cqr->function);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void dasd_fba_check_for_device_change(struct dasd_device *device,
|
||||
struct dasd_ccw_req *cqr,
|
||||
struct irb *irb)
|
||||
{
|
||||
char mask;
|
||||
|
||||
/* first of all check for state change pending interrupt */
|
||||
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
|
||||
if ((irb->scsw.cmd.dstat & mask) == mask)
|
||||
dasd_generic_handle_state_change(device);
|
||||
};
|
||||
|
||||
/*
 * Translate a block layer request into an FBA channel program:
 * one Define Extent CCW, Locate Record CCW(s), and one read/write CCW
 * per device block.  Devices with data chaining need a single Locate
 * Record; others need one per block.  Returns the built cqr or an
 * ERR_PTR on invalid requests / allocation failure.
 */
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
					      struct dasd_block *block,
					      struct request *req)
{
	struct dasd_fba_private *private;
	unsigned long *idaws;
	struct LO_fba_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	int count, cidaw, cplength, datasize;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char cmd;

	private = (struct dasd_fba_private *) block->base->private;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_FBA_CCW_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_FBA_CCW_WRITE;
	} else
		return ERR_PTR(-EINVAL);
	blksize = block->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = blk_rq_pos(req) >> block->s2b_shift;
	last_rec =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Fba can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		/* buffers above 2G need indirect data address words */
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len / blksize;
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* 1x define extent + 1x locate record + number of blocks */
	cplength = 2 + count;
	/* 1x define extent + 1x locate record */
	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
		cidaw * sizeof(unsigned long);
	/*
	 * Find out number of additional locate record ccws if the device
	 * can't do data chaining.
	 */
	if (private->rdc_data.mode.bits.data_chain == 0) {
		cplength += count - 1;
		datasize += (count - 1)*sizeof(struct LO_fba_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent. */
	define_extent(ccw++, cqr->data, rq_data_dir(req),
		      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
	/* Build locate_record + read/write ccws. */
	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
	LO_data = (struct LO_fba_data *) (idaws + cidaw);
	/* Locate record for all blocks for smart devices. */
	if (private->rdc_data.mode.bits.data_chain != 0) {
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
	}
	recid = first_rec;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		if (dasd_page_cache) {
			/* optionally bounce the data through the page
			 * cache slab (best effort, may fail silently) */
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Locate record for stupid devices. */
			if (private->rdc_data.mode.bits.data_chain == 0) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw, LO_data++,
					      rq_data_dir(req),
					      recid - first_rec, 1);
				ccw->flags = CCW_FLAG_CC;
				ccw++;
			} else {
				/* data-chain subsequent blocks, command-chain
				 * the first one to the locate record */
				if (recid > first_rec)
					ccw[-1].flags |= CCW_FLAG_DC;
				else
					ccw[-1].flags |= CCW_FLAG_CC;
			}
			ccw->cmd_code = cmd;
			ccw->count = block->bp_block;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
	cqr->retries = memdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
|
||||
|
||||
/*
 * Free a channel program built by dasd_fba_build_cp.  If the request
 * was bounced through dasd_page_cache, copy read data back to the
 * request buffers and release the cache pages; the CCW walk mirrors
 * the layout produced by build_cp.  Returns 1 if the request finished
 * successfully (DASD_CQR_DONE), 0 otherwise.
 */
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_fba_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, off;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_fba_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->rdc_data.mode.bits.data_chain != 0)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->rdc_data.mode.bits.data_chain == 0)
				ccw++;
			if (dst) {
				/* resolve the CCW's data address, through
				 * the IDA list when one was used */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
|
||||
|
||||
static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
|
||||
{
|
||||
if (cqr->retries < 0)
|
||||
cqr->status = DASD_CQR_FAILED;
|
||||
else
|
||||
cqr->status = DASD_CQR_FILLED;
|
||||
};
|
||||
|
||||
static int
|
||||
dasd_fba_fill_info(struct dasd_device * device,
|
||||
struct dasd_information2_t * info)
|
||||
{
|
||||
info->label_block = 1;
|
||||
info->FBA_layout = 1;
|
||||
info->format = DASD_FORMAT_LDL;
|
||||
info->characteristics_size = sizeof(struct dasd_fba_characteristics);
|
||||
memcpy(info->characteristics,
|
||||
&((struct dasd_fba_private *) device->private)->rdc_data,
|
||||
sizeof (struct dasd_fba_characteristics));
|
||||
info->confdata_size = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Write a one-line summary of the interrupt's scsw and the first
 * 32 sense bytes to the s390 debug feature; log a marker line when
 * no sense data is available.
 */
static void
dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			char *reason)
{
	u64 *sense;

	sense = (u64 *) dasd_get_sense(irb);
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device,
			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
			      "%016llx", reason,
			      scsw_is_tm(&irb->scsw) ? "t" : "c",
			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
			      scsw_dstat(&irb->scsw), sense[0], sense[1],
			      sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
			      "SORRY - NO VALID SENSE AVAILABLE\n");
	}
}
|
||||
|
||||
|
||||
/*
 * Dump an I/O status report for a failed request to the console:
 * scsw status, sense bytes (if present) and three windows of the
 * channel program - the first CCWs, the area around the failing CCW
 * and the last CCWs.  Allocates one scratch page with GFP_ATOMIC and
 * silently returns if that fails.
 */
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		    struct irb *irb)
{
	char *page;
	struct ccw1 *act, *end, *last;
	int len, sl, sct, count;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data");
		return;
	}
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	/* erw.cons != 0 means concurrent sense data is available */
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}
	} else {
		len += sprintf(page + len, PRINTK_HEADER
			       " SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	/* dump the Channel Program */
	/* print first CCWs (maximum 8) */
	act = req->cpaddr;
	/* the chain ends at the first CCW without chaining flags set */
	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
	end = min(act + 8, last);
	len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
	while (act <= end) {
		len += sprintf(page + len, PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		/* dump at most 32 bytes of the CCW's data area */
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	printk(KERN_ERR "%s", page);


	/* print failing CCW area */
	len = 0;
	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
		act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
		len += sprintf(page + len, PRINTK_HEADER "......\n");
	}
	end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
	while (act <= end) {
		len += sprintf(page + len, PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}

	/* print last CCWs */
	if (act < last - 2) {
		act = last - 2;
		len += sprintf(page + len, PRINTK_HEADER "......\n");
	}
	while (act <= last) {
		len += sprintf(page + len, PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	if (len > 0)
		printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
|
||||
|
||||
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and a
 * locate record ccw for each block (stupid devices!) + 16 bytes of data.
 * That makes:
 * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 100.1 blocks
 * for one request. Give a little safety and the result is 96.
 */
/* Operations vector of the FBA discipline (see struct dasd_discipline). */
static struct dasd_discipline dasd_fba_discipline = {
	.owner = THIS_MODULE,
	.name = "FBA ",
	.ebcname = "FBA ",	/* converted to EBCDIC in dasd_fba_init */
	.max_blocks = 96,	/* see the calculation above */
	.check_device = dasd_fba_check_characteristics,
	.do_analysis = dasd_fba_do_analysis,
	.verify_path = dasd_generic_verify_path,
	.fill_geometry = dasd_fba_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_fba_handle_terminated_request,
	.erp_action = dasd_fba_erp_action,
	.erp_postaction = dasd_fba_erp_postaction,
	.check_for_device_change = dasd_fba_check_for_device_change,
	.build_cp = dasd_fba_build_cp,
	.free_cp = dasd_fba_free_cp,
	.dump_sense = dasd_fba_dump_sense,
	.dump_sense_dbf = dasd_fba_dump_sense_dbf,
	.fill_info = dasd_fba_fill_info,
};
|
||||
|
||||
/*
 * Module initialization: convert the discipline's eye-catcher name to
 * EBCDIC and register the CCW driver.  On success, wait for device
 * probing to complete so devices are usable when modprobe returns.
 * Returns 0 or the error from ccw_driver_register().
 */
static int __init
dasd_fba_init(void)
{
	int ret;

	ASCEBC(dasd_fba_discipline.ebcname, 4);
	ret = ccw_driver_register(&dasd_fba_driver);
	if (!ret)
		wait_for_device_probe();

	return ret;
}
|
||||
|
||||
/* Module unload: deregister the FBA CCW driver. */
static void __exit
dasd_fba_cleanup(void)
{
	ccw_driver_unregister(&dasd_fba_driver);
}

module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);
|
71
drivers/s390/block/dasd_fba.h
Normal file
71
drivers/s390/block/dasd_fba.h
Normal file
|
@ -0,0 +1,71 @@
|
|||
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2000
 *
 */

#ifndef DASD_FBA_H
#define DASD_FBA_H

/*
 * Define Extent payload used in FBA channel programs.
 * Layout mirrors the channel command's data format, hence packed.
 */
struct DE_fba_data {
	struct {
		unsigned char perm:2;	/* Permissions on this extent */
		unsigned char zero:2;	/* Must be zero */
		unsigned char da:1;	/* usually zero */
		unsigned char diag:1;	/* allow diagnose */
		unsigned char zero2:2;	/* zero */
	} __attribute__ ((packed)) mask;
	__u8 zero;		/* Must be zero */
	__u16 blk_size;		/* Blocksize */
	__u32 ext_loc;		/* Extent locator */
	__u32 ext_beg;		/* logical number of block 0 in extent */
	__u32 ext_end;		/* logical number of last block in extent */
} __attribute__ ((packed));
|
||||
|
||||
/*
 * Locate Record payload used in FBA channel programs.
 * Packed to match the on-channel data format.
 */
struct LO_fba_data {
	struct {
		unsigned char zero:4;
		unsigned char cmd:4;
	} __attribute__ ((packed)) operation;
	__u8 auxiliary;
	__u16 blk_ct;	/* presumably the block count -- confirm in dasd_fba.c */
	__u32 blk_nr;	/* presumably the starting block -- confirm in dasd_fba.c */
} __attribute__ ((packed));
|
||||
|
||||
/*
 * Device characteristics of an FBA device as returned by the device
 * (copied to user space via dasd_fba_fill_info).  Packed to match the
 * raw read-device-characteristics data layout.
 */
struct dasd_fba_characteristics {
	union {
		__u8 c;
		struct {
			unsigned char reserved:1;
			unsigned char overrunnable:1;
			unsigned char burst_byte:1;
			unsigned char data_chain:1;
			unsigned char zeros:4;
		} __attribute__ ((packed)) bits;
	} __attribute__ ((packed)) mode;
	union {
		__u8 c;
		struct {
			unsigned char zero0:1;
			unsigned char removable:1;
			unsigned char shared:1;
			unsigned char zero1:1;
			unsigned char mam:1;
			unsigned char zeros:3;
		} __attribute__ ((packed)) bits;
	} __attribute__ ((packed)) features;
	__u8 dev_class;
	__u8 unit_type;
	__u16 blk_size;
	__u32 blk_per_cycl;
	__u32 blk_per_bound;
	__u32 blk_bdsa;
	__u32 reserved0;
	__u16 reserved1;
	__u16 blk_ce;
	__u32 reserved2;
	__u16 reserved3;
} __attribute__ ((packed));

#endif /* DASD_FBA_H */
|
178
drivers/s390/block/dasd_genhd.c
Normal file
178
drivers/s390/block/dasd_genhd.c
Normal file
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Horst Hummel <Horst.Hummel@de.ibm.com>
|
||||
* Carsten Otte <Cotte@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2001
|
||||
*
|
||||
* gendisk related functions for the dasd driver.
|
||||
*
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd"
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/blkpg.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* This is ugly... */
|
||||
#define PRINTK_HEADER "dasd_gendisk:"
|
||||
|
||||
#include "dasd_int.h"
|
||||
|
||||
/*
 * Allocate and register gendisk structure for device.
 * Returns 0 on success, -EBUSY if no minor is left for this device
 * index, or -ENOMEM if the gendisk allocation fails.
 */
int dasd_gendisk_alloc(struct dasd_block *block)
{
	struct gendisk *gdp;
	struct dasd_device *base;
	int len;

	/* Make sure the minor for this device exists. */
	base = block->base;
	if (base->devindex >= DASD_PER_MAJOR)
		return -EBUSY;

	gdp = alloc_disk(1 << DASD_PARTN_BITS);
	if (!gdp)
		return -ENOMEM;

	/* Initialize gendisk structure. */
	gdp->major = DASD_MAJOR;
	gdp->first_minor = base->devindex << DASD_PARTN_BITS;
	gdp->fops = &dasd_device_operations;
	gdp->driverfs_dev = &base->cdev->dev;

	/*
	 * Set device name.
	 * dasda - dasdz : 26 devices
	 * dasdaa - dasdzz : 676 devices, added up = 702
	 * dasdaaa - dasdzzz : 17576 devices, added up = 18278
	 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
	 */
	len = sprintf(gdp->disk_name, "dasd");
	if (base->devindex > 25) {
		if (base->devindex > 701) {
			if (base->devindex > 18277)
				len += sprintf(gdp->disk_name + len, "%c",
					       'a'+(((base->devindex-18278)
						     /17576)%26));
			len += sprintf(gdp->disk_name + len, "%c",
				       'a'+(((base->devindex-702)/676)%26));
		}
		len += sprintf(gdp->disk_name + len, "%c",
			       'a'+(((base->devindex-26)/26)%26));
	}
	len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));

	if (base->features & DASD_FEATURE_READONLY ||
	    test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
		set_disk_ro(gdp, 1);
	dasd_add_link_to_gendisk(gdp, base);
	gdp->queue = block->request_queue;
	block->gdp = gdp;
	/* capacity stays 0 until volume analysis has determined it */
	set_capacity(block->gdp, 0);
	add_disk(block->gdp);
	return 0;
}
|
||||
|
||||
/*
 * Unregister and free gendisk structure for device.
 * Safe to call when no gendisk was allocated (block->gdp == NULL).
 */
void dasd_gendisk_free(struct dasd_block *block)
{
	if (block->gdp) {
		del_gendisk(block->gdp);
		/* break the gendisk -> dasd_block back link before the
		 * final put so stale users cannot reach freed state */
		block->gdp->private_data = NULL;
		put_disk(block->gdp);
		block->gdp = NULL;
	}
}
|
||||
|
||||
/*
 * Trigger a partition detection.
 * Returns 0 on success or -ENODEV if the block device cannot be
 * obtained/opened.  On success the bdev reference taken here is kept
 * until dasd_destroy_partitions releases it.
 */
int dasd_scan_partitions(struct dasd_block *block)
{
	struct block_device *bdev;

	bdev = bdget_disk(block->gdp, 0);
	if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
		return -ENODEV;
	/*
	 * See fs/partition/check.c:register_disk,rescan_partitions
	 * Can't call rescan_partitions directly. Use ioctl.
	 */
	ioctl_by_bdev(bdev, BLKRRPART, 0);
	/*
	 * Since the matching blkdev_put call to the blkdev_get in
	 * this function is not called before dasd_destroy_partitions
	 * the offline open_count limit needs to be increased from
	 * 0 to 1. This is done by setting device->bdev (see
	 * dasd_generic_set_offline). As long as the partition
	 * detection is running no offline should be allowed. That
	 * is why the assignment to device->bdev is done AFTER
	 * the BLKRRPART ioctl.
	 */
	block->bdev = bdev;
	return 0;
}
|
||||
|
||||
/*
 * Remove all inodes in the system for a device, delete the
 * partitions and make device unusable by setting its size to zero.
 */
void dasd_destroy_partitions(struct dasd_block *block)
{
	/* The two structs have 168/176 byte on 31/64 bit. */
	struct blkpg_partition bpart;
	struct blkpg_ioctl_arg barg;
	struct block_device *bdev;

	/*
	 * Get the bdev pointer from the device structure and clear
	 * device->bdev to lower the offline open_count limit again.
	 */
	bdev = block->bdev;
	block->bdev = NULL;

	/*
	 * See fs/partition/check.c:delete_partition
	 * Can't call delete_partitions directly. Use ioctl.
	 * The ioctl also does locking and invalidation.
	 */
	memset(&bpart, 0, sizeof(struct blkpg_partition));
	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
	/* barg.data points at a kernel buffer; the __force __user cast
	 * satisfies the ioctl's user-pointer prototype */
	barg.data = (void __force __user *) &bpart;
	barg.op = BLKPG_DEL_PARTITION;
	/* delete partitions from the highest minor down to 1 (not 0,
	 * which is the whole-disk device itself) */
	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);

	invalidate_partition(block->gdp, 0);
	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
	blkdev_put(bdev, FMODE_READ);
	set_capacity(block->gdp, 0);
}
|
||||
|
||||
/*
 * Register the static dasd block major.
 * Returns 0 on success or the error from register_blkdev.
 */
int dasd_gendisk_init(void)
{
	int rc;

	/* Register to static dasd major 94 */
	rc = register_blkdev(DASD_MAJOR, "dasd");
	if (rc != 0) {
		pr_warning("Registering the device driver with major number "
			   "%d failed\n", DASD_MAJOR);
		return rc;
	}
	return 0;
}
|
||||
|
||||
/* Release the static dasd block major registered in dasd_gendisk_init. */
void dasd_gendisk_exit(void)
{
	unregister_blkdev(DASD_MAJOR, "dasd");
}
|
815
drivers/s390/block/dasd_int.h
Normal file
815
drivers/s390/block/dasd_int.h
Normal file
|
@ -0,0 +1,815 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Horst Hummel <Horst.Hummel@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2009
|
||||
*/
|
||||
|
||||
#ifndef DASD_INT_H
|
||||
#define DASD_INT_H
|
||||
|
||||
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
|
||||
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
|
||||
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
|
||||
|
||||
/*
|
||||
* States a dasd device can have:
|
||||
* new: the dasd_device structure is allocated.
|
||||
* known: the discipline for the device is identified.
|
||||
* basic: the device can do basic i/o.
|
||||
* unfmt: the device could not be analyzed (format is unknown).
|
||||
 * ready: partition detection is done and the device can do block io.
|
||||
* online: the device accepts requests from the block device queue.
|
||||
*
|
||||
* Things to do for startup state transitions:
|
||||
* new -> known: find discipline for the device and create devfs entries.
|
||||
* known -> basic: request irq line for the device.
|
||||
* basic -> ready: do the initial analysis, e.g. format detection,
|
||||
* do block device setup and detect partitions.
|
||||
* ready -> online: schedule the device tasklet.
|
||||
* Things to do for shutdown state transitions:
|
||||
* online -> ready: just set the new device state.
|
||||
* ready -> basic: flush requests from the block device layer, clear
|
||||
* partition information and reset format information.
|
||||
* basic -> known: terminate all requests and free irq.
|
||||
* known -> new: remove devfs entries and forget discipline.
|
||||
*/
|
||||
|
||||
#define DASD_STATE_NEW 0
|
||||
#define DASD_STATE_KNOWN 1
|
||||
#define DASD_STATE_BASIC 2
|
||||
#define DASD_STATE_UNFMT 3
|
||||
#define DASD_STATE_READY 4
|
||||
#define DASD_STATE_ONLINE 5
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/log2.h>
|
||||
#include <asm/ccwdev.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <asm/debug.h>
|
||||
#include <asm/dasd.h>
|
||||
#include <asm/idals.h>
|
||||
|
||||
/* DASD discipline magic */
|
||||
#define DASD_ECKD_MAGIC 0xC5C3D2C4
|
||||
#define DASD_DIAG_MAGIC 0xC4C9C1C7
|
||||
#define DASD_FBA_MAGIC 0xC6C2C140
|
||||
|
||||
/*
|
||||
* SECTION: Type definitions
|
||||
*/
|
||||
struct dasd_device;
|
||||
struct dasd_block;
|
||||
|
||||
/* BIT DEFINITIONS FOR SENSE DATA */
|
||||
#define DASD_SENSE_BIT_0 0x80
|
||||
#define DASD_SENSE_BIT_1 0x40
|
||||
#define DASD_SENSE_BIT_2 0x20
|
||||
#define DASD_SENSE_BIT_3 0x10
|
||||
|
||||
/* BIT DEFINITIONS FOR SIM SENSE */
|
||||
#define DASD_SIM_SENSE 0x0F
|
||||
#define DASD_SIM_MSG_TO_OP 0x03
|
||||
#define DASD_SIM_LOG 0x0C
|
||||
|
||||
/* lock class for nested cdev lock */
|
||||
#define CDEV_NESTED_FIRST 1
|
||||
#define CDEV_NESTED_SECOND 2
|
||||
|
||||
/*
|
||||
* SECTION: MACROs for klogd and s390 debug feature (dbf)
|
||||
*/
|
||||
#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
|
||||
do { \
|
||||
debug_sprintf_event(d_device->debug_area, \
|
||||
d_level, \
|
||||
d_str "\n", \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
|
||||
do { \
|
||||
debug_sprintf_exception(d_device->debug_area, \
|
||||
d_level, \
|
||||
d_str "\n", \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
#define DBF_EVENT(d_level, d_str, d_data...)\
|
||||
do { \
|
||||
debug_sprintf_event(dasd_debug_area, \
|
||||
d_level,\
|
||||
d_str "\n", \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
|
||||
do { \
|
||||
struct ccw_dev_id __dev_id; \
|
||||
ccw_device_get_id(d_cdev, &__dev_id); \
|
||||
debug_sprintf_event(dasd_debug_area, \
|
||||
d_level, \
|
||||
"0.%x.%04x " d_str "\n", \
|
||||
__dev_id.ssid, __dev_id.devno, d_data); \
|
||||
} while (0)
|
||||
|
||||
#define DBF_EXC(d_level, d_str, d_data...)\
|
||||
do { \
|
||||
debug_sprintf_exception(dasd_debug_area, \
|
||||
d_level,\
|
||||
d_str "\n", \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
/* limit size for an errorstring */
|
||||
#define ERRORLENGTH 30
|
||||
|
||||
/* definition of dbf debug levels */
|
||||
#define DBF_EMERG 0 /* system is unusable */
|
||||
#define DBF_ALERT 1 /* action must be taken immediately */
|
||||
#define DBF_CRIT 2 /* critical conditions */
|
||||
#define DBF_ERR 3 /* error conditions */
|
||||
#define DBF_WARNING 4 /* warning conditions */
|
||||
#define DBF_NOTICE 5 /* normal but significant condition */
|
||||
#define DBF_INFO 6 /* informational */
|
||||
#define DBF_DEBUG 6 /* debug-level messages */
|
||||
|
||||
/* messages to be written via klogd and dbf */
|
||||
#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
|
||||
do { \
|
||||
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
|
||||
dev_name(&d_device->cdev->dev), d_args); \
|
||||
DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
|
||||
} while(0)
|
||||
|
||||
#define MESSAGE(d_loglevel,d_string,d_args...)\
|
||||
do { \
|
||||
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
|
||||
DBF_EVENT(DBF_ALERT, d_string, d_args); \
|
||||
} while(0)
|
||||
|
||||
/* messages to be written via klogd only */
|
||||
#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
|
||||
do { \
|
||||
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
|
||||
dev_name(&d_device->cdev->dev), d_args); \
|
||||
} while(0)
|
||||
|
||||
#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
|
||||
do { \
|
||||
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
|
||||
} while(0)
|
||||
|
||||
/*
 * One channel program request as it travels through the driver's
 * device and block request queues, including retry/ERP bookkeeping
 * and per-request timing statistics.
 */
struct dasd_ccw_req {
	unsigned int magic;		/* Eye catcher */
	struct list_head devlist;	/* for dasd_device request queue */
	struct list_head blocklist;	/* for dasd_block request queue */

	/* Where to execute what... */
	struct dasd_block *block;	/* the originating block device */
	struct dasd_device *memdev;	/* the device used to allocate this */
	struct dasd_device *startdev;	/* device the request is started on */
	struct dasd_device *basedev;	/* base device if no block->base */
	void *cpaddr;			/* address of ccw or tcw */
	unsigned char cpmode;		/* 0 = cmd mode, 1 = itcw */
	char status;			/* status of this request */
	short retries;			/* A retry counter */
	unsigned long flags;		/* flags of this request */

	/* ... and how */
	unsigned long starttime;	/* jiffies time of request start */
	unsigned long expires;		/* expiration period in jiffies */
	char lpm;			/* logical path mask */
	void *data;			/* pointer to data area */

	/* these are important for recovering erroneous requests */
	int intrc;			/* internal error, e.g. from start_IO */
	struct irb irb;			/* device status in case of an error */
	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
	void *function; 		/* originating ERP action */

	/* these are for statistics only */
	unsigned long long buildclk;	/* TOD-clock of request generation */
	unsigned long long startclk;	/* TOD-clock of request start */
	unsigned long long stopclk;	/* TOD-clock of request interrupt */
	unsigned long long endclk;	/* TOD-clock of request termination */

	/* Callback that is called after reaching final status. */
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
};
|
||||
|
||||
/*
|
||||
* dasd_ccw_req -> status can be:
|
||||
*/
|
||||
#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
|
||||
#define DASD_CQR_DONE 0x01 /* request is completed successfully */
|
||||
#define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */
|
||||
#define DASD_CQR_IN_ERP 0x03 /* request is in recovery */
|
||||
#define DASD_CQR_FAILED 0x04 /* request is finally failed */
|
||||
#define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */
|
||||
|
||||
#define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */
|
||||
#define DASD_CQR_IN_IO 0x81 /* request is currently in IO */
|
||||
#define DASD_CQR_ERROR 0x82 /* request is completed with error */
|
||||
#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
|
||||
#define DASD_CQR_CLEARED 0x84 /* request was cleared */
|
||||
#define DASD_CQR_SUCCESS 0x85 /* request was successful */
|
||||
|
||||
/* default expiration time*/
|
||||
#define DASD_EXPIRES 300
|
||||
#define DASD_EXPIRES_MAX 40000000
|
||||
#define DASD_RETRIES 256
|
||||
#define DASD_RETRIES_MAX 32768
|
||||
|
||||
/* per dasd_ccw_req flags */
|
||||
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
|
||||
#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
|
||||
#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
|
||||
#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
|
||||
* stolen. Should not be combined with
|
||||
* DASD_CQR_FLAGS_USE_ERP
|
||||
*/
|
||||
|
||||
/* Signature for error recovery functions. */
|
||||
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
|
||||
|
||||
/*
 * Unique identifier for dasd device.
 */
/* values for dasd_uid.type (unit address type) */
#define UA_NOT_CONFIGURED  0x00
#define UA_BASE_DEVICE	   0x01
#define UA_BASE_PAV_ALIAS  0x02
#define UA_HYPER_PAV_ALIAS 0x03

struct dasd_uid {
	__u8 type;		/* one of the UA_* values above */
	char vendor[4];
	char serial[15];
	__u16 ssid;
	__u8 real_unit_addr;
	__u8 base_unit_addr;
	char vduit[33];
};
|
||||
|
||||
/*
 * the struct dasd_discipline is
 * sth like a table of virtual functions, if you think of dasd_eckd
 * inheriting dasd...
 * no, currently we are not planning to reimplement the driver in C++
 */
struct dasd_discipline {
	struct module *owner;
	char ebcname[8];	/* a name used for tagging and printks */
	char name[8];		/* a name used for tagging and printks */
	int max_blocks;		/* maximum number of blocks to be chained */

	struct list_head list;	/* used for list of disciplines */

	/*
	 * Device recognition functions. check_device is used to verify
	 * the sense data and the information returned by read device
	 * characteristics. It returns 0 if the discipline can be used
	 * for the device in question. uncheck_device is called during
	 * device shutdown to deregister a device from its discipline.
	 */
	int (*check_device) (struct dasd_device *);
	void (*uncheck_device) (struct dasd_device *);

	/*
	 * do_analysis is used in the step from device state "basic" to
	 * state "accept". It returns 0 if the device can be made ready,
	 * it returns -EMEDIUMTYPE if the device can't be made ready or
	 * -EAGAIN if do_analysis started a ccw that needs to complete
	 * before the analysis may be repeated.
	 */
	int (*do_analysis) (struct dasd_block *);

	/*
	 * This function is called, when new paths become available.
	 * Disciplines may use this callback to do necessary setup work,
	 * e.g. verify that new path is compatible with the current
	 * configuration.
	 */
	int (*verify_path)(struct dasd_device *, __u8);

	/*
	 * Last things to do when a device is set online, and first things
	 * when it is set offline.
	 */
	int (*basic_to_ready) (struct dasd_device *);
	int (*online_to_ready) (struct dasd_device *);
	int (*basic_to_known)(struct dasd_device *);

	/*
	 * Device operation functions. build_cp creates a ccw chain for
	 * a block device request, start_io starts the request and
	 * term_IO cancels it (e.g. in case of a timeout). format_device
	 * returns a ccw chain to be used to format the device.
	 * handle_terminated_request allows to examine a cqr and prepare
	 * it for retry.
	 * NOTE(review): the original comment here began with a stray
	 * "(struct dasd_device *);" fragment -- possibly a leftover from
	 * a removed member declaration; confirm against upstream.
	 */
	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
					  struct dasd_block *,
					  struct request *);
	int (*start_IO) (struct dasd_ccw_req *);
	int (*term_IO) (struct dasd_ccw_req *);
	void (*handle_terminated_request) (struct dasd_ccw_req *);
	int (*format_device) (struct dasd_device *,
			      struct format_data_t *, int enable_pav);
	int (*free_cp) (struct dasd_ccw_req *, struct request *);

	/*
	 * Error recovery functions. examine_error() returns a value that
	 * indicates what to do for an error condition. If examine_error()
	 * returns 'dasd_era_recover' erp_action() is called to create a
	 * special error recovery ccw. erp_postaction() is called after
	 * an error recovery ccw has finished its execution. dump_sense
	 * is called for every error condition to print the sense data
	 * to the console.
	 */
	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
			    struct irb *);
	void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
	void (*check_for_device_change) (struct dasd_device *,
					 struct dasd_ccw_req *,
					 struct irb *);

	/* i/o control functions. */
	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);

	/* suspend/resume functions */
	int (*freeze) (struct dasd_device *);
	int (*restore) (struct dasd_device *);

	/* reload device after state change */
	int (*reload) (struct dasd_device *);

	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
	void (*kick_validate) (struct dasd_device *);
	int (*check_attention)(struct dasd_device *, __u8);
};
|
||||
|
||||
extern struct dasd_discipline *dasd_diag_discipline_pointer;
|
||||
|
||||
/*
|
||||
* Notification numbers for extended error reporting notifications:
|
||||
* The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
|
||||
* eer pointer) is freed. The error reporting module needs to do all necessary
|
||||
* cleanup steps.
|
||||
* The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
|
||||
*/
|
||||
#define DASD_EER_DISABLE 0
|
||||
#define DASD_EER_TRIGGER 1
|
||||
|
||||
/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
|
||||
#define DASD_EER_FATALERROR 1
|
||||
#define DASD_EER_NOPATH 2
|
||||
#define DASD_EER_STATECHANGE 3
|
||||
#define DASD_EER_PPRCSUSPEND 4
|
||||
|
||||
/*
 * Channel path state of a device; each __u8 is a path bitmask.
 * NOTE(review): opm/tbvpm/ppm/npm expansions are not visible here --
 * confirm their meaning against the dasd core code.
 */
struct dasd_path {
	__u8 opm;
	__u8 tbvpm;
	__u8 ppm;
	__u8 npm;
	/* paths that are not used because of a special condition */
	__u8 cablepm; /* miss-cabled */
	__u8 hpfpm;   /* the HPF requirements of the other paths are not met */
	__u8 cuirpm;  /* CUIR varied offline */
};

/* I/O statistics counters exported via the dasd profiling interface. */
struct dasd_profile_info {
	/* legacy part of profile data, as in dasd_profile_info_t */
	unsigned int dasd_io_reqs;	 /* number of requests processed */
	unsigned int dasd_io_sects;	 /* number of sectors processed */
	unsigned int dasd_io_secs[32];	 /* histogram of request's sizes */
	unsigned int dasd_io_times[32];	 /* histogram of requests's times */
	unsigned int dasd_io_timps[32];	 /* h. of requests's times per sector */
	unsigned int dasd_io_time1[32];	 /* hist. of time from build to start */
	unsigned int dasd_io_time2[32];	 /* hist. of time from start to irq */
	unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */
	unsigned int dasd_io_time3[32];	 /* hist. of time from irq to end */
	unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */

	/* new data */
	struct timespec starttod;	   /* time of start or last reset */
	unsigned int dasd_io_alias;	   /* requests using an alias */
	unsigned int dasd_io_tpm;	   /* requests using transport mode */
	unsigned int dasd_read_reqs;	   /* total number of read requests */
	unsigned int dasd_read_sects;	   /* total number read sectors */
	unsigned int dasd_read_alias;	   /* read request using an alias */
	unsigned int dasd_read_tpm;	   /* read requests in transport mode */
	unsigned int dasd_read_secs[32];   /* histogram of request's sizes */
	unsigned int dasd_read_times[32];  /* histogram of requests's times */
	unsigned int dasd_read_time1[32];  /* hist. time from build to start */
	unsigned int dasd_read_time2[32];  /* hist. of time from start to irq */
	unsigned int dasd_read_time3[32];  /* hist. of time from irq to end */
	unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
};

/* Profiling data plus its debugfs entry and protecting lock. */
struct dasd_profile {
	struct dentry *dentry;
	struct dasd_profile_info *data;
	spinlock_t lock;
};
|
||||
|
||||
/*
 * Per-CCW-device state: discipline binding, device state machine,
 * the device-level ccw queue with its static memory pools, and the
 * tasklet/work items driving it.
 */
struct dasd_device {
	/* Block device stuff. */
	struct dasd_block *block;

	unsigned int devindex;
	unsigned long flags;	   /* per device flags */
	unsigned short features;   /* copy of devmap-features (read-only!) */

	/* extended error reporting stuff (eer) */
	struct dasd_ccw_req *eer_cqr;

	/* Device discipline stuff. */
	struct dasd_discipline *discipline;
	struct dasd_discipline *base_discipline;
	char *private;		/* discipline-private data */
	struct dasd_path path_data;

	/* Device state and target state. */
	int state, target;
	struct mutex state_mutex;
	int stopped;		/* device (ccw_device_start) was stopped */

	/* reference count. */
	atomic_t ref_count;

	/* ccw queue and memory for static ccw/erp buffers. */
	struct list_head ccw_queue;
	spinlock_t mem_lock;
	void *ccw_mem;
	void *erp_mem;
	struct list_head ccw_chunks;
	struct list_head erp_chunks;

	atomic_t tasklet_scheduled;
	struct tasklet_struct tasklet;
	struct work_struct kick_work;
	struct work_struct restore_device;
	struct work_struct reload_device;
	struct work_struct kick_validate;
	struct timer_list timer;

	debug_info_t *debug_area;

	struct ccw_device *cdev;

	/* hook for alias management */
	struct list_head alias_list;

	/* default expiration time in s */
	unsigned long default_expires;
	unsigned long default_retries;

	unsigned long blk_timeout;

	struct dentry *debugfs_dentry;
	struct dasd_profile profile;
};
|
||||
|
||||
/*
 * The block device side of a DASD: gendisk/request queue plumbing,
 * geometry and the per-block ccw request queue.
 */
struct dasd_block {
	/* Block device stuff. */
	struct gendisk *gdp;
	struct request_queue *request_queue;
	spinlock_t request_queue_lock;
	struct block_device *bdev;
	atomic_t open_count;

	unsigned long long blocks;	/* size of volume in blocks */
	unsigned int bp_block;		/* bytes per block */
	unsigned int s2b_shift;		/* log2 (bp_block/512) */

	struct dasd_device *base;	/* owning channel device */
	struct list_head ccw_queue;
	spinlock_t queue_lock;		/* protects ccw_queue */

	atomic_t tasklet_scheduled;
	struct tasklet_struct tasklet;
	struct timer_list timer;

	struct dentry *debugfs_dentry;
	struct dasd_profile profile;
};
|
||||
|
||||
/*
 * Payload handed to the attention-interrupt handling: the affected
 * device and the last-path-used mask from the irb.
 */
struct dasd_attention_data {
	struct dasd_device *device;
	__u8 lpum;			/* last path used mask */
};
|
||||
|
||||
/* reasons why device (ccw_device_start) was stopped */
|
||||
#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
|
||||
#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
|
||||
#define DASD_STOPPED_PENDING 4 /* long busy */
|
||||
#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
|
||||
#define DASD_STOPPED_SU 16 /* summary unit check handling */
|
||||
#define DASD_STOPPED_PM 32 /* pm state transition */
|
||||
#define DASD_UNRESUMED_PM 64 /* pm resume failed state */
|
||||
|
||||
/* per device flags */
|
||||
#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
|
||||
#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
|
||||
#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
|
||||
#define DASD_FLAG_DEVICE_RO 6 /* The device itself is read-only. Don't
|
||||
* confuse this with the user specified
|
||||
* read-only feature.
|
||||
*/
|
||||
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
|
||||
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
|
||||
#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
|
||||
#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
|
||||
#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
|
||||
#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
|
||||
|
||||
#define DASD_SLEEPON_START_TAG ((void *) 1)
|
||||
#define DASD_SLEEPON_END_TAG ((void *) 2)
|
||||
|
||||
void dasd_put_device_wake(struct dasd_device *);
|
||||
|
||||
/*
|
||||
* Reference count inliners
|
||||
*/
|
||||
/*
 * Take a reference on the device.  Must be balanced by a later
 * dasd_put_device().
 */
static inline void
dasd_get_device(struct dasd_device *device)
{
	atomic_inc(&device->ref_count);
}
|
||||
|
||||
/*
 * Drop a reference on the device.  The last dropper wakes whoever is
 * waiting for the device to become unused (dasd_put_device_wake).
 */
static inline void
dasd_put_device(struct dasd_device *device)
{
	if (atomic_dec_return(&device->ref_count) == 0)
		dasd_put_device_wake(device);
}
|
||||
|
||||
/*
|
||||
* The static memory in ccw_mem and erp_mem is managed by a sorted
|
||||
* list of free memory chunks.
|
||||
*/
|
||||
/*
 * Header of a free chunk in the static ccw_mem/erp_mem pools.  The
 * header lives directly in front of the usable area; 'size' counts the
 * usable bytes following it.  8-byte alignment keeps the payload
 * suitably aligned for ccw structures.
 */
struct dasd_mchunk
{
	struct list_head list;
	unsigned long size;
} __attribute__ ((aligned(8)));
|
||||
|
||||
/*
 * Initialize a chunk free list over the memory area [mem, mem+size):
 * the whole area becomes one free chunk, minus the space consumed by
 * its own header.
 */
static inline void
dasd_init_chunklist(struct list_head *chunk_list, void *mem,
		    unsigned long size)
{
	struct dasd_mchunk *chunk;

	INIT_LIST_HEAD(chunk_list);
	chunk = (struct dasd_mchunk *) mem;
	chunk->size = size - sizeof(struct dasd_mchunk);
	list_add(&chunk->list, chunk_list);
}
|
||||
|
||||
/*
 * Allocate 'size' bytes (rounded up to a multiple of 8) from the chunk
 * list.  First-fit: a big enough chunk is either split - the allocation
 * is carved from its *end*, leaving the remainder in place on the list -
 * or, if splitting would not leave room for another header, handed out
 * whole.  Returns a pointer just past the chunk header, or NULL if no
 * chunk fits.  Caller must hold the lock protecting chunk_list.
 */
static inline void *
dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
{
	struct dasd_mchunk *chunk, *tmp;

	size = (size + 7L) & -8L;	/* round up to 8-byte multiple */
	list_for_each_entry(chunk, chunk_list, list) {
		if (chunk->size < size)
			continue;
		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
			/* Split: place the new header at the tail end. */
			char *endaddr = (char *) (chunk + 1) + chunk->size;
			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
			tmp->size = size;
			chunk->size -= size + sizeof(struct dasd_mchunk);
			chunk = tmp;
		} else
			/* Exact-ish fit: hand out the whole chunk. */
			list_del(&chunk->list);
		return (void *) (chunk + 1);
	}
	return NULL;
}
|
||||
|
||||
/*
 * Return a chunk obtained from dasd_alloc_chunk() to the address-sorted
 * free list, coalescing with the right and/or left neighbour when the
 * areas are contiguous.  Caller must hold the lock protecting
 * chunk_list.
 */
static inline void
dasd_free_chunk(struct list_head *chunk_list, void *mem)
{
	struct dasd_mchunk *chunk, *tmp;
	struct list_head *p, *left;

	/* Step back over the header that precedes the user area. */
	chunk = (struct dasd_mchunk *)
		((char *) mem - sizeof(struct dasd_mchunk));
	/* Find out the left neighbour in chunk_list. */
	left = chunk_list;
	list_for_each(p, chunk_list) {
		if (list_entry(p, struct dasd_mchunk, list) > chunk)
			break;
		left = p;
	}
	/* Try to merge with right neighbour = next element from left. */
	if (left->next != chunk_list) {
		tmp = list_entry(left->next, struct dasd_mchunk, list);
		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
			list_del(&tmp->list);
			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
		}
	}
	/* Try to merge with left neighbour. */
	if (left != chunk_list) {
		tmp = list_entry(left, struct dasd_mchunk, list);
		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
			return;	/* absorbed into left neighbour, done */
		}
	}
	__list_add(&chunk->list, left, left->next);
}
|
||||
|
||||
/*
|
||||
* Check if bsize is in { 512, 1024, 2048, 4096 }
|
||||
*/
|
||||
static inline int
|
||||
dasd_check_blocksize(int bsize)
|
||||
{
|
||||
if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
|
||||
return -EMEDIUMTYPE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* externals in dasd.c */
|
||||
#define DASD_PROFILE_OFF 0
|
||||
#define DASD_PROFILE_ON 1
|
||||
#define DASD_PROFILE_GLOBAL_ONLY 2
|
||||
|
||||
extern debug_info_t *dasd_debug_area;
|
||||
extern struct dasd_profile_info dasd_global_profile_data;
|
||||
extern unsigned int dasd_global_profile_level;
|
||||
extern const struct block_device_operations dasd_device_operations;
|
||||
|
||||
extern struct kmem_cache *dasd_page_cache;
|
||||
|
||||
struct dasd_ccw_req *
|
||||
dasd_kmalloc_request(int , int, int, struct dasd_device *);
|
||||
struct dasd_ccw_req *
|
||||
dasd_smalloc_request(int , int, int, struct dasd_device *);
|
||||
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
|
||||
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
|
||||
void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
|
||||
|
||||
/*
 * Store a (possibly 31-bit normalized) data address into a ccw.  Thin
 * wrapper around set_normalized_cda(); the device argument is unused
 * but kept for interface symmetry with the smalloc variant.
 */
static inline int
dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
{
	return set_normalized_cda(ccw, cda);
}
|
||||
|
||||
struct dasd_device *dasd_alloc_device(void);
|
||||
void dasd_free_device(struct dasd_device *);
|
||||
|
||||
struct dasd_block *dasd_alloc_block(void);
|
||||
void dasd_free_block(struct dasd_block *);
|
||||
|
||||
enum blk_eh_timer_return dasd_times_out(struct request *req);
|
||||
|
||||
void dasd_enable_device(struct dasd_device *);
|
||||
void dasd_set_target_state(struct dasd_device *, int);
|
||||
void dasd_kick_device(struct dasd_device *);
|
||||
void dasd_restore_device(struct dasd_device *);
|
||||
void dasd_reload_device(struct dasd_device *);
|
||||
|
||||
void dasd_add_request_head(struct dasd_ccw_req *);
|
||||
void dasd_add_request_tail(struct dasd_ccw_req *);
|
||||
int dasd_start_IO(struct dasd_ccw_req *);
|
||||
int dasd_term_IO(struct dasd_ccw_req *);
|
||||
void dasd_schedule_device_bh(struct dasd_device *);
|
||||
void dasd_schedule_block_bh(struct dasd_block *);
|
||||
int dasd_sleep_on(struct dasd_ccw_req *);
|
||||
int dasd_sleep_on_queue(struct list_head *);
|
||||
int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
|
||||
int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
|
||||
void dasd_device_set_timer(struct dasd_device *, int);
|
||||
void dasd_device_clear_timer(struct dasd_device *);
|
||||
void dasd_block_set_timer(struct dasd_block *, int);
|
||||
void dasd_block_clear_timer(struct dasd_block *);
|
||||
int dasd_cancel_req(struct dasd_ccw_req *);
|
||||
int dasd_flush_device_queue(struct dasd_device *);
|
||||
int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
|
||||
void dasd_generic_remove (struct ccw_device *cdev);
|
||||
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
|
||||
int dasd_generic_set_offline (struct ccw_device *cdev);
|
||||
int dasd_generic_notify(struct ccw_device *, int);
|
||||
int dasd_generic_last_path_gone(struct dasd_device *);
|
||||
int dasd_generic_path_operational(struct dasd_device *);
|
||||
void dasd_generic_shutdown(struct ccw_device *);
|
||||
|
||||
void dasd_generic_handle_state_change(struct dasd_device *);
|
||||
int dasd_generic_pm_freeze(struct ccw_device *);
|
||||
int dasd_generic_restore_device(struct ccw_device *);
|
||||
enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
|
||||
void dasd_generic_path_event(struct ccw_device *, int *);
|
||||
int dasd_generic_verify_path(struct dasd_device *, __u8);
|
||||
|
||||
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
|
||||
char *dasd_get_sense(struct irb *);
|
||||
|
||||
void dasd_device_set_stop_bits(struct dasd_device *, int);
|
||||
void dasd_device_remove_stop_bits(struct dasd_device *, int);
|
||||
|
||||
int dasd_device_is_ro(struct dasd_device *);
|
||||
|
||||
void dasd_profile_reset(struct dasd_profile *);
|
||||
int dasd_profile_on(struct dasd_profile *);
|
||||
void dasd_profile_off(struct dasd_profile *);
|
||||
void dasd_global_profile_reset(void);
|
||||
char *dasd_get_user_string(const char __user *, size_t);
|
||||
|
||||
/* externals in dasd_devmap.c */
|
||||
extern int dasd_max_devindex;
|
||||
extern int dasd_probeonly;
|
||||
extern int dasd_autodetect;
|
||||
extern int dasd_nopav;
|
||||
extern int dasd_nofcx;
|
||||
|
||||
int dasd_devmap_init(void);
|
||||
void dasd_devmap_exit(void);
|
||||
|
||||
struct dasd_device *dasd_create_device(struct ccw_device *);
|
||||
void dasd_delete_device(struct dasd_device *);
|
||||
|
||||
int dasd_get_feature(struct ccw_device *, int);
|
||||
int dasd_set_feature(struct ccw_device *, int, int);
|
||||
|
||||
int dasd_add_sysfs_files(struct ccw_device *);
|
||||
void dasd_remove_sysfs_files(struct ccw_device *);
|
||||
|
||||
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
|
||||
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
|
||||
struct dasd_device *dasd_device_from_devindex(int);
|
||||
|
||||
void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
|
||||
struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
|
||||
|
||||
int dasd_parse(void);
|
||||
int dasd_busid_known(const char *);
|
||||
|
||||
/* externals in dasd_gendisk.c */
|
||||
int dasd_gendisk_init(void);
|
||||
void dasd_gendisk_exit(void);
|
||||
int dasd_gendisk_alloc(struct dasd_block *);
|
||||
void dasd_gendisk_free(struct dasd_block *);
|
||||
int dasd_scan_partitions(struct dasd_block *);
|
||||
void dasd_destroy_partitions(struct dasd_block *);
|
||||
|
||||
/* externals in dasd_ioctl.c */
|
||||
int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
|
||||
|
||||
/* externals in dasd_proc.c */
|
||||
int dasd_proc_init(void);
|
||||
void dasd_proc_exit(void);
|
||||
|
||||
/* externals in dasd_erp.c */
|
||||
struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
|
||||
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
|
||||
struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
|
||||
struct dasd_device *);
|
||||
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
|
||||
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
|
||||
void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
|
||||
|
||||
/* externals in dasd_3990_erp.c */
|
||||
struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
|
||||
void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
|
||||
|
||||
/* externals in dasd_eer.c */
|
||||
#ifdef CONFIG_DASD_EER
|
||||
int dasd_eer_init(void);
|
||||
void dasd_eer_exit(void);
|
||||
int dasd_eer_enable(struct dasd_device *);
|
||||
void dasd_eer_disable(struct dasd_device *);
|
||||
void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
|
||||
unsigned int id);
|
||||
void dasd_eer_snss(struct dasd_device *);
|
||||
|
||||
static inline int dasd_eer_enabled(struct dasd_device *device)
|
||||
{
|
||||
return device->eer_cqr != NULL;
|
||||
}
|
||||
#else
|
||||
#define dasd_eer_init() (0)
|
||||
#define dasd_eer_exit() do { } while (0)
|
||||
#define dasd_eer_enable(d) (0)
|
||||
#define dasd_eer_disable(d) do { } while (0)
|
||||
#define dasd_eer_write(d,c,i) do { } while (0)
|
||||
#define dasd_eer_snss(d) do { } while (0)
|
||||
#define dasd_eer_enabled(d) (0)
|
||||
#endif /* CONFIG_DASD_EER */
|
||||
|
||||
#endif /* DASD_H */
|
579
drivers/s390/block/dasd_ioctl.c
Normal file
579
drivers/s390/block/dasd_ioctl.c
Normal file
|
@ -0,0 +1,579 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Horst Hummel <Horst.Hummel@de.ibm.com>
|
||||
* Carsten Otte <Cotte@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
* Copyright IBM Corp. 1999, 2001
|
||||
*
|
||||
* i/o controls for the dasd driver.
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd"
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/blkpg.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/compat.h>
|
||||
#include <asm/ccwdev.h>
|
||||
#include <asm/schid.h>
|
||||
#include <asm/cmb.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* This is ugly... */
|
||||
#define PRINTK_HEADER "dasd_ioctl:"
|
||||
|
||||
#include "dasd_int.h"
|
||||
|
||||
|
||||
/*
 * DASDAPIVER: report the dasd ioctl API version to user space.
 */
static int
dasd_ioctl_api_version(void __user *argp)
{
	int ver = DASD_API_VERSION;
	return put_user(ver, (int __user *)argp);
}
|
||||
|
||||
/*
|
||||
* Enable device.
|
||||
* used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
|
||||
*/
|
||||
/*
 * Enable device.
 * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
 */
static int
dasd_ioctl_enable(struct block_device *bdev)
{
	struct dasd_device *base;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	dasd_enable_device(base);
	/* Formatting the dasd device can change the capacity. */
	mutex_lock(&bdev->bd_mutex);
	i_size_write(bdev->bd_inode,
		     (loff_t)get_capacity(base->block->gdp) << 9);
	mutex_unlock(&bdev->bd_mutex);
	dasd_put_device(base);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Disable device.
|
||||
* Used by dasdfmt. Disable I/O operations but allow ioctls.
|
||||
*/
|
||||
/*
 * Disable device.
 * Used by dasdfmt. Disable I/O operations but allow ioctls.
 */
static int
dasd_ioctl_disable(struct block_device *bdev)
{
	struct dasd_device *base;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;
	/*
	 * Man this is sick. We don't do a real disable but only downgrade
	 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
	 * BIODASDDISABLE to disable accesses to the device via the block
	 * device layer but it still wants to do i/o on the device by
	 * using the BIODASDFMT ioctl. Therefore the correct state for the
	 * device is DASD_STATE_BASIC that allows to do basic i/o.
	 */
	dasd_set_target_state(base, DASD_STATE_BASIC);
	/*
	 * Set i_size to zero, since read, write, etc. check against this
	 * value.
	 */
	mutex_lock(&bdev->bd_mutex);
	i_size_write(bdev->bd_inode, 0);
	mutex_unlock(&bdev->bd_mutex);
	dasd_put_device(base);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Quiesce device.
|
||||
*/
|
||||
static int dasd_ioctl_quiesce(struct dasd_block *block)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct dasd_device *base;
|
||||
|
||||
base = block->base;
|
||||
if (!capable (CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
pr_info("%s: The DASD has been put in the quiesce "
|
||||
"state\n", dev_name(&base->cdev->dev));
|
||||
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
|
||||
dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Resume device.
|
||||
*/
|
||||
static int dasd_ioctl_resume(struct dasd_block *block)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct dasd_device *base;
|
||||
|
||||
base = block->base;
|
||||
if (!capable (CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
pr_info("%s: I/O operations have been resumed "
|
||||
"on the DASD\n", dev_name(&base->cdev->dev));
|
||||
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
|
||||
dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
|
||||
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
|
||||
|
||||
dasd_schedule_block_bh(block);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Abort all failfast I/O on a device.
|
||||
*/
|
||||
/*
 * Abort all failfast I/O on a device.
 * BIODASDABORTIO: set the per-device ABORTALL flag and time out every
 * failfast request currently queued on the block's ccw queue.
 */
static int dasd_ioctl_abortio(struct dasd_block *block)
{
	unsigned long flags;
	struct dasd_device *base;
	struct dasd_ccw_req *cqr, *n;

	base = block->base;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Flag already set: nothing more to do. */
	if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags))
		return 0;
	DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set");

	spin_lock_irqsave(&block->request_queue_lock, flags);
	spin_lock(&block->queue_lock);
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    cqr->callback_data &&
		    cqr->callback_data != DASD_SLEEPON_START_TAG &&
		    cqr->callback_data != DASD_SLEEPON_END_TAG) {
			/*
			 * queue_lock is dropped around blk_abort_request;
			 * the _safe iterator keeps the walk valid.
			 */
			spin_unlock(&block->queue_lock);
			blk_abort_request(cqr->callback_data);
			spin_lock(&block->queue_lock);
		}
	}
	spin_unlock(&block->queue_lock);
	spin_unlock_irqrestore(&block->request_queue_lock, flags);

	dasd_schedule_block_bh(block);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Allow I/O on a device
|
||||
*/
|
||||
static int dasd_ioctl_allowio(struct dasd_block *block)
|
||||
{
|
||||
struct dasd_device *base;
|
||||
|
||||
base = block->base;
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags))
|
||||
DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* performs formatting of _device_ according to _fdata_
|
||||
* Note: The discipline's format_function is assumed to deliver formatting
|
||||
* commands to format multiple units of the device. In terms of the ECKD
|
||||
* devices this means CCWs are generated to format multiple tracks.
|
||||
*/
|
||||
/*
 * performs formatting of _device_ according to _fdata_
 * Note: The discipline's format_function is assumed to deliver formatting
 * commands to format multiple units of the device. In terms of the ECKD
 * devices this means CCWs are generated to format multiple tracks.
 *
 * Returns 0 on success, -EPERM if the discipline cannot format, -EBUSY
 * if the device is not in BASIC state, -EIO after the retries are
 * exhausted, or the discipline's error code.
 */
static int
dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
	struct dasd_device *base;
	int enable_pav = 1;
	int rc, retries;
	int start, stop;

	base = block->base;
	if (base->discipline->format_device == NULL)
		return -EPERM;

	/* Formatting only allowed while the device is disabled (BASIC). */
	if (base->state != DASD_STATE_BASIC) {
		pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
			dev_name(&base->cdev->dev));
		return -EBUSY;
	}

	DBF_DEV_EVENT(DBF_NOTICE, base,
		      "formatting units %u to %u (%u B blocks) flags %u",
		      fdata->start_unit,
		      fdata->stop_unit, fdata->blksize, fdata->intensity);

	/* Since dasdfmt keeps the device open after it was disabled,
	 * there still exists an inode for this device.
	 * We must update i_blkbits, otherwise we might get errors when
	 * enabling the device later.
	 */
	if (fdata->start_unit == 0) {
		struct block_device *bdev = bdget_disk(block->gdp, 0);
		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
		bdput(bdev);
	}

	retries = 255;
	/* backup start- and endtrack for retries */
	start = fdata->start_unit;
	stop = fdata->stop_unit;
	do {
		rc = base->discipline->format_device(base, fdata, enable_pav);
		if (rc) {
			if (rc == -EAGAIN) {
				retries--;
				/* disable PAV in case of errors */
				enable_pav = 0;
				/* restore the range the discipline consumed */
				fdata->start_unit = start;
				fdata->stop_unit = stop;
			} else
				return rc;
		} else
			/* success */
			break;
	} while (retries);

	if (!retries)
		return -EIO;
	else
		return 0;
}
|
||||
|
||||
/*
|
||||
* Format device.
|
||||
*/
|
||||
static int
|
||||
dasd_ioctl_format(struct block_device *bdev, void __user *argp)
|
||||
{
|
||||
struct dasd_device *base;
|
||||
struct format_data_t fdata;
|
||||
int rc;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
if (!argp)
|
||||
return -EINVAL;
|
||||
base = dasd_device_from_gendisk(bdev->bd_disk);
|
||||
if (!base)
|
||||
return -ENODEV;
|
||||
if (base->features & DASD_FEATURE_READONLY ||
|
||||
test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
|
||||
dasd_put_device(base);
|
||||
return -EROFS;
|
||||
}
|
||||
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
|
||||
dasd_put_device(base);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (bdev != bdev->bd_contains) {
|
||||
pr_warning("%s: The specified DASD is a partition and cannot "
|
||||
"be formatted\n",
|
||||
dev_name(&base->cdev->dev));
|
||||
dasd_put_device(base);
|
||||
return -EINVAL;
|
||||
}
|
||||
rc = dasd_format(base->block, &fdata);
|
||||
dasd_put_device(base);
|
||||
return rc;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DASD_PROFILE
|
||||
/*
|
||||
* Reset device profile information
|
||||
*/
|
||||
/*
 * Reset device profile information
 * BIODASDPRRST handler (CONFIG_DASD_PROFILE variant).
 */
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
	dasd_profile_reset(&block->profile);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Return device profile information
|
||||
*/
|
||||
/*
 * Return device profile information
 * BIODASDPRRD handler: snapshot the per-block profile data under the
 * profile lock into a kernel buffer, then copy it to user space.
 * Returns -EIO when profiling is not active (profile.data == NULL).
 */
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
	struct dasd_profile_info_t *data;
	int rc = 0;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_bh(&block->profile.lock);
	if (block->profile.data) {
		/* Field-by-field copy into the ioctl's ABI structure. */
		data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
		data->dasd_io_sects = block->profile.data->dasd_io_sects;
		memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
		       sizeof(data->dasd_io_secs));
		memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
		       sizeof(data->dasd_io_times));
		memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
		       sizeof(data->dasd_io_timps));
		memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
		       sizeof(data->dasd_io_time1));
		memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
		       sizeof(data->dasd_io_time2));
		memcpy(data->dasd_io_time2ps,
		       block->profile.data->dasd_io_time2ps,
		       sizeof(data->dasd_io_time2ps));
		memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
		       sizeof(data->dasd_io_time3));
		memcpy(data->dasd_io_nr_req,
		       block->profile.data->dasd_io_nr_req,
		       sizeof(data->dasd_io_nr_req));
		spin_unlock_bh(&block->profile.lock);
	} else {
		spin_unlock_bh(&block->profile.lock);
		rc = -EIO;
		goto out;
	}
	/* copy_to_user may sleep, hence outside the bh lock. */
	if (copy_to_user(argp, data, sizeof(*data)))
		rc = -EFAULT;
out:
	kfree(data);
	return rc;
}
|
||||
#else
|
||||
/* Profiling support not compiled in: both profile ioctls are rejected. */
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
	return -ENOTTY;
}

static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
	return -ENOTTY;
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
|
||||
*/
|
||||
/*
 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
 * Fills a dasd_information2_t via the discipline's fill_info callback
 * plus generic device data; for BIODASDINFO only the smaller
 * dasd_information_t prefix is copied to user space.
 */
static int dasd_ioctl_information(struct dasd_block *block,
				  unsigned int cmd, void __user *argp)
{
	struct dasd_information2_t *dasd_info;
	struct subchannel_id sch_id;
	struct ccw_dev_id dev_id;
	struct dasd_device *base;
	struct ccw_device *cdev;
	unsigned long flags;
	int rc;

	base = block->base;
	if (!base->discipline || !base->discipline->fill_info)
		return -EINVAL;

	dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
	if (dasd_info == NULL)
		return -ENOMEM;

	/* Let the discipline fill its part (format, characteristics...). */
	rc = base->discipline->fill_info(base, dasd_info);
	if (rc) {
		kfree(dasd_info);
		return rc;
	}

	cdev = base->cdev;
	ccw_device_get_id(cdev, &dev_id);
	ccw_device_get_schid(cdev, &sch_id);

	dasd_info->devno = dev_id.devno;
	dasd_info->schid = sch_id.sch_no;
	dasd_info->cu_type = cdev->id.cu_type;
	dasd_info->cu_model = cdev->id.cu_model;
	dasd_info->dev_type = cdev->id.dev_type;
	dasd_info->dev_model = cdev->id.dev_model;
	dasd_info->status = base->state;
	/*
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions.
	 * This must be hidden from user-space.
	 */
	dasd_info->open_count = atomic_read(&block->open_count);
	if (!block->bdev)
		dasd_info->open_count++;

	/*
	 * check if device is really formatted
	 * LDL / CDL was returned by 'fill_info'
	 */
	if ((base->state < DASD_STATE_READY) ||
	    (dasd_check_blocksize(block->bp_block)))
		dasd_info->format = DASD_FORMAT_NONE;

	dasd_info->features |=
		((base->features & DASD_FEATURE_READONLY) != 0);

	/* Discipline name, e.g. "ECKD" - fixed 4 bytes, not terminated. */
	memcpy(dasd_info->type, base->discipline->name, 4);

	if (block->request_queue->request_fn) {
		struct list_head *l;
#ifdef DASD_EXTENDED_PROFILING
		{
			struct list_head *l;
			spin_lock_irqsave(&block->lock, flags);
			list_for_each(l, &block->request_queue->queue_head)
				dasd_info->req_queue_len++;
			spin_unlock_irqrestore(&block->lock, flags);
		}
#endif				/* DASD_EXTENDED_PROFILING */
		/* Count requests on the channel queue under the cdev lock. */
		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
		list_for_each(l, &base->ccw_queue)
			dasd_info->chanq_len++;
		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
				       flags);
	}

	rc = 0;
	if (copy_to_user(argp, dasd_info,
			 ((cmd == (unsigned int) BIODASDINFO2) ?
			  sizeof(struct dasd_information2_t) :
			  sizeof(struct dasd_information_t))))
		rc = -EFAULT;
	kfree(dasd_info);
	return rc;
}
|
||||
|
||||
/*
|
||||
* Set read only
|
||||
*/
|
||||
/*
 * Set read only
 * BLKROSET handler: toggle the gendisk read-only state and persist the
 * setting in the devmap feature bits.  Rejected on partitions, and a
 * hardware-read-only device cannot be made writable.
 */
static int
dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
{
	struct dasd_device *base;
	int intval, rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (bdev != bdev->bd_contains)
		// ro setting is not allowed for partitions
		return -EINVAL;
	if (get_user(intval, (int __user *)argp))
		return -EFAULT;
	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;
	/* Cannot clear read-only when the device itself is read-only. */
	if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
		dasd_put_device(base);
		return -EROFS;
	}
	set_disk_ro(bdev->bd_disk, intval);
	rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
	dasd_put_device(base);
	return rc;
}
|
||||
|
||||
/*
 * BIODASDREADALLCMB: read all channel-measurement data for the device
 * and copy at most the user-visible structure size (bounded by the
 * ioctl-encoded size) back to user space.
 */
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
				  struct cmbdata __user *argp)
{
	size_t size = _IOC_SIZE(cmd);
	struct cmbdata data;
	int ret;

	ret = cmf_readall(block->base->cdev, &data);
	if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
		return -EFAULT;
	return ret;
}
|
||||
|
||||
/*
 * Main ioctl dispatcher for DASD block devices.  Resolves the gendisk
 * to its dasd_device, handles compat pointers, dispatches the known
 * BIODASD* / BLKROSET commands and finally falls back to the
 * discipline's own ioctl method for anything unknown.
 */
int dasd_ioctl(struct block_device *bdev, fmode_t mode,
	       unsigned int cmd, unsigned long arg)
{
	struct dasd_block *block;
	struct dasd_device *base;
	void __user *argp;
	int rc;

	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;

	/* Commands that transfer data need a user buffer. */
	if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
		PRINT_DEBUG("empty data ptr");
		return -EINVAL;
	}

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;
	block = base->block;
	rc = 0;
	switch (cmd) {
	case BIODASDDISABLE:
		rc = dasd_ioctl_disable(bdev);
		break;
	case BIODASDENABLE:
		rc = dasd_ioctl_enable(bdev);
		break;
	case BIODASDQUIESCE:
		rc = dasd_ioctl_quiesce(block);
		break;
	case BIODASDRESUME:
		rc = dasd_ioctl_resume(block);
		break;
	case BIODASDABORTIO:
		rc = dasd_ioctl_abortio(block);
		break;
	case BIODASDALLOWIO:
		rc = dasd_ioctl_allowio(block);
		break;
	case BIODASDFMT:
		rc = dasd_ioctl_format(bdev, argp);
		break;
	case BIODASDINFO:
		/* fallthrough - both INFO variants share one handler */
	case BIODASDINFO2:
		rc = dasd_ioctl_information(block, cmd, argp);
		break;
	case BIODASDPRRD:
		rc = dasd_ioctl_read_profile(block, argp);
		break;
	case BIODASDPRRST:
		rc = dasd_ioctl_reset_profile(block);
		break;
	case BLKROSET:
		rc = dasd_ioctl_set_ro(bdev, argp);
		break;
	case DASDAPIVER:
		rc = dasd_ioctl_api_version(argp);
		break;
	case BIODASDCMFENABLE:
		rc = enable_cmf(base->cdev);
		break;
	case BIODASDCMFDISABLE:
		rc = disable_cmf(base->cdev);
		break;
	case BIODASDREADALLCMB:
		rc = dasd_ioctl_readall_cmb(block, cmd, argp);
		break;
	default:
		/* if the discipline has an ioctl method try it. */
		rc = -ENOTTY;
		if (base->discipline->ioctl)
			rc = base->discipline->ioctl(block, cmd, argp);
	}
	dasd_put_device(base);
	return rc;
}
|
376
drivers/s390/block/dasd_proc.c
Normal file
376
drivers/s390/block/dasd_proc.c
Normal file
|
@ -0,0 +1,376 @@
|
|||
/*
|
||||
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
|
||||
* Horst Hummel <Horst.Hummel@de.ibm.com>
|
||||
* Carsten Otte <Cotte@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Bugreports.to..: <Linux390@de.ibm.com>
|
||||
 * Copyright IBM Corp. 1999, 2002
|
||||
*
|
||||
* /proc interface for the dasd driver.
|
||||
*
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "dasd"
|
||||
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/proc_fs.h>
|
||||
|
||||
#include <asm/debug.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* This is ugly... */
|
||||
#define PRINTK_HEADER "dasd_proc:"
|
||||
|
||||
#include "dasd_int.h"
|
||||
|
||||
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
|
||||
static struct proc_dir_entry *dasd_devices_entry = NULL;
|
||||
static struct proc_dir_entry *dasd_statistics_entry = NULL;
|
||||
|
||||
static int
|
||||
dasd_devices_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct dasd_device *device;
|
||||
struct dasd_block *block;
|
||||
char *substr;
|
||||
|
||||
device = dasd_device_from_devindex((unsigned long) v - 1);
|
||||
if (IS_ERR(device))
|
||||
return 0;
|
||||
if (device->block)
|
||||
block = device->block;
|
||||
else {
|
||||
dasd_put_device(device);
|
||||
return 0;
|
||||
}
|
||||
/* Print device number. */
|
||||
seq_printf(m, "%s", dev_name(&device->cdev->dev));
|
||||
/* Print discipline string. */
|
||||
if (device->discipline != NULL)
|
||||
seq_printf(m, "(%s)", device->discipline->name);
|
||||
else
|
||||
seq_printf(m, "(none)");
|
||||
/* Print kdev. */
|
||||
if (block->gdp)
|
||||
seq_printf(m, " at (%3d:%6d)",
|
||||
MAJOR(disk_devt(block->gdp)),
|
||||
MINOR(disk_devt(block->gdp)));
|
||||
else
|
||||
seq_printf(m, " at (???:??????)");
|
||||
/* Print device name. */
|
||||
if (block->gdp)
|
||||
seq_printf(m, " is %-8s", block->gdp->disk_name);
|
||||
else
|
||||
seq_printf(m, " is ????????");
|
||||
/* Print devices features. */
|
||||
substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
|
||||
seq_printf(m, "%4s: ", substr);
|
||||
/* Print device status information. */
|
||||
switch (device->state) {
|
||||
case DASD_STATE_NEW:
|
||||
seq_printf(m, "new");
|
||||
break;
|
||||
case DASD_STATE_KNOWN:
|
||||
seq_printf(m, "detected");
|
||||
break;
|
||||
case DASD_STATE_BASIC:
|
||||
seq_printf(m, "basic");
|
||||
break;
|
||||
case DASD_STATE_UNFMT:
|
||||
seq_printf(m, "unformatted");
|
||||
break;
|
||||
case DASD_STATE_READY:
|
||||
case DASD_STATE_ONLINE:
|
||||
seq_printf(m, "active ");
|
||||
if (dasd_check_blocksize(block->bp_block))
|
||||
seq_printf(m, "n/f ");
|
||||
else
|
||||
seq_printf(m,
|
||||
"at blocksize: %d, %lld blocks, %lld MB",
|
||||
block->bp_block, block->blocks,
|
||||
((block->bp_block >> 9) *
|
||||
block->blocks) >> 11);
|
||||
break;
|
||||
default:
|
||||
seq_printf(m, "no stat");
|
||||
break;
|
||||
}
|
||||
dasd_put_device(device);
|
||||
if (dasd_probeonly)
|
||||
seq_printf(m, "(probeonly)");
|
||||
seq_printf(m, "\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
if (*pos >= dasd_max_devindex)
|
||||
return NULL;
|
||||
return (void *)((unsigned long) *pos + 1);
|
||||
}
|
||||
|
||||
static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
{
|
||||
++*pos;
|
||||
return dasd_devices_start(m, pos);
|
||||
}
|
||||
|
||||
static void dasd_devices_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
}
|
||||
|
||||
static const struct seq_operations dasd_devices_seq_ops = {
|
||||
.start = dasd_devices_start,
|
||||
.next = dasd_devices_next,
|
||||
.stop = dasd_devices_stop,
|
||||
.show = dasd_devices_show,
|
||||
};
|
||||
|
||||
static int dasd_devices_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &dasd_devices_seq_ops);
|
||||
}
|
||||
|
||||
static const struct file_operations dasd_devices_file_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = dasd_devices_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DASD_PROFILE
|
||||
static int dasd_stats_all_block_on(void)
|
||||
{
|
||||
int i, rc;
|
||||
struct dasd_device *device;
|
||||
|
||||
rc = 0;
|
||||
for (i = 0; i < dasd_max_devindex; ++i) {
|
||||
device = dasd_device_from_devindex(i);
|
||||
if (IS_ERR(device))
|
||||
continue;
|
||||
if (device->block)
|
||||
rc = dasd_profile_on(&device->block->profile);
|
||||
dasd_put_device(device);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dasd_stats_all_block_off(void)
|
||||
{
|
||||
int i;
|
||||
struct dasd_device *device;
|
||||
|
||||
for (i = 0; i < dasd_max_devindex; ++i) {
|
||||
device = dasd_device_from_devindex(i);
|
||||
if (IS_ERR(device))
|
||||
continue;
|
||||
if (device->block)
|
||||
dasd_profile_off(&device->block->profile);
|
||||
dasd_put_device(device);
|
||||
}
|
||||
}
|
||||
|
||||
static void dasd_stats_all_block_reset(void)
|
||||
{
|
||||
int i;
|
||||
struct dasd_device *device;
|
||||
|
||||
for (i = 0; i < dasd_max_devindex; ++i) {
|
||||
device = dasd_device_from_devindex(i);
|
||||
if (IS_ERR(device))
|
||||
continue;
|
||||
if (device->block)
|
||||
dasd_profile_reset(&device->block->profile);
|
||||
dasd_put_device(device);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Print a 32-bucket histogram as two rows of 16 counters, each counter
 * scaled down by 'factor' so it fits the seven-character column.
 */
static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
{
	int row, col;

	for (row = 0; row < 2; row++) {
		for (col = 0; col < 16; col++)
			seq_printf(m, "%7d ", array[16 * row + col] / factor);
		seq_putc(m, '\n');
	}
}
|
||||
#endif /* CONFIG_DASD_PROFILE */
|
||||
|
||||
/* Render /proc/dasd/statistics from the global profiling counters. */
static int dasd_stats_proc_show(struct seq_file *m, void *v)
{
#ifdef CONFIG_DASD_PROFILE
	struct dasd_profile_info *stats;
	int scale;

	/* Without active profiling there is nothing to print. */
	if (!dasd_global_profile_level) {
		seq_printf(m, "Statistics are off - they might be "
			      "switched on using 'echo set on > "
			      "/proc/dasd/statistics'\n");
		return 0;
	}
	stats = &dasd_global_profile_data;

	/* prevent counter 'overflow' on output */
	scale = 1;
	while (stats->dasd_io_reqs / scale > 9999999)
		scale *= 10;

	seq_printf(m, "%d dasd I/O requests\n", stats->dasd_io_reqs);
	seq_printf(m, "with %u sectors(512B each)\n",
		   stats->dasd_io_sects);
	seq_printf(m, "Scale Factor is %d\n", scale);
	seq_printf(m,
		   " __<4 ___8 __16 __32 __64 _128 "
		   " _256 _512 __1k __2k __4k __8k "
		   " _16k _32k _64k 128k\n");
	seq_printf(m,
		   " _256 _512 __1M __2M __4M __8M "
		   " _16M _32M _64M 128M 256M 512M "
		   " __1G __2G __4G " " _>4G\n");

	seq_printf(m, "Histogram of sizes (512B secs)\n");
	dasd_statistics_array(m, stats->dasd_io_secs, scale);
	seq_printf(m, "Histogram of I/O times (microseconds)\n");
	dasd_statistics_array(m, stats->dasd_io_times, scale);
	seq_printf(m, "Histogram of I/O times per sector\n");
	dasd_statistics_array(m, stats->dasd_io_timps, scale);
	seq_printf(m, "Histogram of I/O time till ssch\n");
	dasd_statistics_array(m, stats->dasd_io_time1, scale);
	seq_printf(m, "Histogram of I/O time between ssch and irq\n");
	dasd_statistics_array(m, stats->dasd_io_time2, scale);
	seq_printf(m, "Histogram of I/O time between ssch "
		      "and irq per sector\n");
	dasd_statistics_array(m, stats->dasd_io_time2ps, scale);
	seq_printf(m, "Histogram of I/O time between irq and end\n");
	dasd_statistics_array(m, stats->dasd_io_time3, scale);
	seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
	dasd_statistics_array(m, stats->dasd_io_nr_req, scale);
#else
	seq_printf(m, "Statistics are not activated in this kernel\n");
#endif
	return 0;
}
|
||||
|
||||
static int dasd_stats_proc_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, dasd_stats_proc_show, NULL);
|
||||
}
|
||||
|
||||
static ssize_t dasd_stats_proc_write(struct file *file,
|
||||
const char __user *user_buf, size_t user_len, loff_t *pos)
|
||||
{
|
||||
#ifdef CONFIG_DASD_PROFILE
|
||||
char *buffer, *str;
|
||||
int rc;
|
||||
|
||||
if (user_len > 65536)
|
||||
user_len = 65536;
|
||||
buffer = dasd_get_user_string(user_buf, user_len);
|
||||
if (IS_ERR(buffer))
|
||||
return PTR_ERR(buffer);
|
||||
|
||||
/* check for valid verbs */
|
||||
str = skip_spaces(buffer);
|
||||
if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
|
||||
/* 'set xxx' was given */
|
||||
str = skip_spaces(str + 4);
|
||||
if (strcmp(str, "on") == 0) {
|
||||
/* switch on statistics profiling */
|
||||
rc = dasd_stats_all_block_on();
|
||||
if (rc) {
|
||||
dasd_stats_all_block_off();
|
||||
goto out_error;
|
||||
}
|
||||
dasd_global_profile_reset();
|
||||
dasd_global_profile_level = DASD_PROFILE_ON;
|
||||
pr_info("The statistics feature has been switched "
|
||||
"on\n");
|
||||
} else if (strcmp(str, "off") == 0) {
|
||||
/* switch off and reset statistics profiling */
|
||||
dasd_global_profile_level = DASD_PROFILE_OFF;
|
||||
dasd_global_profile_reset();
|
||||
dasd_stats_all_block_off();
|
||||
pr_info("The statistics feature has been switched "
|
||||
"off\n");
|
||||
} else
|
||||
goto out_parse_error;
|
||||
} else if (strncmp(str, "reset", 5) == 0) {
|
||||
/* reset the statistics */
|
||||
dasd_global_profile_reset();
|
||||
dasd_stats_all_block_reset();
|
||||
pr_info("The statistics have been reset\n");
|
||||
} else
|
||||
goto out_parse_error;
|
||||
vfree(buffer);
|
||||
return user_len;
|
||||
out_parse_error:
|
||||
rc = -EINVAL;
|
||||
pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
|
||||
str);
|
||||
out_error:
|
||||
vfree(buffer);
|
||||
return rc;
|
||||
#else
|
||||
pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
|
||||
return user_len;
|
||||
#endif /* CONFIG_DASD_PROFILE */
|
||||
}
|
||||
|
||||
static const struct file_operations dasd_stats_proc_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = dasd_stats_proc_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = dasd_stats_proc_write,
|
||||
};
|
||||
|
||||
/*
|
||||
* Create dasd proc-fs entries.
|
||||
* In case creation failed, cleanup and return -ENOENT.
|
||||
*/
|
||||
int
|
||||
dasd_proc_init(void)
|
||||
{
|
||||
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
|
||||
if (!dasd_proc_root_entry)
|
||||
goto out_nodasd;
|
||||
dasd_devices_entry = proc_create("devices",
|
||||
S_IFREG | S_IRUGO | S_IWUSR,
|
||||
dasd_proc_root_entry,
|
||||
&dasd_devices_file_ops);
|
||||
if (!dasd_devices_entry)
|
||||
goto out_nodevices;
|
||||
dasd_statistics_entry = proc_create("statistics",
|
||||
S_IFREG | S_IRUGO | S_IWUSR,
|
||||
dasd_proc_root_entry,
|
||||
&dasd_stats_proc_fops);
|
||||
if (!dasd_statistics_entry)
|
||||
goto out_nostatistics;
|
||||
return 0;
|
||||
|
||||
out_nostatistics:
|
||||
remove_proc_entry("devices", dasd_proc_root_entry);
|
||||
out_nodevices:
|
||||
remove_proc_entry("dasd", NULL);
|
||||
out_nodasd:
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
void
|
||||
dasd_proc_exit(void)
|
||||
{
|
||||
remove_proc_entry("devices", dasd_proc_root_entry);
|
||||
remove_proc_entry("statistics", dasd_proc_root_entry);
|
||||
remove_proc_entry("dasd", NULL);
|
||||
}
|
1084
drivers/s390/block/dcssblk.c
Normal file
1084
drivers/s390/block/dcssblk.c
Normal file
File diff suppressed because it is too large
Load diff
494
drivers/s390/block/scm_blk.c
Normal file
494
drivers/s390/block/scm_blk.c
Normal file
|
@ -0,0 +1,494 @@
|
|||
/*
|
||||
* Block driver for s390 storage class memory.
|
||||
*
|
||||
* Copyright IBM Corp. 2012
|
||||
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "scm_block"
|
||||
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <asm/eadm.h>
|
||||
#include "scm_blk.h"
|
||||
|
||||
debug_info_t *scm_debug;
|
||||
static int scm_major;
|
||||
static DEFINE_SPINLOCK(list_lock);
|
||||
static LIST_HEAD(inactive_requests);
|
||||
static unsigned int nr_requests = 64;
|
||||
static atomic_t nr_devices = ATOMIC_INIT(0);
|
||||
module_param(nr_requests, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
|
||||
|
||||
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("scm:scmdev*");
|
||||
|
||||
static void __scm_free_rq(struct scm_request *scmrq)
|
||||
{
|
||||
struct aob_rq_header *aobrq = to_aobrq(scmrq);
|
||||
|
||||
free_page((unsigned long) scmrq->aob);
|
||||
free_page((unsigned long) scmrq->aidaw);
|
||||
__scm_free_rq_cluster(scmrq);
|
||||
kfree(aobrq);
|
||||
}
|
||||
|
||||
static void scm_free_rqs(void)
|
||||
{
|
||||
struct list_head *iter, *safe;
|
||||
struct scm_request *scmrq;
|
||||
|
||||
spin_lock_irq(&list_lock);
|
||||
list_for_each_safe(iter, safe, &inactive_requests) {
|
||||
scmrq = list_entry(iter, struct scm_request, list);
|
||||
list_del(&scmrq->list);
|
||||
__scm_free_rq(scmrq);
|
||||
}
|
||||
spin_unlock_irq(&list_lock);
|
||||
}
|
||||
|
||||
static int __scm_alloc_rq(void)
|
||||
{
|
||||
struct aob_rq_header *aobrq;
|
||||
struct scm_request *scmrq;
|
||||
|
||||
aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
|
||||
if (!aobrq)
|
||||
return -ENOMEM;
|
||||
|
||||
scmrq = (void *) aobrq->data;
|
||||
scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
|
||||
scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
|
||||
if (!scmrq->aob || !scmrq->aidaw) {
|
||||
__scm_free_rq(scmrq);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (__scm_alloc_rq_cluster(scmrq)) {
|
||||
__scm_free_rq(scmrq);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&scmrq->list);
|
||||
spin_lock_irq(&list_lock);
|
||||
list_add(&scmrq->list, &inactive_requests);
|
||||
spin_unlock_irq(&list_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int scm_alloc_rqs(unsigned int nrqs)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
while (nrqs-- && !ret)
|
||||
ret = __scm_alloc_rq();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct scm_request *scm_request_fetch(void)
|
||||
{
|
||||
struct scm_request *scmrq = NULL;
|
||||
|
||||
spin_lock(&list_lock);
|
||||
if (list_empty(&inactive_requests))
|
||||
goto out;
|
||||
scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
|
||||
list_del(&scmrq->list);
|
||||
out:
|
||||
spin_unlock(&list_lock);
|
||||
return scmrq;
|
||||
}
|
||||
|
||||
static void scm_request_done(struct scm_request *scmrq)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_add(&scmrq->list, &inactive_requests);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
}
|
||||
|
||||
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
|
||||
{
|
||||
return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
|
||||
}
|
||||
|
||||
static void scm_request_prepare(struct scm_request *scmrq)
|
||||
{
|
||||
struct scm_blk_dev *bdev = scmrq->bdev;
|
||||
struct scm_device *scmdev = bdev->gendisk->private_data;
|
||||
struct aidaw *aidaw = scmrq->aidaw;
|
||||
struct msb *msb = &scmrq->aob->msb[0];
|
||||
struct req_iterator iter;
|
||||
struct bio_vec bv;
|
||||
|
||||
msb->bs = MSB_BS_4K;
|
||||
scmrq->aob->request.msb_count = 1;
|
||||
msb->scm_addr = scmdev->address +
|
||||
((u64) blk_rq_pos(scmrq->request) << 9);
|
||||
msb->oc = (rq_data_dir(scmrq->request) == READ) ?
|
||||
MSB_OC_READ : MSB_OC_WRITE;
|
||||
msb->flags |= MSB_FLAG_IDA;
|
||||
msb->data_addr = (u64) aidaw;
|
||||
|
||||
rq_for_each_segment(bv, scmrq->request, iter) {
|
||||
WARN_ON(bv.bv_offset);
|
||||
msb->blk_count += bv.bv_len >> 12;
|
||||
aidaw->data_addr = (u64) page_address(bv.bv_page);
|
||||
aidaw++;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void scm_request_init(struct scm_blk_dev *bdev,
|
||||
struct scm_request *scmrq,
|
||||
struct request *req)
|
||||
{
|
||||
struct aob_rq_header *aobrq = to_aobrq(scmrq);
|
||||
struct aob *aob = scmrq->aob;
|
||||
|
||||
memset(aob, 0, sizeof(*aob));
|
||||
memset(scmrq->aidaw, 0, PAGE_SIZE);
|
||||
aobrq->scmdev = bdev->scmdev;
|
||||
aob->request.cmd_code = ARQB_CMD_MOVE;
|
||||
aob->request.data = (u64) aobrq;
|
||||
scmrq->request = req;
|
||||
scmrq->bdev = bdev;
|
||||
scmrq->retries = 4;
|
||||
scmrq->error = 0;
|
||||
scm_request_cluster_init(scmrq);
|
||||
}
|
||||
|
||||
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
|
||||
{
|
||||
if (atomic_read(&bdev->queued_reqs)) {
|
||||
/* Queue restart is triggered by the next interrupt. */
|
||||
return;
|
||||
}
|
||||
blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
|
||||
}
|
||||
|
||||
void scm_request_requeue(struct scm_request *scmrq)
|
||||
{
|
||||
struct scm_blk_dev *bdev = scmrq->bdev;
|
||||
|
||||
scm_release_cluster(scmrq);
|
||||
blk_requeue_request(bdev->rq, scmrq->request);
|
||||
atomic_dec(&bdev->queued_reqs);
|
||||
scm_request_done(scmrq);
|
||||
scm_ensure_queue_restart(bdev);
|
||||
}
|
||||
|
||||
void scm_request_finish(struct scm_request *scmrq)
|
||||
{
|
||||
struct scm_blk_dev *bdev = scmrq->bdev;
|
||||
|
||||
scm_release_cluster(scmrq);
|
||||
blk_end_request_all(scmrq->request, scmrq->error);
|
||||
atomic_dec(&bdev->queued_reqs);
|
||||
scm_request_done(scmrq);
|
||||
}
|
||||
|
||||
static void scm_blk_request(struct request_queue *rq)
|
||||
{
|
||||
struct scm_device *scmdev = rq->queuedata;
|
||||
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
|
||||
struct scm_request *scmrq;
|
||||
struct request *req;
|
||||
int ret;
|
||||
|
||||
while ((req = blk_peek_request(rq))) {
|
||||
if (req->cmd_type != REQ_TYPE_FS) {
|
||||
blk_start_request(req);
|
||||
blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
|
||||
blk_end_request_all(req, -EIO);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!scm_permit_request(bdev, req)) {
|
||||
scm_ensure_queue_restart(bdev);
|
||||
return;
|
||||
}
|
||||
scmrq = scm_request_fetch();
|
||||
if (!scmrq) {
|
||||
SCM_LOG(5, "no request");
|
||||
scm_ensure_queue_restart(bdev);
|
||||
return;
|
||||
}
|
||||
scm_request_init(bdev, scmrq, req);
|
||||
if (!scm_reserve_cluster(scmrq)) {
|
||||
SCM_LOG(5, "cluster busy");
|
||||
scm_request_done(scmrq);
|
||||
return;
|
||||
}
|
||||
if (scm_need_cluster_request(scmrq)) {
|
||||
atomic_inc(&bdev->queued_reqs);
|
||||
blk_start_request(req);
|
||||
scm_initiate_cluster_request(scmrq);
|
||||
return;
|
||||
}
|
||||
scm_request_prepare(scmrq);
|
||||
atomic_inc(&bdev->queued_reqs);
|
||||
blk_start_request(req);
|
||||
|
||||
ret = eadm_start_aob(scmrq->aob);
|
||||
if (ret) {
|
||||
SCM_LOG(5, "no subchannel");
|
||||
scm_request_requeue(scmrq);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void __scmrq_log_error(struct scm_request *scmrq)
|
||||
{
|
||||
struct aob *aob = scmrq->aob;
|
||||
|
||||
if (scmrq->error == -ETIMEDOUT)
|
||||
SCM_LOG(1, "Request timeout");
|
||||
else {
|
||||
SCM_LOG(1, "Request error");
|
||||
SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
|
||||
}
|
||||
if (scmrq->retries)
|
||||
SCM_LOG(1, "Retry request");
|
||||
else
|
||||
pr_err("An I/O operation to SCM failed with rc=%d\n",
|
||||
scmrq->error);
|
||||
}
|
||||
|
||||
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
|
||||
{
|
||||
struct scm_request *scmrq = data;
|
||||
struct scm_blk_dev *bdev = scmrq->bdev;
|
||||
|
||||
scmrq->error = error;
|
||||
if (error)
|
||||
__scmrq_log_error(scmrq);
|
||||
|
||||
spin_lock(&bdev->lock);
|
||||
list_add_tail(&scmrq->list, &bdev->finished_requests);
|
||||
spin_unlock(&bdev->lock);
|
||||
tasklet_hi_schedule(&bdev->tasklet);
|
||||
}
|
||||
|
||||
static void scm_blk_handle_error(struct scm_request *scmrq)
|
||||
{
|
||||
struct scm_blk_dev *bdev = scmrq->bdev;
|
||||
unsigned long flags;
|
||||
|
||||
if (scmrq->error != -EIO)
|
||||
goto restart;
|
||||
|
||||
/* For -EIO the response block is valid. */
|
||||
switch (scmrq->aob->response.eqc) {
|
||||
case EQC_WR_PROHIBIT:
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
if (bdev->state != SCM_WR_PROHIBIT)
|
||||
pr_info("%lx: Write access to the SCM increment is suspended\n",
|
||||
(unsigned long) bdev->scmdev->address);
|
||||
bdev->state = SCM_WR_PROHIBIT;
|
||||
spin_unlock_irqrestore(&bdev->lock, flags);
|
||||
goto requeue;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
restart:
|
||||
if (!eadm_start_aob(scmrq->aob))
|
||||
return;
|
||||
|
||||
requeue:
|
||||
spin_lock_irqsave(&bdev->rq_lock, flags);
|
||||
scm_request_requeue(scmrq);
|
||||
spin_unlock_irqrestore(&bdev->rq_lock, flags);
|
||||
}
|
||||
|
||||
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
|
||||
{
|
||||
struct scm_request *scmrq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
while (!list_empty(&bdev->finished_requests)) {
|
||||
scmrq = list_first_entry(&bdev->finished_requests,
|
||||
struct scm_request, list);
|
||||
list_del(&scmrq->list);
|
||||
spin_unlock_irqrestore(&bdev->lock, flags);
|
||||
|
||||
if (scmrq->error && scmrq->retries-- > 0) {
|
||||
scm_blk_handle_error(scmrq);
|
||||
|
||||
/* Request restarted or requeued, handle next. */
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (scm_test_cluster_request(scmrq)) {
|
||||
scm_cluster_request_irq(scmrq);
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
continue;
|
||||
}
|
||||
|
||||
scm_request_finish(scmrq);
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&bdev->lock, flags);
|
||||
/* Look out for more requests. */
|
||||
blk_run_queue(bdev->rq);
|
||||
}
|
||||
|
||||
static const struct block_device_operations scm_blk_devops = {
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
|
||||
{
|
||||
struct request_queue *rq;
|
||||
int len, ret = -ENOMEM;
|
||||
unsigned int devindex, nr_max_blk;
|
||||
|
||||
devindex = atomic_inc_return(&nr_devices) - 1;
|
||||
/* scma..scmz + scmaa..scmzz */
|
||||
if (devindex > 701) {
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bdev->scmdev = scmdev;
|
||||
bdev->state = SCM_OPER;
|
||||
spin_lock_init(&bdev->rq_lock);
|
||||
spin_lock_init(&bdev->lock);
|
||||
INIT_LIST_HEAD(&bdev->finished_requests);
|
||||
atomic_set(&bdev->queued_reqs, 0);
|
||||
tasklet_init(&bdev->tasklet,
|
||||
(void (*)(unsigned long)) scm_blk_tasklet,
|
||||
(unsigned long) bdev);
|
||||
|
||||
rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
|
||||
if (!rq)
|
||||
goto out;
|
||||
|
||||
bdev->rq = rq;
|
||||
nr_max_blk = min(scmdev->nr_max_block,
|
||||
(unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
|
||||
|
||||
blk_queue_logical_block_size(rq, 1 << 12);
|
||||
blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
|
||||
blk_queue_max_segments(rq, nr_max_blk);
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
|
||||
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
|
||||
scm_blk_dev_cluster_setup(bdev);
|
||||
|
||||
bdev->gendisk = alloc_disk(SCM_NR_PARTS);
|
||||
if (!bdev->gendisk)
|
||||
goto out_queue;
|
||||
|
||||
rq->queuedata = scmdev;
|
||||
bdev->gendisk->driverfs_dev = &scmdev->dev;
|
||||
bdev->gendisk->private_data = scmdev;
|
||||
bdev->gendisk->fops = &scm_blk_devops;
|
||||
bdev->gendisk->queue = rq;
|
||||
bdev->gendisk->major = scm_major;
|
||||
bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
|
||||
|
||||
len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
|
||||
if (devindex > 25) {
|
||||
len += snprintf(bdev->gendisk->disk_name + len,
|
||||
DISK_NAME_LEN - len, "%c",
|
||||
'a' + (devindex / 26) - 1);
|
||||
devindex = devindex % 26;
|
||||
}
|
||||
snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
|
||||
'a' + devindex);
|
||||
|
||||
/* 512 byte sectors */
|
||||
set_capacity(bdev->gendisk, scmdev->size >> 9);
|
||||
add_disk(bdev->gendisk);
|
||||
return 0;
|
||||
|
||||
out_queue:
|
||||
blk_cleanup_queue(rq);
|
||||
out:
|
||||
atomic_dec(&nr_devices);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
|
||||
{
|
||||
tasklet_kill(&bdev->tasklet);
|
||||
del_gendisk(bdev->gendisk);
|
||||
blk_cleanup_queue(bdev->gendisk->queue);
|
||||
put_disk(bdev->gendisk);
|
||||
}
|
||||
|
||||
void scm_blk_set_available(struct scm_blk_dev *bdev)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
if (bdev->state == SCM_WR_PROHIBIT)
|
||||
pr_info("%lx: Write access to the SCM increment is restored\n",
|
||||
(unsigned long) bdev->scmdev->address);
|
||||
bdev->state = SCM_OPER;
|
||||
spin_unlock_irqrestore(&bdev->lock, flags);
|
||||
}
|
||||
|
||||
/*
 * Module init: register the block major, preallocate the request pool,
 * set up the debug feature and finally register the scm driver.
 */
static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_cluster_size_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);
|
||||
|
||||
/* Module exit: tear down in reverse order of scm_blk_init. */
static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);
|
134
drivers/s390/block/scm_blk.h
Normal file
134
drivers/s390/block/scm_blk.h
Normal file
|
@ -0,0 +1,134 @@
|
|||
#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5

/* Per-increment block device state. */
struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;
	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
	struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;
#endif
};

/* One preallocated I/O carrier: block layer request plus hardware AOB. */
struct scm_request {
	struct scm_blk_dev *bdev;
	struct request *request;
	struct aidaw *aidaw;
	struct aob *aob;
	struct list_head list;
	u8 retries;
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;
	} cluster;
#endif
};

/*
 * Map a request back to the aob_rq_header handed to the eadm layer.
 * The argument is parenthesized so expression arguments expand safely.
 */
#define to_aobrq(rq) container_of((void *)(rq), struct aob_rq_header, data)

int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

int scm_drv_init(void);
void scm_drv_cleanup(void);

#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
/* No-op stubs when cluster write support is configured out. */
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
	return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline bool scm_cluster_size_valid(void)
{
	return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */

extern debug_info_t *scm_debug;

#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)

/* Dump a buffer to the debug feature, buf_size bytes per event. */
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (!debug_level_enabled(scm_debug, level))
		return;
	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}

/* Log address, operational state and rank of an SCM device. */
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}

#endif /* SCM_BLK_H */
|
230
drivers/s390/block/scm_blk_cluster.c
Normal file
230
drivers/s390/block/scm_blk_cluster.c
Normal file
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Block driver for s390 storage class memory.
|
||||
*
|
||||
* Copyright IBM Corp. 2012
|
||||
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <asm/eadm.h>
|
||||
#include "scm_blk.h"
|
||||
|
||||
static unsigned int write_cluster_size = 64;
|
||||
module_param(write_cluster_size, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(write_cluster_size,
|
||||
"Number of pages used for contiguous writes.");
|
||||
|
||||
#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
|
||||
|
||||
void __scm_free_rq_cluster(struct scm_request *scmrq)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!scmrq->cluster.buf)
|
||||
return;
|
||||
|
||||
for (i = 0; i < 2 * write_cluster_size; i++)
|
||||
free_page((unsigned long) scmrq->cluster.buf[i]);
|
||||
|
||||
kfree(scmrq->cluster.buf);
|
||||
}
|
||||
|
||||
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
|
||||
{
|
||||
int i;
|
||||
|
||||
scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
|
||||
GFP_KERNEL);
|
||||
if (!scmrq->cluster.buf)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < 2 * write_cluster_size; i++) {
|
||||
scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
|
||||
if (!scmrq->cluster.buf[i])
|
||||
return -ENOMEM;
|
||||
}
|
||||
INIT_LIST_HEAD(&scmrq->cluster.list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reset the cluster state machine of a (re)used request. */
void scm_request_cluster_init(struct scm_request *scmrq)
{
	scmrq->cluster.state = CLUSTER_NONE;
}
|
||||
|
||||
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
|
||||
{
|
||||
unsigned long firstA, lastA, firstB, lastB;
|
||||
|
||||
firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
|
||||
lastA = (((u64) blk_rq_pos(A->request) << 9) +
|
||||
blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
|
||||
|
||||
firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
|
||||
lastB = (((u64) blk_rq_pos(B->request) << 9) +
|
||||
blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
|
||||
|
||||
return (firstB <= lastA && firstA <= lastB);
|
||||
}
|
||||
|
||||
/*
 * Try to reserve the cluster(s) touched by scmrq.  Returns false if an
 * in-flight request overlapping the same cluster involves a write (two
 * concurrent reads may share a cluster); the caller then has to retry
 * later.  On success the request is put on the per-device cluster_list
 * until scm_release_cluster() is called.
 *
 * NOTE(review): plain spin_lock() here vs. spin_lock_irqsave() in
 * scm_release_cluster() — presumably this path already runs with
 * interrupts disabled (queue lock held); confirm against the caller.
 */
bool scm_reserve_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_request *iter;

	/* write_cluster_size == 0 disables write clustering entirely. */
	if (write_cluster_size == 0)
		return true;

	spin_lock(&bdev->lock);
	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
		if (clusters_intersect(scmrq, iter) &&
		    (rq_data_dir(scmrq->request) == WRITE ||
		     rq_data_dir(iter->request) == WRITE)) {
			spin_unlock(&bdev->lock);
			return false;
		}
	}
	list_add(&scmrq->cluster.list, &bdev->cluster_list);
	spin_unlock(&bdev->lock);

	return true;
}
|
||||
|
||||
void scm_release_cluster(struct scm_request *scmrq)
|
||||
{
|
||||
struct scm_blk_dev *bdev = scmrq->bdev;
|
||||
unsigned long flags;
|
||||
|
||||
if (write_cluster_size == 0)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&bdev->lock, flags);
|
||||
list_del(&scmrq->cluster.list);
|
||||
spin_unlock_irqrestore(&bdev->lock, flags);
|
||||
}
|
||||
|
||||
/*
 * Per-device cluster init: empty reservation list, and advertise the
 * cluster size as the optimal I/O size of the request queue.
 */
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
	INIT_LIST_HEAD(&bdev->cluster_list);
	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}
|
||||
|
||||
/*
 * Set up the AOB/msb for the current phase of a clustered write.
 *
 * A write smaller than CLUSTER_SIZE is handled as read-modify-write:
 *   CLUSTER_NONE  -> falls through to CLUSTER_READ: read the full
 *                    cluster (or two clusters when the request straddles
 *                    a cluster boundary) into the bounce pages;
 *   CLUSTER_WRITE -> write the cluster back, taking the leading pages
 *                    from the bounce buffer, the middle from the
 *                    request's bio pages and the tail again from the
 *                    bounce buffer.
 */
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct request *req = scmrq->request;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec bv;
	int i = 0;
	u64 addr;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		scmrq->cluster.state = CLUSTER_READ;
		/* fall through */
	case CLUSTER_READ:
		scmrq->aob->request.msb_count = 1;
		msb->bs = MSB_BS_4K;
		msb->oc = MSB_OC_READ;
		msb->flags = MSB_FLAG_IDA;
		msb->data_addr = (u64) aidaw;
		msb->blk_count = write_cluster_size;

		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
		msb->scm_addr = round_down(addr, CLUSTER_SIZE);

		/* Request crossing a cluster boundary? Read both clusters. */
		if (msb->scm_addr !=
		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
			       CLUSTER_SIZE))
			msb->blk_count = 2 * write_cluster_size;

		/* Point every indirect data address at a bounce page. */
		for (i = 0; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}

		break;
	case CLUSTER_WRITE:
		/* Reuse scm_addr/blk_count set up by the read phase. */
		msb->oc = MSB_OC_WRITE;

		/* Leading bounce pages in front of the requested range. */
		for (addr = msb->scm_addr;
		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
		     addr += PAGE_SIZE) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
			i++;
		}
		/* The caller's own data pages. */
		rq_for_each_segment(bv, req, iter) {
			aidaw->data_addr = (u64) page_address(bv.bv_page);
			aidaw++;
			i++;
		}
		/* Trailing bounce pages up to blk_count. */
		for (; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}
		break;
	}
}
|
||||
|
||||
bool scm_need_cluster_request(struct scm_request *scmrq)
|
||||
{
|
||||
if (rq_data_dir(scmrq->request) == READ)
|
||||
return false;
|
||||
|
||||
return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
|
||||
}
|
||||
|
||||
/*
 * Build and kick off the current cluster phase.  If the eadm layer
 * cannot start the AOB, the request is requeued for a later retry.
 * Called with queue lock held.
 */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
	scm_prepare_cluster_request(scmrq);
	if (eadm_start_aob(scmrq->aob))
		scm_request_requeue(scmrq);
}
|
||||
|
||||
/* True while a request is inside the cluster state machine. */
bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return scmrq->cluster.state != CLUSTER_NONE;
}
|
||||
|
||||
/*
 * Interrupt path of a clustered request: after the read phase completed
 * without error, switch to CLUSTER_WRITE and start the write-back under
 * the request lock; after the write phase, or on a read error, finish
 * the request.
 */
void scm_cluster_request_irq(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		/* Must never be called for a non-clustered request. */
		BUG();
		break;
	case CLUSTER_READ:
		if (scmrq->error) {
			scm_request_finish(scmrq);
			break;
		}
		scmrq->cluster.state = CLUSTER_WRITE;
		spin_lock_irqsave(&bdev->rq_lock, flags);
		scm_initiate_cluster_request(scmrq);
		spin_unlock_irqrestore(&bdev->rq_lock, flags);
		break;
	case CLUSTER_WRITE:
		scm_request_finish(scmrq);
		break;
	}
}
|
||||
|
||||
bool scm_cluster_size_valid(void)
|
||||
{
|
||||
if (write_cluster_size == 1 || write_cluster_size > 128)
|
||||
return false;
|
||||
|
||||
return !(write_cluster_size & (write_cluster_size - 1));
|
||||
}
|
92
drivers/s390/block/scm_drv.c
Normal file
92
drivers/s390/block/scm_drv.c
Normal file
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* Device driver for s390 storage class memory.
|
||||
*
|
||||
* Copyright IBM Corp. 2012
|
||||
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "scm_block"
|
||||
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/eadm.h>
|
||||
#include "scm_blk.h"
|
||||
|
||||
/*
 * Asynchronous notification callback of the SCM bus.
 * SCM_CHANGE: log that the increment's capabilities changed;
 * SCM_AVAIL:  log and mark the block device usable again.
 */
static void scm_notify(struct scm_device *scmdev, enum scm_event event)
{
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);

	switch (event) {
	case SCM_CHANGE:
		pr_info("%lx: The capabilities of the SCM increment changed\n",
			(unsigned long) scmdev->address);
		SCM_LOG(2, "State changed");
		SCM_LOG_STATE(2, scmdev);
		break;
	case SCM_AVAIL:
		SCM_LOG(2, "Increment available");
		SCM_LOG_STATE(2, scmdev);
		scm_blk_set_available(bdev);
		break;
	}
}
|
||||
|
||||
static int scm_probe(struct scm_device *scmdev)
|
||||
{
|
||||
struct scm_blk_dev *bdev;
|
||||
int ret;
|
||||
|
||||
SCM_LOG(2, "probe");
|
||||
SCM_LOG_STATE(2, scmdev);
|
||||
|
||||
if (scmdev->attrs.oper_state != OP_STATE_GOOD)
|
||||
return -EINVAL;
|
||||
|
||||
bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
|
||||
if (!bdev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_set_drvdata(&scmdev->dev, bdev);
|
||||
ret = scm_blk_dev_setup(bdev, scmdev);
|
||||
if (ret) {
|
||||
dev_set_drvdata(&scmdev->dev, NULL);
|
||||
kfree(bdev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scm_remove(struct scm_device *scmdev)
|
||||
{
|
||||
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
|
||||
|
||||
scm_blk_dev_cleanup(bdev);
|
||||
dev_set_drvdata(&scmdev->dev, NULL);
|
||||
kfree(bdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Driver registration data: probe/remove lifecycle, asynchronous event
 * notification and the interrupt handler for completed AOBs.
 */
static struct scm_driver scm_drv = {
	.drv = {
		.name = "scm_block",
		.owner = THIS_MODULE,
	},
	.notify = scm_notify,
	.probe = scm_probe,
	.remove = scm_remove,
	.handler = scm_blk_irq,
};
|
||||
|
||||
/* Register the driver with the SCM bus; called from module init. */
int __init scm_drv_init(void)
{
	return scm_driver_register(&scm_drv);
}
|
||||
|
||||
/* Unregister the driver; called from module exit. */
void scm_drv_cleanup(void)
{
	scm_driver_unregister(&scm_drv);
}
|
479
drivers/s390/block/xpram.c
Normal file
479
drivers/s390/block/xpram.c
Normal file
|
@ -0,0 +1,479 @@
|
|||
/*
|
||||
* Xpram.c -- the S/390 expanded memory RAM-disk
|
||||
*
|
||||
* significant parts of this code are based on
|
||||
* the sbull device driver presented in
|
||||
* A. Rubini: Linux Device Drivers
|
||||
*
|
||||
* Author of XPRAM specific coding: Reinhard Buendgen
|
||||
* buendgen@de.ibm.com
|
||||
* Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*
|
||||
* External interfaces:
|
||||
* Interfaces to linux kernel
|
||||
* xpram_setup: read kernel parameters
|
||||
* Device specific file operations
|
||||
* xpram_iotcl
|
||||
* xpram_open
|
||||
*
|
||||
* "ad-hoc" partitioning:
|
||||
* the expanded memory can be partitioned among several devices
|
||||
* (with different minors). The partitioning set up can be
|
||||
* set by kernel or module parameters (int devs & int sizes[])
|
||||
*
|
||||
* Potential future improvements:
|
||||
* generic hard disk support to replace ad-hoc partitioning
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "xpram"
|
||||
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/ctype.h> /* isdigit, isxdigit */
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/blkpg.h>
|
||||
#include <linux/hdreg.h> /* HDIO_GETGEO */
|
||||
#include <linux/device.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#define XPRAM_NAME "xpram"
|
||||
#define XPRAM_DEVS 1 /* one partition */
|
||||
#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
|
||||
|
||||
typedef struct {
|
||||
unsigned int size; /* size of xpram segment in pages */
|
||||
unsigned int offset; /* start page of xpram segment */
|
||||
} xpram_device_t;
|
||||
|
||||
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
|
||||
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
|
||||
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
|
||||
static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
|
||||
static unsigned int xpram_pages;
|
||||
static int xpram_devs;
|
||||
|
||||
/*
|
||||
* Parameter parsing functions.
|
||||
*/
|
||||
static int devs = XPRAM_DEVS;
|
||||
static char *sizes[XPRAM_MAX_DEVS];
|
||||
|
||||
module_param(devs, int, 0);
|
||||
module_param_array(sizes, charp, NULL, 0);
|
||||
|
||||
MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
|
||||
"the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
|
||||
MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
|
||||
"the defaults are 0s \n" \
|
||||
"All devices with size 0 equally partition the "
|
||||
"remaining space on the expanded strorage not "
|
||||
"claimed by explicit sizes\n");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/*
 * Copy expanded memory page (4kB) into main memory
 * Arguments
 *	 page_addr:    address of target page
 *	 xpage_index:  index of expanded memory page
 * Return value
 *	 0:	       if operation succeeds
 *	 -EIO:	       if pgin failed
 *	 -ENXIO:       if xpram has vanished
 */
static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
{
	int cc = 2;	/* return unused cc 2 if pgin traps */

	/*
	 * The pgin instruction has no mnemonic in the assembler used, so
	 * it is emitted via .insn; the exception table entry turns a
	 * faulting pgin into cc == 2 (the initializer above).
	 */
	asm volatile(
		"	.insn	rre,0xb22e0000,%1,%2\n"  /* pgin %1,%2 */
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
	/* cc 2 and 3 both mean the expanded storage is not usable. */
	if (cc == 3)
		return -ENXIO;
	if (cc == 2)
		return -ENXIO;
	if (cc == 1)
		return -EIO;
	return 0;
}
|
||||
|
||||
/*
 * Copy a 4kB page of main memory to an expanded memory page
 * Arguments
 *	 page_addr:    address of source page
 *	 xpage_index:  index of expanded memory page
 * Return value
 *	 0:	       if operation succeeds
 *	 -EIO:	       if pgout failed
 *	 -ENXIO:       if xpram has vanished
 */
static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
{
	int cc = 2;	/* return unused cc 2 if pgout traps */

	/* Same pattern as xpram_page_in(), but with the pgout opcode. */
	asm volatile(
		"	.insn	rre,0xb22f0000,%1,%2\n"  /* pgout %1,%2 */
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
	/* cc 2 and 3 both mean the expanded storage is not usable. */
	if (cc == 3)
		return -ENXIO;
	if (cc == 2)
		return -ENXIO;
	if (cc == 1)
		return -EIO;
	return 0;
}
|
||||
|
||||
/*
|
||||
* Check if xpram is available.
|
||||
*/
|
||||
static int xpram_present(void)
|
||||
{
|
||||
unsigned long mem_page;
|
||||
int rc;
|
||||
|
||||
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
|
||||
if (!mem_page)
|
||||
return -ENOMEM;
|
||||
rc = xpram_page_in(mem_page, 0);
|
||||
free_page(mem_page);
|
||||
return rc ? -ENXIO : 0;
|
||||
}
|
||||
|
||||
/*
 * Return index of the last available xpram page: a binary search over
 * the 32-bit page index space, probing pgin with successively smaller
 * power-of-two bits and keeping every bit that still addresses a
 * readable page.  Returns 0 if no scratch page could be allocated.
 */
static unsigned long xpram_highest_page_index(void)
{
	unsigned int page_index, add_bit;
	unsigned long mem_page;

	mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
	if (!mem_page)
		return 0;

	page_index = 0;
	/* start with the top bit of the 32-bit index */
	add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
	while (add_bit > 0) {
		if (xpram_page_in(mem_page, page_index | add_bit) == 0)
			page_index |= add_bit;
		add_bit >>= 1;
	}

	free_page (mem_page);

	return page_index;
}
|
||||
|
||||
/*
 * Block device make request function.
 *
 * Handles a bio directly (no request queue): the bio must start and end
 * on a 4 kB boundary and fit into the partition; each 4 kB page is then
 * copied via pgin (read) or pgout (write).  Any violation or copy error
 * fails the whole bio.
 */
static void xpram_make_request(struct request_queue *q, struct bio *bio)
{
	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned int index;
	unsigned long page_addr;
	unsigned long bytes;

	if ((bio->bi_iter.bi_sector & 7) != 0 ||
	    (bio->bi_iter.bi_size & 4095) != 0)
		/* Request is not page-aligned. */
		goto fail;
	if ((bio->bi_iter.bi_size >> 12) > xdev->size)
		/* Request is larger than the partition. */
		goto fail;
	if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
		/* Absolute page index would overflow 32 bits. */
		goto fail;
	/* sector >> 3 == 4 kB page number within the partition */
	index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
	bio_for_each_segment(bvec, bio, iter) {
		/*
		 * NOTE(review): kmap() without a matching kunmap() —
		 * presumably fine on s390 (no highmem); confirm.
		 */
		page_addr = (unsigned long)
			kmap(bvec.bv_page) + bvec.bv_offset;
		bytes = bvec.bv_len;
		if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
			/* More paranoia. */
			goto fail;
		while (bytes > 0) {
			if (bio_data_dir(bio) == READ) {
				if (xpram_page_in(page_addr, index) != 0)
					goto fail;
			} else {
				if (xpram_page_out(page_addr, index) != 0)
					goto fail;
			}
			page_addr += 4096;
			bytes -= 4096;
			index++;
		}
	}
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;
fail:
	bio_io_error(bio);
}
|
||||
|
||||
static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
||||
{
|
||||
unsigned long size;
|
||||
|
||||
/*
|
||||
* get geometry: we have to fake one... trim the size to a
|
||||
* multiple of 64 (32k): tell we have 16 sectors, 4 heads,
|
||||
* whatever cylinders. Tell also that data starts at sector. 4.
|
||||
*/
|
||||
size = (xpram_pages * 8) & ~0x3f;
|
||||
geo->cylinders = size >> 6;
|
||||
geo->heads = 4;
|
||||
geo->sectors = 16;
|
||||
geo->start = 4;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Block device operations: only a fake geometry is provided; all I/O
 * goes through xpram_make_request().
 */
static const struct block_device_operations xpram_devops =
{
	.owner	= THIS_MODULE,
	.getgeo	= xpram_getgeo,
};
|
||||
|
||||
/*
 * Setup xpram_sizes array.
 *
 * Parses the module/kernel 'sizes' parameters into xpram_sizes[] (in kB,
 * rounded up to a 4 kB multiple) and distributes the remaining expanded
 * storage evenly over all partitions given with size 0.  'pages' is the
 * total number of available 4 kB xpram pages.  Returns 0, or -EINVAL on
 * an invalid device count or over-commitment.
 */
static int __init xpram_setup_sizes(unsigned long pages)
{
	unsigned long mem_needed;
	unsigned long mem_auto;
	unsigned long long size;
	char *sizes_end;
	int mem_auto_no;
	int i;

	/* Check number of devices. */
	if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
		pr_err("%d is not a valid number of XPRAM devices\n",devs);
		return -EINVAL;
	}
	xpram_devs = devs;

	/*
	 * Copy sizes array to xpram_sizes and align partition
	 * sizes to page boundary.
	 */
	mem_needed = 0;
	mem_auto_no = 0;
	for (i = 0; i < xpram_devs; i++) {
		if (sizes[i]) {
			size = simple_strtoull(sizes[i], &sizes_end, 0);
			switch (*sizes_end) {
			case 'g':
			case 'G':
				size <<= 20;	/* GiB -> kB */
				break;
			case 'm':
			case 'M':
				size <<= 10;	/* MiB -> kB */
			}
			/* round up to a 4 kB multiple */
			xpram_sizes[i] = (size + 3) & -4UL;
		}
		if (xpram_sizes[i])
			mem_needed += xpram_sizes[i];
		else
			mem_auto_no++;
	}

	pr_info("  number of devices (partitions): %d \n", xpram_devs);
	for (i = 0; i < xpram_devs; i++) {
		if (xpram_sizes[i])
			pr_info("  size of partition %d: %u kB\n",
				i, xpram_sizes[i]);
		else
			pr_info("  size of partition %d to be set "
				"automatically\n",i);
	}
	pr_info("  memory needed (for sized partitions): %lu kB\n",
		mem_needed);
	pr_info("  partitions to be sized automatically: %d\n",
		mem_auto_no);

	if (mem_needed > pages * 4) {
		pr_err("Not enough expanded memory available\n");
		return -EINVAL;
	}

	/*
	 * partitioning:
	 *   xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
	 *   else:              ; all partitions with zero xpram_sizes[i]
	 *                        partition equally the remaining space
	 */
	if (mem_auto) {
		mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
		pr_info("  automatically determined "
			"partition size: %lu kB\n", mem_auto);
		for (i = 0; i < xpram_devs; i++)
			if (xpram_sizes[i] == 0)
				xpram_sizes[i] = mem_auto;
	}
	return 0;
}
|
||||
|
||||
/*
 * Allocate gendisks and request queues for all partitions, register the
 * xpram major and publish the disks.  On failure everything allocated so
 * far is torn down and -ENOMEM (or the register_blkdev() error code) is
 * returned.
 */
static int __init xpram_setup_blkdev(void)
{
	unsigned long offset;
	int i, rc = -ENOMEM;

	for (i = 0; i < xpram_devs; i++) {
		xpram_disks[i] = alloc_disk(1);
		if (!xpram_disks[i])
			goto out;
		xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
		if (!xpram_queues[i]) {
			put_disk(xpram_disks[i]);
			goto out;
		}
		/* RAM-backed: not rotational, no entropy contribution */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
		blk_queue_make_request(xpram_queues[i], xpram_make_request);
		blk_queue_logical_block_size(xpram_queues[i], 4096);
	}

	/*
	 * Register xpram major.
	 */
	rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
	if (rc < 0)
		goto out;

	/*
	 * Setup device structures.
	 */
	offset = 0;
	for (i = 0; i < xpram_devs; i++) {
		struct gendisk *disk = xpram_disks[i];

		/* xpram_sizes[] is in kB; device units are 4 kB pages */
		xpram_devices[i].size = xpram_sizes[i] / 4;
		xpram_devices[i].offset = offset;
		offset += xpram_devices[i].size;
		disk->major = XPRAM_MAJOR;
		disk->first_minor = i;
		disk->fops = &xpram_devops;
		disk->private_data = &xpram_devices[i];
		disk->queue = xpram_queues[i];
		sprintf(disk->disk_name, "slram%d", i);
		/* capacity in 512-byte sectors: kB * 2 */
		set_capacity(disk, xpram_sizes[i] << 1);
		add_disk(disk);
	}

	return 0;
out:
	/* free the queues/disks allocated before the failure */
	while (i--) {
		blk_cleanup_queue(xpram_queues[i]);
		put_disk(xpram_disks[i]);
	}
	return rc;
}
|
||||
|
||||
/*
 * Resume failed: Print error message and call panic.  Used when the
 * hibernation image no longer matches the present xpram configuration;
 * continuing would risk data corruption, hence the panic.
 */
static void xpram_resume_error(const char *message)
{
	pr_err("Resuming the system failed: %s\n", message);
	panic("xpram resume error\n");
}
|
||||
|
||||
/*
|
||||
* Check if xpram setup changed between suspend and resume.
|
||||
*/
|
||||
static int xpram_restore(struct device *dev)
|
||||
{
|
||||
if (!xpram_pages)
|
||||
return 0;
|
||||
if (xpram_present() != 0)
|
||||
xpram_resume_error("xpram disappeared");
|
||||
if (xpram_pages != xpram_highest_page_index() + 1)
|
||||
xpram_resume_error("Size of xpram changed");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Power management: only a restore handler is needed, to verify after
 * resume from hibernation that the xpram configuration is unchanged.
 */
static const struct dev_pm_ops xpram_pm_ops = {
	.restore	= xpram_restore,
};

static struct platform_driver xpram_pdrv = {
	.driver = {
		.name	= XPRAM_NAME,
		.owner	= THIS_MODULE,
		.pm	= &xpram_pm_ops,
	},
};

/* Anchor device for the PM callbacks; registered in xpram_init(). */
static struct platform_device *xpram_pdev;
|
||||
|
||||
/*
 * Finally, the init/exit functions.
 */

/*
 * Module exit: unpublish the disks, free their queues, release the
 * major number and unregister the platform device/driver pair.
 */
static void __exit xpram_exit(void)
{
	int i;
	for (i = 0; i < xpram_devs; i++) {
		del_gendisk(xpram_disks[i]);
		blk_cleanup_queue(xpram_queues[i]);
		put_disk(xpram_disks[i]);
	}
	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
	platform_device_unregister(xpram_pdev);
	platform_driver_unregister(&xpram_pdrv);
}
|
||||
|
||||
/*
 * Module init: detect expanded storage, compute the partition layout,
 * register the platform driver/device (needed for the resume check) and
 * create the block devices.  Returns 0 or a negative error code, with
 * full unwind on every failure path.
 */
static int __init xpram_init(void)
{
	int rc;

	/* Find out size of expanded memory. */
	if (xpram_present() != 0) {
		pr_err("No expanded memory available\n");
		return -ENODEV;
	}
	xpram_pages = xpram_highest_page_index() + 1;
	pr_info("  %u pages expanded memory found (%lu KB).\n",
		xpram_pages, (unsigned long) xpram_pages*4);
	rc = xpram_setup_sizes(xpram_pages);
	if (rc)
		return rc;
	rc = platform_driver_register(&xpram_pdrv);
	if (rc)
		return rc;
	xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
	if (IS_ERR(xpram_pdev)) {
		rc = PTR_ERR(xpram_pdev);
		goto fail_platform_driver_unregister;
	}
	rc = xpram_setup_blkdev();
	if (rc)
		goto fail_platform_device_unregister;
	return 0;

fail_platform_device_unregister:
	platform_device_unregister(xpram_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&xpram_pdrv);
	return rc;
}
|
||||
|
||||
module_init(xpram_init);
|
||||
module_exit(xpram_exit);
|
Loading…
Add table
Add a link
Reference in a new issue