Fixed MTP to work with TWRP

commit f6dfaef42e
awab228, 2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

drivers/s390/Makefile (new file)

@@ -0,0 +1,8 @@
#
# Makefile for the S/390 specific device drivers
#
obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
drivers-y += drivers/s390/built-in.o

drivers/s390/block/Kconfig (new file)

@@ -0,0 +1,90 @@
comment "S/390 block device drivers"
depends on S390 && BLOCK
config BLK_DEV_XPRAM
def_tristate m
prompt "XPRAM disk support"
depends on S390 && BLOCK
help
Select this option if you want to use your expanded storage on S/390
or zSeries as a disk. This is useful as a _fast_ swap device if you
want to access more than 2G of memory when running in 31 bit mode.
This option is also available as a module which will be called
xpram. If unsure, say "N".
config DCSSBLK
def_tristate m
prompt "DCSSBLK support"
depends on S390 && BLOCK
help
Support for DCSS block devices.
config DASD
def_tristate y
prompt "Support for DASD devices"
depends on CCW && BLOCK
select IOSCHED_DEADLINE
help
Enable this option if you want to access DASDs directly utilizing
the S/390 channel subsystem commands. This is necessary for running
natively on a single image or an LPAR.
config DASD_PROFILE
def_bool y
prompt "Profiling support for dasd devices"
depends on DASD
help
Enable this option if you want to see profiling information
in /proc/dasd/statistics.
config DASD_ECKD
def_tristate y
prompt "Support for ECKD Disks"
depends on DASD
help
ECKD devices are the most commonly used devices. You should enable
this option unless you are sure that you have no ECKD devices.
config DASD_FBA
def_tristate y
prompt "Support for FBA Disks"
depends on DASD
help
Select this option to be able to access FBA devices. It is safe to
say "Y".
config DASD_DIAG
def_tristate y
prompt "Support for DIAG access to Disks"
depends on DASD
help
Select this option if you want to use the Diagnose250 command to access
disks under VM. If you are not running under VM, or are unsure what it is,
say "N".
config DASD_EER
def_bool y
prompt "Extended error reporting (EER)"
depends on DASD
help
This driver provides a character device interface to the
DASD extended error reporting. This is only needed if you want to
use applications written for the EER facility.
config SCM_BLOCK
def_tristate m
prompt "Support for Storage Class Memory"
depends on S390 && BLOCK && EADM_SCH && SCM_BUS
help
Block device driver for Storage Class Memory (SCM). This driver
provides a block device interface for each available SCM increment.
To compile this driver as a module, choose M here: the
module will be called scm_block.
config SCM_BLOCK_CLUSTER_WRITE
def_bool y
prompt "SCM force cluster writes"
depends on SCM_BLOCK
help
Force writes to Storage Class Memory (SCM) to be done in clusters.
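
With the defaults above, a typical resulting .config fragment might look
like this (illustrative only; actual values depend on the rest of the
configuration):

CONFIG_BLK_DEV_XPRAM=m
CONFIG_DCSSBLK=m
CONFIG_DASD=y
CONFIG_DASD_PROFILE=y
CONFIG_DASD_ECKD=y
CONFIG_DASD_FBA=y
CONFIG_DASD_DIAG=y
CONFIG_DASD_EER=y
CONFIG_SCM_BLOCK=m
CONFIG_SCM_BLOCK_CLUSTER_WRITE=y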

drivers/s390/block/Makefile (new file)

@@ -0,0 +1,25 @@
#
# S/390 block devices
#
dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
dasd_fba_mod-objs := dasd_fba.o
dasd_diag_mod-objs := dasd_diag.o
dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
dasd_genhd.o dasd_erp.o
ifdef CONFIG_DASD_EER
dasd_mod-objs += dasd_eer.o
endif
obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o
ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
scm_block-objs += scm_blk_cluster.o
endif
obj-$(CONFIG_SCM_BLOCK) += scm_block.o
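
For example, with CONFIG_DASD=m and CONFIG_DASD_EER=y, kbuild links
dasd.o, dasd_ioctl.o, dasd_proc.o, dasd_devmap.o, dasd_genhd.o,
dasd_erp.o and dasd_eer.o into a single composite module, dasd_mod.ko
(a sketch of standard kbuild composite-object behaviour).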

drivers/s390/block/dasd.c (new file, 3931 lines)

(file diff suppressed because it is too large)

(file diff suppressed because it is too large)

drivers/s390/block/dasd_alias.c (new file)

@@ -0,0 +1,990 @@
/*
* PAV alias management for the DASD ECKD discipline
*
* Copyright IBM Corp. 2007
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
* General concept of alias management:
* - PAV and DASD alias management is specific to the eckd discipline.
* - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
* - The dasd_alias_add_device / dasd_alias_remove_device
* functions mark the point when a device is 'ready for service'.
* - A summary unit check is a rare occasion, but it is mandatory to
* support it. It requires some complex recovery actions before the
* devices can be used again (see dasd_alias_handle_summary_unit_check).
* - dasd_alias_get_start_dev will find an alias device that can be used
* instead of the base device and does some (very simple) load balancing.
* This is the function that gets called for each I/O, so when improving
 * something, this function should get faster or better; the rest just
 * has to be correct.
*/
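/*
 * Illustrative lifecycle, per the description above (a hypothetical
 * sketch; the real call sites live in the eckd discipline code):
 *
 *	dasd_alias_make_device_known_to_lcu(device);	// device detected
 *	dasd_alias_add_device(device);			// ready for service
 *	// per I/O:
 *	startdev = dasd_alias_get_start_dev(base_device);
 *	if (!startdev)
 *		startdev = base_device;
 *	// ...
 *	dasd_alias_remove_device(device);		// leaves service
 *	dasd_alias_disconnect_device_from_lcu(device);	// before deletion
 */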
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
static struct alias_root aliastree = {
.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
static struct alias_server *_find_server(struct dasd_uid *uid)
{
struct alias_server *pos;
list_for_each_entry(pos, &aliastree.serverlist, server) {
if (!strncmp(pos->uid.vendor, uid->vendor,
sizeof(uid->vendor))
&& !strncmp(pos->uid.serial, uid->serial,
sizeof(uid->serial)))
return pos;
};
return NULL;
}
static struct alias_lcu *_find_lcu(struct alias_server *server,
struct dasd_uid *uid)
{
struct alias_lcu *pos;
list_for_each_entry(pos, &server->lculist, lcu) {
if (pos->uid.ssid == uid->ssid)
return pos;
};
return NULL;
}
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
struct dasd_uid *uid)
{
struct alias_pav_group *pos;
__u8 search_unit_addr;
/* for hyper pav there is only one group */
if (lcu->pav == HYPER_PAV) {
if (list_empty(&lcu->grouplist))
return NULL;
else
return list_first_entry(&lcu->grouplist,
struct alias_pav_group, group);
}
/* for base pav we have to find the group that matches the base */
if (uid->type == UA_BASE_DEVICE)
search_unit_addr = uid->real_unit_addr;
else
search_unit_addr = uid->base_unit_addr;
list_for_each_entry(pos, &lcu->grouplist, group) {
if (pos->uid.base_unit_addr == search_unit_addr &&
!strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
return pos;
};
return NULL;
}
static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
struct alias_server *server;
server = kzalloc(sizeof(*server), GFP_KERNEL);
if (!server)
return ERR_PTR(-ENOMEM);
memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
INIT_LIST_HEAD(&server->server);
INIT_LIST_HEAD(&server->lculist);
return server;
}
static void _free_server(struct alias_server *server)
{
kfree(server);
}
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
struct alias_lcu *lcu;
lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
if (!lcu)
return ERR_PTR(-ENOMEM);
lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
if (!lcu->uac)
goto out_err1;
lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr)
goto out_err2;
lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr->cpaddr)
goto out_err3;
lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr->data)
goto out_err4;
memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
lcu->uid.ssid = uid->ssid;
lcu->pav = NO_PAV;
lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
INIT_LIST_HEAD(&lcu->lcu);
INIT_LIST_HEAD(&lcu->inactive_devices);
INIT_LIST_HEAD(&lcu->active_devices);
INIT_LIST_HEAD(&lcu->grouplist);
INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
spin_lock_init(&lcu->lock);
init_completion(&lcu->lcu_setup);
return lcu;
out_err4:
kfree(lcu->rsu_cqr->cpaddr);
out_err3:
kfree(lcu->rsu_cqr);
out_err2:
kfree(lcu->uac);
out_err1:
kfree(lcu);
return ERR_PTR(-ENOMEM);
}
static void _free_lcu(struct alias_lcu *lcu)
{
kfree(lcu->rsu_cqr->data);
kfree(lcu->rsu_cqr->cpaddr);
kfree(lcu->rsu_cqr);
kfree(lcu->uac);
kfree(lcu);
}
/*
* This is the function that will allocate all the server and lcu data,
* so this function must be called first for a new device.
 * It returns 0 on success; a negative return code indicates that
 * something went wrong (e.g. -ENOMEM).
*/
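/*
 * Hypothetical caller sketch (the real caller is the eckd discipline
 * device setup path):
 *
 *	rc = dasd_alias_make_device_known_to_lcu(device);
 *	if (rc)
 *		return rc;	// e.g. -ENOMEM
 */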
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
struct dasd_eckd_private *private;
unsigned long flags;
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newserver = _allocate_server(&uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(&uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
} else {
/* someone was faster */
_free_server(newserver);
}
}
lcu = _find_lcu(server, &uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newlcu = _allocate_lcu(&uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
lcu = _find_lcu(server, &uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
} else {
/* someone was faster */
_free_lcu(newlcu);
}
}
spin_lock(&lcu->lock);
list_add(&device->alias_list, &lcu->inactive_devices);
private->lcu = lcu;
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(&aliastree.lock, flags);
return 0;
}
/*
* This function removes a device from the scope of alias management.
* The complicated part is to make sure that it is not in use by
* any of the workers. If necessary cancel the work.
*/
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
struct dasd_eckd_private *private;
unsigned long flags;
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
/* nothing to do if already disconnected */
if (!lcu)
return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
if (device == lcu->suc_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
cancel_work_sync(&lcu->suc_data.worker);
spin_lock_irqsave(&lcu->lock, flags);
if (device == lcu->suc_data.device)
lcu->suc_data.device = NULL;
}
was_pending = 0;
if (device == lcu->ruac_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
was_pending = 1;
cancel_delayed_work_sync(&lcu->ruac_data.dwork);
spin_lock_irqsave(&lcu->lock, flags);
if (device == lcu->ruac_data.device)
lcu->ruac_data.device = NULL;
}
private->lcu = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
spin_lock_irqsave(&aliastree.lock, flags);
spin_lock(&lcu->lock);
if (list_empty(&lcu->grouplist) &&
list_empty(&lcu->active_devices) &&
list_empty(&lcu->inactive_devices)) {
list_del(&lcu->lcu);
spin_unlock(&lcu->lock);
_free_lcu(lcu);
lcu = NULL;
} else {
if (was_pending)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
server = _find_server(&uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
}
spin_unlock_irqrestore(&aliastree.lock, flags);
}
/*
* This function assumes that the unit address configuration stored
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
static int _add_device_to_lcu(struct alias_lcu *lcu,
struct dasd_device *device,
struct dasd_device *pos)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
struct dasd_uid uid;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
/* only lock if not already locked */
if (device != pos)
spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
CDEV_NESTED_SECOND);
private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
private->uid.base_unit_addr =
lcu->uac->unit[private->uid.real_unit_addr].base_ua;
uid = private->uid;
if (device != pos)
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
list_move(&device->alias_list, &lcu->active_devices);
return 0;
}
group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
group->uid.ssid = uid.ssid;
if (uid.type == UA_BASE_DEVICE)
group->uid.base_unit_addr = uid.real_unit_addr;
else
group->uid.base_unit_addr = uid.base_unit_addr;
memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
if (uid.type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
private->pavgroup = group;
return 0;
};
static void _remove_device_from_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
private = (struct dasd_eckd_private *) device->private;
list_move(&device->alias_list, &lcu->inactive_devices);
group = private->pavgroup;
if (!group)
return;
private->pavgroup = NULL;
if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
list_del(&group->group);
kfree(group);
return;
}
if (group->next == device)
group->next = NULL;
};
static int
suborder_not_supported(struct dasd_ccw_req *cqr)
{
char *sense;
char reason;
char msg_format;
char msg_no;
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
reason = sense[0];
msg_format = (sense[7] & 0xF0);
msg_no = (sense[7] & 0x0F);
/* command reject, Format 0 MSG 4 - invalid parameter */
if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
return 1;
return 0;
}
static int read_unit_address_configuration(struct dasd_device *device,
struct alias_lcu *lcu)
{
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
unsigned long flags;
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
device);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 10;
cqr->expires = 20 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x0e; /* Read unit address configuration */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)(addr_t) prssdp;
/* Read Subsystem Data - unit address configuration */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
ccw->cda = (__u32)(addr_t) lcu->uac;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* need to unset flag here to detect race with summary unit check */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
do {
rc = dasd_sleep_on(cqr);
if (rc && suborder_not_supported(cqr))
return -EOPNOTSUPP;
} while (rc && (cqr->retries > 0));
if (rc) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
dasd_kfree_request(cqr, cqr->memdev);
return rc;
}
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
unsigned long flags;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *device, *tempdev;
int i, rc;
struct dasd_eckd_private *private;
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
private = (struct dasd_eckd_private *) device->private;
private->pavgroup = NULL;
}
list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
private = (struct dasd_eckd_private *) device->private;
private->pavgroup = NULL;
}
list_del(&pavgroup->group);
kfree(pavgroup);
}
spin_unlock_irqrestore(&lcu->lock, flags);
rc = read_unit_address_configuration(refdev, lcu);
if (rc)
return rc;
/* need to take cdev lock before lcu lock */
spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
CDEV_NESTED_FIRST);
spin_lock(&lcu->lock);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
case UA_BASE_PAV_ALIAS:
lcu->pav = BASE_PAV;
break;
case UA_HYPER_PAV_ALIAS:
lcu->pav = HYPER_PAV;
break;
}
if (lcu->pav != NO_PAV)
break;
}
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
return 0;
}
static void lcu_update_work(struct work_struct *work)
{
struct alias_lcu *lcu;
struct read_uac_work_data *ruac_data;
struct dasd_device *device;
unsigned long flags;
int rc;
ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
device = ruac_data->device;
rc = _lcu_update(device, lcu);
/*
* Need to check flags again, as there could have been another
* prepare_update or a new device while we were still
* processing the data
*/
spin_lock_irqsave(&lcu->lock, flags);
if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
" alias data in lcu (rc = %d), retry later", rc);
schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
} else {
lcu->ruac_data.device = NULL;
lcu->flags &= ~UPDATE_PENDING;
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
static int _schedule_lcu_update(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_device *usedev = NULL;
struct alias_pav_group *group;
lcu->flags |= NEED_UAC_UPDATE;
if (lcu->ruac_data.device) {
/* already scheduled or running */
return 0;
}
if (device && !list_empty(&device->alias_list))
usedev = device;
if (!usedev && !list_empty(&lcu->grouplist)) {
group = list_first_entry(&lcu->grouplist,
struct alias_pav_group, group);
if (!list_empty(&group->baselist))
usedev = list_first_entry(&group->baselist,
struct dasd_device,
alias_list);
else if (!list_empty(&group->aliaslist))
usedev = list_first_entry(&group->aliaslist,
struct dasd_device,
alias_list);
}
if (!usedev && !list_empty(&lcu->active_devices)) {
usedev = list_first_entry(&lcu->active_devices,
struct dasd_device, alias_list);
}
/*
* if we haven't found a proper device yet, give up for now, the next
* device that will be set active will trigger an lcu update
*/
if (!usedev)
return -EINVAL;
lcu->ruac_data.device = usedev;
schedule_delayed_work(&lcu->ruac_data.dwork, 0);
return 0;
}
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_lcu *lcu;
unsigned long flags;
int rc;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
/* need to take cdev lock before lcu lock */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
spin_lock(&lcu->lock);
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
if (lcu->flags & UPDATE_PENDING) {
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return rc;
}
int dasd_alias_update_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
private->lcu->flags |= UPDATE_PENDING;
return dasd_alias_add_device(device);
}
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct alias_lcu *lcu;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
/* nothing to do if already removed */
if (!lcu)
return 0;
spin_lock_irqsave(&lcu->lock, flags);
_remove_device_from_lcu(lcu, device);
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_device *alias_device;
struct alias_pav_group *group;
struct alias_lcu *lcu;
struct dasd_eckd_private *private, *alias_priv;
unsigned long flags;
private = (struct dasd_eckd_private *) base_device->private;
group = private->pavgroup;
lcu = private->lcu;
if (!group || !lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
return NULL;
if (unlikely(!(private->features.feature[8] & 0x01))) {
/*
* PAV is enabled but prefix support is not, which is very unlikely
* and looks like a lost path group; use the base device for I/O.
*/
DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
"Prefix not enabled with PAV enabled\n");
return NULL;
}
spin_lock_irqsave(&lcu->lock, flags);
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
spin_unlock_irqrestore(&lcu->lock, flags);
return NULL;
} else {
alias_device = list_first_entry(&group->aliaslist,
struct dasd_device,
alias_list);
}
}
if (list_is_last(&alias_device->alias_list, &group->aliaslist))
group->next = list_first_entry(&group->aliaslist,
struct dasd_device, alias_list);
else
group->next = list_first_entry(&alias_device->alias_list,
struct dasd_device, alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
alias_priv = (struct dasd_eckd_private *) alias_device->private;
if ((alias_priv->count < private->count) && !alias_device->stopped)
return alias_device;
else
return NULL;
}
/*
* Summary unit check handling depends on the way alias devices
* are handled so it is done here rather than in dasd_eckd.c
*/
static int reset_summary_unit_check(struct alias_lcu *lcu,
struct dasd_device *device,
char reason)
{
struct dasd_ccw_req *cqr;
int rc = 0;
struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
strncpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32)(addr_t) cqr->data;
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 255; /* set retry counter to enable basic ERP */
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 5 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
return rc;
}
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
unsigned long flags;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (private->uid.type != UA_BASE_DEVICE) {
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
continue;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (private->uid.type != UA_BASE_DEVICE) {
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
continue;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
}
}
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device, *temp;
struct dasd_eckd_private *private;
int rc;
unsigned long flags;
LIST_HEAD(active);
/*
* Problem here is that dasd_flush_device_queue may wait
* for termination of a request to complete. We can't keep
* the lcu lock during that time, so we must assume that
* the lists may have changed.
* Idea: first gather all active alias devices in a separate list,
* then flush the first element of this list unlocked, and afterwards
* check if it is still on the list before moving it to the
* active_devices list.
*/
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(device, temp, &lcu->active_devices,
alias_list) {
private = (struct dasd_eckd_private *) device->private;
if (private->uid.type == UA_BASE_DEVICE)
continue;
list_move(&device->alias_list, &active);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_splice_init(&pavgroup->aliaslist, &active);
}
while (!list_empty(&active)) {
device = list_first_entry(&active, struct dasd_device,
alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
rc = dasd_flush_device_queue(device);
spin_lock_irqsave(&lcu->lock, flags);
/*
* only move device around if it wasn't moved away while we
* were waiting for the flush
*/
if (device == list_first_entry(&active,
struct dasd_device, alias_list))
list_move(&device->alias_list, &lcu->active_devices);
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
static void __stop_device_on_lcu(struct dasd_device *device,
struct dasd_device *pos)
{
/* If pos == device then device is already locked! */
if (pos == device) {
dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
return;
}
spin_lock(get_ccwdev_lock(pos->cdev));
dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(pos->cdev));
}
/*
* This function is called in interrupt context, so the
* cdev lock for device is already locked!
*/
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct alias_pav_group *pavgroup;
struct dasd_device *pos;
list_for_each_entry(pos, &lcu->active_devices, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(pos, &pavgroup->baselist, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
__stop_device_on_lcu(device, pos);
}
}
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
unsigned long flags;
list_for_each_entry(device, &lcu->active_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
flags);
}
}
}
static void summary_unit_check_handling_work(struct work_struct *work)
{
struct alias_lcu *lcu;
struct summary_unit_check_work_data *suc_data;
unsigned long flags;
struct dasd_device *device;
suc_data = container_of(work, struct summary_unit_check_work_data,
worker);
lcu = container_of(suc_data, struct alias_lcu, suc_data);
device = suc_data->device;
/* 1. flush alias devices */
flush_all_alias_devices_on_lcu(lcu);
/* 2. reset summary unit check */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_remove_stop_bits(device,
(DASD_STOPPED_SU | DASD_STOPPED_PENDING));
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
reset_summary_unit_check(lcu, device, suc_data->reason);
spin_lock_irqsave(&lcu->lock, flags);
_unstop_all_devices_on_lcu(lcu);
_restart_all_base_devices_on_lcu(lcu);
/* 3. read new alias configuration */
_schedule_lcu_update(lcu, device);
lcu->suc_data.device = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
* note: this will be called from int handler context (cdev locked)
*/
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
struct irb *irb)
{
struct alias_lcu *lcu;
char reason;
struct dasd_eckd_private *private;
char *sense;
private = (struct dasd_eckd_private *) device->private;
sense = dasd_get_sense(irb);
if (sense) {
reason = sense[8];
DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
"eckd handle summary unit check: reason", reason);
} else {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"eckd handle summary unit check:"
" no reason code available");
return;
}
lcu = private->lcu;
if (!lcu) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device not ready to handle summary"
" unit check (no lcu structure)");
return;
}
spin_lock(&lcu->lock);
_stop_all_devices_on_lcu(lcu, device);
/* prepare for lcu_update */
private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
/* If this device is about to be removed just return and wait for
* the next interrupt on a different device
*/
if (list_empty(&device->alias_list)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device is in offline processing,"
" don't do summary unit check handling");
spin_unlock(&lcu->lock);
return;
}
if (lcu->suc_data.device) {
/* already scheduled or running */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"previous instance of summary unit check worker"
" still pending");
spin_unlock(&lcu->lock);
return;
}
lcu->suc_data.reason = reason;
lcu->suc_data.device = device;
spin_unlock(&lcu->lock);
schedule_work(&lcu->suc_data.worker);
};

(file diff suppressed because it is too large)

drivers/s390/block/dasd_diag.c (new file)

@@ -0,0 +1,663 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2000
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
#define PRINTK_HEADER "dasd(diag):"
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
* amount of storage that is available in the static I/O buffer for each
* device. Currently each device gets 2 pages. We want to fit two requests
* into the available memory so that we can immediately start the next if one
* finishes. */
#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
sizeof(struct dasd_diag_req)) / \
sizeof(struct dasd_diag_bio)) / 2)
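/*
 * Worked example (illustrative; the actual struct sizes depend on the
 * architecture and config): with 4 KiB pages the static buffer is
 * 2 * 4096 = 8192 bytes, and on 64 bit each struct dasd_diag_bio is
 * 24 bytes. If the two request headers take roughly 400 bytes,
 * (8192 - 400) / 24 is about 324 bios, halved to about 162 blocks
 * per request.
 */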
#define DIAG_MAX_RETRIES 32
#define DIAG_TIMEOUT 50
static struct dasd_discipline dasd_diag_discipline;
struct dasd_diag_private {
struct dasd_diag_characteristics rdc_data;
struct dasd_diag_rw_io iob;
struct dasd_diag_init_io iib;
blocknum_t pt_block;
struct ccw_dev_id dev_id;
};
struct dasd_diag_req {
unsigned int block_count;
struct dasd_diag_bio bio[0];
};
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 }; /* EBCDIC CMS1 */
/* Perform DIAG250 call with block I/O parameter list iob (input and output)
* and function code cmd.
* In case of an exception return 3. Otherwise return result of bitwise OR of
* resulting condition code and DIAG return code. */
static inline int dia250(void *iob, int cmd)
{
register unsigned long reg2 asm ("2") = (unsigned long) iob;
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
int rc;
rc = 3;
asm volatile(
" diag 2,%2,0x250\n"
"0: ipm %0\n"
" srl %0,28\n"
" or %0,3\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc), "=m" (*(addr_type *) iob)
: "d" (cmd), "d" (reg2), "m" (*(addr_type *) iob)
: "3", "cc");
return rc;
}
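/*
 * Illustrative use of the combined return value (cf. mdsk_init_io()
 * below): callers test "(rc & 3) == 0" to check for condition code 0,
 * while rc == 3 means the DIAG instruction itself raised an exception.
 */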
/* Initialize block I/O to DIAG device using the specified blocksize and
* block offset. On success, return zero and set end_block to contain the
* number of blocks on the device minus the specified offset. Return non-zero
* otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
blocknum_t offset, blocknum_t *end_block)
{
struct dasd_diag_private *private;
struct dasd_diag_init_io *iib;
int rc;
private = (struct dasd_diag_private *) device->private;
iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
iib->block_size = blocksize;
iib->offset = offset;
iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(iib, INIT_BIO);
if ((rc & 3) == 0 && end_block)
*end_block = iib->end_block;
return rc;
}
/* Remove block I/O environment for device. Return zero on success, non-zero
* otherwise. */
static inline int
mdsk_term_io(struct dasd_device * device)
{
struct dasd_diag_private *private;
struct dasd_diag_init_io *iib;
int rc;
private = (struct dasd_diag_private *) device->private;
iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
rc = dia250(iib, TERM_BIO);
return rc;
}
/* Error recovery for failed DIAG requests - try to reestablish the DIAG
* environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
int rc;
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
if (rc == 4) {
if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
pr_warning("%s: The access mode of a DIAG device "
"changed to read-only\n",
dev_name(&device->cdev->dev));
rc = 0;
}
if (rc)
pr_warning("%s: DIAG ERP failed with "
"rc=%d\n", dev_name(&device->cdev->dev), rc);
}
/* Start a given request at the device. Return zero on success, non-zero
* otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req * cqr)
{
struct dasd_device *device;
struct dasd_diag_private *private;
struct dasd_diag_req *dreq;
int rc;
device = cqr->startdev;
if (cqr->retries < 0) {
DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
"- no retry left)", cqr);
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
private = (struct dasd_diag_private *) device->private;
dreq = (struct dasd_diag_req *) cqr->data;
private->iob.dev_nr = private->dev_id.devno;
private->iob.key = 0;
private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
private->iob.block_count = dreq->block_count;
private->iob.interrupt_params = (addr_t) cqr;
private->iob.bio_list = dreq->bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
rc = dia250(&private->iob, RW_BIO);
switch (rc) {
case 0: /* Synchronous I/O finished successfully */
cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_SUCCESS;
/* Indicate to calling function that only a dasd_schedule_bh()
and no timer is needed */
rc = -EACCES;
break;
case 8: /* Asynchronous I/O was started */
cqr->status = DASD_CQR_IN_IO;
rc = 0;
break;
default: /* Error condition */
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
dasd_diag_erp(device);
rc = -EIO;
break;
}
cqr->intrc = rc;
return rc;
}
/* Terminate given request at the device. */
static int
dasd_diag_term_IO(struct dasd_ccw_req * cqr)
{
struct dasd_device *device;
device = cqr->startdev;
mdsk_term_io(device);
mdsk_init_io(device, device->block->bp_block, 0, NULL);
cqr->status = DASD_CQR_CLEAR_PENDING;
cqr->stopclk = get_tod_clock();
dasd_schedule_device_bh(device);
return 0;
}
/* Handle external interruption. */
static void dasd_ext_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long long expires;
unsigned long flags;
addr_t ip;
int rc;
switch (ext_code.subcode >> 8) {
case DASD_DIAG_CODE_31BIT:
ip = (addr_t) param32;
break;
case DASD_DIAG_CODE_64BIT:
ip = (addr_t) param64;
break;
default:
return;
}
inc_irq_stat(IRQEXT_DSD);
if (!ip) { /* no intparm: unsolicited interrupt */
DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
"interrupt");
return;
}
cqr = (struct dasd_ccw_req *) ip;
device = (struct dasd_device *) cqr->startdev;
if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
DBF_DEV_EVENT(DBF_WARNING, device,
" magic number of dasd_ccw_req 0x%08X doesn't"
" match discipline 0x%08X",
cqr->magic, *(int *) (&device->discipline->name));
return;
}
/* get irq lock to modify request queue */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* Check for a pending clear operation */
if (cqr->status == DASD_CQR_CLEAR_PENDING) {
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return;
}
cqr->stopclk = get_tod_clock();
expires = 0;
if ((ext_code.subcode & 0xff) == 0) {
cqr->status = DASD_CQR_SUCCESS;
/* Start first request on queue if possible -> fast_io. */
if (!list_empty(&device->ccw_queue)) {
next = list_entry(device->ccw_queue.next,
struct dasd_ccw_req, devlist);
if (next->status == DASD_CQR_QUEUED) {
rc = dasd_start_diag(next);
if (rc == 0)
expires = next->expires;
}
}
} else {
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
"request %p was %d (%d retries left)", cqr,
ext_code.subcode & 0xff, cqr->retries);
dasd_diag_erp(device);
}
if (expires != 0)
dasd_device_set_timer(device, expires);
else
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/* Check whether device can be controlled by DIAG discipline. Return zero on
* success, non-zero otherwise. */
static int
dasd_diag_check_device(struct dasd_device *device)
{
struct dasd_block *block;
struct dasd_diag_private *private;
struct dasd_diag_characteristics *rdc_data;
struct dasd_diag_bio bio;
struct vtoc_cms_label *label;
blocknum_t end_block;
unsigned int sb, bsize;
int rc;
private = (struct dasd_diag_private *) device->private;
if (private == NULL) {
private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
if (private == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Allocating memory for private DASD data "
"failed\n");
return -ENOMEM;
}
ccw_device_get_id(device->cdev, &private->dev_id);
device->private = (void *) private;
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"could not allocate dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
}
device->block = block;
block->base = device;
/* Read Device Characteristics */
rdc_data = (void *) &(private->rdc_data);
rdc_data->dev_nr = private->dev_id.devno;
rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
rc = diag210((struct diag210 *) rdc_data);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
"information (rc=%d)", rc);
rc = -EOPNOTSUPP;
goto out;
}
device->default_expires = DIAG_TIMEOUT;
device->default_retries = DIAG_MAX_RETRIES;
/* Figure out position of label block */
switch (private->rdc_data.vdev_class) {
case DEV_CLASS_FBA:
private->pt_block = 1;
break;
case DEV_CLASS_ECKD:
private->pt_block = 2;
break;
default:
pr_warning("%s: Device type %d is not supported "
"in DIAG mode\n", dev_name(&device->cdev->dev),
private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
DBF_DEV_EVENT(DBF_INFO, device,
"%04X: %04X on real %04X/%02X",
rdc_data->dev_nr,
rdc_data->vdev_type,
rdc_data->rdev_type, rdc_data->rdev_model);
/* terminate all outstanding operations */
mdsk_term_io(device);
/* figure out blocksize of device */
label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
if (label == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to allocate initialization request");
rc = -ENOMEM;
goto out;
}
rc = 0;
end_block = 0;
/* try all sizes - needed for ECKD devices */
for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
mdsk_init_io(device, bsize, 0, &end_block);
memset(&bio, 0, sizeof (struct dasd_diag_bio));
bio.type = MDSK_READ_REQ;
bio.block_number = private->pt_block + 1;
bio.buffer = label;
memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
private->iob.dev_nr = rdc_data->dev_nr;
private->iob.key = 0;
private->iob.flags = 0; /* do synchronous io */
private->iob.block_count = 1;
private->iob.interrupt_params = 0;
private->iob.bio_list = &bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
pr_warning("%s: A 64-bit DIAG call failed\n",
dev_name(&device->cdev->dev));
rc = -EOPNOTSUPP;
goto out_label;
}
mdsk_term_io(device);
if (rc == 0)
break;
}
if (bsize > PAGE_SIZE) {
pr_warning("%s: Accessing the DASD failed because of an "
"incorrect format (rc=%d)\n",
dev_name(&device->cdev->dev), rc);
rc = -EIO;
goto out_label;
}
/* check for label block */
if (memcmp(label->label_id, DASD_DIAG_CMS1,
sizeof(DASD_DIAG_CMS1)) == 0) {
/* get formatted blocksize from label block */
bsize = (unsigned int) label->block_size;
block->blocks = (unsigned long) label->block_count;
} else
block->blocks = end_block;
block->bp_block = bsize;
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < bsize; sb = sb << 1)
block->s2b_shift++;
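/*
 * Example (illustrative): for bsize == 4096 the loop runs for
 * sb = 512, 1024 and 2048, leaving s2b_shift == 3.
 */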
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc && (rc != 4)) {
pr_warning("%s: DIAG initialization failed with rc=%d\n",
dev_name(&device->cdev->dev), rc);
rc = -EIO;
} else {
if (rc == 4)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
pr_info("%s: New DASD with %ld byte/block, total size %ld "
"KB%s\n", dev_name(&device->cdev->dev),
(unsigned long) block->bp_block,
(unsigned long) (block->blocks <<
block->s2b_shift) >> 1,
(rc == 4) ? ", read-only device" : "");
rc = 0;
}
out_label:
free_page((long) label);
out:
if (rc) {
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
kfree(private);
}
return rc;
}
/* Fill in virtual disk geometry for device. Return zero on success, non-zero
* otherwise. */
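/*
 * Worked example (illustrative): for a 4 KiB block size, s2b_shift is 3,
 * so a device with N blocks reports cylinders = (N << 3) >> 10 = N / 128,
 * heads = 16 and sectors = 128 >> 3 = 16. The geometry is synthetic;
 * only the underlying block count is real.
 */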
static int
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
if (dasd_check_blocksize(block->bp_block) != 0)
return -EINVAL;
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
geo->heads = 16;
geo->sectors = 128 >> block->s2b_shift;
return 0;
}
static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_action;
}
static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_postaction;
}
/* Create DASD request from block device request. Return pointer to new
* request on success, ERR_PTR otherwise. */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
struct dasd_block *block,
struct request *req)
{
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
else if (rq_data_dir(req) == WRITE)
rw_cmd = MDSK_WRITE_REQ;
else
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
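/*
 * Example (illustrative): with 4 KiB blocks (s2b_shift == 3), a request
 * starting at 512-byte sector 2048 begins at record 2048 >> 3 == 256.
 */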
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
}
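/*
 * Example (illustrative): with 4 KiB blocks (s2b_shift == 3) an 8 KiB
 * segment contributes 8192 >> (3 + 9) == 2 to the block count.
 */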
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* Build the request */
datasize = sizeof(struct dasd_diag_req) +
count*sizeof(struct dasd_diag_bio);
cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
dreq = (struct dasd_diag_req *) cqr->data;
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
for (off = 0; off < bv.bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dbio->type = rw_cmd;
dbio->block_number = recid + 1;
dbio->buffer = dst;
dbio++;
dst += blksize;
recid++;
}
}
cqr->retries = memdev->default_retries;
cqr->buildclk = get_tod_clock();
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = memdev->default_expires * HZ;
cqr->status = DASD_CQR_FILLED;
return cqr;
}
/* Release DASD request. Return non-zero if request was successful, zero
* otherwise. */
static int
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
int status;
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0)
cqr->status = DASD_CQR_FAILED;
else
cqr->status = DASD_CQR_FILLED;
};
/* Fill in IOCTL data for device. */
static int
dasd_diag_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
struct dasd_diag_private *private;
private = (struct dasd_diag_private *) device->private;
info->label_block = (unsigned int) private->pt_block;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
info->characteristics_size = sizeof (struct dasd_diag_characteristics);
memcpy(info->characteristics,
&((struct dasd_diag_private *) device->private)->rdc_data,
sizeof (struct dasd_diag_characteristics));
info->confdata_size = 0;
return 0;
}
static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *stat)
{
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"dump sense not available for DIAG data");
}
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
.max_blocks = DIAG_MAX_BLOCKS,
.check_device = dasd_diag_check_device,
.verify_path = dasd_generic_verify_path,
.fill_geometry = dasd_diag_fill_geometry,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
.erp_action = dasd_diag_erp_action,
.erp_postaction = dasd_diag_erp_postaction,
.build_cp = dasd_diag_build_cp,
.free_cp = dasd_diag_free_cp,
.dump_sense = dasd_diag_dump_sense,
.fill_info = dasd_diag_fill_info,
};
static int __init
dasd_diag_init(void)
{
if (!MACHINE_IS_VM) {
pr_info("Discipline %s cannot be used without z/VM\n",
dasd_diag_discipline.name);
return -ENODEV;
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
}
static void __exit
dasd_diag_cleanup(void)
{
unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL;
}
module_init(dasd_diag_init);
module_exit(dasd_diag_cleanup);

drivers/s390/block/dasd_diag.h (new file)

@@ -0,0 +1,122 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2000
*
*/
#define MDSK_WRITE_REQ 0x01
#define MDSK_READ_REQ 0x02
#define INIT_BIO 0x00
#define RW_BIO 0x01
#define TERM_BIO 0x02
#define DEV_CLASS_FBA 0x01
#define DEV_CLASS_ECKD 0x04
#define DASD_DIAG_CODE_31BIT 0x03
#define DASD_DIAG_CODE_64BIT 0x07
#define DASD_DIAG_RWFLAG_ASYNC 0x02
#define DASD_DIAG_RWFLAG_NOCACHE 0x01
#define DASD_DIAG_FLAGA_FORMAT_64BIT 0x80
struct dasd_diag_characteristics {
u16 dev_nr;
u16 rdc_len;
u8 vdev_class;
u8 vdev_type;
u8 vdev_status;
u8 vdev_flags;
u8 rdev_class;
u8 rdev_type;
u8 rdev_model;
u8 rdev_features;
} __attribute__ ((packed, aligned(4)));
#ifdef CONFIG_64BIT
#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
typedef u64 blocknum_t;
typedef s64 sblocknum_t;
struct dasd_diag_bio {
u8 type;
u8 status;
u8 spare1[2];
u32 alet;
blocknum_t block_number;
void *buffer;
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_init_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u32 block_size;
u8 spare2[4];
blocknum_t offset;
sblocknum_t start_block;
blocknum_t end_block;
u8 spare3[8];
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_rw_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u8 key;
u8 flags;
u8 spare2[2];
u32 block_count;
u32 alet;
u8 spare3[4];
u64 interrupt_params;
struct dasd_diag_bio *bio_list;
u8 spare4[8];
} __attribute__ ((packed, aligned(8)));
#else /* CONFIG_64BIT */
#define DASD_DIAG_FLAGA_DEFAULT 0x0
typedef u32 blocknum_t;
typedef s32 sblocknum_t;
struct dasd_diag_bio {
u8 type;
u8 status;
u16 spare1;
blocknum_t block_number;
u32 alet;
void *buffer;
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_init_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u32 block_size;
blocknum_t offset;
sblocknum_t start_block;
blocknum_t end_block;
u8 spare2[24];
} __attribute__ ((packed, aligned(8)));
struct dasd_diag_rw_io {
u16 dev_nr;
u8 flaga;
u8 spare1[21];
u8 key;
u8 flags;
u8 spare2[2];
u32 block_count;
u32 alet;
struct dasd_diag_bio *bio_list;
u32 interrupt_params;
u8 spare3[20];
} __attribute__ ((packed, aligned(8)));
#endif /* CONFIG_64BIT */

(file diff suppressed because it is too large)

drivers/s390/block/dasd_eckd.h (new file)

@@ -0,0 +1,533 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2000
*
*/
#ifndef DASD_ECKD_H
#define DASD_ECKD_H
/*****************************************************************************
* SECTION: CCW Definitions
****************************************************************************/
#define DASD_ECKD_CCW_WRITE 0x05
#define DASD_ECKD_CCW_READ 0x06
#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
#define DASD_ECKD_CCW_WRITE_KD 0x0d
#define DASD_ECKD_CCW_READ_KD 0x0e
#define DASD_ECKD_CCW_ERASE 0x11
#define DASD_ECKD_CCW_READ_COUNT 0x12
#define DASD_ECKD_CCW_SLCK 0x14
#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
#define DASD_ECKD_CCW_WRITE_CKD 0x1d
#define DASD_ECKD_CCW_READ_CKD 0x1e
#define DASD_ECKD_CCW_PSF 0x27
#define DASD_ECKD_CCW_SNID 0x34
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
#define DASD_ECKD_CCW_SNSS 0x54
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
#define DASD_ECKD_CCW_WRITE_MT 0x85
#define DASD_ECKD_CCW_READ_MT 0x86
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
#define DASD_ECKD_CCW_RESERVE 0xB4
#define DASD_ECKD_CCW_READ_TRACK 0xDE
#define DASD_ECKD_CCW_PFX 0xE7
#define DASD_ECKD_CCW_PFX_READ 0xEA
#define DASD_ECKD_CCW_RSCK 0xF9
#define DASD_ECKD_CCW_RCD 0xFA
/*
* Perform Subsystem Function / Sub-Orders
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_CUIR_RESPONSE 0x1A
#define PSF_ORDER_SSC 0x1D
/*
* CUIR response condition codes
*/
#define PSF_CUIR_INVALID 0x00
#define PSF_CUIR_COMPLETED 0x01
#define PSF_CUIR_NOT_SUPPORTED 0x02
#define PSF_CUIR_ERROR_IN_REQ 0x03
#define PSF_CUIR_DENIED 0x04
#define PSF_CUIR_LAST_PATH 0x05
#define PSF_CUIR_DEVICE_ONLINE 0x06
#define PSF_CUIR_VARY_FAILURE 0x07
#define PSF_CUIR_SOFTWARE_FAILURE 0x08
#define PSF_CUIR_NOT_RECOGNIZED 0x09
/*
* CUIR codes
*/
#define CUIR_QUIESCE 0x01
#define CUIR_RESUME 0x02
/*
* attention message definitions
*/
#define ATTENTION_LENGTH_CUIR 0x0e
#define ATTENTION_FORMAT_CUIR 0x01
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
*/
#define LV_COMPAT_CYL 0xFFFE
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256
/*****************************************************************************
* SECTION: Type Definitions
****************************************************************************/
struct eckd_count {
__u16 cyl;
__u16 head;
__u8 record;
__u8 kl;
__u16 dl;
} __attribute__ ((packed));
struct ch_t {
__u16 cyl;
__u16 head;
} __attribute__ ((packed));
struct chs_t {
__u16 cyl;
__u16 head;
__u32 sector;
} __attribute__ ((packed));
struct chr_t {
__u16 cyl;
__u16 head;
__u8 record;
} __attribute__ ((packed));
struct geom_t {
__u16 cyl;
__u16 head;
__u32 sector;
} __attribute__ ((packed));
struct eckd_home {
__u8 skip_control[14];
__u16 cell_number;
__u8 physical_addr[3];
__u8 flag;
struct ch_t track_addr;
__u8 reserved;
__u8 key_length;
__u8 reserved2[2];
} __attribute__ ((packed));
struct DE_eckd_data {
struct {
unsigned char perm:2; /* Permissions on this extent */
unsigned char reserved:1;
unsigned char seek:2; /* Seek control */
unsigned char auth:2; /* Access authorization */
unsigned char pci:1; /* PCI Fetch mode */
} __attribute__ ((packed)) mask;
struct {
unsigned char mode:2; /* Architecture mode */
unsigned char ckd:1; /* CKD Conversion */
unsigned char operation:3; /* Operation mode */
unsigned char cfw:1; /* Cache fast write */
unsigned char dfw:1; /* DASD fast write */
} __attribute__ ((packed)) attributes;
__u16 blk_size; /* Blocksize */
__u16 fast_write_id;
__u8 ga_additional; /* Global Attributes Additional */
__u8 ga_extended; /* Global Attributes Extended */
struct ch_t beg_ext;
struct ch_t end_ext;
unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
__u8 ep_format; /* Extended Parameter format byte */
__u8 ep_prio; /* Extended Parameter priority I/O byte */
__u8 ep_reserved1; /* Extended Parameter Reserved */
__u8 ep_rec_per_track; /* Number of records on a track */
__u8 ep_reserved[4]; /* Extended Parameter Reserved */
} __attribute__ ((packed));
struct LO_eckd_data {
struct {
unsigned char orientation:2;
unsigned char operation:6;
} __attribute__ ((packed)) operation;
struct {
unsigned char last_bytes_used:1;
unsigned char reserved:6;
unsigned char read_count_suffix:1;
} __attribute__ ((packed)) auxiliary;
__u8 unused;
__u8 count;
struct ch_t seek_addr;
struct chr_t search_arg;
__u8 sector;
__u16 length;
} __attribute__ ((packed));
struct LRE_eckd_data {
struct {
unsigned char orientation:2;
unsigned char operation:6;
} __attribute__ ((packed)) operation;
struct {
unsigned char length_valid:1;
unsigned char length_scope:1;
unsigned char imbedded_ccw_valid:1;
unsigned char check_bytes:2;
unsigned char imbedded_count_valid:1;
unsigned char reserved:1;
unsigned char read_count_suffix:1;
} __attribute__ ((packed)) auxiliary;
__u8 imbedded_ccw;
__u8 count;
struct ch_t seek_addr;
struct chr_t search_arg;
__u8 sector;
__u16 length;
__u8 imbedded_count;
__u8 extended_operation;
__u16 extended_parameter_length;
__u8 extended_parameter[0];
} __attribute__ ((packed));
/* Prefix data for format 0x00 and 0x01 */
struct PFX_eckd_data {
unsigned char format;
struct {
unsigned char define_extent:1;
unsigned char time_stamp:1;
unsigned char verify_base:1;
unsigned char hyper_pav:1;
unsigned char reserved:4;
} __attribute__ ((packed)) validity;
__u8 base_address;
__u8 aux;
__u8 base_lss;
__u8 reserved[7];
struct DE_eckd_data define_extent;
struct LRE_eckd_data locate_record;
} __attribute__ ((packed));
struct dasd_eckd_characteristics {
__u16 cu_type;
struct {
unsigned char support:2;
unsigned char async:1;
unsigned char reserved:1;
unsigned char cache_info:1;
unsigned char model:3;
} __attribute__ ((packed)) cu_model;
__u16 dev_type;
__u8 dev_model;
struct {
unsigned char mult_burst:1;
unsigned char RT_in_LR:1;
unsigned char reserved1:1;
unsigned char RD_IN_LR:1;
unsigned char reserved2:4;
unsigned char reserved3:8;
unsigned char defect_wr:1;
unsigned char XRC_supported:1;
unsigned char reserved4:1;
unsigned char striping:1;
unsigned char reserved5:4;
unsigned char cfw:1;
unsigned char reserved6:2;
unsigned char cache:1;
unsigned char dual_copy:1;
unsigned char dfw:1;
unsigned char reset_alleg:1;
unsigned char sense_down:1;
} __attribute__ ((packed)) facilities;
__u8 dev_class;
__u8 unit_type;
__u16 no_cyl;
__u16 trk_per_cyl;
__u8 sec_per_trk;
__u8 byte_per_track[3];
__u16 home_bytes;
__u8 formula;
union {
struct {
__u8 f1;
__u16 f2;
__u16 f3;
} __attribute__ ((packed)) f_0x01;
struct {
__u8 f1;
__u8 f2;
__u8 f3;
__u8 f4;
__u8 f5;
} __attribute__ ((packed)) f_0x02;
} __attribute__ ((packed)) factors;
__u16 first_alt_trk;
__u16 no_alt_trk;
__u16 first_dia_trk;
__u16 no_dia_trk;
__u16 first_sup_trk;
__u16 no_sup_trk;
__u8 MDR_ID;
__u8 OBR_ID;
__u8 director;
__u8 rd_trk_set;
__u16 max_rec_zero;
__u8 reserved1;
__u8 RWANY_in_LR;
__u8 factor6;
__u8 factor7;
__u8 factor8;
__u8 reserved2[3];
__u8 reserved3[6];
__u32 long_no_cyl;
} __attribute__ ((packed));
/* elements of the configuration data */
struct dasd_ned {
struct {
__u8 identifier:2;
__u8 token_id:1;
__u8 sno_valid:1;
__u8 subst_sno:1;
__u8 recNED:1;
__u8 emuNED:1;
__u8 reserved:1;
} __attribute__ ((packed)) flags;
__u8 descriptor;
__u8 dev_class;
__u8 reserved;
__u8 dev_type[6];
__u8 dev_model[3];
__u8 HDA_manufacturer[3];
__u8 HDA_location[2];
__u8 HDA_seqno[12];
__u8 ID;
__u8 unit_addr;
} __attribute__ ((packed));
struct dasd_sneq {
struct {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
__u8 res1;
__u16 format;
__u8 res2[4]; /* byte 4- 7 */
__u8 sua_flags; /* byte 8 */
__u8 base_unit_addr; /* byte 9 */
__u8 res3[22]; /* byte 10-31 */
} __attribute__ ((packed));
struct vd_sneq {
struct {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
__u8 res1;
__u16 format;
__u8 res2[4]; /* byte 4- 7 */
__u8 uit[16]; /* byte 8-23 */
__u8 res3[8]; /* byte 24-31 */
} __attribute__ ((packed));
struct dasd_gneq {
struct {
__u8 identifier:2;
__u8 reserved:6;
} __attribute__ ((packed)) flags;
__u8 reserved[5];
struct {
__u8 value:2;
__u8 number:6;
} __attribute__ ((packed)) timeout;
__u8 reserved3;
__u16 subsystemID;
__u8 reserved2[22];
} __attribute__ ((packed));
struct dasd_rssd_features {
char feature[256];
} __attribute__((packed));
struct dasd_rssd_messages {
__u16 length;
__u8 format;
__u8 code;
__u32 message_id;
__u8 flags;
char messages[4087];
} __packed;
struct dasd_cuir_message {
__u16 length;
__u8 format;
__u8 code;
__u32 message_id;
__u8 flags;
__u8 neq_map[3];
__u8 ned_map;
__u8 record_selector;
} __packed;
struct dasd_psf_cuir_response {
__u8 order;
__u8 flags;
__u8 cc;
__u8 chpid;
__u16 device_nr;
__u16 reserved;
__u32 message_id;
__u64 system_id;
__u8 cssid;
__u8 ssid;
} __packed;
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data
*/
struct dasd_psf_prssd_data {
unsigned char order;
unsigned char flags;
unsigned char reserved[4];
unsigned char suborder;
unsigned char varies[5];
} __attribute__ ((packed));
/*
* Perform Subsystem Function - Set Subsystem Characteristics
*/
struct dasd_psf_ssc_data {
unsigned char order;
unsigned char flags;
unsigned char cu_type[4];
unsigned char suborder;
unsigned char reserved[59];
} __attribute__((packed));
/*
* some structures and definitions for alias handling
*/
struct dasd_unit_address_configuration {
struct {
char ua_type;
char base_ua;
} unit[256];
} __attribute__((packed));
#define MAX_DEVICES_PER_LCU 256
/* flags on the LCU */
#define NEED_UAC_UPDATE 0x01
#define UPDATE_PENDING 0x02
enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
struct alias_root {
struct list_head serverlist;
spinlock_t lock;
};
struct alias_server {
struct list_head server;
struct dasd_uid uid;
struct list_head lculist;
};
struct summary_unit_check_work_data {
char reason;
struct dasd_device *device;
struct work_struct worker;
};
struct read_uac_work_data {
struct dasd_device *device;
struct delayed_work dwork;
};
struct alias_lcu {
struct list_head lcu;
struct dasd_uid uid;
enum pavtype pav;
char flags;
spinlock_t lock;
struct list_head grouplist;
struct list_head active_devices;
struct list_head inactive_devices;
struct dasd_unit_address_configuration *uac;
struct summary_unit_check_work_data suc_data;
struct read_uac_work_data ruac_data;
struct dasd_ccw_req *rsu_cqr;
struct completion lcu_setup;
};
struct alias_pav_group {
struct list_head group;
struct dasd_uid uid;
struct alias_lcu *lcu;
struct list_head baselist;
struct list_head aliaslist;
struct dasd_device *next;
};
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
struct vd_sneq *vdsneq;
struct dasd_gneq *gneq;
struct eckd_count count_area[5];
int init_cqr_status;
int uses_cdl;
struct attrib_data_t attrib; /* e.g. cache operations */
struct dasd_rssd_features features;
u32 real_cyl;
	/* alias management */
struct dasd_uid uid;
struct alias_pav_group *pavgroup;
struct alias_lcu *lcu;
int count;
u32 fcx_max_data;
};
int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
int dasd_alias_add_device(struct dasd_device *);
int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
int dasd_alias_update_add_device(struct dasd_device *);
#endif /* DASD_ECKD_H */

709
drivers/s390/block/dasd_eer.c Normal file
View file

@ -0,0 +1,709 @@
/*
* Character device driver for extended error reporting.
*
* Copyright IBM Corp. 2005
* extended error reporting for DASD ECKD devices
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"
/*
* SECTION: the internal buffer
*/
/*
 * The internal buffer is meant to store opaque blobs of data, so it does
* not know of higher level concepts like triggers.
* It consists of a number of pages that are used as a ringbuffer. Each data
* blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
*
* To allow for multiple independent readers we create one internal buffer
* each time the device is opened and destroy the buffer when the file is
* closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
*
* One record can be written to a buffer by using the functions
* - dasd_eer_start_record (one time per record to write the size to the
* buffer and reserve the space for the data)
* - dasd_eer_write_buffer (one or more times per record to write the data)
* The data can be written in several steps but you will have to compute
* the total size up front for the invocation of dasd_eer_start_record.
* If the ringbuffer is full, dasd_eer_start_record will remove the required
* number of old records.
*
* A record is typically read in two steps, first read the integer that
* specifies the size of the following data, then read the data.
* Both can be done by
* - dasd_eer_read_buffer
*
* For all mentioned functions you need to get the bufferlock first and keep
* it until a complete record is written or read.
*
* All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
* the private_data field of that file. To be able to write data to all
* existing buffers, each buffer is also added to the bufferlist.
* If the user does not want to read a complete record in one go, we have to
* keep track of the rest of the record. residual stores the number of bytes
 * that still have to be delivered. If the rest of the record is invalidated
 * between
* two reads then residual will be set to -1 so that the next read will fail.
* All entries in the eerbuffer structure are protected with the bufferlock.
* To avoid races between writing to a buffer on the one side and creating
* and destroying buffers on the other side, the bufferlock must also be used
* to protect the bufferlist.
*/
static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);
struct eerbuffer {
struct list_head list;
char **buffer;
int buffersize;
int buffer_page_count;
int head;
int tail;
int residual;
};
static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
/*
* How many free bytes are available on the buffer.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
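	/*
	 * One byte always stays unused, so that a completely full buffer
	 * (head one byte behind tail) can be distinguished from an empty
	 * one (head == tail).
	 */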
if (eerb->head < eerb->tail)
return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}
/*
* How many bytes of buffer space are used.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
if (eerb->head >= eerb->tail)
return eerb->head - eerb->tail;
return eerb->buffersize - eerb->tail + eerb->head;
}
/*
* The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Call dasd_eer_start_record first to make sure that
 * enough free space is available.
* Needs to be called with bufferlock held.
*/
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
char *data, int count)
{
	unsigned long headindex, localhead;
unsigned long rest, len;
char *nextdata;
nextdata = data;
rest = count;
while (rest > 0) {
headindex = eerb->head / PAGE_SIZE;
localhead = eerb->head % PAGE_SIZE;
len = min(rest, PAGE_SIZE - localhead);
memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
nextdata += len;
rest -= len;
eerb->head += len;
if (eerb->head == eerb->buffersize)
eerb->head = 0; /* wrap around */
BUG_ON(eerb->head > eerb->buffersize);
}
}
/*
* Needs to be called with bufferlock held.
*/
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
unsigned long rest, len, finalcount;
char *nextdata;
finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
nextdata = data;
rest = finalcount;
while (rest > 0) {
tailindex = eerb->tail / PAGE_SIZE;
localtail = eerb->tail % PAGE_SIZE;
len = min(rest, PAGE_SIZE - localtail);
memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
nextdata += len;
rest -= len;
eerb->tail += len;
if (eerb->tail == eerb->buffersize)
eerb->tail = 0; /* wrap around */
BUG_ON(eerb->tail > eerb->buffersize);
}
return finalcount;
}
/*
* Whenever you want to write a blob of data to the internal buffer you
* have to start by using this function first. It will write the number
* of bytes that will be written to the buffer. If necessary it will remove
* old records to make room for the new one.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
int tailcount;
if (count + sizeof(count) > eerb->buffersize)
return -ENOMEM;
while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
if (eerb->residual > 0) {
eerb->tail += eerb->residual;
if (eerb->tail >= eerb->buffersize)
eerb->tail -= eerb->buffersize;
eerb->residual = -1;
}
dasd_eer_read_buffer(eerb, (char *) &tailcount,
sizeof(tailcount));
eerb->tail += tailcount;
if (eerb->tail >= eerb->buffersize)
eerb->tail -= eerb->buffersize;
}
dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count));
return 0;
}
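/*
 * Illustrative sketch, not part of the original driver: how one complete
 * record is emitted with the helpers above. The caller must hold
 * bufferlock across both calls so the length header and the payload stay
 * adjacent in the ring. The function name is made up for this example.
 */
static void __maybe_unused dasd_eer_example_put_record(struct eerbuffer *eerb,
						       char *data, int count)
{
	/* reserve space and write the leading length integer */
	if (dasd_eer_start_record(eerb, count))
		return; /* record would not fit into the buffer at all */
	/* copy the payload; could also be split over several calls */
	dasd_eer_write_buffer(eerb, data, count);
}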
/*
* Release pages that are not used anymore.
*/
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
int i;
for (i = 0; i < no_pages; i++)
free_page((unsigned long) buf[i]);
}
/*
* Allocate a new set of memory pages.
*/
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
int i;
for (i = 0; i < no_pages; i++) {
buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
if (!buf[i]) {
dasd_eer_free_buffer_pages(buf, i);
return -ENOMEM;
}
}
return 0;
}
/*
* SECTION: The extended error reporting functionality
*/
/*
* When a DASD device driver wants to report an error, it calls the
* function dasd_eer_write and gives the respective trigger ID as
* parameter. Currently there are four kinds of triggers:
*
* DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
* DASD_EER_PPRCSUSPEND: PPRC was suspended
* DASD_EER_NOPATH: There is no path to the device left.
* DASD_EER_STATECHANGE: The state of the device has changed.
*
* For the first three triggers all required information can be supplied by
* the caller. For these triggers a record is written by the function
* dasd_eer_write_standard_trigger.
*
* The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status ccw needs to be executed to gather the necessary sense data first.
* The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
* trigger.
*
* To avoid memory allocations at runtime, the necessary memory is allocated
* when the extended error reporting is enabled for a device (by
* dasd_eer_probe). There is one sense subsystem status request for each
* eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
* is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
* that the cqr is currently in use, dasd_eer_snss cannot start a second
* request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
* the SNSS request will check the bit and call dasd_eer_snss again.
*/
#define SNSS_DATA_SIZE 44
#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
__u32 total_size;
__u32 trigger;
__u64 tv_sec;
__u64 tv_usec;
char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
/*
* The following function can be used for those triggers that have
* all necessary data available when the function is called.
* If the parameter cqr is not NULL, the chain of requests will be searched
* for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
*/
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
struct dasd_ccw_req *cqr,
int trigger)
{
struct dasd_ccw_req *temp_cqr;
int data_size;
struct timeval tv;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
char *sense;
/* go through cqr chain and count the valid sense data sets */
data_size = 0;
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
if (dasd_get_sense(&temp_cqr->irb))
data_size += 32;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = trigger;
do_gettimeofday(&tv);
header.tv_sec = tv.tv_sec;
header.tv_usec = tv.tv_usec;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
sense = dasd_get_sense(&temp_cqr->irb);
if (sense)
dasd_eer_write_buffer(eerb, sense, 32);
}
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
* This function writes a DASD_EER_STATECHANGE trigger.
*/
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
struct dasd_ccw_req *cqr,
int trigger)
{
int data_size;
int snss_rc;
struct timeval tv;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
if (snss_rc)
data_size = 0;
else
data_size = SNSS_DATA_SIZE;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = DASD_EER_STATECHANGE;
do_gettimeofday(&tv);
header.tv_sec = tv.tv_sec;
header.tv_usec = tv.tv_usec;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
if (!snss_rc)
dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
* This function is called for all triggers. It calls the appropriate
* function that writes the actual trigger records.
*/
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
unsigned int id)
{
if (!device->eer_cqr)
return;
switch (id) {
case DASD_EER_FATALERROR:
case DASD_EER_PPRCSUSPEND:
dasd_eer_write_standard_trigger(device, cqr, id);
break;
case DASD_EER_NOPATH:
dasd_eer_write_standard_trigger(device, NULL, id);
break;
case DASD_EER_STATECHANGE:
dasd_eer_write_snss_trigger(device, cqr, id);
break;
default: /* unknown trigger, so we write it without any sense data */
dasd_eer_write_standard_trigger(device, NULL, id);
break;
}
}
EXPORT_SYMBOL(dasd_eer_write);
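/*
 * Illustrative sketch, not part of the original driver: a caller in the
 * dasd core that gives up on a request would report it like
 *
 *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
 *
 * which is a no-op unless EER has been enabled for the device.
 */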
/*
* Start a sense subsystem status request.
* Needs to be called with the device held.
*/
void dasd_eer_snss(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
cqr = device->eer_cqr;
if (!cqr) /* Device not eer enabled. */
return;
if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
/* Sense subsystem status request in use. */
set_bit(DASD_FLAG_EER_SNSS, &device->flags);
return;
}
/* cdev is already locked, can't use dasd_add_request_head */
clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
dasd_schedule_device_bh(device);
}
/*
* Callback function for use with sense subsystem status request.
*/
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
struct dasd_device *device = cqr->startdev;
unsigned long flags;
dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (device->eer_cqr == cqr) {
clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
/* Another SNSS has been requested in the meantime. */
dasd_eer_snss(device);
cqr = NULL;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
/*
* Extended error recovery has been switched off while
* the SNSS request was running. It could even have
* been switched off and on again in which case there
* is a new ccw in device->eer_cqr. Free the "old"
* snss request now.
*/
dasd_kfree_request(cqr, device);
}
/*
* Enable error reporting on a given device.
*/
int dasd_eer_enable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
struct ccw1 *ccw;
if (device->eer_cqr)
return 0;
if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
if (IS_ERR(cqr))
return -ENOMEM;
cqr->startdev = device;
cqr->retries = 255;
cqr->expires = 10 * HZ;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
ccw->cda = (__u32)(addr_t) cqr->data;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!device->eer_cqr) {
device->eer_cqr = cqr;
cqr = NULL;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
dasd_kfree_request(cqr, device);
return 0;
}
/*
* Disable error reporting on a given device.
*/
void dasd_eer_disable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
int in_use;
if (!device->eer_cqr)
return;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
cqr = device->eer_cqr;
device->eer_cqr = NULL;
clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr && !in_use)
dasd_kfree_request(cqr, device);
}
/*
* SECTION: the device operations
*/
/*
* On the one side we need a lock to access our internal buffer, on the
* other side a copy_to_user can sleep. So we need to copy the data we have
 * to transfer into a readbuffer, which is protected by the readbuffer_mutex.
*/
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);
static int dasd_eer_open(struct inode *inp, struct file *filp)
{
struct eerbuffer *eerb;
unsigned long flags;
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
if (!eerb)
return -ENOMEM;
eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
kfree(eerb);
DBF_EVENT(DBF_WARNING, "can't open device since module "
"parameter eer_pages is smaller than 1 or"
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
return -EINVAL;
}
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
GFP_KERNEL);
if (!eerb->buffer) {
kfree(eerb);
return -ENOMEM;
}
if (dasd_eer_allocate_buffer_pages(eerb->buffer,
eerb->buffer_page_count)) {
kfree(eerb->buffer);
kfree(eerb);
return -ENOMEM;
}
filp->private_data = eerb;
spin_lock_irqsave(&bufferlock, flags);
list_add(&eerb->list, &bufferlist);
spin_unlock_irqrestore(&bufferlock, flags);
	return nonseekable_open(inp, filp);
}
static int dasd_eer_close(struct inode *inp, struct file *filp)
{
struct eerbuffer *eerb;
unsigned long flags;
eerb = (struct eerbuffer *) filp->private_data;
spin_lock_irqsave(&bufferlock, flags);
list_del(&eerb->list);
spin_unlock_irqrestore(&bufferlock, flags);
dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
kfree(eerb->buffer);
kfree(eerb);
return 0;
}
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
unsigned long flags;
struct eerbuffer *eerb;
eerb = (struct eerbuffer *) filp->private_data;
if (mutex_lock_interruptible(&readbuffer_mutex))
return -ERESTARTSYS;
spin_lock_irqsave(&bufferlock, flags);
if (eerb->residual < 0) { /* the remainder of this record */
/* has been deleted */
eerb->residual = 0;
spin_unlock_irqrestore(&bufferlock, flags);
mutex_unlock(&readbuffer_mutex);
return -EIO;
} else if (eerb->residual > 0) {
/* OK we still have a second half of a record to deliver */
effective_count = min(eerb->residual, (int) count);
eerb->residual -= effective_count;
} else {
tc = 0;
while (!tc) {
tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
sizeof(tailcount));
if (!tc) {
/* no data available */
spin_unlock_irqrestore(&bufferlock, flags);
mutex_unlock(&readbuffer_mutex);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
rc = wait_event_interruptible(
dasd_eer_read_wait_queue,
eerb->head != eerb->tail);
if (rc)
return rc;
if (mutex_lock_interruptible(&readbuffer_mutex))
return -ERESTARTSYS;
spin_lock_irqsave(&bufferlock, flags);
}
}
WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
eerb->residual = tailcount - effective_count;
}
tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
WARN_ON(tc != effective_count);
spin_unlock_irqrestore(&bufferlock, flags);
if (copy_to_user(buf, readbuffer, effective_count)) {
mutex_unlock(&readbuffer_mutex);
return -EFAULT;
}
mutex_unlock(&readbuffer_mutex);
return effective_count;
}
static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
unsigned int mask;
unsigned long flags;
struct eerbuffer *eerb;
eerb = (struct eerbuffer *) filp->private_data;
poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
spin_lock_irqsave(&bufferlock, flags);
if (eerb->head != eerb->tail)
		mask = POLLIN | POLLRDNORM;
else
mask = 0;
spin_unlock_irqrestore(&bufferlock, flags);
return mask;
}
static const struct file_operations dasd_eer_fops = {
.open = &dasd_eer_open,
.release = &dasd_eer_close,
.read = &dasd_eer_read,
.poll = &dasd_eer_poll,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
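/*
 * Illustrative userspace sketch, not part of the driver: records are
 * consumed by reading the misc device node (typically /dev/dasd_eer).
 * Each record starts with a struct dasd_eer_header, followed by the
 * trigger data and a terminating "EOR" marker:
 *
 *	int fd = open("/dev/dasd_eer", O_RDONLY);
 *	char rec[4096];
 *	ssize_t n = read(fd, rec, sizeof(rec)); // blocks until data arrives
 *	struct dasd_eer_header *hdr = (struct dasd_eer_header *) rec;
 *	// hdr->trigger, hdr->busid and hdr->total_size describe the record
 */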
static struct miscdevice *dasd_eer_dev = NULL;
int __init dasd_eer_init(void)
{
int rc;
dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
if (!dasd_eer_dev)
return -ENOMEM;
dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
dasd_eer_dev->name = "dasd_eer";
dasd_eer_dev->fops = &dasd_eer_fops;
rc = misc_register(dasd_eer_dev);
if (rc) {
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
"register misc device");
return rc;
}
return 0;
}
void dasd_eer_exit(void)
{
if (dasd_eer_dev) {
misc_deregister(dasd_eer_dev);
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
}
}

202
drivers/s390/block/dasd_erp.c Normal file
View file

@ -0,0 +1,202 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2001
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/init.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_erp:"
#include "dasd_int.h"
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
struct dasd_device * device)
{
unsigned long flags;
struct dasd_ccw_req *cqr;
char *data;
int size;
/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
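	/* round the request struct size up to the next multiple of 8 bytes */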
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
if (datasize > 0)
size += datasize;
spin_lock_irqsave(&device->mem_lock, flags);
cqr = (struct dasd_ccw_req *)
dasd_alloc_chunk(&device->erp_chunks, size);
spin_unlock_irqrestore(&device->mem_lock, flags);
if (cqr == NULL)
return ERR_PTR(-ENOMEM);
memset(cqr, 0, sizeof(struct dasd_ccw_req));
INIT_LIST_HEAD(&cqr->devlist);
INIT_LIST_HEAD(&cqr->blocklist);
data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
cqr->cpaddr = NULL;
if (cplength > 0) {
cqr->cpaddr = (struct ccw1 *) data;
data += cplength*sizeof(struct ccw1);
memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
}
cqr->data = NULL;
if (datasize > 0) {
cqr->data = data;
memset(cqr->data, 0, datasize);
}
strncpy((char *) &cqr->magic, magic, 4);
ASCEBC((char *) &cqr->magic, 4);
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
void
dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
{
unsigned long flags;
spin_lock_irqsave(&device->mem_lock, flags);
dasd_free_chunk(&device->erp_chunks, cqr);
spin_unlock_irqrestore(&device->mem_lock, flags);
atomic_dec(&device->ref_count);
}
/*
* dasd_default_erp_action just retries the current cqr
*/
struct dasd_ccw_req *
dasd_default_erp_action(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
device = cqr->startdev;
/* just retry - there is nothing to save ... I got no sense data.... */
if (cqr->retries > 0) {
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP called (%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = device->path_data.opm;
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_tod_clock();
}
return cqr;
} /* end dasd_default_erp_action */
/*
* DESCRIPTION
 * Frees all ERPs of the current ERP Chain and sets the status
* of the original CQR either to DASD_CQR_DONE if ERP was successful
* or to DASD_CQR_FAILED if ERP was NOT successful.
* NOTE: This function is only called if no discipline postaction
* is available
*
* PARAMETER
* erp current erp_head
*
* RETURN VALUES
* cqr pointer to the original CQR
*/
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
int success;
unsigned long long startclk, stopclk;
struct dasd_device *startdev;
BUG_ON(cqr->refers == NULL || cqr->function == NULL);
success = cqr->status == DASD_CQR_DONE;
startclk = cqr->startclk;
stopclk = cqr->stopclk;
startdev = cqr->startdev;
/* free all ERPs - but NOT the original cqr */
while (cqr->refers != NULL) {
struct dasd_ccw_req *refers;
refers = cqr->refers;
/* remove the request from the block queue */
list_del(&cqr->blocklist);
/* free the finished erp request */
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
/* set corresponding status to original cqr */
cqr->startclk = startclk;
cqr->stopclk = stopclk;
cqr->startdev = startdev;
if (success)
cqr->status = DASD_CQR_DONE;
else {
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_tod_clock();
}
return cqr;
} /* end default_erp_postaction */
void
dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_device *device;
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev,
"A timeout error occurred for cqr %p", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev,
"A transport error occurred for cqr %p", cqr);
return;
}
/* dump sense data */
if (device->discipline && device->discipline->dump_sense)
device->discipline->dump_sense(device, cqr, irb);
}
void
dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_device *device;
device = cqr->startdev;
	/* dump sense data to s390 debug feature */
if (device->discipline && device->discipline->dump_sense_dbf)
device->discipline->dump_sense_dbf(device, irb, "log");
}
EXPORT_SYMBOL(dasd_log_sense_dbf);
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);

635
drivers/s390/block/dasd_fba.c Normal file
View file

@ -0,0 +1,635 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd-fba"
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/debug.h>
#include <linux/slab.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
#include "dasd_fba.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(fba):"
#define FBA_DEFAULT_RETRIES 32
#define DASD_FBA_CCW_WRITE 0x41
#define DASD_FBA_CCW_READ 0x42
#define DASD_FBA_CCW_LOCATE 0x43
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_fba_discipline;
struct dasd_fba_private {
struct dasd_fba_characteristics rdc_data;
};
static struct ccw_device_id dasd_fba_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
static struct ccw_driver dasd_fba_driver; /* see below */
static int
dasd_fba_probe(struct ccw_device *cdev)
{
return dasd_generic_probe(cdev, &dasd_fba_discipline);
}
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}
static struct ccw_driver dasd_fba_driver = {
.driver = {
.name = "dasd-fba",
.owner = THIS_MODULE,
},
.ids = dasd_fba_ids,
.probe = dasd_fba_probe,
.remove = dasd_generic_remove,
.set_offline = dasd_generic_set_offline,
.set_online = dasd_fba_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
.int_class = IRQIO_DAS,
};
static void
define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
int blksize, int beg, int nr)
{
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32) __pa(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
else if (rw == READ)
(data->mask).perm = 0x1;
else
data->mask.perm = 0x2;
data->blk_size = blksize;
data->ext_loc = beg;
data->ext_end = nr - 1;
}
static void
locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
int block_nr, int block_ct)
{
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = (__u32) __pa(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
else if (rw == READ)
data->operation.cmd = 0x6;
else
data->operation.cmd = 0x8;
data->blk_nr = block_nr;
data->blk_ct = block_ct;
}
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
struct dasd_block *block;
struct dasd_fba_private *private;
struct ccw_device *cdev = device->cdev;
int rc;
int readonly;
private = (struct dasd_fba_private *) device->private;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD "
"data failed\n");
return -ENOMEM;
}
device->private = (void *) private;
} else {
memset(private, 0, sizeof(*private));
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
"dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
}
device->block = block;
block->base = device;
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
"characteristics returned error %d", rc);
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
kfree(private);
return rc;
}
device->default_expires = DASD_EXPIRES;
device->default_retries = FBA_DEFAULT_RETRIES;
device->path_data.opm = LPM_ANYPATH;
readonly = dasd_device_is_ro(device);
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
dev_info(&device->cdev->dev,
"New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
"and %d B/blk%s\n",
cdev->id.dev_type,
cdev->id.dev_model,
cdev->id.cu_type,
cdev->id.cu_model,
((private->rdc_data.blk_bdsa *
(private->rdc_data.blk_size >> 9)) >> 11),
private->rdc_data.blk_size,
readonly ? ", read-only device" : "");
return 0;
}
static int dasd_fba_do_analysis(struct dasd_block *block)
{
struct dasd_fba_private *private;
int sb, rc;
private = (struct dasd_fba_private *) block->base->private;
rc = dasd_check_blocksize(private->rdc_data.blk_size);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
private->rdc_data.blk_size);
return rc;
}
block->blocks = private->rdc_data.blk_bdsa;
block->bp_block = private->rdc_data.blk_size;
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
block->s2b_shift++;
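	/* e.g. a 4096 byte block size yields s2b_shift = 3 (512 << 3 == 4096) */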
return 0;
}
static int dasd_fba_fill_geometry(struct dasd_block *block,
struct hd_geometry *geo)
{
if (dasd_check_blocksize(block->bp_block) != 0)
return -EINVAL;
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
geo->heads = 16;
geo->sectors = 128 >> block->s2b_shift;
return 0;
}
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_action;
}
static dasd_erp_fn_t
dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
{
if (cqr->function == dasd_default_erp_action)
return dasd_default_erp_postaction;
DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
cqr->function);
return NULL;
}
static void dasd_fba_check_for_device_change(struct dasd_device *device,
struct dasd_ccw_req *cqr,
struct irb *irb)
{
char mask;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.cmd.dstat & mask) == mask)
dasd_generic_handle_state_change(device);
}
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
struct dasd_block *block,
struct request *req)
{
struct dasd_fba_private *private;
unsigned long *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
private = (struct dasd_fba_private *) block->base->private;
if (rq_data_dir(req) == READ) {
cmd = DASD_FBA_CCW_READ;
} else if (rq_data_dir(req) == WRITE) {
cmd = DASD_FBA_CCW_WRITE;
} else
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
if (bv.bv_len & (blksize - 1))
			/* FBA can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
cidaw += bv.bv_len / blksize;
#endif
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* 1x define extent + 1x locate record + number of blocks */
cplength = 2 + count;
/* 1x define extent + 1x locate record */
datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
cidaw * sizeof(unsigned long);
/*
* Find out number of additional locate record ccws if the device
* can't do data chaining.
*/
if (private->rdc_data.mode.bits.data_chain == 0) {
cplength += count - 1;
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
/* Locate record for all blocks for smart devices. */
if (private->rdc_data.mode.bits.data_chain != 0) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
dst = copy + bv.bv_offset;
}
for (off = 0; off < bv.bv_len; off += blksize) {
/* Locate record for stupid devices. */
if (private->rdc_data.mode.bits.data_chain == 0) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw, LO_data++,
rq_data_dir(req),
recid - first_rec, 1);
ccw->flags = CCW_FLAG_CC;
ccw++;
} else {
if (recid > first_rec)
ccw[-1].flags |= CCW_FLAG_DC;
else
ccw[-1].flags |= CCW_FLAG_CC;
}
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
ccw->cda = (__u32)(addr_t) idaws;
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
ccw->cda = (__u32)(addr_t) dst;
ccw->flags = 0;
}
ccw++;
dst += blksize;
recid++;
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = memdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
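/*
 * Illustrative sketch, not part of the original driver: for a two-block
 * read, the channel program built above on a device with data chaining
 * looks like
 *
 *	DEFINE EXTENT (CC) -> LOCATE RECORD (CC) -> READ (DC) -> READ
 *
 * while a device without data chaining gets one LOCATE RECORD in front
 * of every READ/WRITE ccw instead.
 */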
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_fba_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, off;
int status;
if (!dasd_page_cache)
goto out;
private = (struct dasd_fba_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
cda = *((char **)((addr_t) ccw->cda));
else
cda = (char *)((addr_t) ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
dst = NULL;
}
ccw++;
}
}
out:
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0)
cqr->status = DASD_CQR_FAILED;
else
cqr->status = DASD_CQR_FILLED;
}
static int
dasd_fba_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
info->label_block = 1;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
info->characteristics_size = sizeof(struct dasd_fba_characteristics);
memcpy(info->characteristics,
&((struct dasd_fba_private *) device->private)->rdc_data,
sizeof (struct dasd_fba_characteristics));
info->confdata_size = 0;
return 0;
}
static void
dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
char *reason)
{
u64 *sense;
sense = (u64 *) dasd_get_sense(irb);
if (sense) {
DBF_DEV_EVENT(DBF_EMERG, device,
"%s: %s %02x%02x%02x %016llx %016llx %016llx "
"%016llx", reason,
scsw_is_tm(&irb->scsw) ? "t" : "c",
scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
scsw_dstat(&irb->scsw), sense[0], sense[1],
sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s",
"SORRY - NO VALID SENSE AVAILABLE\n");
}
}
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
char *page;
struct ccw1 *act, *end, *last;
int len, sl, sct, count;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to dump sense data");
return;
}
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
irb->ecw[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
} else {
len += sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
act = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
while (act <= end) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
printk(KERN_ERR "%s", page);
/* print failing CCW area */
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
while (act <= last) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
if (len > 0)
printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
/*
* max_blocks is dependent on the amount of storage that is available
* in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one struct dasd_mchunk has
* 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
* up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
* addition we have one define extent ccw + 16 bytes of data and a
* locate record ccw for each block (stupid devices!) + 16 bytes of data.
* That makes:
* (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
* We want to fit two into the available memory so that we can immediately
* start the next request if one finishes off. That makes 100.1 blocks
* for one request. Give a little safety and the result is 96.
*/
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
.max_blocks = 96,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.verify_path = dasd_generic_verify_path,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_fba_handle_terminated_request,
.erp_action = dasd_fba_erp_action,
.erp_postaction = dasd_fba_erp_postaction,
.check_for_device_change = dasd_fba_check_for_device_change,
.build_cp = dasd_fba_build_cp,
.free_cp = dasd_fba_free_cp,
.dump_sense = dasd_fba_dump_sense,
.dump_sense_dbf = dasd_fba_dump_sense_dbf,
.fill_info = dasd_fba_fill_info,
};
static int __init
dasd_fba_init(void)
{
int ret;
ASCEBC(dasd_fba_discipline.ebcname, 4);
ret = ccw_driver_register(&dasd_fba_driver);
if (!ret)
wait_for_device_probe();
return ret;
}
static void __exit
dasd_fba_cleanup(void)
{
ccw_driver_unregister(&dasd_fba_driver);
}
module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);

71
drivers/s390/block/dasd_fba.h Normal file
View file

@ -0,0 +1,71 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2000
*
*/
#ifndef DASD_FBA_H
#define DASD_FBA_H
struct DE_fba_data {
struct {
unsigned char perm:2; /* Permissions on this extent */
unsigned char zero:2; /* Must be zero */
unsigned char da:1; /* usually zero */
unsigned char diag:1; /* allow diagnose */
unsigned char zero2:2; /* zero */
} __attribute__ ((packed)) mask;
__u8 zero; /* Must be zero */
__u16 blk_size; /* Blocksize */
__u32 ext_loc; /* Extent locator */
__u32 ext_beg; /* logical number of block 0 in extent */
	__u32 ext_end; /* logical number of last block in extent */
} __attribute__ ((packed));
struct LO_fba_data {
struct {
unsigned char zero:4;
unsigned char cmd:4;
} __attribute__ ((packed)) operation;
__u8 auxiliary;
__u16 blk_ct;
__u32 blk_nr;
} __attribute__ ((packed));
struct dasd_fba_characteristics {
union {
__u8 c;
struct {
unsigned char reserved:1;
unsigned char overrunnable:1;
unsigned char burst_byte:1;
unsigned char data_chain:1;
unsigned char zeros:4;
} __attribute__ ((packed)) bits;
} __attribute__ ((packed)) mode;
union {
__u8 c;
struct {
unsigned char zero0:1;
unsigned char removable:1;
unsigned char shared:1;
unsigned char zero1:1;
unsigned char mam:1;
unsigned char zeros:3;
} __attribute__ ((packed)) bits;
} __attribute__ ((packed)) features;
__u8 dev_class;
__u8 unit_type;
__u16 blk_size;
__u32 blk_per_cycl;
__u32 blk_per_bound;
__u32 blk_bdsa;
__u32 reserved0;
__u16 reserved1;
__u16 blk_ce;
__u32 reserved2;
__u16 reserved3;
} __attribute__ ((packed));
#endif /* DASD_FBA_H */

178
drivers/s390/block/dasd_gendisk.c Normal file
View file

@ -0,0 +1,178 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2001
*
* gendisk related functions for the dasd driver.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_gendisk:"
#include "dasd_int.h"
/*
* Allocate and register gendisk structure for device.
*/
int dasd_gendisk_alloc(struct dasd_block *block)
{
struct gendisk *gdp;
struct dasd_device *base;
int len;
/* Make sure the minor for this device exists. */
base = block->base;
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
gdp = alloc_disk(1 << DASD_PARTN_BITS);
if (!gdp)
return -ENOMEM;
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
gdp->driverfs_dev = &base->cdev->dev;
/*
* Set device name.
* dasda - dasdz : 26 devices
* dasdaa - dasdzz : 676 devices, added up = 702
* dasdaaa - dasdzzz : 17576 devices, added up = 18278
 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475254
*/
len = sprintf(gdp->disk_name, "dasd");
if (base->devindex > 25) {
if (base->devindex > 701) {
if (base->devindex > 18277)
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-18278)
/17576)%26));
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-702)/676)%26));
}
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-26)/26)%26));
}
len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
if (base->features & DASD_FEATURE_READONLY ||
test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
set_disk_ro(gdp, 1);
dasd_add_link_to_gendisk(gdp, base);
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
add_disk(block->gdp);
return 0;
}
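/*
 * Illustrative sketch, not part of the original driver: the naming
 * scheme above, factored into a standalone helper so the index-to-name
 * mapping is easy to follow in isolation. devindex 0 yields "dasda",
 * 26 yields "dasdaa" and 702 yields "dasdaaa". The function name is
 * made up for this example.
 */
static int __maybe_unused dasd_example_format_name(char *buf, int devindex)
{
	int len = sprintf(buf, "dasd");

	if (devindex > 25) {
		if (devindex > 701) {
			if (devindex > 18277)
				len += sprintf(buf + len, "%c", 'a' +
					       (((devindex - 18278) / 17576) % 26));
			len += sprintf(buf + len, "%c", 'a' +
				       (((devindex - 702) / 676) % 26));
		}
		len += sprintf(buf + len, "%c", 'a' +
			       (((devindex - 26) / 26) % 26));
	}
	len += sprintf(buf + len, "%c", 'a' + (devindex % 26));
	return len;
}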
/*
* Unregister and free gendisk structure for device.
*/
void dasd_gendisk_free(struct dasd_block *block)
{
if (block->gdp) {
del_gendisk(block->gdp);
block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
}
}
/*
* Trigger a partition detection.
*/
int dasd_scan_partitions(struct dasd_block *block)
{
struct block_device *bdev;
bdev = bdget_disk(block->gdp, 0);
if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
return -ENODEV;
/*
* See fs/partition/check.c:register_disk,rescan_partitions
* Can't call rescan_partitions directly. Use ioctl.
*/
ioctl_by_bdev(bdev, BLKRRPART, 0);
/*
* Since the matching blkdev_put call to the blkdev_get in
* this function is not called before dasd_destroy_partitions
* the offline open_count limit needs to be increased from
* 0 to 1. This is done by setting device->bdev (see
* dasd_generic_set_offline). As long as the partition
* detection is running no offline should be allowed. That
* is why the assignment to device->bdev is done AFTER
* the BLKRRPART ioctl.
*/
block->bdev = bdev;
return 0;
}
/*
* Remove all inodes in the system for a device, delete the
* partitions and make device unusable by setting its size to zero.
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
	/* The two structs have 168/176 bytes on 31/64 bit. */
struct blkpg_partition bpart;
struct blkpg_ioctl_arg barg;
struct block_device *bdev;
/*
* Get the bdev pointer from the device structure and clear
* device->bdev to lower the offline open_count limit again.
*/
bdev = block->bdev;
block->bdev = NULL;
/*
* See fs/partition/check.c:delete_partition
* Can't call delete_partitions directly. Use ioctl.
* The ioctl also does locking and invalidation.
*/
memset(&bpart, 0, sizeof(struct blkpg_partition));
memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
barg.data = (void __force __user *) &bpart;
barg.op = BLKPG_DEL_PARTITION;
for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
invalidate_partition(block->gdp, 0);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, FMODE_READ);
set_capacity(block->gdp, 0);
}
int dasd_gendisk_init(void)
{
int rc;
/* Register to static dasd major 94 */
rc = register_blkdev(DASD_MAJOR, "dasd");
if (rc != 0) {
pr_warning("Registering the device driver with major number "
"%d failed\n", DASD_MAJOR);
return rc;
}
return 0;
}
void dasd_gendisk_exit(void)
{
unregister_blkdev(DASD_MAJOR, "dasd");
}

815
drivers/s390/block/dasd_int.h Normal file
View file

@ -0,0 +1,815 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
*/
#ifndef DASD_INT_H
#define DASD_INT_H
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
/*
* States a dasd device can have:
* new: the dasd_device structure is allocated.
* known: the discipline for the device is identified.
* basic: the device can do basic i/o.
* unfmt: the device could not be analyzed (format is unknown).
 * ready: partition detection is done and the device can do block io.
* online: the device accepts requests from the block device queue.
*
* Things to do for startup state transitions:
* new -> known: find discipline for the device and create devfs entries.
* known -> basic: request irq line for the device.
* basic -> ready: do the initial analysis, e.g. format detection,
* do block device setup and detect partitions.
* ready -> online: schedule the device tasklet.
* Things to do for shutdown state transitions:
* online -> ready: just set the new device state.
* ready -> basic: flush requests from the block device layer, clear
* partition information and reset format information.
* basic -> known: terminate all requests and free irq.
* known -> new: remove devfs entries and forget discipline.
*/
#define DASD_STATE_NEW 0
#define DASD_STATE_KNOWN 1
#define DASD_STATE_BASIC 2
#define DASD_STATE_UNFMT 3
#define DASD_STATE_READY 4
#define DASD_STATE_ONLINE 5
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <asm/ccwdev.h>
#include <linux/workqueue.h>
#include <asm/debug.h>
#include <asm/dasd.h>
#include <asm/idals.h>
/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4
#define DASD_DIAG_MAGIC 0xC4C9C1C7
#define DASD_FBA_MAGIC 0xC6C2C140
/*
* SECTION: Type definitions
*/
struct dasd_device;
struct dasd_block;
/* BIT DEFINITIONS FOR SENSE DATA */
#define DASD_SENSE_BIT_0 0x80
#define DASD_SENSE_BIT_1 0x40
#define DASD_SENSE_BIT_2 0x20
#define DASD_SENSE_BIT_3 0x10
/* BIT DEFINITIONS FOR SIM SENSE */
#define DASD_SIM_SENSE 0x0F
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
/* lock class for nested cdev lock */
#define CDEV_NESTED_FIRST 1
#define CDEV_NESTED_SECOND 2
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
do { \
debug_sprintf_event(d_device->debug_area, \
d_level, \
d_str "\n", \
d_data); \
} while(0)
#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
do { \
debug_sprintf_exception(d_device->debug_area, \
d_level, \
d_str "\n", \
d_data); \
} while(0)
#define DBF_EVENT(d_level, d_str, d_data...)\
do { \
debug_sprintf_event(dasd_debug_area, \
d_level,\
d_str "\n", \
d_data); \
} while(0)
#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
do { \
struct ccw_dev_id __dev_id; \
ccw_device_get_id(d_cdev, &__dev_id); \
debug_sprintf_event(dasd_debug_area, \
d_level, \
"0.%x.%04x " d_str "\n", \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
#define DBF_EXC(d_level, d_str, d_data...)\
do { \
debug_sprintf_exception(dasd_debug_area, \
d_level,\
d_str "\n", \
d_data); \
} while(0)
/* limit size for an errorstring */
#define ERRORLENGTH 30
/* definition of dbf debug levels */
#define DBF_EMERG 0 /* system is unusable */
#define DBF_ALERT 1 /* action must be taken immediately */
#define DBF_CRIT 2 /* critical conditions */
#define DBF_ERR 3 /* error conditions */
#define DBF_WARNING 4 /* warning conditions */
#define DBF_NOTICE 5 /* normal but significant condition */
#define DBF_INFO 6 /* informational */
#define DBF_DEBUG 6 /* debug-level messages */
/* messages to be written via klogd and dbf */
#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
dev_name(&d_device->cdev->dev), d_args); \
DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
} while(0)
#define MESSAGE(d_loglevel,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
DBF_EVENT(DBF_ALERT, d_string, d_args); \
} while(0)
/* messages to be written via klogd only */
#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
dev_name(&d_device->cdev->dev), d_args); \
} while(0)
#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
} while(0)
struct dasd_ccw_req {
unsigned int magic; /* Eye catcher */
struct list_head devlist; /* for dasd_device request queue */
struct list_head blocklist; /* for dasd_block request queue */
/* Where to execute what... */
struct dasd_block *block; /* the originating block device */
struct dasd_device *memdev; /* the device used to allocate this */
struct dasd_device *startdev; /* device the request is started on */
struct dasd_device *basedev; /* base device if no block->base */
void *cpaddr; /* address of ccw or tcw */
unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
char status; /* status of this request */
short retries; /* A retry counter */
unsigned long flags; /* flags of this request */
/* ... and how */
unsigned long starttime; /* jiffies time of request start */
unsigned long expires; /* expiration period in jiffies */
char lpm; /* logical path mask */
void *data; /* pointer to data area */
/* these are important for recovering erroneous requests */
int intrc; /* internal error, e.g. from start_IO */
struct irb irb; /* device status in case of an error */
struct dasd_ccw_req *refers; /* ERP-chain queueing. */
void *function; /* originating ERP action */
/* these are for statistics only */
unsigned long long buildclk; /* TOD-clock of request generation */
unsigned long long startclk; /* TOD-clock of request start */
unsigned long long stopclk; /* TOD-clock of request interrupt */
unsigned long long endclk; /* TOD-clock of request termination */
/* Callback that is called after reaching final status. */
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
};
/*
* dasd_ccw_req -> status can be:
*/
#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
#define DASD_CQR_DONE 0x01 /* request is completed successfully */
#define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */
#define DASD_CQR_IN_ERP 0x03 /* request is in recovery */
#define DASD_CQR_FAILED 0x04 /* request is finally failed */
#define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */
#define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */
#define DASD_CQR_IN_IO 0x81 /* request is currently in IO */
#define DASD_CQR_ERROR 0x82 /* request is completed with error */
#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
#define DASD_CQR_CLEARED 0x84 /* request was cleared */
#define DASD_CQR_SUCCESS 0x85 /* request was successful */
/* default expiration time */
#define DASD_EXPIRES 300
#define DASD_EXPIRES_MAX 40000000
#define DASD_RETRIES 256
#define DASD_RETRIES_MAX 32768
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
* stolen. Should not be combined with
* DASD_CQR_FLAGS_USE_ERP
*/
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
* Unique identifier for dasd device.
*/
#define UA_NOT_CONFIGURED 0x00
#define UA_BASE_DEVICE 0x01
#define UA_BASE_PAV_ALIAS 0x02
#define UA_HYPER_PAV_ALIAS 0x03
struct dasd_uid {
__u8 type;
char vendor[4];
char serial[15];
__u16 ssid;
__u8 real_unit_addr;
__u8 base_unit_addr;
char vduit[33];
};
/*
* the struct dasd_discipline is
* something like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
* no, currently we are not planning to reimplement the driver in C++
*/
struct dasd_discipline {
struct module *owner;
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
int max_blocks; /* maximum number of blocks to be chained */
struct list_head list; /* used for list of disciplines */
/*
* Device recognition functions. check_device is used to verify
* the sense data and the information returned by read device
* characteristics. It returns 0 if the discipline can be used
* for the device in question. uncheck_device is called during
* device shutdown to deregister a device from its discipline.
*/
int (*check_device) (struct dasd_device *);
void (*uncheck_device) (struct dasd_device *);
/*
* do_analysis is used in the step from device state "basic" to
* state "accept". It returns 0 if the device can be made ready,
* it returns -EMEDIUMTYPE if the device can't be made ready or
* -EAGAIN if do_analysis started a ccw that needs to complete
* before the analysis may be repeated.
*/
int (*do_analysis) (struct dasd_block *);
/*
* This function is called when new paths become available.
* Disciplines may use this callback to do necessary setup work, e.g.
* verify that a new path is compatible with the current
* configuration.
*/
int (*verify_path)(struct dasd_device *, __u8);
/*
* Last things to do when a device is set online, and first things
* when it is set offline.
*/
int (*basic_to_ready) (struct dasd_device *);
int (*online_to_ready) (struct dasd_device *);
int (*basic_to_known)(struct dasd_device *);
/*
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
* returns a ccw chain to be used to format the device.
* handle_terminated_request lets the discipline examine a cqr and
* prepare it for retry.
*/
struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
struct dasd_block *,
struct request *);
int (*start_IO) (struct dasd_ccw_req *);
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
int (*format_device) (struct dasd_device *,
struct format_data_t *, int enable_pav);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
* Error recovery functions. If a request fails and needs recovery,
* erp_action() is called to create a special error recovery ccw.
* erp_postaction() is called after
* an error recovery ccw has finished its execution. dump_sense
* is called for every error condition to print the sense data
* to the console.
*/
dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
struct irb *);
void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
void (*check_for_device_change) (struct dasd_device *,
struct dasd_ccw_req *,
struct irb *);
/* i/o control functions. */
int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
/* suspend/resume functions */
int (*freeze) (struct dasd_device *);
int (*restore) (struct dasd_device *);
/* reload device after state change */
int (*reload) (struct dasd_device *);
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
void (*kick_validate) (struct dasd_device *);
int (*check_attention)(struct dasd_device *, __u8);
};
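/*
 * Illustrative sketch (not part of this commit): the minimal shape of a
 * discipline. Real disciplines (dasd_eckd, dasd_fba, dasd_diag) fill in
 * many more of the callbacks above; all names below are hypothetical.
 */
static int example_check_device(struct dasd_device *device)
{
	return 0;	/* 0 means: this discipline can drive the device */
}

static struct dasd_discipline example_discipline = {
	.owner		= THIS_MODULE,
	.name		= "XMPL",
	.ebcname	= "XMPL",
	.max_blocks	= 96,
	.check_device	= example_check_device,
};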
extern struct dasd_discipline *dasd_diag_discipline_pointer;
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and its
* eer pointer) is freed. The error reporting module needs to do all necessary
* cleanup steps.
* The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
*/
#define DASD_EER_DISABLE 0
#define DASD_EER_TRIGGER 1
/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
#define DASD_EER_FATALERROR 1
#define DASD_EER_NOPATH 2
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
struct dasd_path {
__u8 opm;
__u8 tbvpm;
__u8 ppm;
__u8 npm;
/* paths that are not used because of a special condition */
__u8 cablepm; /* mis-cabled */
__u8 hpfpm; /* the HPF requirements of the other paths are not met */
__u8 cuirpm; /* CUIR varied offline */
};
struct dasd_profile_info {
/* legacy part of profile data, as in dasd_profile_info_t */
unsigned int dasd_io_reqs; /* number of requests processed */
unsigned int dasd_io_sects; /* number of sectors processed */
unsigned int dasd_io_secs[32]; /* histogram of request sizes */
unsigned int dasd_io_times[32]; /* histogram of request times */
unsigned int dasd_io_timps[32]; /* h. of request times per sector */
unsigned int dasd_io_time1[32]; /* hist. of time from build to start */
unsigned int dasd_io_time2[32]; /* hist. of time from start to irq */
unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */
unsigned int dasd_io_time3[32]; /* hist. of time from irq to end */
unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
/* new data */
struct timespec starttod; /* time of start or last reset */
unsigned int dasd_io_alias; /* requests using an alias */
unsigned int dasd_io_tpm; /* requests using transport mode */
unsigned int dasd_read_reqs; /* total number of read requests */
unsigned int dasd_read_sects; /* total number of sectors read */
unsigned int dasd_read_alias; /* read requests using an alias */
unsigned int dasd_read_tpm; /* read requests in transport mode */
unsigned int dasd_read_secs[32]; /* histogram of request sizes */
unsigned int dasd_read_times[32]; /* histogram of request times */
unsigned int dasd_read_time1[32]; /* hist. time from build to start */
unsigned int dasd_read_time2[32]; /* hist. of time from start to irq */
unsigned int dasd_read_time3[32]; /* hist. of time from irq to end */
unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
};
struct dasd_profile {
struct dentry *dentry;
struct dasd_profile_info *data;
spinlock_t lock;
};
struct dasd_device {
/* Block device stuff. */
struct dasd_block *block;
unsigned int devindex;
unsigned long flags; /* per device flags */
unsigned short features; /* copy of devmap-features (read-only!) */
/* extended error reporting stuff (eer) */
struct dasd_ccw_req *eer_cqr;
/* Device discipline stuff. */
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
char *private;
struct dasd_path path_data;
/* Device state and target state. */
int state, target;
struct mutex state_mutex;
int stopped; /* device (ccw_device_start) was stopped */
/* reference count. */
atomic_t ref_count;
/* ccw queue and memory for static ccw/erp buffers. */
struct list_head ccw_queue;
spinlock_t mem_lock;
void *ccw_mem;
void *erp_mem;
struct list_head ccw_chunks;
struct list_head erp_chunks;
atomic_t tasklet_scheduled;
struct tasklet_struct tasklet;
struct work_struct kick_work;
struct work_struct restore_device;
struct work_struct reload_device;
struct work_struct kick_validate;
struct timer_list timer;
debug_info_t *debug_area;
struct ccw_device *cdev;
/* hook for alias management */
struct list_head alias_list;
/* default expiration time in s */
unsigned long default_expires;
unsigned long default_retries;
unsigned long blk_timeout;
struct dentry *debugfs_dentry;
struct dasd_profile profile;
};
struct dasd_block {
/* Block device stuff. */
struct gendisk *gdp;
struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct block_device *bdev;
atomic_t open_count;
unsigned long long blocks; /* size of volume in blocks */
unsigned int bp_block; /* bytes per block */
unsigned int s2b_shift; /* log2 (bp_block/512) */
struct dasd_device *base;
struct list_head ccw_queue;
spinlock_t queue_lock;
atomic_t tasklet_scheduled;
struct tasklet_struct tasklet;
struct timer_list timer;
struct dentry *debugfs_dentry;
struct dasd_profile profile;
};
struct dasd_attention_data {
struct dasd_device *device;
__u8 lpum;
};
/* reasons why device (ccw_device_start) was stopped */
#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
#define DASD_STOPPED_PENDING 4 /* long busy */
#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
#define DASD_STOPPED_SU 16 /* summary unit check handling */
#define DASD_STOPPED_PM 32 /* pm state transition */
#define DASD_UNRESUMED_PM 64 /* pm resume failed state */
/* per device flags */
#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
#define DASD_FLAG_DEVICE_RO 6 /* The device itself is read-only. Don't
* confuse this with the user specified
* read-only feature.
*/
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
#define DASD_SLEEPON_START_TAG ((void *) 1)
#define DASD_SLEEPON_END_TAG ((void *) 2)
void dasd_put_device_wake(struct dasd_device *);
/*
* Reference count inliners
*/
static inline void
dasd_get_device(struct dasd_device *device)
{
atomic_inc(&device->ref_count);
}
static inline void
dasd_put_device(struct dasd_device *device)
{
if (atomic_dec_return(&device->ref_count) == 0)
dasd_put_device_wake(device);
}
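/*
 * Illustrative sketch (not part of this commit): the usual pin/use/unpin
 * pattern built on the inliners above. The helper name is hypothetical.
 */
static inline void example_pin_device(struct dasd_device *device)
{
	dasd_get_device(device);	/* keep the structure alive while in use */
	/* ... access device members, queue i/o, etc. ... */
	dasd_put_device(device);	/* the final put wakes dasd_delete_device */
}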
/*
* The static memory in ccw_mem and erp_mem is managed by a sorted
* list of free memory chunks.
*/
struct dasd_mchunk
{
struct list_head list;
unsigned long size;
} __attribute__ ((aligned(8)));
static inline void
dasd_init_chunklist(struct list_head *chunk_list, void *mem,
unsigned long size)
{
struct dasd_mchunk *chunk;
INIT_LIST_HEAD(chunk_list);
chunk = (struct dasd_mchunk *) mem;
chunk->size = size - sizeof(struct dasd_mchunk);
list_add(&chunk->list, chunk_list);
}
static inline void *
dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
{
struct dasd_mchunk *chunk, *tmp;
size = (size + 7L) & -8L;
list_for_each_entry(chunk, chunk_list, list) {
if (chunk->size < size)
continue;
if (chunk->size > size + sizeof(struct dasd_mchunk)) {
char *endaddr = (char *) (chunk + 1) + chunk->size;
tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
tmp->size = size;
chunk->size -= size + sizeof(struct dasd_mchunk);
chunk = tmp;
} else
list_del(&chunk->list);
return (void *) (chunk + 1);
}
return NULL;
}
static inline void
dasd_free_chunk(struct list_head *chunk_list, void *mem)
{
struct dasd_mchunk *chunk, *tmp;
struct list_head *p, *left;
chunk = (struct dasd_mchunk *)
((char *) mem - sizeof(struct dasd_mchunk));
/* Find out the left neighbour in chunk_list. */
left = chunk_list;
list_for_each(p, chunk_list) {
if (list_entry(p, struct dasd_mchunk, list) > chunk)
break;
left = p;
}
/* Try to merge with right neighbour = next element from left. */
if (left->next != chunk_list) {
tmp = list_entry(left->next, struct dasd_mchunk, list);
if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
list_del(&tmp->list);
chunk->size += tmp->size + sizeof(struct dasd_mchunk);
}
}
/* Try to merge with left neighbour. */
if (left != chunk_list) {
tmp = list_entry(left, struct dasd_mchunk, list);
if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
tmp->size += chunk->size + sizeof(struct dasd_mchunk);
return;
}
}
__list_add(&chunk->list, left, left->next);
}
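/*
 * Illustrative sketch (not part of this commit): life cycle of a chunk
 * from the static ccw memory pool. The helper name and the size are
 * hypothetical; dasd_smalloc_request() in dasd.c is the real user, and
 * it takes device->mem_lock around the chunk calls just like this.
 */
static inline void example_chunk_cycle(struct dasd_device *device)
{
	unsigned long flags;
	void *buf;

	/* device->ccw_chunks was set up with dasd_init_chunklist(). */
	spin_lock_irqsave(&device->mem_lock, flags);
	buf = dasd_alloc_chunk(&device->ccw_chunks, 64);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!buf)
		return;		/* static pool exhausted */
	/* ... build a channel program in buf ... */
	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, buf);
	spin_unlock_irqrestore(&device->mem_lock, flags);
}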
/*
* Check if bsize is in { 512, 1024, 2048, 4096 }
*/
static inline int
dasd_check_blocksize(int bsize)
{
if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
return -EMEDIUMTYPE;
return 0;
}
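/*
 * Illustrative sketch (not part of this commit): how a discipline's
 * do_analysis() step might use the helper above before declaring the
 * device ready. The function name is hypothetical.
 */
static inline int example_accept_blocksize(struct dasd_block *block)
{
	if (dasd_check_blocksize(block->bp_block))
		return -EMEDIUMTYPE;	/* not 512, 1024, 2048 or 4096 */
	/* cache log2(bp_block / 512) for fast sector conversions */
	block->s2b_shift = ilog2(block->bp_block >> 9);
	return 0;
}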
/* externals in dasd.c */
#define DASD_PROFILE_OFF 0
#define DASD_PROFILE_ON 1
#define DASD_PROFILE_GLOBAL_ONLY 2
extern debug_info_t *dasd_debug_area;
extern struct dasd_profile_info dasd_global_profile_data;
extern unsigned int dasd_global_profile_level;
extern const struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_kmalloc_request(int, int, int, struct dasd_device *);
struct dasd_ccw_req *
dasd_smalloc_request(int, int, int, struct dasd_device *);
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
static inline int
dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
{
return set_normalized_cda(ccw, cda);
}
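/*
 * Illustrative sketch (not part of this commit): typical use of the dbf
 * macros defined earlier in this header. Function name and the message
 * contents are hypothetical.
 */
static inline void example_dbf_usage(struct dasd_device *device, int rc)
{
	/* recorded in the per-device debug area at "warning" level */
	DBF_DEV_EVENT(DBF_WARNING, device, "start_IO failed, rc %d", rc);
	/* recorded in the global dasd debug area at "info" level */
	DBF_EVENT(DBF_INFO, "probe returned %d", rc);
}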
struct dasd_device *dasd_alloc_device(void);
void dasd_free_device(struct dasd_device *);
struct dasd_block *dasd_alloc_block(void);
void dasd_free_block(struct dasd_block *);
enum blk_eh_timer_return dasd_times_out(struct request *req);
void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
void dasd_reload_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
int dasd_start_IO(struct dasd_ccw_req *);
int dasd_term_IO(struct dasd_ccw_req *);
void dasd_schedule_device_bh(struct dasd_device *);
void dasd_schedule_block_bh(struct dasd_block *);
int dasd_sleep_on(struct dasd_ccw_req *);
int dasd_sleep_on_queue(struct list_head *);
int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
void dasd_device_set_timer(struct dasd_device *, int);
void dasd_device_clear_timer(struct dasd_device *);
void dasd_block_set_timer(struct dasd_block *, int);
void dasd_block_clear_timer(struct dasd_block *);
int dasd_cancel_req(struct dasd_ccw_req *);
int dasd_flush_device_queue(struct dasd_device *);
int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
int dasd_generic_last_path_gone(struct dasd_device *);
int dasd_generic_path_operational(struct dasd_device *);
void dasd_generic_shutdown(struct ccw_device *);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
void dasd_generic_path_event(struct ccw_device *, int *);
int dasd_generic_verify_path(struct dasd_device *, __u8);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
void dasd_device_set_stop_bits(struct dasd_device *, int);
void dasd_device_remove_stop_bits(struct dasd_device *, int);
int dasd_device_is_ro(struct dasd_device *);
void dasd_profile_reset(struct dasd_profile *);
int dasd_profile_on(struct dasd_profile *);
void dasd_profile_off(struct dasd_profile *);
void dasd_global_profile_reset(void);
char *dasd_get_user_string(const char __user *, size_t);
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
extern int dasd_probeonly;
extern int dasd_autodetect;
extern int dasd_nopav;
extern int dasd_nofcx;
int dasd_devmap_init(void);
void dasd_devmap_exit(void);
struct dasd_device *dasd_create_device(struct ccw_device *);
void dasd_delete_device(struct dasd_device *);
int dasd_get_feature(struct ccw_device *, int);
int dasd_set_feature(struct ccw_device *, int, int);
int dasd_add_sysfs_files(struct ccw_device *);
void dasd_remove_sysfs_files(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
struct dasd_device *dasd_device_from_devindex(int);
void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
int dasd_parse(void);
int dasd_busid_known(const char *);
/* externals in dasd_gendisk.c */
int dasd_gendisk_init(void);
void dasd_gendisk_exit(void);
int dasd_gendisk_alloc(struct dasd_block *);
void dasd_gendisk_free(struct dasd_block *);
int dasd_scan_partitions(struct dasd_block *);
void dasd_destroy_partitions(struct dasd_block *);
/* externals in dasd_ioctl.c */
int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
/* externals in dasd_proc.c */
int dasd_proc_init(void);
void dasd_proc_exit(void);
/* externals in dasd_erp.c */
struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
struct dasd_device *);
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
/* externals in dasd_3990_erp.c */
struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
/* externals in dasd_eer.c */
#ifdef CONFIG_DASD_EER
int dasd_eer_init(void);
void dasd_eer_exit(void);
int dasd_eer_enable(struct dasd_device *);
void dasd_eer_disable(struct dasd_device *);
void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
unsigned int id);
void dasd_eer_snss(struct dasd_device *);
static inline int dasd_eer_enabled(struct dasd_device *device)
{
return device->eer_cqr != NULL;
}
#else
#define dasd_eer_init() (0)
#define dasd_eer_exit() do { } while (0)
#define dasd_eer_enable(d) (0)
#define dasd_eer_disable(d) do { } while (0)
#define dasd_eer_write(d,c,i) do { } while (0)
#define dasd_eer_snss(d) do { } while (0)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_EER */
#endif /* DASD_INT_H */

579
drivers/s390/block/dasd_ioctl.c Normal file

@ -0,0 +1,579 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2001
*
* i/o controls for the dasd driver.
*/
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/schid.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_ioctl:"
#include "dasd_int.h"
static int
dasd_ioctl_api_version(void __user *argp)
{
int ver = DASD_API_VERSION;
return put_user(ver, (int __user *)argp);
}
/*
* Enable device.
* Used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection.
*/
static int
dasd_ioctl_enable(struct block_device *bdev)
{
struct dasd_device *base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
dasd_enable_device(base);
/* Formatting the dasd device can change the capacity. */
mutex_lock(&bdev->bd_mutex);
i_size_write(bdev->bd_inode,
(loff_t)get_capacity(base->block->gdp) << 9);
mutex_unlock(&bdev->bd_mutex);
dasd_put_device(base);
return 0;
}
/*
* Disable device.
* Used by dasdfmt. Disable I/O operations but allow ioctls.
*/
static int
dasd_ioctl_disable(struct block_device *bdev)
{
struct dasd_device *base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
/*
* This is not a real disable; we only downgrade the device to
* DASD_STATE_BASIC. The reason is that dasdfmt uses BIODASDDISABLE
* to disable accesses to the device via the block device layer but
* it still wants to do i/o on the device by using the BIODASDFMT
* ioctl. Therefore the correct state for the device is
* DASD_STATE_BASIC, which still allows basic i/o.
*/
dasd_set_target_state(base, DASD_STATE_BASIC);
/*
* Set i_size to zero, since read, write, etc. check against this
* value.
*/
mutex_lock(&bdev->bd_mutex);
i_size_write(bdev->bd_inode, 0);
mutex_unlock(&bdev->bd_mutex);
dasd_put_device(base);
return 0;
}
/*
* Quiesce device.
*/
static int dasd_ioctl_quiesce(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
pr_info("%s: The DASD has been put in the quiesce "
"state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
/*
* Resume device.
*/
static int dasd_ioctl_resume(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
pr_info("%s: I/O operations have been resumed "
"on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
return 0;
}
/*
* Abort all failfast I/O on a device.
*/
static int dasd_ioctl_abortio(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
struct dasd_ccw_req *cqr, *n;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags))
return 0;
DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set");
spin_lock_irqsave(&block->request_queue_lock, flags);
spin_lock(&block->queue_lock);
list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
cqr->callback_data &&
cqr->callback_data != DASD_SLEEPON_START_TAG &&
cqr->callback_data != DASD_SLEEPON_END_TAG) {
spin_unlock(&block->queue_lock);
blk_abort_request(cqr->callback_data);
spin_lock(&block->queue_lock);
}
}
spin_unlock(&block->queue_lock);
spin_unlock_irqrestore(&block->request_queue_lock, flags);
dasd_schedule_block_bh(block);
return 0;
}
/*
* Allow I/O on a device
*/
static int dasd_ioctl_allowio(struct dasd_block *block)
{
struct dasd_device *base;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags))
DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset");
return 0;
}
/*
* Performs formatting of _device_ according to _fdata_.
* Note: The discipline's format_device function is assumed to deliver
* formatting commands to format multiple units of the device. For ECKD
* devices this means CCWs are generated to format multiple tracks.
*/
static int
dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
struct dasd_device *base;
int enable_pav = 1;
int rc, retries;
int start, stop;
base = block->base;
if (base->discipline->format_device == NULL)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
dev_name(&base->cdev->dev));
return -EBUSY;
}
DBF_DEV_EVENT(DBF_NOTICE, base,
"formatting units %u to %u (%u B blocks) flags %u",
fdata->start_unit,
fdata->stop_unit, fdata->blksize, fdata->intensity);
/* Since dasdfmt keeps the device open after it was disabled,
* there still exists an inode for this device.
* We must update i_blkbits, otherwise we might get errors when
* enabling the device later.
*/
if (fdata->start_unit == 0) {
struct block_device *bdev = bdget_disk(block->gdp, 0);
bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
bdput(bdev);
}
retries = 255;
/* backup start- and endtrack for retries */
start = fdata->start_unit;
stop = fdata->stop_unit;
do {
rc = base->discipline->format_device(base, fdata, enable_pav);
if (rc) {
if (rc == -EAGAIN) {
retries--;
/* disable PAV in case of errors */
enable_pav = 0;
fdata->start_unit = start;
fdata->stop_unit = stop;
} else
return rc;
} else
/* success */
break;
} while (retries);
if (!retries)
return -EIO;
else
return 0;
}
/*
* Format device.
*/
static int
dasd_ioctl_format(struct block_device *bdev, void __user *argp)
{
struct dasd_device *base;
struct format_data_t fdata;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (base->features & DASD_FEATURE_READONLY ||
test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
dasd_put_device(base);
return -EROFS;
}
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
dasd_put_device(base);
return -EFAULT;
}
if (bdev != bdev->bd_contains) {
pr_warning("%s: The specified DASD is a partition and cannot "
"be formatted\n",
dev_name(&base->cdev->dev));
dasd_put_device(base);
return -EINVAL;
}
rc = dasd_format(base->block, &fdata);
dasd_put_device(base);
return rc;
}
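/*
 * Illustrative user-space sketch (not part of this commit): invoking the
 * BIODASDFMT path handled by dasd_ioctl_format() above. dasdfmt(8) is
 * the real user of this interface; the device path and track range are
 * made-up example values, and the device must have been disabled with
 * BIODASDDISABLE beforehand.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <asm/dasd.h>
 *
 *	static int example_format(const char *dev)
 *	{
 *		format_data_t fdata = {
 *			.start_unit = 0,	// first track
 *			.stop_unit  = 99,	// last track to format
 *			.blksize    = 4096,	// must pass dasd_check_blocksize()
 *			.intensity  = 0,	// plain formatting, no special flags
 *		};
 *		int rc, fd = open(dev, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		rc = ioctl(fd, BIODASDFMT, &fdata);
 *		close(fd);
 *		return rc;
 *	}
 */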
#ifdef CONFIG_DASD_PROFILE
/*
* Reset device profile information
*/
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
dasd_profile_reset(&block->profile);
return 0;
}
/*
* Return device profile information
*/
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
struct dasd_profile_info_t *data;
int rc = 0;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_bh(&block->profile.lock);
if (block->profile.data) {
data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
data->dasd_io_sects = block->profile.data->dasd_io_sects;
memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
sizeof(data->dasd_io_secs));
memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
sizeof(data->dasd_io_times));
memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
sizeof(data->dasd_io_timps));
memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
sizeof(data->dasd_io_time1));
memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
sizeof(data->dasd_io_time2));
memcpy(data->dasd_io_time2ps,
block->profile.data->dasd_io_time2ps,
sizeof(data->dasd_io_time2ps));
memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
sizeof(data->dasd_io_time3));
memcpy(data->dasd_io_nr_req,
block->profile.data->dasd_io_nr_req,
sizeof(data->dasd_io_nr_req));
spin_unlock_bh(&block->profile.lock);
} else {
spin_unlock_bh(&block->profile.lock);
rc = -EIO;
goto out;
}
if (copy_to_user(argp, data, sizeof(*data)))
rc = -EFAULT;
out:
kfree(data);
return rc;
}
#else
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
return -ENOTTY;
}
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
return -ENOTTY;
}
#endif
/*
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
*/
static int dasd_ioctl_information(struct dasd_block *block,
unsigned int cmd, void __user *argp)
{
struct dasd_information2_t *dasd_info;
struct subchannel_id sch_id;
struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
unsigned long flags;
int rc;
base = block->base;
if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
if (dasd_info == NULL)
return -ENOMEM;
rc = base->discipline->fill_info(base, dasd_info);
if (rc) {
kfree(dasd_info);
return rc;
}
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
ccw_device_get_schid(cdev, &sch_id);
dasd_info->devno = dev_id.devno;
dasd_info->schid = sch_id.sch_no;
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
dasd_info->dev_type = cdev->id.dev_type;
dasd_info->dev_model = cdev->id.dev_model;
dasd_info->status = base->state;
/*
* The open_count is increased for every opener, that includes
* the blkdev_get in dasd_scan_partitions.
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
if (!block->bdev)
dasd_info->open_count++;
/*
* check if device is really formatted
* LDL / CDL was returned by 'fill_info'
*/
if ((base->state < DASD_STATE_READY) ||
(dasd_check_blocksize(block->bp_block)))
dasd_info->format = DASD_FORMAT_NONE;
dasd_info->features |=
((base->features & DASD_FEATURE_READONLY) != 0);
memcpy(dasd_info->type, base->discipline->name, 4);
if (block->request_queue->request_fn) {
struct list_head *l;
#ifdef DASD_EXTENDED_PROFILING
{
struct list_head *l;
spin_lock_irqsave(&block->lock, flags);
list_for_each(l, &block->request_queue->queue_head)
dasd_info->req_queue_len++;
spin_unlock_irqrestore(&block->lock, flags);
}
#endif /* DASD_EXTENDED_PROFILING */
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
flags);
}
rc = 0;
if (copy_to_user(argp, dasd_info,
((cmd == (unsigned int) BIODASDINFO2) ?
sizeof(struct dasd_information2_t) :
sizeof(struct dasd_information_t))))
rc = -EFAULT;
kfree(dasd_info);
return rc;
}
/*
* Set read only
*/
static int
dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
{
struct dasd_device *base;
int intval, rc;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (bdev != bdev->bd_contains)
/* ro setting is not allowed for partitions */
return -EINVAL;
if (get_user(intval, (int __user *)argp))
return -EFAULT;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
dasd_put_device(base);
return -EROFS;
}
set_disk_ro(bdev->bd_disk, intval);
rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
dasd_put_device(base);
return rc;
}
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
struct cmbdata __user *argp)
{
size_t size = _IOC_SIZE(cmd);
struct cmbdata data;
int ret;
ret = cmf_readall(block->base->cdev, &data);
if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
return -EFAULT;
return ret;
}
int dasd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block;
struct dasd_device *base;
void __user *argp;
int rc;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (void __user *)arg;
if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
PRINT_DEBUG("empty data ptr");
return -EINVAL;
}
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
block = base->block;
rc = 0;
switch (cmd) {
case BIODASDDISABLE:
rc = dasd_ioctl_disable(bdev);
break;
case BIODASDENABLE:
rc = dasd_ioctl_enable(bdev);
break;
case BIODASDQUIESCE:
rc = dasd_ioctl_quiesce(block);
break;
case BIODASDRESUME:
rc = dasd_ioctl_resume(block);
break;
case BIODASDABORTIO:
rc = dasd_ioctl_abortio(block);
break;
case BIODASDALLOWIO:
rc = dasd_ioctl_allowio(block);
break;
case BIODASDFMT:
rc = dasd_ioctl_format(bdev, argp);
break;
case BIODASDINFO:
rc = dasd_ioctl_information(block, cmd, argp);
break;
case BIODASDINFO2:
rc = dasd_ioctl_information(block, cmd, argp);
break;
case BIODASDPRRD:
rc = dasd_ioctl_read_profile(block, argp);
break;
case BIODASDPRRST:
rc = dasd_ioctl_reset_profile(block);
break;
case BLKROSET:
rc = dasd_ioctl_set_ro(bdev, argp);
break;
case DASDAPIVER:
rc = dasd_ioctl_api_version(argp);
break;
case BIODASDCMFENABLE:
rc = enable_cmf(base->cdev);
break;
case BIODASDCMFDISABLE:
rc = disable_cmf(base->cdev);
break;
case BIODASDREADALLCMB:
rc = dasd_ioctl_readall_cmb(block, cmd, argp);
break;
default:
/* if the discipline has an ioctl method try it. */
rc = -ENOTTY;
if (base->discipline->ioctl)
rc = base->discipline->ioctl(block, cmd, argp);
}
dasd_put_device(base);
return rc;
}
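/*
 * Illustrative user-space sketch (not part of this commit): exercising
 * the DASDAPIVER ioctl dispatched above. The device path is an example.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <asm/dasd.h>
 *
 *	int main(void)
 *	{
 *		int ver, fd = open("/dev/dasda", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, DASDAPIVER, &ver) == 0)
 *			printf("DASD API version %d\n", ver);
 *		close(fd);
 *		return 0;
 *	}
 */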

376
drivers/s390/block/dasd_proc.c Normal file

@ -0,0 +1,376 @@
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2002
*
* /proc interface for the dasd driver.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_proc:"
#include "dasd_int.h"
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
static struct proc_dir_entry *dasd_devices_entry = NULL;
static struct proc_dir_entry *dasd_statistics_entry = NULL;
static int
dasd_devices_show(struct seq_file *m, void *v)
{
struct dasd_device *device;
struct dasd_block *block;
char *substr;
device = dasd_device_from_devindex((unsigned long) v - 1);
if (IS_ERR(device))
return 0;
if (device->block)
block = device->block;
else {
dasd_put_device(device);
return 0;
}
/* Print device number. */
seq_printf(m, "%s", dev_name(&device->cdev->dev));
/* Print discipline string. */
if (device->discipline != NULL)
seq_printf(m, "(%s)", device->discipline->name);
else
seq_printf(m, "(none)");
/* Print kdev. */
if (block->gdp)
seq_printf(m, " at (%3d:%6d)",
MAJOR(disk_devt(block->gdp)),
MINOR(disk_devt(block->gdp)));
else
seq_printf(m, " at (???:??????)");
/* Print device name. */
if (block->gdp)
seq_printf(m, " is %-8s", block->gdp->disk_name);
else
seq_printf(m, " is ????????");
/* Print devices features. */
substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
seq_printf(m, "%4s: ", substr);
/* Print device status information. */
switch (device->state) {
case DASD_STATE_NEW:
seq_printf(m, "new");
break;
case DASD_STATE_KNOWN:
seq_printf(m, "detected");
break;
case DASD_STATE_BASIC:
seq_printf(m, "basic");
break;
case DASD_STATE_UNFMT:
seq_printf(m, "unformatted");
break;
case DASD_STATE_READY:
case DASD_STATE_ONLINE:
seq_printf(m, "active ");
if (dasd_check_blocksize(block->bp_block))
seq_printf(m, "n/f ");
else
seq_printf(m,
"at blocksize: %d, %lld blocks, %lld MB",
block->bp_block, block->blocks,
((block->bp_block >> 9) *
block->blocks) >> 11);
break;
default:
seq_printf(m, "no stat");
break;
}
dasd_put_device(device);
if (dasd_probeonly)
seq_printf(m, "(probeonly)");
seq_printf(m, "\n");
return 0;
}
static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= dasd_max_devindex)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return dasd_devices_start(m, pos);
}
static void dasd_devices_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations dasd_devices_seq_ops = {
.start = dasd_devices_start,
.next = dasd_devices_next,
.stop = dasd_devices_stop,
.show = dasd_devices_show,
};
static int dasd_devices_open(struct inode *inode, struct file *file)
{
return seq_open(file, &dasd_devices_seq_ops);
}
static const struct file_operations dasd_devices_file_ops = {
.owner = THIS_MODULE,
.open = dasd_devices_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#ifdef CONFIG_DASD_PROFILE
static int dasd_stats_all_block_on(void)
{
int i, rc;
struct dasd_device *device;
rc = 0;
for (i = 0; i < dasd_max_devindex; ++i) {
device = dasd_device_from_devindex(i);
if (IS_ERR(device))
continue;
if (device->block)
rc = dasd_profile_on(&device->block->profile);
dasd_put_device(device);
if (rc)
return rc;
}
return 0;
}
static void dasd_stats_all_block_off(void)
{
int i;
struct dasd_device *device;
for (i = 0; i < dasd_max_devindex; ++i) {
device = dasd_device_from_devindex(i);
if (IS_ERR(device))
continue;
if (device->block)
dasd_profile_off(&device->block->profile);
dasd_put_device(device);
}
}
static void dasd_stats_all_block_reset(void)
{
int i;
struct dasd_device *device;
for (i = 0; i < dasd_max_devindex; ++i) {
device = dasd_device_from_devindex(i);
if (IS_ERR(device))
continue;
if (device->block)
dasd_profile_reset(&device->block->profile);
dasd_put_device(device);
}
}
static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
{
int i;
for (i = 0; i < 32; i++) {
seq_printf(m, "%7d ", array[i] / factor);
if (i == 15)
seq_putc(m, '\n');
}
seq_putc(m, '\n');
}
#endif /* CONFIG_DASD_PROFILE */
static int dasd_stats_proc_show(struct seq_file *m, void *v)
{
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info *prof;
int factor;
/* check for active profiling */
if (!dasd_global_profile_level) {
seq_printf(m, "Statistics are off - they might be "
"switched on using 'echo set on > "
"/proc/dasd/statistics'\n");
return 0;
}
prof = &dasd_global_profile_data;
/* prevent counter 'overflow' on output */
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
seq_printf(m, "with %u sectors(512B each)\n",
prof->dasd_io_sects);
seq_printf(m, "Scale Factor is %d\n", factor);
seq_printf(m,
" __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k "
" _16k _32k _64k 128k\n");
seq_printf(m,
" _256 _512 __1M __2M __4M __8M "
" _16M _32M _64M 128M 256M 512M "
" __1G __2G __4G " " _>4G\n");
seq_printf(m, "Histogram of sizes (512B secs)\n");
dasd_statistics_array(m, prof->dasd_io_secs, factor);
seq_printf(m, "Histogram of I/O times (microseconds)\n");
dasd_statistics_array(m, prof->dasd_io_times, factor);
seq_printf(m, "Histogram of I/O times per sector\n");
dasd_statistics_array(m, prof->dasd_io_timps, factor);
seq_printf(m, "Histogram of I/O time till ssch\n");
dasd_statistics_array(m, prof->dasd_io_time1, factor);
seq_printf(m, "Histogram of I/O time between ssch and irq\n");
dasd_statistics_array(m, prof->dasd_io_time2, factor);
seq_printf(m, "Histogram of I/O time between ssch "
"and irq per sector\n");
dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
seq_printf(m, "Histogram of I/O time between irq and end\n");
dasd_statistics_array(m, prof->dasd_io_time3, factor);
seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
#else
seq_printf(m, "Statistics are not activated in this kernel\n");
#endif
return 0;
}
static int dasd_stats_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, dasd_stats_proc_show, NULL);
}
static ssize_t dasd_stats_proc_write(struct file *file,
const char __user *user_buf, size_t user_len, loff_t *pos)
{
#ifdef CONFIG_DASD_PROFILE
char *buffer, *str;
int rc;
if (user_len > 65536)
user_len = 65536;
buffer = dasd_get_user_string(user_buf, user_len);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
/* check for valid verbs */
str = skip_spaces(buffer);
if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
/* 'set xxx' was given */
str = skip_spaces(str + 4);
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
rc = dasd_stats_all_block_on();
if (rc) {
dasd_stats_all_block_off();
goto out_error;
}
dasd_global_profile_reset();
dasd_global_profile_level = DASD_PROFILE_ON;
pr_info("The statistics feature has been switched "
"on\n");
} else if (strcmp(str, "off") == 0) {
/* switch off and reset statistics profiling */
dasd_global_profile_level = DASD_PROFILE_OFF;
dasd_global_profile_reset();
dasd_stats_all_block_off();
pr_info("The statistics feature has been switched "
"off\n");
} else
goto out_parse_error;
} else if (strncmp(str, "reset", 5) == 0) {
/* reset the statistics */
dasd_global_profile_reset();
dasd_stats_all_block_reset();
pr_info("The statistics have been reset\n");
} else
goto out_parse_error;
vfree(buffer);
return user_len;
out_parse_error:
rc = -EINVAL;
pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
str);
out_error:
vfree(buffer);
return rc;
#else
pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
return user_len;
#endif /* CONFIG_DASD_PROFILE */
}
static const struct file_operations dasd_stats_proc_fops = {
.owner = THIS_MODULE,
.open = dasd_stats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = dasd_stats_proc_write,
};
/*
* Create dasd proc-fs entries.
* In case creation failed, cleanup and return -ENOENT.
*/
int
dasd_proc_init(void)
{
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
if (!dasd_proc_root_entry)
goto out_nodasd;
dasd_devices_entry = proc_create("devices",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
&dasd_devices_file_ops);
if (!dasd_devices_entry)
goto out_nodevices;
dasd_statistics_entry = proc_create("statistics",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
&dasd_stats_proc_fops);
if (!dasd_statistics_entry)
goto out_nostatistics;
return 0;
out_nostatistics:
remove_proc_entry("devices", dasd_proc_root_entry);
out_nodevices:
remove_proc_entry("dasd", NULL);
out_nodasd:
return -ENOENT;
}
void
dasd_proc_exit(void)
{
remove_proc_entry("devices", dasd_proc_root_entry);
remove_proc_entry("statistics", dasd_proc_root_entry);
remove_proc_entry("dasd", NULL);
}
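/*
 * Illustrative user-space sketch (not part of this commit): switching the
 * global statistics on by writing the "set on" verb that
 * dasd_stats_proc_write() above parses ("set off" and "reset" work the
 * same way).
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int example_enable_dasd_statistics(void)
 *	{
 *		int fd = open("/proc/dasd/statistics", O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, "set on", 6);
 *		close(fd);
 *		return n == 6 ? 0 : -1;
 *	}
 */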

1084
drivers/s390/block/dcssblk.c Normal file

File diff suppressed because it is too large

494
drivers/s390/block/scm_blk.c Normal file

@ -0,0 +1,494 @@
/*
* Block driver for s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"
debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
static void __scm_free_rq(struct scm_request *scmrq)
{
struct aob_rq_header *aobrq = to_aobrq(scmrq);
free_page((unsigned long) scmrq->aob);
free_page((unsigned long) scmrq->aidaw);
__scm_free_rq_cluster(scmrq);
kfree(aobrq);
}
static void scm_free_rqs(void)
{
struct list_head *iter, *safe;
struct scm_request *scmrq;
spin_lock_irq(&list_lock);
list_for_each_safe(iter, safe, &inactive_requests) {
scmrq = list_entry(iter, struct scm_request, list);
list_del(&scmrq->list);
__scm_free_rq(scmrq);
}
spin_unlock_irq(&list_lock);
}
static int __scm_alloc_rq(void)
{
struct aob_rq_header *aobrq;
struct scm_request *scmrq;
aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
if (!aobrq)
return -ENOMEM;
scmrq = (void *) aobrq->data;
scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
if (!scmrq->aob || !scmrq->aidaw) {
__scm_free_rq(scmrq);
return -ENOMEM;
}
if (__scm_alloc_rq_cluster(scmrq)) {
__scm_free_rq(scmrq);
return -ENOMEM;
}
INIT_LIST_HEAD(&scmrq->list);
spin_lock_irq(&list_lock);
list_add(&scmrq->list, &inactive_requests);
spin_unlock_irq(&list_lock);
return 0;
}
static int scm_alloc_rqs(unsigned int nrqs)
{
int ret = 0;
while (nrqs-- && !ret)
ret = __scm_alloc_rq();
return ret;
}
static struct scm_request *scm_request_fetch(void)
{
struct scm_request *scmrq = NULL;
spin_lock(&list_lock);
if (list_empty(&inactive_requests))
goto out;
scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
list_del(&scmrq->list);
out:
spin_unlock(&list_lock);
return scmrq;
}
static void scm_request_done(struct scm_request *scmrq)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
list_add(&scmrq->list, &inactive_requests);
spin_unlock_irqrestore(&list_lock, flags);
}
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}
static void scm_request_prepare(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_device *scmdev = bdev->gendisk->private_data;
struct aidaw *aidaw = scmrq->aidaw;
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
struct bio_vec bv;
msb->bs = MSB_BS_4K;
scmrq->aob->request.msb_count = 1;
msb->scm_addr = scmdev->address +
((u64) blk_rq_pos(scmrq->request) << 9);
msb->oc = (rq_data_dir(scmrq->request) == READ) ?
MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
msb->data_addr = (u64) aidaw;
rq_for_each_segment(bv, scmrq->request, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
}
}
static inline void scm_request_init(struct scm_blk_dev *bdev,
struct scm_request *scmrq,
struct request *req)
{
struct aob_rq_header *aobrq = to_aobrq(scmrq);
struct aob *aob = scmrq->aob;
memset(aob, 0, sizeof(*aob));
memset(scmrq->aidaw, 0, PAGE_SIZE);
aobrq->scmdev = bdev->scmdev;
aob->request.cmd_code = ARQB_CMD_MOVE;
aob->request.data = (u64) aobrq;
scmrq->request = req;
scmrq->bdev = bdev;
scmrq->retries = 4;
scmrq->error = 0;
scm_request_cluster_init(scmrq);
}
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
if (atomic_read(&bdev->queued_reqs)) {
/* Queue restart is triggered by the next interrupt. */
return;
}
blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}
void scm_request_requeue(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
scm_release_cluster(scmrq);
blk_requeue_request(bdev->rq, scmrq->request);
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
scm_ensure_queue_restart(bdev);
}
void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
scm_release_cluster(scmrq);
blk_end_request_all(scmrq->request, scmrq->error);
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
}
static void scm_blk_request(struct request_queue *rq)
{
struct scm_device *scmdev = rq->queuedata;
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
struct scm_request *scmrq;
struct request *req;
int ret;
while ((req = blk_peek_request(rq))) {
if (req->cmd_type != REQ_TYPE_FS) {
blk_start_request(req);
blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
blk_end_request_all(req, -EIO);
continue;
}
if (!scm_permit_request(bdev, req)) {
scm_ensure_queue_restart(bdev);
return;
}
scmrq = scm_request_fetch();
if (!scmrq) {
SCM_LOG(5, "no request");
scm_ensure_queue_restart(bdev);
return;
}
scm_request_init(bdev, scmrq, req);
if (!scm_reserve_cluster(scmrq)) {
SCM_LOG(5, "cluster busy");
scm_request_done(scmrq);
return;
}
if (scm_need_cluster_request(scmrq)) {
atomic_inc(&bdev->queued_reqs);
blk_start_request(req);
scm_initiate_cluster_request(scmrq);
return;
}
scm_request_prepare(scmrq);
atomic_inc(&bdev->queued_reqs);
blk_start_request(req);
ret = eadm_start_aob(scmrq->aob);
if (ret) {
SCM_LOG(5, "no subchannel");
scm_request_requeue(scmrq);
return;
}
}
}
static void __scmrq_log_error(struct scm_request *scmrq)
{
struct aob *aob = scmrq->aob;
if (scmrq->error == -ETIMEDOUT)
SCM_LOG(1, "Request timeout");
else {
SCM_LOG(1, "Request error");
SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
}
if (scmrq->retries)
SCM_LOG(1, "Retry request");
else
pr_err("An I/O operation to SCM failed with rc=%d\n",
scmrq->error);
}
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
struct scm_request *scmrq = data;
struct scm_blk_dev *bdev = scmrq->bdev;
scmrq->error = error;
if (error)
__scmrq_log_error(scmrq);
spin_lock(&bdev->lock);
list_add_tail(&scmrq->list, &bdev->finished_requests);
spin_unlock(&bdev->lock);
tasklet_hi_schedule(&bdev->tasklet);
}
static void scm_blk_handle_error(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
unsigned long flags;
if (scmrq->error != -EIO)
goto restart;
/* For -EIO the response block is valid. */
switch (scmrq->aob->response.eqc) {
case EQC_WR_PROHIBIT:
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state != SCM_WR_PROHIBIT)
pr_info("%lx: Write access to the SCM increment is suspended\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_WR_PROHIBIT;
spin_unlock_irqrestore(&bdev->lock, flags);
goto requeue;
default:
break;
}
restart:
if (!eadm_start_aob(scmrq->aob))
return;
requeue:
spin_lock_irqsave(&bdev->rq_lock, flags);
scm_request_requeue(scmrq);
spin_unlock_irqrestore(&bdev->rq_lock, flags);
}
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
struct scm_request *scmrq;
unsigned long flags;
spin_lock_irqsave(&bdev->lock, flags);
while (!list_empty(&bdev->finished_requests)) {
scmrq = list_first_entry(&bdev->finished_requests,
struct scm_request, list);
list_del(&scmrq->list);
spin_unlock_irqrestore(&bdev->lock, flags);
if (scmrq->error && scmrq->retries-- > 0) {
scm_blk_handle_error(scmrq);
/* Request restarted or requeued, handle next. */
spin_lock_irqsave(&bdev->lock, flags);
continue;
}
if (scm_test_cluster_request(scmrq)) {
scm_cluster_request_irq(scmrq);
spin_lock_irqsave(&bdev->lock, flags);
continue;
}
scm_request_finish(scmrq);
spin_lock_irqsave(&bdev->lock, flags);
}
spin_unlock_irqrestore(&bdev->lock, flags);
/* Look out for more requests. */
blk_run_queue(bdev->rq);
}
static const struct block_device_operations scm_blk_devops = {
.owner = THIS_MODULE,
};
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
struct request_queue *rq;
int len, ret = -ENOMEM;
unsigned int devindex, nr_max_blk;
devindex = atomic_inc_return(&nr_devices) - 1;
/* scma..scmz + scmaa..scmzz */
if (devindex > 701) {
ret = -ENODEV;
goto out;
}
bdev->scmdev = scmdev;
bdev->state = SCM_OPER;
spin_lock_init(&bdev->rq_lock);
spin_lock_init(&bdev->lock);
INIT_LIST_HEAD(&bdev->finished_requests);
atomic_set(&bdev->queued_reqs, 0);
tasklet_init(&bdev->tasklet,
(void (*)(unsigned long)) scm_blk_tasklet,
(unsigned long) bdev);
rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
if (!rq)
goto out;
bdev->rq = rq;
nr_max_blk = min(scmdev->nr_max_block,
(unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
blk_queue_logical_block_size(rq, 1 << 12);
blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
blk_queue_max_segments(rq, nr_max_blk);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
scm_blk_dev_cluster_setup(bdev);
bdev->gendisk = alloc_disk(SCM_NR_PARTS);
if (!bdev->gendisk)
goto out_queue;
rq->queuedata = scmdev;
bdev->gendisk->driverfs_dev = &scmdev->dev;
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
bdev->gendisk->queue = rq;
bdev->gendisk->major = scm_major;
bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
if (devindex > 25) {
len += snprintf(bdev->gendisk->disk_name + len,
DISK_NAME_LEN - len, "%c",
'a' + (devindex / 26) - 1);
devindex = devindex % 26;
}
snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
'a' + devindex);
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
add_disk(bdev->gendisk);
return 0;
out_queue:
blk_cleanup_queue(rq);
out:
atomic_dec(&nr_devices);
return ret;
}
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
tasklet_kill(&bdev->tasklet);
del_gendisk(bdev->gendisk);
blk_cleanup_queue(bdev->gendisk->queue);
put_disk(bdev->gendisk);
}
void scm_blk_set_available(struct scm_blk_dev *bdev)
{
unsigned long flags;
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state == SCM_WR_PROHIBIT)
pr_info("%lx: Write access to the SCM increment is restored\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_OPER;
spin_unlock_irqrestore(&bdev->lock, flags);
}
static int __init scm_blk_init(void)
{
int ret = -EINVAL;
if (!scm_cluster_size_valid())
goto out;
ret = register_blkdev(0, "scm");
if (ret < 0)
goto out;
scm_major = ret;
ret = scm_alloc_rqs(nr_requests);
if (ret)
goto out_free;
scm_debug = debug_register("scm_log", 16, 1, 16);
if (!scm_debug) {
ret = -ENOMEM;
goto out_free;
}
debug_register_view(scm_debug, &debug_hex_ascii_view);
debug_set_level(scm_debug, 2);
ret = scm_drv_init();
if (ret)
goto out_dbf;
return ret;
out_dbf:
debug_unregister(scm_debug);
out_free:
scm_free_rqs();
unregister_blkdev(scm_major, "scm");
out:
return ret;
}
module_init(scm_blk_init);
static void __exit scm_blk_cleanup(void)
{
scm_drv_cleanup();
debug_unregister(scm_debug);
scm_free_rqs();
unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);
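The disk-name logic in scm_blk_dev_setup() above encodes the device index in base 26, sd-style: indices 0-25 give scma..scmz, larger ones get a two-letter suffix up to scmzz, which is why index 701 is the last one accepted. A standalone sketch of the same arithmetic, written as a hypothetical userspace helper rather than driver code:

#include <stdio.h>

/* Hypothetical helper mirroring the scm_blk_dev_setup() naming arithmetic. */
static void scm_name(unsigned int devindex, char *buf, int len)
{
	int n = snprintf(buf, len, "scm");

	if (devindex > 25) {
		n += snprintf(buf + n, len - n, "%c",
			      'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(buf + n, len - n, "%c", 'a' + devindex);
}

int main(void)
{
	static const unsigned int idx[] = { 0, 25, 26, 701 };
	char name[8];
	int i;

	for (i = 0; i < 4; i++) {
		scm_name(idx[i], name, sizeof(name));
		printf("%u -> %s\n", idx[i], name);
	}
	return 0;	/* prints scma, scmz, scmaa, scmzz */
}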

drivers/s390/block/scm_blk.h Normal file
View file

@ -0,0 +1,134 @@
#ifndef SCM_BLK_H
#define SCM_BLK_H
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <asm/debug.h>
#include <asm/eadm.h>
#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5
struct scm_blk_dev {
struct tasklet_struct tasklet;
struct request_queue *rq;
struct gendisk *gendisk;
struct scm_device *scmdev;
spinlock_t rq_lock; /* guard the request queue */
spinlock_t lock; /* guard the rest of the blockdev */
atomic_t queued_reqs;
enum {SCM_OPER, SCM_WR_PROHIBIT} state;
struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
struct list_head cluster_list;
#endif
};
struct scm_request {
struct scm_blk_dev *bdev;
struct request *request;
struct aidaw *aidaw;
struct aob *aob;
struct list_head list;
u8 retries;
int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
struct {
enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
struct list_head list;
void **buf;
} cluster;
#endif
};
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);
void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);
int scm_drv_init(void);
void scm_drv_cleanup(void);
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
return false;
}
static inline bool scm_cluster_size_valid(void)
{
return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
extern debug_info_t *scm_debug;
#define SCM_LOG(imp, txt) do { \
debug_text_event(scm_debug, imp, txt); \
} while (0)
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
if (!debug_level_enabled(scm_debug, level))
return;
while (length > 0) {
debug_event(scm_debug, level, data, length);
length -= scm_debug->buf_size;
data += scm_debug->buf_size;
}
}
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
struct {
u64 address;
u8 oper_state;
u8 rank;
} __packed data = {
.address = scmdev->address,
.oper_state = scmdev->attrs.oper_state,
.rank = scmdev->attrs.rank,
};
SCM_LOG_HEX(level, &data, sizeof(data));
}
#endif /* SCM_BLK_H */
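The #else branch above is the usual kernel stubbing idiom: when CONFIG_SCM_BLOCK_CLUSTER_WRITE is off, each cluster function collapses to a static inline no-op whose return value keeps callers on the non-cluster path (reserve always succeeds, need/test always say no), so no caller needs an #ifdef of its own. A minimal sketch of the same idiom for a hypothetical feature:

/* feature.h -- sketch of the compile-time stub idiom (hypothetical names). */
#include <linux/types.h>

struct my_dev;
struct my_req;

#ifdef CONFIG_MY_FEATURE
int my_feature_setup(struct my_dev *dev);	/* real version lives in feature.c */
bool my_feature_applies(struct my_req *req);
#else
static inline int my_feature_setup(struct my_dev *dev)
{
	return 0;		/* report success so callers carry on */
}
static inline bool my_feature_applies(struct my_req *req)
{
	return false;		/* steer callers onto the generic path */
}
#endif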

drivers/s390/block/scm_blk_cluster.c Normal file
View file

@ -0,0 +1,230 @@
/*
* Block driver for s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"
static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
"Number of pages used for contiguous writes.");
#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
int i;
if (!scmrq->cluster.buf)
return;
for (i = 0; i < 2 * write_cluster_size; i++)
free_page((unsigned long) scmrq->cluster.buf[i]);
kfree(scmrq->cluster.buf);
}
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
int i;
scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
GFP_KERNEL);
if (!scmrq->cluster.buf)
return -ENOMEM;
for (i = 0; i < 2 * write_cluster_size; i++) {
scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
if (!scmrq->cluster.buf[i])
return -ENOMEM;
}
INIT_LIST_HEAD(&scmrq->cluster.list);
return 0;
}
void scm_request_cluster_init(struct scm_request *scmrq)
{
scmrq->cluster.state = CLUSTER_NONE;
}
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
{
unsigned long firstA, lastA, firstB, lastB;
firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
lastA = (((u64) blk_rq_pos(A->request) << 9) +
blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
lastB = (((u64) blk_rq_pos(B->request) << 9) +
blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
return (firstB <= lastA && firstA <= lastB);
}
bool scm_reserve_cluster(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_request *iter;
if (write_cluster_size == 0)
return true;
spin_lock(&bdev->lock);
list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
if (clusters_intersect(scmrq, iter) &&
(rq_data_dir(scmrq->request) == WRITE ||
rq_data_dir(iter->request) == WRITE)) {
spin_unlock(&bdev->lock);
return false;
}
}
list_add(&scmrq->cluster.list, &bdev->cluster_list);
spin_unlock(&bdev->lock);
return true;
}
void scm_release_cluster(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
unsigned long flags;
if (write_cluster_size == 0)
return;
spin_lock_irqsave(&bdev->lock, flags);
list_del(&scmrq->cluster.list);
spin_unlock_irqrestore(&bdev->lock, flags);
}
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
INIT_LIST_HEAD(&bdev->cluster_list);
blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_device *scmdev = bdev->gendisk->private_data;
struct request *req = scmrq->request;
struct aidaw *aidaw = scmrq->aidaw;
struct msb *msb = &scmrq->aob->msb[0];
struct req_iterator iter;
struct bio_vec bv;
int i = 0;
u64 addr;
switch (scmrq->cluster.state) {
case CLUSTER_NONE:
scmrq->cluster.state = CLUSTER_READ;
/* fall through */
case CLUSTER_READ:
scmrq->aob->request.msb_count = 1;
msb->bs = MSB_BS_4K;
msb->oc = MSB_OC_READ;
msb->flags = MSB_FLAG_IDA;
msb->data_addr = (u64) aidaw;
msb->blk_count = write_cluster_size;
addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->scm_addr = round_down(addr, CLUSTER_SIZE);
if (msb->scm_addr !=
round_down(addr + (u64) blk_rq_bytes(req) - 1,
CLUSTER_SIZE))
msb->blk_count = 2 * write_cluster_size;
for (i = 0; i < msb->blk_count; i++) {
aidaw->data_addr = (u64) scmrq->cluster.buf[i];
aidaw++;
}
break;
case CLUSTER_WRITE:
msb->oc = MSB_OC_WRITE;
for (addr = msb->scm_addr;
addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
addr += PAGE_SIZE) {
aidaw->data_addr = (u64) scmrq->cluster.buf[i];
aidaw++;
i++;
}
rq_for_each_segment(bv, req, iter) {
aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
i++;
}
for (; i < msb->blk_count; i++) {
aidaw->data_addr = (u64) scmrq->cluster.buf[i];
aidaw++;
}
break;
}
}
bool scm_need_cluster_request(struct scm_request *scmrq)
{
if (rq_data_dir(scmrq->request) == READ)
return false;
return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
}
/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
scm_prepare_cluster_request(scmrq);
if (eadm_start_aob(scmrq->aob))
scm_request_requeue(scmrq);
}
bool scm_test_cluster_request(struct scm_request *scmrq)
{
return scmrq->cluster.state != CLUSTER_NONE;
}
void scm_cluster_request_irq(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
unsigned long flags;
switch (scmrq->cluster.state) {
case CLUSTER_NONE:
BUG();
break;
case CLUSTER_READ:
if (scmrq->error) {
scm_request_finish(scmrq);
break;
}
scmrq->cluster.state = CLUSTER_WRITE;
spin_lock_irqsave(&bdev->rq_lock, flags);
scm_initiate_cluster_request(scmrq);
spin_unlock_irqrestore(&bdev->rq_lock, flags);
break;
case CLUSTER_WRITE:
scm_request_finish(scmrq);
break;
}
}
bool scm_cluster_size_valid(void)
{
if (write_cluster_size == 1 || write_cluster_size > 128)
return false;
return !(write_cluster_size & (write_cluster_size - 1));
}
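Two details above are worth isolating: clusters_intersect() converts 512-byte sector positions to bytes (the << 9) and compares the resulting ranges of cluster indices with the usual interval test firstB <= lastA && firstA <= lastB, and scm_cluster_size_valid() accepts only powers of two via the classic x & (x - 1) trick. A self-contained sketch of both checks, assuming plain integers instead of struct request:

#include <stdio.h>
#include <stdbool.h>

#define CLUSTER_BYTES (64UL * 4096)	/* assumed: 64 pages of 4k each */

/* Overlap test on cluster indices, mirroring clusters_intersect(). */
static bool ranges_intersect(unsigned long posA, unsigned long bytesA,
			     unsigned long posB, unsigned long bytesB)
{
	unsigned long firstA = (posA << 9) / CLUSTER_BYTES;
	unsigned long lastA = ((posA << 9) + bytesA - 1) / CLUSTER_BYTES;
	unsigned long firstB = (posB << 9) / CLUSTER_BYTES;
	unsigned long lastB = ((posB << 9) + bytesB - 1) / CLUSTER_BYTES;

	return firstB <= lastA && firstA <= lastB;
}

/* Power-of-two check, mirroring scm_cluster_size_valid(). */
static bool cluster_pages_valid(unsigned int n)
{
	if (n == 1 || n > 128)
		return false;
	return !(n & (n - 1));
}

int main(void)
{
	/* Sectors 0..7 and 256..263 share cluster 0; 512..519 is cluster 1. */
	printf("%d %d\n", ranges_intersect(0, 4096, 256, 4096),
	       ranges_intersect(0, 4096, 512, 4096));	/* 1 0 */
	printf("%d %d\n", cluster_pages_valid(64),
	       cluster_pages_valid(48));		/* 1 0 */
	return 0;
}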

drivers/s390/block/scm_drv.c Normal file
View file

@ -0,0 +1,92 @@
/*
* Device driver for s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/eadm.h>
#include "scm_blk.h"
static void scm_notify(struct scm_device *scmdev, enum scm_event event)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
switch (event) {
case SCM_CHANGE:
pr_info("%lx: The capabilities of the SCM increment changed\n",
(unsigned long) scmdev->address);
SCM_LOG(2, "State changed");
SCM_LOG_STATE(2, scmdev);
break;
case SCM_AVAIL:
SCM_LOG(2, "Increment available");
SCM_LOG_STATE(2, scmdev);
scm_blk_set_available(bdev);
break;
}
}
static int scm_probe(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev;
int ret;
SCM_LOG(2, "probe");
SCM_LOG_STATE(2, scmdev);
if (scmdev->attrs.oper_state != OP_STATE_GOOD)
return -EINVAL;
bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
if (!bdev)
return -ENOMEM;
dev_set_drvdata(&scmdev->dev, bdev);
ret = scm_blk_dev_setup(bdev, scmdev);
if (ret) {
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
goto out;
}
out:
return ret;
}
static int scm_remove(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
scm_blk_dev_cleanup(bdev);
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
return 0;
}
static struct scm_driver scm_drv = {
.drv = {
.name = "scm_block",
.owner = THIS_MODULE,
},
.notify = scm_notify,
.probe = scm_probe,
.remove = scm_remove,
.handler = scm_blk_irq,
};
int __init scm_drv_init(void)
{
return scm_driver_register(&scm_drv);
}
void scm_drv_cleanup(void)
{
scm_driver_unregister(&scm_drv);
}

479
drivers/s390/block/xpram.c Normal file
View file

@ -0,0 +1,479 @@
/*
* Xpram.c -- the S/390 expanded memory RAM-disk
*
* significant parts of this code are based on
* the sbull device driver presented in
* A. Rubini: Linux Device Drivers
*
* Author of XPRAM specific coding: Reinhard Buendgen
* buendgen@de.ibm.com
* Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* External interfaces:
* Interfaces to linux kernel
* xpram_setup: read kernel parameters
* Device specific file operations
* xpram_ioctl
* xpram_open
*
* "ad-hoc" partitioning:
* the expanded memory can be partitioned among several devices
* (with different minors). The partitioning can be configured
* through kernel or module parameters (int devs & int sizes[]).
*
* Potential future improvements:
* generic hard disk support to replace ad-hoc partitioning
*/
#define KMSG_COMPONENT "xpram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h> /* isdigit, isxdigit */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
static unsigned int xpram_pages;
static int xpram_devs;
/*
* Parameter parsing functions.
*/
static int devs = XPRAM_DEVS;
static char *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
"the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
MODULE_PARM_DESC(sizes, "list of device (partition) sizes; " \
"the defaults are 0s\n" \
"All devices with size 0 equally partition the "
"remaining space on the expanded storage not "
"claimed by explicit sizes\n");
MODULE_LICENSE("GPL");
/*
* Copy expanded memory page (4kB) into main memory
* Arguments
* page_addr: address of target page
* xpage_index: index of expanded memory page
* Return value
* 0: if operation succeeds
* -EIO: if pgin failed
* -ENXIO: if xpram has vanished
*/
static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
{
int cc = 2; /* return unused cc 2 if pgin traps */
asm volatile(
" .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2)
return -ENXIO;
if (cc == 1)
return -EIO;
return 0;
}
/*
* Copy a 4kB page of main memory to an expanded memory page
* Arguments
* page_addr: address of source page
* xpage_index: index of expanded memory page
* Return value
* 0: if operation succeeds
* -EIO: if pgout failed
* -ENXIO: if xpram has vanished
*/
static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
{
int cc = 2; /* return unused cc 2 if pgout traps */
asm volatile(
" .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2)
return -ENXIO;
if (cc == 1)
return -EIO;
return 0;
}
/*
* Check if xpram is available.
*/
static int xpram_present(void)
{
unsigned long mem_page;
int rc;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return -ENOMEM;
rc = xpram_page_in(mem_page, 0);
free_page(mem_page);
return rc ? -ENXIO : 0;
}
/*
* Return index of the last available xpram page.
*/
static unsigned long xpram_highest_page_index(void)
{
unsigned int page_index, add_bit;
unsigned long mem_page;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return 0;
page_index = 0;
add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
while (add_bit > 0) {
if (xpram_page_in(mem_page, page_index | add_bit) == 0)
page_index |= add_bit;
add_bit >>= 1;
}
free_page (mem_page);
return page_index;
}
/*
* Block device make request function.
*/
static void xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;
unsigned long page_addr;
unsigned long bytes;
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
if ((bio->bi_iter.bi_size >> 12) > xdev->size)
/* Request exceeds device size. */
goto fail;
if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
bio_for_each_segment(bvec, bio, iter) {
page_addr = (unsigned long)
kmap(bvec.bv_page) + bvec.bv_offset;
bytes = bvec.bv_len;
if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
/* More paranoia. */
goto fail;
while (bytes > 0) {
if (bio_data_dir(bio) == READ) {
if (xpram_page_in(page_addr, index) != 0)
goto fail;
} else {
if (xpram_page_out(page_addr, index) != 0)
goto fail;
}
page_addr += 4096;
bytes -= 4096;
index++;
}
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_endio(bio, 0);
return;
fail:
bio_io_error(bio);
}
static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
unsigned long size;
/*
* get geometry: we have to fake one. Trim the size to a
* multiple of 64 (32k): report 16 sectors and 4 heads, i.e.
* 64 sectors per cylinder, and as many cylinders as fit.
* Also report that data starts at sector 4.
*/
size = (xpram_pages * 8) & ~0x3f;
geo->cylinders = size >> 6;
geo->heads = 4;
geo->sectors = 16;
geo->start = 4;
return 0;
}
static const struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
.getgeo = xpram_getgeo,
};
/*
* Setup xpram_sizes array.
*/
static int __init xpram_setup_sizes(unsigned long pages)
{
unsigned long mem_needed;
unsigned long mem_auto;
unsigned long long size;
char *sizes_end;
int mem_auto_no;
int i;
/* Check number of devices. */
if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
pr_err("%d is not a valid number of XPRAM devices\n",devs);
return -EINVAL;
}
xpram_devs = devs;
/*
* Copy sizes array to xpram_sizes and align partition
* sizes to page boundary.
*/
mem_needed = 0;
mem_auto_no = 0;
for (i = 0; i < xpram_devs; i++) {
if (sizes[i]) {
size = simple_strtoull(sizes[i], &sizes_end, 0);
switch (*sizes_end) {
case 'g':
case 'G':
size <<= 20;
break;
case 'm':
case 'M':
size <<= 10;
}
xpram_sizes[i] = (size + 3) & -4UL;
}
if (xpram_sizes[i])
mem_needed += xpram_sizes[i];
else
mem_auto_no++;
}
pr_info(" number of devices (partitions): %d \n", xpram_devs);
for (i = 0; i < xpram_devs; i++) {
if (xpram_sizes[i])
pr_info(" size of partition %d: %u kB\n",
i, xpram_sizes[i]);
else
pr_info(" size of partition %d to be set "
"automatically\n",i);
}
pr_info(" memory needed (for sized partitions): %lu kB\n",
mem_needed);
pr_info(" partitions to be sized automatically: %d\n",
mem_auto_no);
if (mem_needed > pages * 4) {
pr_err("Not enough expanded memory available\n");
return -EINVAL;
}
/*
* partitioning:
* xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
* else: ; all partitions with zero xpram_sizes[i]
* partition equally the remaining space
*/
if (mem_auto_no) {
mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
pr_info(" automatically determined "
"partition size: %lu kB\n", mem_auto);
for (i = 0; i < xpram_devs; i++)
if (xpram_sizes[i] == 0)
xpram_sizes[i] = mem_auto;
}
return 0;
}
static int __init xpram_setup_blkdev(void)
{
unsigned long offset;
int i, rc = -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
xpram_disks[i] = alloc_disk(1);
if (!xpram_disks[i])
goto out;
xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
if (!xpram_queues[i]) {
put_disk(xpram_disks[i]);
goto out;
}
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
blk_queue_make_request(xpram_queues[i], xpram_make_request);
blk_queue_logical_block_size(xpram_queues[i], 4096);
}
/*
* Register xpram major.
*/
rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
if (rc < 0)
goto out;
/*
* Setup device structures.
*/
offset = 0;
for (i = 0; i < xpram_devs; i++) {
struct gendisk *disk = xpram_disks[i];
xpram_devices[i].size = xpram_sizes[i] / 4;
xpram_devices[i].offset = offset;
offset += xpram_devices[i].size;
disk->major = XPRAM_MAJOR;
disk->first_minor = i;
disk->fops = &xpram_devops;
disk->private_data = &xpram_devices[i];
disk->queue = xpram_queues[i];
sprintf(disk->disk_name, "slram%d", i);
set_capacity(disk, xpram_sizes[i] << 1);
add_disk(disk);
}
return 0;
out:
while (i--) {
blk_cleanup_queue(xpram_queues[i]);
put_disk(xpram_disks[i]);
}
return rc;
}
/*
* Resume failed: Print error message and call panic.
*/
static void xpram_resume_error(const char *message)
{
pr_err("Resuming the system failed: %s\n", message);
panic("xpram resume error\n");
}
/*
* Check if xpram setup changed between suspend and resume.
*/
static int xpram_restore(struct device *dev)
{
if (!xpram_pages)
return 0;
if (xpram_present() != 0)
xpram_resume_error("xpram disappeared");
if (xpram_pages != xpram_highest_page_index() + 1)
xpram_resume_error("Size of xpram changed");
return 0;
}
static const struct dev_pm_ops xpram_pm_ops = {
.restore = xpram_restore,
};
static struct platform_driver xpram_pdrv = {
.driver = {
.name = XPRAM_NAME,
.owner = THIS_MODULE,
.pm = &xpram_pm_ops,
},
};
static struct platform_device *xpram_pdev;
/*
* Finally, the init/exit functions.
*/
static void __exit xpram_exit(void)
{
int i;
for (i = 0; i < xpram_devs; i++) {
del_gendisk(xpram_disks[i]);
blk_cleanup_queue(xpram_queues[i]);
put_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
platform_device_unregister(xpram_pdev);
platform_driver_unregister(&xpram_pdrv);
}
static int __init xpram_init(void)
{
int rc;
/* Find out size of expanded memory. */
if (xpram_present() != 0) {
pr_err("No expanded memory available\n");
return -ENODEV;
}
xpram_pages = xpram_highest_page_index() + 1;
pr_info(" %u pages expanded memory found (%lu KB).\n",
xpram_pages, (unsigned long) xpram_pages*4);
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
rc = platform_driver_register(&xpram_pdrv);
if (rc)
return rc;
xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0);
if (IS_ERR(xpram_pdev)) {
rc = PTR_ERR(xpram_pdev);
goto fail_platform_driver_unregister;
}
rc = xpram_setup_blkdev();
if (rc)
goto fail_platform_device_unregister;
return 0;
fail_platform_device_unregister:
platform_device_unregister(xpram_pdev);
fail_platform_driver_unregister:
platform_driver_unregister(&xpram_pdrv);
return rc;
}
module_init(xpram_init);
module_exit(xpram_exit);
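xpram_highest_page_index() above is a bitwise binary search: starting from the top bit of an unsigned int it keeps every bit for which a probing page-in still succeeds, so it converges on the index of the last accessible page in at most 32 probes. The same search against an ordinary predicate, as a runnable sketch:

#include <stdio.h>
#include <stdbool.h>

#define LIMIT 123456u	/* assumed number of accessible pages */

/* Stand-in for xpram_page_in(): succeeds only for indices < LIMIT. */
static bool page_ok(unsigned int index)
{
	return index < LIMIT;
}

/* Bitwise binary search for the highest index page_ok() accepts. */
static unsigned int highest_index(void)
{
	unsigned int page_index = 0;
	unsigned int add_bit = 1u << (sizeof(unsigned int) * 8 - 1);

	while (add_bit > 0) {
		if (page_ok(page_index | add_bit))
			page_index |= add_bit;	/* keep this bit */
		add_bit >>= 1;
	}
	return page_index;
}

int main(void)
{
	printf("%u\n", highest_index());	/* prints 123455, i.e. LIMIT - 1 */
	return 0;
}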

191
drivers/s390/char/Kconfig Normal file
View file

@ -0,0 +1,191 @@
comment "S/390 character device drivers"
depends on S390
config TN3270
def_tristate y
prompt "Support for locally attached 3270 terminals"
depends on CCW
help
Include support for IBM 3270 terminals.
config TN3270_TTY
def_tristate y
prompt "Support for tty input/output on 3270 terminals"
depends on TN3270 && TTY
help
Include support for using an IBM 3270 terminal as a Linux tty.
config TN3270_FS
def_tristate m
prompt "Support for fullscreen applications on 3270 terminals"
depends on TN3270
help
Include support for fullscreen applications on an IBM 3270 terminal.
config TN3270_CONSOLE
def_bool y
prompt "Support for console on 3270 terminal"
depends on TN3270=y && TN3270_TTY=y
help
Include support for using an IBM 3270 terminal as a Linux system
console. Available only if 3270 support is compiled in statically.
config TN3215
def_bool y
prompt "Support for 3215 line mode terminal"
depends on CCW && TTY
help
Include support for IBM 3215 line-mode terminals.
config TN3215_CONSOLE
def_bool y
prompt "Support for console on 3215 line mode terminal"
depends on TN3215
help
Include support for using an IBM 3215 line-mode terminal as a
Linux system console.
config CCW_CONSOLE
def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
config SCLP_TTY
def_bool y
prompt "Support for SCLP line mode terminal"
depends on S390 && TTY
help
Include support for IBM SCLP line-mode terminals.
config SCLP_CONSOLE
def_bool y
prompt "Support for console on SCLP line mode terminal"
depends on SCLP_TTY
help
Include support for using an IBM HWC line-mode terminal as the Linux
system console.
config SCLP_VT220_TTY
def_bool y
prompt "Support for SCLP VT220-compatible terminal"
depends on S390 && TTY
help
Include support for an IBM SCLP VT220-compatible terminal.
config SCLP_VT220_CONSOLE
def_bool y
prompt "Support for console on SCLP VT220-compatible terminal"
depends on SCLP_VT220_TTY
help
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
config SCLP_CPI
def_tristate m
prompt "Control-Program Identification"
depends on S390
help
This option enables the hardware console interface for system
identification. This is commonly used for workload management and
gives you a nice name for the system on the service element.
Please select this option as a module since built-in operation is
completely untested.
You should only select this option if you know what you are doing,
need this feature and intend to run your kernel in LPAR.
config SCLP_ASYNC
def_tristate m
prompt "Support for Call Home via Asynchronous SCLP Records"
depends on S390
help
This option enables the call home function, which is able to inform
the service element and connected organisations about a kernel panic.
You should only select this option if you know what you are doing,
want to inform other people about your kernel panics,
need this feature and intend to run your kernel in LPAR.
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
depends on S390 && 64BIT
select CRC16
help
This option enables support for file transfers from a Hardware
Management Console (HMC) drive CD/DVD-ROM. It is available as a
module, called 'hmcdrv', and also as kernel built-in. There is one
optional parameter for this module: cachesize=N, which modifies the
transfer cache size from its default value of 0.5MB to N bytes. If N
is zero, then no caching is performed.
config S390_TAPE
def_tristate m
prompt "S/390 tape device support"
depends on CCW
help
Select this option if you want to access channel-attached tape
devices on IBM S/390 or zSeries.
If you select this option you will also want to select at
least one of the tape interface options and one of the tape
hardware options in order to access a tape device.
This option is also available as a module. The module will be
called tape390 and include all selected interfaces and
hardware drivers.
comment "S/390 tape hardware support"
depends on S390_TAPE
config S390_TAPE_34XX
def_tristate m
prompt "Support for 3480/3490 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3480/3490 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config S390_TAPE_3590
def_tristate m
prompt "Support for 3590 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3590 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config VMLOGRDR
def_tristate m
prompt "Support for the z/VM recording system services (VM only)"
depends on IUCV
help
Select this option if you want to be able to receive records collected
by the z/VM recording system services, e.g. from *LOGREC, *ACCOUNT or
*SYMPTOM.
This driver depends on the IUCV support driver.
config VMCP
def_bool y
prompt "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM.
config MONREADER
def_tristate m
prompt "API for reading z/VM monitor service records"
depends on IUCV
help
Character device driver for reading z/VM monitor service records.
config MONWRITER
def_tristate m
prompt "API for writing z/VM monitor service records"
depends on S390
help
Character device driver for writing z/VM monitor service records.
config S390_VMUR
def_tristate m
prompt "z/VM unit record device driver"
depends on S390
help
Character device driver for z/VM reader, puncher and printer.
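Every entry in this file follows the same shape: a def_tristate or def_bool line that sets both the type and the default, a prompt that makes the option user-visible, and depends on to gate it. A hypothetical entry illustrating the pattern (the symbol and dependencies are invented):

config DEMO_CHARDEV
	def_tristate m
	prompt "Demo character device"
	depends on S390 && TTY
	help
	  Hypothetical example only: the option defaults to "m" (build as
	  a module), can be changed at the prompt, and is offered only
	  when both S390 and TTY are enabled.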

drivers/s390/char/Makefile Normal file
View file

@ -0,0 +1,38 @@
#
# S/390 character devices
#
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
obj-$(CONFIG_TN3270_TTY) += tty3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
tape-$(CONFIG_PROC_FS) += tape_proc.o
tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
obj-$(CONFIG_MONREADER) += monreader.o
obj-$(CONFIG_MONWRITER) += monwriter.o
obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o
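The tape entries above use kbuild's composite-object idiom: optional pieces accumulate in tape-$(CONFIG_...), are folded into tape-objs via $(tape-y), and the single tape.o then links whatever was configured in. A minimal hypothetical fragment of the same shape:

# Hypothetical fragment illustrating the composite-object idiom.
demo-$(CONFIG_PROC_FS) += demo_proc.o
demo-objs := demo_core.o $(demo-y)
obj-$(CONFIG_DEMO) += demo.o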

1226
drivers/s390/char/con3215.c Normal file

File diff suppressed because it is too large

638
drivers/s390/char/con3270.c Normal file
View file

@ -0,0 +1,638 @@
/*
* IBM/3270 Driver - console view.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include "raw3270.h"
#include "tty3270.h"
#include "ctrlchar.h"
#define CON3270_OUTPUT_BUFFER_SIZE 1024
#define CON3270_STRING_PAGES 4
static struct raw3270_fn con3270_fn;
static bool auto_update = true;
module_param(auto_update, bool, 0);
/*
* Main 3270 console view data structure.
*/
struct con3270 {
struct raw3270_view view;
struct list_head freemem; /* list of free memory for strings. */
/* Output stuff. */
struct list_head lines; /* list of lines. */
struct list_head update; /* list of lines to update. */
int line_nr; /* line number for next update. */
int nr_lines; /* # lines in list. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
struct string *cline; /* current output line. */
struct string *status; /* last line of display. */
struct raw3270_request *write; /* single write request. */
struct timer_list timer;
/* Input stuff. */
struct string *input; /* input string for read request. */
struct raw3270_request *read; /* single read request. */
struct raw3270_request *kreset; /* single keyboard reset request. */
struct tasklet_struct readlet; /* tasklet to issue read request. */
};
static struct con3270 *condev;
/* con3270->update_flags. See con3270_update for details. */
#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
#define CON_UPDATE_LIST 2 /* Update lines in con3270->update. */
#define CON_UPDATE_STATUS 4 /* Update status line. */
#define CON_UPDATE_ALL 8 /* Recreate screen. */
static void con3270_update(struct con3270 *);
/*
* Setup timeout for a device. On timeout trigger an update.
*/
static void con3270_set_timer(struct con3270 *cp, int expires)
{
if (expires == 0)
del_timer(&cp->timer);
else
mod_timer(&cp->timer, jiffies + expires);
}
/*
* The status line is the last line of the screen. It shows the string
* "console view" in the lower left corner and "Running"/"More..."/"Holding"
* in the lower right corner of the screen.
*/
static void
con3270_update_status(struct con3270 *cp)
{
char *str;
str = (cp->nr_up != 0) ? "History" : "Running";
memcpy(cp->status->string + 24, str, 7);
codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
cp->update_flags |= CON_UPDATE_STATUS;
}
static void
con3270_create_status(struct con3270 *cp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
'c','o','n','s','o','l','e',' ','v','i','e','w',
TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
/* Copy blueprint to status line */
memcpy(cp->status->string, blueprint, sizeof(blueprint));
/* Set TO_RA addresses. */
raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
cp->view.cols * (cp->view.rows - 1));
raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
cp->view.cols * cp->view.rows - 8);
/* Convert strings to ebcdic. */
codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
}
/*
* Set output offsets to 3270 datastream fragment of a console string.
*/
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
if (s->len >= cp->view.cols - 5)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
}
/*
* Rebuild update list to print all lines.
*/
static void
con3270_rebuild_update(struct con3270 *cp)
{
struct string *s, *n;
int nr;
/*
* Throw away update list and create a new one,
* containing all lines that will fit on the screen.
*/
list_for_each_entry_safe(s, n, &cp->update, update)
list_del_init(&s->update);
nr = cp->view.rows - 2 + cp->nr_up;
list_for_each_entry_reverse(s, &cp->lines, list) {
if (nr < cp->view.rows - 1)
list_add(&s->update, &cp->update);
if (--nr < 0)
break;
}
cp->line_nr = 0;
cp->update_flags |= CON_UPDATE_LIST;
}
/*
* Alloc string for size bytes. Free strings from history if necessary.
*/
static struct string *
con3270_alloc_string(struct con3270 *cp, size_t size)
{
struct string *s, *n;
s = alloc_string(&cp->freemem, size);
if (s)
return s;
list_for_each_entry_safe(s, n, &cp->lines, list) {
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
cp->nr_lines--;
if (free_string(&cp->freemem, s) >= size)
break;
}
s = alloc_string(&cp->freemem, size);
BUG_ON(!s);
if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
cp->nr_up = cp->nr_lines - cp->view.rows + 1;
con3270_rebuild_update(cp);
con3270_update_status(cp);
}
return s;
}
/*
* Write completion callback.
*/
static void
con3270_write_callback(struct raw3270_request *rq, void *data)
{
raw3270_request_reset(rq);
xchg(&((struct con3270 *) rq->view)->write, rq);
}
/*
* Update console display.
*/
static void
con3270_update(struct con3270 *cp)
{
struct raw3270_request *wrq;
char wcc, prolog[6];
unsigned long flags;
unsigned long updated;
struct string *s, *n;
int rc;
if (!auto_update && !raw3270_view_active(&cp->view))
return;
if (cp->view.dev)
raw3270_activate_view(&cp->view);
wrq = xchg(&cp->write, 0);
if (!wrq) {
con3270_set_timer(cp, 1);
return;
}
spin_lock_irqsave(&cp->view.lock, flags);
updated = 0;
if (cp->update_flags & CON_UPDATE_ALL) {
con3270_rebuild_update(cp);
con3270_update_status(cp);
cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
CON_UPDATE_STATUS;
}
if (cp->update_flags & CON_UPDATE_ERASE) {
/* Use erase write alternate to initialize display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
updated |= CON_UPDATE_ERASE;
} else
raw3270_request_set_cmd(wrq, TC_WRITE);
wcc = TW_NONE;
raw3270_request_add_data(wrq, &wcc, 1);
/*
* Update status line.
*/
if (cp->update_flags & CON_UPDATE_STATUS)
if (raw3270_request_add_data(wrq, cp->status->string,
cp->status->len) == 0)
updated |= CON_UPDATE_STATUS;
if (cp->update_flags & CON_UPDATE_LIST) {
prolog[0] = TO_SBA;
prolog[3] = TO_SA;
prolog[4] = TAT_COLOR;
prolog[5] = TAC_TURQ;
raw3270_buffer_address(cp->view.dev, prolog + 1,
cp->view.cols * cp->line_nr);
raw3270_request_add_data(wrq, prolog, 6);
/* Write strings in the update list to the screen. */
list_for_each_entry_safe(s, n, &cp->update, update) {
if (s != cp->cline)
con3270_update_string(cp, s, cp->line_nr);
if (raw3270_request_add_data(wrq, s->string,
s->len) != 0)
break;
list_del_init(&s->update);
if (s != cp->cline)
cp->line_nr++;
}
if (list_empty(&cp->update))
updated |= CON_UPDATE_LIST;
}
wrq->callback = con3270_write_callback;
rc = raw3270_start(&cp->view, wrq);
if (rc == 0) {
cp->update_flags &= ~updated;
if (cp->update_flags)
con3270_set_timer(cp, 1);
} else {
raw3270_request_reset(wrq);
xchg(&cp->write, wrq);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
}
/*
* Read tasklet.
*/
static void
con3270_read_tasklet(struct raw3270_request *rrq)
{
static char kreset_data = TW_KR;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
deactivate = 0;
/* Check aid byte. */
switch (cp->input->string[0]) {
case 0x7d: /* enter: jump to bottom. */
nr_up = 0;
break;
case 0xf3: /* PF3: deactivate the console view. */
deactivate = 1;
break;
case 0x6d: /* clear: start from scratch. */
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
break;
case 0xf7: /* PF7: do a page up in the console log. */
nr_up += cp->view.rows - 2;
if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
nr_up = cp->nr_lines - cp->view.rows + 1;
if (nr_up < 0)
nr_up = 0;
}
break;
case 0xf8: /* PF8: do a page down in the console log. */
nr_up -= cp->view.rows - 2;
if (nr_up < 0)
nr_up = 0;
break;
}
if (nr_up != cp->nr_up) {
cp->nr_up = nr_up;
con3270_rebuild_update(cp);
con3270_update_status(cp);
con3270_set_timer(cp, 1);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
/* Start keyboard reset command. */
raw3270_request_reset(cp->kreset);
raw3270_request_set_cmd(cp->kreset, TC_WRITE);
raw3270_request_add_data(cp->kreset, &kreset_data, 1);
raw3270_start(&cp->view, cp->kreset);
if (deactivate)
raw3270_deactivate_view(&cp->view);
raw3270_request_reset(rrq);
xchg(&cp->read, rrq);
raw3270_put_view(&cp->view);
}
/*
* Read request completion callback.
*/
static void
con3270_read_callback(struct raw3270_request *rq, void *data)
{
raw3270_get_view(rq->view);
/* Schedule tasklet to process the input. */
tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
}
/*
* Issue a read request. Called only from interrupt function.
*/
static void
con3270_issue_read(struct con3270 *cp)
{
struct raw3270_request *rrq;
int rc;
rrq = xchg(&cp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
rrq->callback = con3270_read_callback;
rrq->callback_data = cp;
raw3270_request_set_cmd(rrq, TC_READMOD);
raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
/* Issue the read modified request. */
rc = raw3270_start_irq(&cp->view, rrq);
if (rc)
raw3270_request_reset(rrq);
}
/*
* Switch to the console view.
*/
static int
con3270_activate(struct raw3270_view *view)
{
struct con3270 *cp;
cp = (struct con3270 *) view;
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
return 0;
}
static void
con3270_deactivate(struct raw3270_view *view)
{
struct con3270 *cp;
cp = (struct con3270 *) view;
del_timer(&cp->timer);
}
static int
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
con3270_issue_read(cp);
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/* Console view to a 3270 device. */
static struct raw3270_fn con3270_fn = {
.activate = con3270_activate,
.deactivate = con3270_deactivate,
.intv = (void *) con3270_irq
};
static inline void
con3270_cline_add(struct con3270 *cp)
{
if (!list_empty(&cp->cline->list))
/* Already added. */
return;
list_add_tail(&cp->cline->list, &cp->lines);
cp->nr_lines++;
con3270_rebuild_update(cp);
}
static inline void
con3270_cline_insert(struct con3270 *cp, unsigned char c)
{
cp->cline->string[cp->cline->len++] =
cp->view.ascebc[(c < ' ') ? ' ' : c];
if (list_empty(&cp->cline->update)) {
list_add_tail(&cp->cline->update, &cp->update);
cp->update_flags |= CON_UPDATE_LIST;
}
}
static inline void
con3270_cline_end(struct con3270 *cp)
{
struct string *s;
unsigned int size;
/* Copy cline. */
size = (cp->cline->len < cp->view.cols - 5) ?
cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
if (s->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
while (--size > cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
list_add(&s->list, &cp->cline->list);
list_del_init(&cp->cline->list);
if (!list_empty(&cp->cline->update)) {
list_add(&s->update, &cp->cline->update);
list_del_init(&cp->cline->update);
}
cp->cline->len = 0;
}
/*
* Write a string to the 3270 console
*/
static void
con3270_write(struct console *co, const char *str, unsigned int count)
{
struct con3270 *cp;
unsigned long flags;
unsigned char c;
cp = condev;
spin_lock_irqsave(&cp->view.lock, flags);
while (count-- > 0) {
c = *str++;
if (cp->cline->len == 0)
con3270_cline_add(cp);
if (c != '\n')
con3270_cline_insert(cp, c);
if (c == '\n' || cp->cline->len >= cp->view.cols)
con3270_cline_end(cp);
}
/* Setup timer to output current console buffer after 1/10 second */
cp->nr_up = 0;
if (cp->view.dev && !timer_pending(&cp->timer))
con3270_set_timer(cp, HZ/10);
spin_unlock_irqrestore(&cp->view.lock,flags);
}
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
*index = c->index;
return tty3270_driver;
}
/*
* Wait for end of write request.
*/
static void
con3270_wait_write(struct con3270 *cp)
{
while (!cp->write) {
raw3270_wait_cons_dev(cp->view.dev);
barrier();
}
}
/*
* panic() calls con3270_flush through a panic_notifier
* before the system enters a disabled, endless loop.
*/
static void
con3270_flush(void)
{
struct con3270 *cp;
unsigned long flags;
cp = condev;
if (!cp->view.dev)
return;
raw3270_pm_unfreeze(&cp->view);
raw3270_activate_view(&cp->view);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
cp->nr_up = 0;
con3270_rebuild_update(cp);
con3270_update_status(cp);
while (cp->update_flags != 0) {
spin_unlock_irqrestore(&cp->view.lock, flags);
con3270_update(cp);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
}
static int con3270_notify(struct notifier_block *self,
unsigned long event, void *data)
{
con3270_flush();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3270_notify,
.priority = 0,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3270_notify,
.priority = 0,
};
/*
* The console structure for the 3270 console
*/
static struct console con3270 = {
.name = "tty3270",
.write = con3270_write,
.device = con3270_device,
.flags = CON_PRINTBUFFER,
};
/*
* 3270 console initialization code called from console_init().
*/
static int __init
con3270_init(void)
{
struct raw3270 *rp;
void *cbuf;
int i;
/* Check if 3270 is to be the console */
if (!CONSOLE_IS_3270)
return -ENODEV;
/* Set the console mode for VM */
if (MACHINE_IS_VM) {
cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
rp = raw3270_setup_console();
if (IS_ERR(rp))
return PTR_ERR(rp);
condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
(unsigned long) condev);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
condev->cline->len = 0;
con3270_create_status(condev);
condev->input = alloc_string(&condev->freemem, 80);
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&con3270);
return 0;
}
console_initcall(con3270_init);
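A pattern worth noting in con3270_update() above: each sub-update that actually fits into the write request sets its bit in a local updated mask, and only those bits are cleared from update_flags after a successful start, so whatever did not fit is retried from the timer. A compact sketch of this accumulate-and-clear bookkeeping with plain integers:

#include <stdio.h>

#define UPD_ERASE  1
#define UPD_LIST   2
#define UPD_STATUS 4

/* Pretend only 'budget' pieces fit into one write request. */
static unsigned int flush(unsigned int *pending, int budget)
{
	unsigned int updated = 0;
	unsigned int bit;

	for (bit = UPD_ERASE; bit <= UPD_STATUS; bit <<= 1) {
		if ((*pending & bit) && budget-- > 0)
			updated |= bit;		/* this piece made it out */
	}
	*pending &= ~updated;	/* anything still set gets retried */
	return updated;
}

int main(void)
{
	unsigned int pending = UPD_ERASE | UPD_LIST | UPD_STATUS;

	flush(&pending, 2);
	printf("pending: %#x\n", pending);	/* 0x4: status did not fit */
	flush(&pending, 2);
	printf("pending: %#x\n", pending);	/* 0 */
	return 0;
}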

drivers/s390/char/ctrlchar.c Normal file
View file

@ -0,0 +1,72 @@
/*
* Unified handling of special chars.
*
* Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
#include <linux/stddef.h>
#include <asm/errno.h>
#include <linux/sysrq.h>
#include <linux/ctype.h>
#include "ctrlchar.h"
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
handle_sysrq(ctrlchar_sysrq_key);
}
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
#endif
/**
* Check for special chars at start of input.
*
* @param buf Console input buffer.
* @param len Length of valid data in buffer.
* @param tty The tty struct for this console.
* @return CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered;
* otherwise the char to be inserted, logically or'ed
* with CTRLCHAR_CTRL
*/
unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
{
if ((len < 2) || (len > 3))
return CTRLCHAR_NONE;
/* hat is 0xb1 in codepage 037 (US etc.) and thus */
/* converted to 0x5e in ascii ('^') */
if ((buf[0] != '^') && (buf[0] != '\252'))
return CTRLCHAR_NONE;
#ifdef CONFIG_MAGIC_SYSRQ
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
#endif
if (len != 2)
return CTRLCHAR_NONE;
switch (tolower(buf[1])) {
case 'c':
return INTR_CHAR(tty) | CTRLCHAR_CTRL;
case 'd':
return EOF_CHAR(tty) | CTRLCHAR_CTRL;
case 'z':
return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
}
return CTRLCHAR_NONE;
}

drivers/s390/char/ctrlchar.h Normal file
View file

@ -0,0 +1,19 @@
/*
* Unified handling of special chars.
*
* Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
#include <linux/tty.h>
extern unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
#define CTRLCHAR_NONE (1 << 8)
#define CTRLCHAR_CTRL (2 << 8)
#define CTRLCHAR_SYSRQ (3 << 8)
#define CTRLCHAR_MASK (~0xffu)
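The return value of ctrlchar_handle() is multiplexed: the low byte can carry a character while the high bits carry one of the CTRLCHAR_* tags, so a caller classifies with CTRLCHAR_MASK and recovers the character with & 0xff. A standalone sketch of the caller side (the macros are repeated so it compiles on its own):

#include <stdio.h>

#define CTRLCHAR_NONE  (1 << 8)
#define CTRLCHAR_CTRL  (2 << 8)
#define CTRLCHAR_SYSRQ (3 << 8)
#define CTRLCHAR_MASK  (~0xffu)

/* Hypothetical consumer of a ctrlchar_handle()-style return value. */
static void classify(unsigned int cchar)
{
	switch (cchar & CTRLCHAR_MASK) {
	case CTRLCHAR_NONE:
		printf("no special char\n");
		break;
	case CTRLCHAR_SYSRQ:
		printf("sysrq\n");
		break;
	case CTRLCHAR_CTRL:
		printf("control char 0x%02x\n", cchar & 0xff);
		break;
	}
}

int main(void)
{
	classify(CTRLCHAR_NONE);
	classify(CTRLCHAR_CTRL | 0x03);	/* e.g. ^c mapped to INTR (0x03) */
	return 0;
}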

drivers/s390/char/defkeymap.c Normal file
View file

@ -0,0 +1,158 @@
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
#include <linux/kd.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
u_short plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
};
static u_short shift_map[NR_KEYS] = {
0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
};
static u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
};
static u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
ushort *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
unsigned int keymap_count = 4;
/*
* Philosophy: most people do not define more strings, but those who do
* often want quite a lot of string space. So, we statically allocate
* the default and allocate dynamically in chunks of 512 bytes.
*/
char func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
'\033', '[', '[', 'D', 0,
'\033', '[', '[', 'E', 0,
'\033', '[', '1', '7', '~', 0,
'\033', '[', '1', '8', '~', 0,
'\033', '[', '1', '9', '~', 0,
'\033', '[', '2', '0', '~', 0,
'\033', '[', '2', '1', '~', 0,
'\033', '[', '2', '3', '~', 0,
'\033', '[', '2', '4', '~', 0,
'\033', '[', '2', '5', '~', 0,
'\033', '[', '2', '6', '~', 0,
'\033', '[', '2', '8', '~', 0,
'\033', '[', '2', '9', '~', 0,
'\033', '[', '3', '1', '~', 0,
'\033', '[', '3', '2', '~', 0,
'\033', '[', '3', '3', '~', 0,
'\033', '[', '3', '4', '~', 0,
};
char *funcbufptr = func_buf;
int funcbufsize = sizeof(func_buf);
int funcbufleft = 0; /* space left */
char *func_table[MAX_NR_FUNC] = {
func_buf + 0,
func_buf + 5,
func_buf + 10,
func_buf + 15,
func_buf + 20,
func_buf + 25,
func_buf + 31,
func_buf + 37,
func_buf + 43,
func_buf + 49,
func_buf + 55,
func_buf + 61,
func_buf + 67,
func_buf + 73,
func_buf + 79,
func_buf + 85,
func_buf + 91,
func_buf + 97,
func_buf + 103,
func_buf + 109,
NULL,
};
struct kbdiacruc accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int accent_table_size = 4;

drivers/s390/char/defkeymap.map Normal file
View file

@ -0,0 +1,191 @@
# Default keymap for 3270 (ebcdic codepage 037).
keymaps 0-1,4-5
keycode 0 = nul Oslash
keycode 1 = nul a
keycode 2 = nul b
keycode 3 = nul c
keycode 4 = nul d
keycode 5 = nul e
keycode 6 = nul f
keycode 7 = nul g
keycode 8 = nul h
keycode 9 = nul i
keycode 10 = nul guillemotleft
keycode 11 = nul guillemotright
keycode 12 = nul eth
keycode 13 = nul yacute
keycode 14 = nul thorn
keycode 15 = nul plusminus
keycode 16 = nul degree
keycode 17 = nul j
keycode 18 = nul k
keycode 19 = nul l
keycode 20 = nul m
keycode 21 = nul n
keycode 22 = nul o
keycode 23 = nul p
keycode 24 = nul q
keycode 25 = nul r
keycode 26 = nul nul
keycode 27 = nul nul
keycode 28 = nul ae
keycode 29 = nul cedilla
keycode 30 = nul AE
keycode 31 = nul currency
keycode 32 = nul mu
keycode 33 = nul tilde
keycode 34 = nul s
keycode 35 = nul t
keycode 36 = nul u
keycode 37 = nul v
keycode 38 = nul w
keycode 39 = nul x
keycode 40 = nul y
keycode 41 = nul z
keycode 42 = nul exclamdown
keycode 43 = nul questiondown
keycode 44 = nul ETH
keycode 45 = nul Yacute
keycode 46 = nul THORN
keycode 47 = nul registered
keycode 48 = nul dead_circumflex
keycode 49 = nul sterling
keycode 50 = nul yen
keycode 51 = nul periodcentered
keycode 52 = nul copyright
keycode 53 = nul section
keycode 54 = nul paragraph
keycode 55 = nul onequarter
keycode 56 = nul onehalf
keycode 57 = nul threequarters
keycode 58 = nul bracketleft
keycode 59 = nul bracketright
keycode 60 = nul nul
keycode 61 = nul diaeresis
keycode 62 = nul acute
keycode 63 = nul multiply
keycode 64 = space braceleft
keycode 65 = nul A
keycode 66 = acircumflex B
keycode 67 = adiaeresis C
keycode 68 = agrave D
keycode 69 = aacute E
keycode 70 = atilde F
keycode 71 = aring G
keycode 72 = ccedilla H
keycode 73 = ntilde I
keycode 74 = cent nul
keycode 75 = period ocircumflex
keycode 76 = less odiaeresis
keycode 77 = parenleft ograve
keycode 78 = plus oacute
keycode 79 = bar otilde
keycode 80 = ampersand braceright
keycode 81 = eacute J
keycode 82 = acircumflex K
keycode 83 = ediaeresis L
keycode 84 = egrave M
keycode 85 = iacute N
keycode 86 = icircumflex O
keycode 87 = idiaeresis P
keycode 88 = igrave Q
keycode 89 = ssharp R
keycode 90 = exclam onesuperior
keycode 91 = dollar ucircumflex
keycode 92 = asterisk udiaeresis
keycode 93 = parenright ugrave
keycode 94 = semicolon uacute
keycode 95 = notsign ydiaeresis
keycode 96 = minus backslash
keycode 97 = slash division
keycode 98 = Acircumflex S
keycode 99 = Adiaeresis T
keycode 100 = Agrave U
keycode 101 = Aacute V
keycode 102 = Atilde W
keycode 103 = Aring X
keycode 104 = Ccedilla Y
keycode 105 = Ntilde Z
keycode 106 = brokenbar twosuperior
keycode 107 = comma Ocircumflex
keycode 108 = percent Odiaeresis
keycode 109 = underscore Ograve
keycode 110 = greater Oacute
keycode 111 = question Otilde
keycode 112 = oslash zero
keycode 113 = Eacute one
keycode 114 = Ecircumflex two
keycode 115 = Ediaeresis three
keycode 116 = Egrave four
keycode 117 = Iacute five
keycode 118 = Icircumflex six
keycode 119 = Idiaeresis seven
keycode 120 = Igrave eight
keycode 121 = grave nine
keycode 122 = colon threesuperior
keycode 123 = numbersign Ucircumflex
keycode 124 = at Udiaeresis
keycode 125 = apostrophe Ugrave
keycode 126 = equal Uacute
keycode 127 = quotedbl nul
# AID keys
control keycode 74 = F22
control keycode 75 = F23
control keycode 76 = F24
control keycode 107 = Control_z # PA3
control keycode 108 = Control_c # PA1
control keycode 109 = KeyboardSignal # Clear
control keycode 110 = Control_d # PA2
control keycode 122 = F10
control keycode 123 = F11 # F11
control keycode 124 = Last_Console # F12
control keycode 125 = Linefeed
shift control keycode 65 = F13
shift control keycode 66 = F14
shift control keycode 67 = F15
shift control keycode 68 = F16
shift control keycode 69 = F17
shift control keycode 70 = F18
shift control keycode 71 = F19
shift control keycode 72 = F20
shift control keycode 73 = F21
shift control keycode 113 = F1
shift control keycode 114 = F2
shift control keycode 115 = Incr_Console
shift control keycode 116 = F4
shift control keycode 117 = F5
shift control keycode 118 = F6
shift control keycode 119 = Scroll_Backward
shift control keycode 120 = Scroll_Forward
shift control keycode 121 = F9
string F1 = "\033[[A"
string F2 = "\033[[B"
string F3 = "\033[[C"
string F4 = "\033[[D"
string F5 = "\033[[E"
string F6 = "\033[17~"
string F7 = "\033[18~"
string F8 = "\033[19~"
string F9 = "\033[20~"
string F10 = "\033[21~"
string F11 = "\033[23~"
string F12 = "\033[24~"
string F13 = "\033[25~"
string F14 = "\033[26~"
string F15 = "\033[28~"
string F16 = "\033[29~"
string F17 = "\033[31~"
string F18 = "\033[32~"
string F19 = "\033[33~"
string F20 = "\033[34~"
# string F21 ??
# string F22 ??
# string F23 ??
# string F24 ??
compose '^' 'c' to Control_c
compose '^' 'd' to Control_d
compose '^' 'z' to Control_z
compose '^' '\012' to nul
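The string F1..F20 bindings above are the escape sequences a function key injects into the tty; at run time they live in the driver's func_table and are reachable from user space through the console KDGKBSENT/KDSKBSENT ioctls (handled by do_kdgkb_ioctl() in keyboard.c later in this commit). A minimal, hypothetical user-space sketch; the device path is an assumption, and F1 is assumed to occupy function slot 0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kd.h>

int main(void)
{
        struct kbsentry kbs;
        int fd = open("/dev/3270/tty1", O_RDONLY); /* device path is an assumption */

        if (fd < 0)
                return 1;
        kbs.kb_func = 0; /* function slot 0 is assumed to hold F1 */
        if (ioctl(fd, KDGKBSENT, &kbs) == 0)
                printf("F1 sends: %s\n", (char *) kbs.kb_string); /* e.g. "\033[[A" */
        close(fd);
        return 0;
}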

View file

@ -0,0 +1,237 @@
/*
* DIAGNOSE X'2C4' instruction based HMC FTP services, usable on z/VM
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <asm/ctl_reg.h>
#include "hmcdrv_ftp.h"
#include "diag_ftp.h"
/* DIAGNOSE X'2C4' return codes in Ry */
#define DIAG_FTP_RET_OK 0 /* HMC FTP started successfully */
#define DIAG_FTP_RET_EBUSY 4 /* HMC FTP service currently busy */
#define DIAG_FTP_RET_EIO 8 /* HMC FTP service I/O error */
/* and an artificial extension */
#define DIAG_FTP_RET_EPERM 2 /* HMC FTP service privilege error */
/* FTP service status codes (after INTR at guest real location 133) */
#define DIAG_FTP_STAT_OK 0U /* request completed successfully */
#define DIAG_FTP_STAT_PGCC 4U /* program check condition */
#define DIAG_FTP_STAT_PGIOE 8U /* paging I/O error */
#define DIAG_FTP_STAT_TIMEOUT 12U /* timeout */
#define DIAG_FTP_STAT_EBASE 16U /* base of error codes from SCLP */
#define DIAG_FTP_STAT_LDFAIL (DIAG_FTP_STAT_EBASE + 1U) /* failed */
#define DIAG_FTP_STAT_LDNPERM (DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
#define DIAG_FTP_STAT_LDRUNS (DIAG_FTP_STAT_EBASE + 3U) /* runs */
#define DIAG_FTP_STAT_LDNRUNS (DIAG_FTP_STAT_EBASE + 4U) /* not runs */
/**
* struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
* @bufaddr: real buffer address (at 4k boundary)
* @buflen: length of buffer
* @offset: dir/file offset
* @intparm: interruption parameter (unused)
* @transferred: bytes transferred
* @fsize: file size, filled on GET
* @failaddr: failing address
* @spare: padding
* @fident: file name - ASCII
*/
struct diag_ftp_ldfpl {
u64 bufaddr;
u64 buflen;
u64 offset;
u64 intparm;
u64 transferred;
u64 fsize;
u64 failaddr;
u64 spare;
u8 fident[HMCDRV_FTP_FIDENT_MAX];
} __packed;
static DECLARE_COMPLETION(diag_ftp_rx_complete);
static int diag_ftp_subcode;
/**
* diag_ftp_handler() - FTP services IRQ handler
* @extirq: external interrupt (sub-) code
* @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
* @param64: unused (for 64-bit interrupt parameters)
*/
static void diag_ftp_handler(struct ext_code extirq,
unsigned int param32,
unsigned long param64)
{
if ((extirq.subcode >> 8) != 8)
return; /* not an FTP services sub-code */
inc_irq_stat(IRQEXT_FTP);
diag_ftp_subcode = extirq.subcode & 0xffU;
complete(&diag_ftp_rx_complete);
}
/**
* diag_ftp_2c4() - DIAGNOSE X'2C4' service call
* @fpl: pointer to prepared LDFPL
* @cmd: FTP command to be executed
*
* Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
* @fpl and FTP function code @cmd. In case of an error the function does
nothing and returns a (negative) error code.
*
* Notes:
* 1. This function only initiates a transfer, so the caller must wait
* for completion (asynchronous execution).
* 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
* 3. fpl->bufaddr must be a real address, 4k aligned
*/
static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
enum hmcdrv_ftp_cmdid cmd)
{
int rc;
asm volatile(
" diag %[addr],%[cmd],0x2c4\n"
"0: j 2f\n"
"1: la %[rc],%[err]\n"
"2:\n"
EX_TABLE(0b, 1b)
: [rc] "=d" (rc), "+m" (*fpl)
: [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
[err] "i" (DIAG_FTP_RET_EPERM)
: "cc");
switch (rc) {
case DIAG_FTP_RET_OK:
return 0;
case DIAG_FTP_RET_EBUSY:
return -EBUSY;
case DIAG_FTP_RET_EPERM:
return -EPERM;
case DIAG_FTP_RET_EIO:
default:
return -EIO;
}
}
/**
* diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting an HMC
* @ftp: pointer to FTP command specification
* @fsize: return of file size (or NULL if undesirable)
*
* Attention: Notice that this function is not reentrant - so the caller
* must ensure locking.
*
* Return: number of bytes read/written or a (negative) error code
*/
ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
struct diag_ftp_ldfpl *ldfpl;
ssize_t len;
#ifdef DEBUG
unsigned long start_jiffies;
pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
ftp->fname, ftp->len);
start_jiffies = jiffies;
#endif
init_completion(&diag_ftp_rx_complete);
ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ldfpl) {
len = -ENOMEM;
goto out;
}
len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
if (len >= HMCDRV_FTP_FIDENT_MAX) {
len = -EINVAL;
goto out_free;
}
ldfpl->transferred = 0;
ldfpl->fsize = 0;
ldfpl->offset = ftp->ofs;
ldfpl->buflen = ftp->len;
ldfpl->bufaddr = virt_to_phys(ftp->buf);
len = diag_ftp_2c4(ldfpl, ftp->id);
if (len)
goto out_free;
/*
* There is no way to cancel the running diag X'2C4', so the code
* needs to wait unconditionally until the transfer is complete.
*/
wait_for_completion(&diag_ftp_rx_complete);
#ifdef DEBUG
pr_debug("completed DIAG X'2C4' after %lu ms\n",
(jiffies - start_jiffies) * 1000 / HZ);
pr_debug("status of DIAG X'2C4' is %u, with %lld/%lld bytes\n",
diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
#endif
switch (diag_ftp_subcode) {
case DIAG_FTP_STAT_OK: /* success */
len = ldfpl->transferred;
if (fsize)
*fsize = ldfpl->fsize;
break;
case DIAG_FTP_STAT_LDNPERM:
len = -EPERM;
break;
case DIAG_FTP_STAT_LDRUNS:
len = -EBUSY;
break;
case DIAG_FTP_STAT_LDFAIL:
len = -ENOENT; /* no such file or media */
break;
default:
len = -EIO;
break;
}
out_free:
free_page((unsigned long) ldfpl);
out:
return len;
}
/**
* diag_ftp_startup() - startup of FTP services, when running on z/VM
*
* Return: 0 on success, else a (negative) error code
*/
int diag_ftp_startup(void)
{
int rc;
rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
if (rc)
return rc;
ctl_set_bit(0, 63 - 22);
return 0;
}
/**
* diag_ftp_shutdown() - shutdown of FTP services, when running on z/VM
*/
void diag_ftp_shutdown(void)
{
ctl_clear_bit(0, 63 - 22);
unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
}
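For orientation, this is how the pieces above fit together from a caller's perspective. A hypothetical kernel-side sketch, not part of this commit: in the real module the wiring is done by hmcdrv_ftp.c through its hmcdrv_ftp_ops table, and the file name below is purely illustrative.

#include <linux/gfp.h>
#include <linux/mm.h>
#include "hmcdrv_ftp.h"
#include "diag_ftp.h"

/* Read the first 4 KiB of a file from the HMC drive via DIAG X'2C4'.
 * The caller must serialize calls (diag_ftp_cmd() is not reentrant). */
static ssize_t example_read_first_page(void)
{
        size_t fsize = 0;
        ssize_t len;
        struct hmcdrv_ftp_cmdspec ftp = {
                .id = HMCDRV_FTP_GET,
                .fname = "readme.txt", /* illustrative file name */
                .ofs = 0,
                .len = PAGE_SIZE,
        };

        /* the buffer must be a real-addressable, 4k aligned page */
        ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!ftp.buf)
                return -ENOMEM;
        len = diag_ftp_cmd(&ftp, &fsize); /* bytes read, or -E... */
        free_page((unsigned long) ftp.buf);
        return len;
}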

View file

@ -0,0 +1,21 @@
/*
* DIAGNOSE X'2C4' instruction based SE/HMC FTP Services, usable on z/VM
*
* Notice that none of the functions exported here are reentrant, so
* usage must be exclusive and ensured by the caller (e.g. by using
* a mutex).
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __DIAG_FTP_H__
#define __DIAG_FTP_H__
#include "hmcdrv_ftp.h"
int diag_ftp_startup(void);
void diag_ftp_shutdown(void);
ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
#endif /* __DIAG_FTP_H__ */

575
drivers/s390/char/fs3270.c Normal file
View file

@ -0,0 +1,575 @@
/*
* IBM/3270 Driver - fullscreen driver.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include "raw3270.h"
#include "ctrlchar.h"
static struct raw3270_fn fs3270_fn;
struct fs3270 {
struct raw3270_view view;
struct pid *fs_pid; /* Pid of controlling program. */
int read_command; /* ccw command to use for reads. */
int write_command; /* ccw command to use for writes. */
int attention; /* Got attention. */
int active; /* Fullscreen view is active. */
struct raw3270_request *init; /* single init request. */
wait_queue_head_t wait; /* Init & attention wait queue. */
struct idal_buffer *rdbuf; /* full-screen-deactivate buffer */
size_t rdbuf_size; /* size of data returned by RDBUF */
};
static DEFINE_MUTEX(fs3270_mutex);
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
wake_up((wait_queue_head_t *) data);
}
static inline int
fs3270_working(struct fs3270 *fp)
{
/*
* The fullscreen view is in working order if the view
* has been activated AND the initial request is finished.
*/
return fp->active && raw3270_request_final(fp->init);
}
static int
fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
struct fs3270 *fp;
int rc;
fp = (struct fs3270 *) view;
rq->callback = fs3270_wake_up;
rq->callback_data = &fp->wait;
do {
if (!fs3270_working(fp)) {
/* Fullscreen view isn't ready yet. */
rc = wait_event_interruptible(fp->wait,
fs3270_working(fp));
if (rc != 0)
break;
}
rc = raw3270_start(view, rq);
if (rc == 0) {
/* Started successfully. Now wait for completion. */
wait_event(fp->wait, raw3270_request_final(rq));
}
} while (rc == -EACCES);
return rc;
}
/*
* Switch to the fullscreen view.
*/
static void
fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *) rq->view;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static void
fs3270_restore_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *) rq->view;
if (rq->rc != 0 || rq->rescnt != 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
fp->rdbuf_size = 0;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static int
fs3270_activate(struct raw3270_view *view)
{
struct fs3270 *fp;
char *cp;
int rc;
fp = (struct fs3270 *) view;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return 0;
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
fp->init->callback = fs3270_reset_callback;
} else {
/* Restore fullscreen buffer saved by fs3270_deactivate. */
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->ccw.count = fp->rdbuf_size;
cp = fp->rdbuf->data[0];
cp[0] = TW_KR;
cp[1] = TO_SBA;
cp[2] = cp[6];
cp[3] = cp[7];
cp[4] = TO_IC;
cp[5] = TO_SBA;
cp[6] = 0x40;
cp[7] = 0x40;
fp->init->rescnt = 0;
fp->init->callback = fs3270_restore_callback;
}
rc = fp->init->rc = raw3270_start_locked(view, fp->init);
if (rc)
fp->init->callback(fp->init, NULL);
else
fp->active = 1;
return rc;
}
/*
* Shutdown fullscreen view.
*/
static void
fs3270_save_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
fp = (struct fs3270 *) rq->view;
/* Correct idal buffer element 0 address. */
fp->rdbuf->data[0] -= 5;
fp->rdbuf->size += 5;
/*
* If the rdbuf command failed or the idal buffer is
* too small for the amount of data returned by the
* rdbuf command, then we have no choice but to send
* a SIGHUP to the application.
*/
if (rq->rc != 0 || rq->rescnt == 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
fp->rdbuf_size = 0;
} else
fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
static void
fs3270_deactivate(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *) view;
fp->active = 0;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return;
/* Prepare read-buffer request. */
raw3270_request_set_cmd(fp->init, TC_RDBUF);
/*
* Hackish: skip first 5 bytes of the idal buffer to make
* room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
* in the activation command.
*/
fp->rdbuf->data[0] += 5;
fp->rdbuf->size -= 5;
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
fp->init->callback = fs3270_save_callback;
/* Start I/O to read in the 3270 buffer. */
fp->init->rc = raw3270_start_locked(view, fp->init);
if (fp->init->rc)
fp->init->callback(fp->init, NULL);
}
static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fp->attention = 1;
wake_up(&fp->wait);
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/*
* Process reads from fullscreen 3270.
*/
static ssize_t
fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
ssize_t rc;
if (count == 0 || count > 65535)
return -EINVAL;
fp = filp->private_data;
if (!fp)
return -ENODEV;
ib = idal_buffer_alloc(count, 0);
if (IS_ERR(ib))
return -ENOMEM;
rq = raw3270_request_alloc(0);
if (!IS_ERR(rq)) {
if (fp->read_command == 0 && fp->write_command != 0)
fp->read_command = 6;
raw3270_request_set_cmd(rq, fp->read_command ? : 2);
raw3270_request_set_idal(rq, ib);
rc = wait_event_interruptible(fp->wait, fp->attention);
fp->attention = 0;
if (rc == 0) {
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0) {
count -= rq->rescnt;
if (idal_buffer_to_user(ib, data, count) != 0)
rc = -EFAULT;
else
rc = count;
}
}
raw3270_request_free(rq);
} else
rc = PTR_ERR(rq);
idal_buffer_free(ib);
return rc;
}
/*
* Process writes to fullscreen 3270.
*/
static ssize_t
fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
int write_command;
ssize_t rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
ib = idal_buffer_alloc(count, 0);
if (IS_ERR(ib))
return -ENOMEM;
rq = raw3270_request_alloc(0);
if (!IS_ERR(rq)) {
if (idal_buffer_from_user(ib, data, count) == 0) {
write_command = fp->write_command ? : 1;
if (write_command == 5)
write_command = 13;
raw3270_request_set_cmd(rq, write_command);
raw3270_request_set_idal(rq, ib);
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0)
rc = count - rq->rescnt;
} else
rc = -EFAULT;
raw3270_request_free(rq);
} else
rc = PTR_ERR(rq);
idal_buffer_free(ib);
return rc;
}
/*
* process ioctl commands for the tube driver
*/
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
char __user *argp;
struct fs3270 *fp;
struct raw3270_iocb iocb;
int rc;
fp = filp->private_data;
if (!fp)
return -ENODEV;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (char __user *)arg;
rc = 0;
mutex_lock(&fs3270_mutex);
switch (cmd) {
case TUBICMD:
fp->read_command = arg;
break;
case TUBOCMD:
fp->write_command = arg;
break;
case TUBGETI:
rc = put_user(fp->read_command, argp);
break;
case TUBGETO:
rc = put_user(fp->write_command, argp);
break;
case TUBGETMOD:
iocb.model = fp->view.model;
iocb.line_cnt = fp->view.rows;
iocb.col_cnt = fp->view.cols;
iocb.pf_cnt = 24;
iocb.re_cnt = 20;
iocb.map = 0;
if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
rc = -EFAULT;
break;
}
mutex_unlock(&fs3270_mutex);
return rc;
}
/*
* Allocate fs3270 structure.
*/
static struct fs3270 *
fs3270_alloc_view(void)
{
struct fs3270 *fp;
fp = kzalloc(sizeof(struct fs3270), GFP_KERNEL);
if (!fp)
return ERR_PTR(-ENOMEM);
fp->init = raw3270_request_alloc(0);
if (IS_ERR(fp->init)) {
kfree(fp);
return ERR_PTR(-ENOMEM);
}
return fp;
}
/*
* Free fs3270 structure.
*/
static void
fs3270_free_view(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *) view;
if (fp->rdbuf)
idal_buffer_free(fp->rdbuf);
raw3270_request_free(((struct fs3270 *) view)->init);
kfree(view);
}
/*
* Unlink fs3270 data structure from filp.
*/
static void
fs3270_release(struct raw3270_view *view)
{
struct fs3270 *fp;
fp = (struct fs3270 *) view;
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
/* View to a 3270 device. Can be console, tty or fullscreen. */
static struct raw3270_fn fs3270_fn = {
.activate = fs3270_activate,
.deactivate = fs3270_deactivate,
.intv = (void *) fs3270_irq,
.release = fs3270_release,
.free = fs3270_free_view
};
/*
* This routine is called whenever a 3270 fullscreen device is opened.
*/
static int
fs3270_open(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
struct idal_buffer *ib;
int minor, rc = 0;
if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
return -ENODEV;
minor = iminor(file_inode(filp));
/* Check for minor 0 multiplexer. */
if (minor == 0) {
struct tty_struct *tty = get_current_tty();
if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
tty_kref_put(tty);
return -ENODEV;
}
minor = tty->index;
tty_kref_put(tty);
}
mutex_lock(&fs3270_mutex);
/* Check if some other program is already using fullscreen mode. */
fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
if (!IS_ERR(fp)) {
raw3270_put_view(&fp->view);
rc = -EBUSY;
goto out;
}
/* Allocate fullscreen view structure. */
fp = fs3270_alloc_view();
if (IS_ERR(fp)) {
rc = PTR_ERR(fp);
goto out;
}
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
}
/* Allocate idal-buffer. */
ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
if (IS_ERR(ib)) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
rc = PTR_ERR(ib);
goto out;
}
fp->rdbuf = ib;
rc = raw3270_activate_view(&fp->view);
if (rc) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
goto out;
}
nonseekable_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
return rc;
}
/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
static int
fs3270_close(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
fp = filp->private_data;
filp->private_data = NULL;
if (fp) {
put_pid(fp->fs_pid);
fp->fs_pid = NULL;
raw3270_reset(&fp->view);
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
}
return 0;
}
static const struct file_operations fs3270_fops = {
.owner = THIS_MODULE, /* owner */
.read = fs3270_read, /* read */
.write = fs3270_write, /* write */
.unlocked_ioctl = fs3270_ioctl, /* ioctl */
.compat_ioctl = fs3270_ioctl, /* ioctl */
.open = fs3270_open, /* open */
.release = fs3270_close, /* release */
.llseek = no_llseek,
};
static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
static void fs3270_destroy_cb(int minor)
{
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
static struct raw3270_notifier fs3270_notifier =
{
.create = fs3270_create_cb,
.destroy = fs3270_destroy_cb,
};
/*
* 3270 fullscreen driver initialization.
*/
static int __init
fs3270_init(void)
{
int rc;
rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
NULL, "3270/tub");
raw3270_register_notifier(&fs3270_notifier);
return 0;
}
static void __exit
fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
module_init(fs3270_init);
module_exit(fs3270_exit);
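From user space the fullscreen view is driven through the character device: open() activates the view, write() sends a 3270 output data stream, and read() blocks until an attention interrupt and then returns the inbound data stream. A hypothetical sketch; the device path and the visibility of TUBGETMOD and struct raw3270_iocb to user-space code (they live in the driver header raw3270.h) are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "raw3270.h" /* assumed source of TUBGETMOD and struct raw3270_iocb */

int main(void)
{
        struct raw3270_iocb iocb;
        int fd = open("/dev/3270/tub", O_RDWR); /* minor 0: tty multiplexer */

        if (fd < 0)
                return 1;
        /* query model/geometry as filled in by fs3270_ioctl(TUBGETMOD) */
        if (ioctl(fd, TUBGETMOD, &iocb) == 0)
                printf("model %d, %d rows x %d cols\n",
                       iocb.model, iocb.line_cnt, iocb.col_cnt);
        close(fd);
        return 0;
}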

View file

@ -0,0 +1,252 @@
/*
* SE/HMC Drive (Read) Cache Functions
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#define HMCDRV_CACHE_TIMEOUT 30 /* aging timeout in seconds */
/**
* struct hmcdrv_cache_entry - file cache (only used on read/dir)
* @id: FTP command ID
* @content: kernel-space buffer, 4k aligned
* @len: size of @content cache (0 if caching disabled)
* @ofs: start of content within file (-1 if no cached content)
* @fname: file name
* @fsize: file size
* @timeout: cache timeout in jiffies
*
* Notice that the first three members (id, fname, fsize) are cached on all
* read/dir requests. But content is cached only under some preconditions.
* Uncached content is signalled by a negative value of @ofs.
*/
struct hmcdrv_cache_entry {
enum hmcdrv_ftp_cmdid id;
char fname[HMCDRV_FTP_FIDENT_MAX];
size_t fsize;
loff_t ofs;
unsigned long timeout;
void *content;
size_t len;
};
static int hmcdrv_cache_order; /* cache allocated page order */
static struct hmcdrv_cache_entry hmcdrv_cache_file = {
.fsize = SIZE_MAX,
.ofs = -1,
.len = 0,
.fname = {'\0'}
};
/**
* hmcdrv_cache_get() - looks for file data/content in read cache
* @ftp: pointer to FTP command specification
*
* Return: number of bytes read from cache or a negative number if nothing
* is in the content cache (for the file/cmd specified in @ftp)
*/
static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
{
loff_t pos; /* position in cache (signed) */
ssize_t len;
if ((ftp->id != hmcdrv_cache_file.id) ||
strcmp(hmcdrv_cache_file.fname, ftp->fname))
return -1;
if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
return 0;
if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
time_after(jiffies, hmcdrv_cache_file.timeout))
return -1;
/* there seems to be cached content - calculate the maximum number
* of bytes that can be returned (regarding file size and offset)
*/
len = hmcdrv_cache_file.fsize - ftp->ofs;
if (len > ftp->len)
len = ftp->len;
/* check if the requested chunk falls into our cache (which starts
* at offset 'hmcdrv_cache_file.ofs' in the file of interest)
*/
pos = ftp->ofs - hmcdrv_cache_file.ofs;
if ((pos >= 0) &&
((pos + len) <= hmcdrv_cache_file.len)) {
memcpy(ftp->buf,
hmcdrv_cache_file.content + pos,
len);
pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
hmcdrv_cache_file.fname, len,
hmcdrv_cache_file.fsize);
return len;
}
return -1;
}
/**
* hmcdrv_cache_do() - do an HMC drive CD/DVD transfer with cache update
* @ftp: pointer to FTP command specification
* @func: FTP transfer function to be used
*
* Return: number of bytes read/written or a (negative) error code
*/
static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
hmcdrv_cache_ftpfunc func)
{
ssize_t len;
/* only cache content if the read/dir cache really exists
* (hmcdrv_cache_file.len > 0), is large enough to handle the
* request (hmcdrv_cache_file.len >= ftp->len) and there is a need
* to do so (ftp->len > 0)
*/
if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {
/* because the cache is not located at ftp->buf, we have to
* assemble a new HMC drive FTP cmd specification (pointing
* to our cache, and using the increased size)
*/
struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
cftp.buf = hmcdrv_cache_file.content; /* and update */
cftp.len = hmcdrv_cache_file.len; /* buffer data */
len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */
if (len > 0) {
pr_debug("caching %zd bytes content for '%s'\n",
len, ftp->fname);
if (len > ftp->len)
len = ftp->len;
hmcdrv_cache_file.ofs = ftp->ofs;
hmcdrv_cache_file.timeout = jiffies +
HMCDRV_CACHE_TIMEOUT * HZ;
memcpy(ftp->buf, hmcdrv_cache_file.content, len);
}
} else {
len = func(ftp, &hmcdrv_cache_file.fsize);
hmcdrv_cache_file.ofs = -1; /* invalidate content */
}
if (len > 0) {
/* cache some file info (FTP command, file name and file
* size) unconditionally
*/
strlcpy(hmcdrv_cache_file.fname, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
hmcdrv_cache_file.id = ftp->id;
pr_debug("caching cmd %d, file size %zu for '%s'\n",
ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
}
return len;
}
/**
* hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
* @ftp: pointer to FTP command specification
* @func: FTP transfer function to be used
*
* Attention: Notice that this function is not reentrant - so the caller
* must ensure exclusive execution.
*
* Return: number of bytes read/written or a (negative) error code
*/
ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
hmcdrv_cache_ftpfunc func)
{
ssize_t len;
if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
(ftp->id == HMCDRV_FTP_NLIST) ||
(ftp->id == HMCDRV_FTP_GET)) {
len = hmcdrv_cache_get(ftp);
if (len >= 0) /* got it from cache ? */
return len; /* yes */
len = hmcdrv_cache_do(ftp, func);
if (len >= 0)
return len;
} else {
len = func(ftp, NULL); /* simply do original command */
}
/* invalidate the (read) cache in case there was a write operation
* or an error on read/dir
*/
hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
hmcdrv_cache_file.fsize = LLONG_MAX;
hmcdrv_cache_file.ofs = -1;
return len;
}
/**
* hmcdrv_cache_startup() - startup of HMC drive cache
* @cachesize: cache size
*
* Return: 0 on success, else a (negative) error code
*/
int hmcdrv_cache_startup(size_t cachesize)
{
if (cachesize > 0) { /* perform caching ? */
hmcdrv_cache_order = get_order(cachesize);
hmcdrv_cache_file.content =
(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
hmcdrv_cache_order);
if (!hmcdrv_cache_file.content) {
pr_err("Allocating the requested cache size of %zu bytes failed\n",
cachesize);
return -ENOMEM;
}
pr_debug("content cache enabled, size is %zu bytes\n",
cachesize);
}
hmcdrv_cache_file.len = cachesize;
return 0;
}
/**
* hmcdrv_cache_shutdown() - shutdown of HMC drive cache
*/
void hmcdrv_cache_shutdown(void)
{
if (hmcdrv_cache_file.content) {
free_pages((unsigned long) hmcdrv_cache_file.content,
hmcdrv_cache_order);
hmcdrv_cache_file.content = NULL;
}
hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
hmcdrv_cache_file.fsize = LLONG_MAX;
hmcdrv_cache_file.ofs = -1;
hmcdrv_cache_file.len = 0; /* no cache */
}
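The hit test in hmcdrv_cache_get() boils down to interval arithmetic: a request at file offset ofs with length len is served from the cache window starting at cache.ofs if pos = ofs - cache.ofs is non-negative and pos + len still fits into cache.len. A standalone, hypothetical illustration (the real code additionally clamps len against the cached file size):

#include <stdbool.h>
#include <stddef.h>

struct cache_win { /* simplified stand-in for hmcdrv_cache_entry */
        long long ofs; /* window start in the file, -1 = no content */
        size_t len;    /* window length */
};

static bool cache_hit(const struct cache_win *c, long long ofs, size_t len)
{
        long long pos = ofs - c->ofs; /* request position inside the window */

        return c->ofs >= 0 && pos >= 0 && (size_t) pos + len <= c->len;
}

/* With a window of 16 KiB at file offset 8192: a 4 KiB read at offset
 * 12288 hits (pos = 4096, 4096 + 4096 <= 16384), while a read at 4096
 * misses because pos would be negative. */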

View file

@ -0,0 +1,24 @@
/*
* SE/HMC Drive (Read) Cache Functions
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __HMCDRV_CACHE_H__
#define __HMCDRV_CACHE_H__
#include <linux/mmzone.h>
#include "hmcdrv_ftp.h"
#define HMCDRV_CACHE_SIZE_DFLT (MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)
typedef ssize_t (*hmcdrv_cache_ftpfunc)(const struct hmcdrv_ftp_cmdspec *ftp,
size_t *fsize);
ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
hmcdrv_cache_ftpfunc func);
int hmcdrv_cache_startup(size_t cachesize);
void hmcdrv_cache_shutdown(void);
#endif /* __HMCDRV_CACHE_H__ */

View file

@ -0,0 +1,370 @@
/*
* HMC Drive CD/DVD Device
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
* This file provides a Linux "misc" character device for access to an
* assigned HMC drive CD/DVD-ROM. It works as follows: First create the
* device by calling hmcdrv_dev_init(). After open(), an lseek(fd, 0,
* SEEK_END) indicates that a new FTP command follows (not needed for the
* first command after open()). Then write() the FTP command ASCII string
* to it, e.g. "dir /" or "nls <directory>" or "get <filename>". Finally,
* read() the response.
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include "hmcdrv_dev.h"
#include "hmcdrv_ftp.h"
/* If the following macro is defined, then the HMC device creates its own
* separate device class (and dynamically assigns a major number). If not
* defined, then the HMC device is registered with the "misc" device class.
*
#define HMCDRV_DEV_CLASS "hmcftp"
*/
#define HMCDRV_DEV_NAME "hmcdrv"
#define HMCDRV_DEV_BUSY_DELAY 500 /* delay between -EBUSY trials in ms */
#define HMCDRV_DEV_BUSY_RETRIES 3 /* number of retries on -EBUSY */
struct hmcdrv_dev_node {
#ifdef HMCDRV_DEV_CLASS
struct cdev dev; /* character device structure */
umode_t mode; /* mode of device node (unused, zero) */
#else
struct miscdevice dev; /* "misc" device structure */
#endif
};
static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
size_t len, loff_t *pos);
static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos);
static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len);
/*
* device operations
*/
static const struct file_operations hmcdrv_dev_fops = {
.open = hmcdrv_dev_open,
.llseek = hmcdrv_dev_seek,
.release = hmcdrv_dev_release,
.read = hmcdrv_dev_read,
.write = hmcdrv_dev_write,
};
static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */
#ifdef HMCDRV_DEV_CLASS
static struct class *hmcdrv_dev_class; /* device class pointer */
static dev_t hmcdrv_dev_no; /* device number (major/minor) */
/**
* hmcdrv_dev_name() - provides a naming hint for a device node in /dev
* @dev: device for which the naming/mode hint is requested
* @mode: file mode for device node created in /dev
*
* See: devtmpfs.c, function devtmpfs_create_node()
*
* Return: recommended device file name in /dev
*/
static char *hmcdrv_dev_name(struct device *dev, umode_t *mode)
{
char *nodename = NULL;
const char *devname = dev_name(dev); /* kernel device name */
if (devname)
nodename = kasprintf(GFP_KERNEL, "%s", devname);
/* on device destroy (rmmod) the mode pointer may be NULL
*/
if (mode)
*mode = hmcdrv_dev.mode;
return nodename;
}
#endif /* HMCDRV_DEV_CLASS */
/*
* open()
*/
static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
{
int rc;
/* check for non-blocking access, which is really unsupported
*/
if (fp->f_flags & O_NONBLOCK)
return -EINVAL;
/* Because it makes no sense to open this device read-only (an
* FTP command could then not be issued), we respond with an error.
*/
if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
return -EINVAL;
/* prevent unloading this module as long as anyone holds the
* device file open - so increment the reference count here
*/
if (!try_module_get(THIS_MODULE))
return -ENODEV;
fp->private_data = NULL; /* no command yet */
rc = hmcdrv_ftp_startup();
if (rc)
module_put(THIS_MODULE);
pr_debug("open file '/dev/%s' with return code %d\n",
fp->f_dentry->d_name.name, rc);
return rc;
}
/*
* release()
*/
static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
{
pr_debug("closing file '/dev/%s'\n", fp->f_dentry->d_name.name);
kfree(fp->private_data);
fp->private_data = NULL;
hmcdrv_ftp_shutdown();
module_put(THIS_MODULE);
return 0;
}
/*
* lseek()
*/
static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
{
switch (whence) {
case SEEK_CUR: /* relative to current file position */
pos += fp->f_pos; /* new position stored in 'pos' */
break;
case SEEK_SET: /* absolute (relative to beginning of file) */
break; /* SEEK_SET */
/* We use SEEK_END as a special indicator for a SEEK_SET
* (set absolute position), combined with a FTP command
* clear.
*/
case SEEK_END:
if (fp->private_data) {
kfree(fp->private_data);
fp->private_data = NULL;
}
break; /* SEEK_END */
default: /* SEEK_DATA, SEEK_HOLE: unsupported */
return -EINVAL;
}
if (pos < 0)
return -EINVAL;
if (fp->f_pos != pos)
++fp->f_version;
fp->f_pos = pos;
return pos;
}
/*
* transfer (helper function)
*/
static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len)
{
ssize_t retlen;
unsigned trials = HMCDRV_DEV_BUSY_RETRIES;
do {
retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);
if (retlen != -EBUSY)
break;
msleep(HMCDRV_DEV_BUSY_DELAY);
} while (--trials > 0);
return retlen;
}
/*
* read()
*/
static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
(fp->private_data == NULL)) { /* no FTP cmd defined ? */
return -EBADF;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
*pos, ubuf, len);
pr_debug("read from file '/dev/%s' at %lld returns %zd/%zu\n",
fp->f_dentry->d_name.name, (long long) *pos, retlen, len);
if (retlen > 0)
*pos += retlen;
return retlen;
}
/*
* write()
*/
static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
pr_debug("writing file '/dev/%s' at pos. %lld with length %zd\n",
fp->f_dentry->d_name.name, (long long) *pos, len);
if (!fp->private_data) { /* first expect a cmd write */
fp->private_data = kmalloc(len + 1, GFP_KERNEL);
if (!fp->private_data)
return -ENOMEM;
if (!copy_from_user(fp->private_data, ubuf, len)) {
((char *)fp->private_data)[len] = '\0';
return len;
}
kfree(fp->private_data);
fp->private_data = NULL;
return -EFAULT;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
*pos, (char __user *) ubuf, len);
if (retlen > 0)
*pos += retlen;
pr_debug("write to file '/dev/%s' returned %zd\n",
fp->f_dentry->d_name.name, retlen);
return retlen;
}
/**
* hmcdrv_dev_init() - creates an HMC drive CD/DVD device
*
* This function creates an HMC drive CD/DVD kernel device and an associated
* device under /dev, using a dynamically allocated major number.
*
* Return: 0 on success, else an error code.
*/
int hmcdrv_dev_init(void)
{
int rc;
#ifdef HMCDRV_DEV_CLASS
struct device *dev;
rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);
if (rc)
goto out_err;
cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
hmcdrv_dev.dev.owner = THIS_MODULE;
rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);
if (rc)
goto out_unreg;
/* At this point the character device exists in the kernel (see
* /proc/devices), but not under /dev nor /sys/devices/virtual. So
* we have to create an associated class (see /sys/class).
*/
hmcdrv_dev_class = class_create(THIS_MODULE, HMCDRV_DEV_CLASS);
if (IS_ERR(hmcdrv_dev_class)) {
rc = PTR_ERR(hmcdrv_dev_class);
goto out_devdel;
}
/* Finally a device node in /dev has to be established (as 'mknod'
* does from the command line). Notice that assignment of a device
* node name/mode function is optional (only for mode != 0600).
*/
hmcdrv_dev.mode = 0; /* "unset" */
hmcdrv_dev_class->devnode = hmcdrv_dev_name;
dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
"%s", HMCDRV_DEV_NAME);
if (!IS_ERR(dev))
return 0;
rc = PTR_ERR(dev);
class_destroy(hmcdrv_dev_class);
hmcdrv_dev_class = NULL;
out_devdel:
cdev_del(&hmcdrv_dev.dev);
out_unreg:
unregister_chrdev_region(hmcdrv_dev_no, 1);
out_err:
#else /* !HMCDRV_DEV_CLASS */
hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
rc = misc_register(&hmcdrv_dev.dev);
#endif /* HMCDRV_DEV_CLASS */
return rc;
}
/**
* hmcdrv_dev_exit() - destroys an HMC drive CD/DVD device
*/
void hmcdrv_dev_exit(void)
{
#ifdef HMCDRV_DEV_CLASS
if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
class_destroy(hmcdrv_dev_class);
}
cdev_del(&hmcdrv_dev.dev);
unregister_chrdev_region(hmcdrv_dev_no, 1);
#else /* !HMCDRV_DEV_CLASS */
misc_deregister(&hmcdrv_dev.dev);
#endif /* HMCDRV_DEV_CLASS */
}
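Putting the protocol from the header comment together, a user-space session against /dev/hmcdrv looks like the following hypothetical sketch (the FTP command string is illustrative and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/dev/hmcdrv", O_RDWR);

        if (fd < 0)
                return 1;
        write(fd, "dir /", strlen("dir /")); /* 1. emit the FTP command */
        while ((n = read(fd, buf, sizeof(buf))) > 0) /* 2. fetch the response */
                fwrite(buf, 1, n, stdout);
        lseek(fd, 0, SEEK_END); /* 3. clear the command before issuing a new one */
        close(fd);
        return 0;
}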

View file

@ -0,0 +1,14 @@
/*
* SE/HMC Drive FTP Device
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __HMCDRV_DEV_H__
#define __HMCDRV_DEV_H__
int hmcdrv_dev_init(void);
void hmcdrv_dev_exit(void);
#endif /* __HMCDRV_DEV_H__ */

View file

@ -0,0 +1,343 @@
/*
* HMC Drive FTP Services
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/crc16.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#include "sclp_ftp.h"
#include "diag_ftp.h"
/**
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
* @transfer: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);
void (*shutdown)(void);
ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
size_t *fsize);
};
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
static struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
/**
* hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
* @cmd: FTP command string (NOT zero-terminated)
* @len: length of FTP command string in @cmd
*/
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
{
/* HMC FTP command descriptor */
struct hmcdrv_ftp_cmd_desc {
const char *str; /* command string */
enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
};
/* Description of all HMC drive FTP commands
*
* Notes:
* 1. Array size should be a prime number.
* 2. Do not change the order of commands in table (because the
* index is determined by CRC % ARRAY_SIZE).
* 3. Original command 'nlist' was renamed, else the CRC would
* collide with 'append' (see point 2).
*/
static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
{.str = "get", /* [0] get (CRC = 0x68eb) */
.cmd = HMCDRV_FTP_GET},
{.str = "dir", /* [1] dir (CRC = 0x6a9e) */
.cmd = HMCDRV_FTP_DIR},
{.str = "delete", /* [2] delete (CRC = 0x53ae) */
.cmd = HMCDRV_FTP_DELETE},
{.str = "nls", /* [3] nls (CRC = 0xf87c) */
.cmd = HMCDRV_FTP_NLIST},
{.str = "put", /* [4] put (CRC = 0xac56) */
.cmd = HMCDRV_FTP_PUT},
{.str = "append", /* [5] append (CRC = 0xf56e) */
.cmd = HMCDRV_FTP_APPEND},
{.str = NULL} /* [6] unused */
};
const struct hmcdrv_ftp_cmd_desc *pdesc;
u16 crc = 0xffffU;
if (len == 0)
return HMCDRV_FTP_NOOP; /* error indicator */
crc = crc16(crc, cmd, len);
pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));
if (!pdesc->str || strncmp(pdesc->str, cmd, len))
return HMCDRV_FTP_NOOP;
pr_debug("FTP command '%s' found, with ID %d\n",
pdesc->str, pdesc->cmd);
return pdesc->cmd;
}
/**
* hmcdrv_ftp_parse() - HMC drive FTP command parser
* @cmd: FTP command string "<cmd> <filename>"
* @ftp: Pointer to FTP command specification buffer (output)
*
* Return: 0 on success, else a (negative) error code
*/
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
{
char *start;
int argc = 0;
ftp->id = HMCDRV_FTP_NOOP;
ftp->fname = NULL;
while (*cmd != '\0') {
while (isspace(*cmd))
++cmd;
if (*cmd == '\0')
break;
start = cmd;
switch (argc) {
case 0: /* 1st argument (FTP command) */
while ((*cmd != '\0') && !isspace(*cmd))
++cmd;
ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
break;
case 1: /* 2nd / last argument (rest of line) */
while ((*cmd != '\0') && !iscntrl(*cmd))
++cmd;
ftp->fname = start;
/* fall through */
default:
*cmd = '\0';
break;
} /* switch */
++argc;
} /* while */
if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
return -EINVAL;
return 0;
}
/**
* hmcdrv_ftp_do() - perform an HMC drive FTP, with data from kernel-space
* @ftp: pointer to FTP command specification
*
* Return: number of bytes read/written or a negative error code
*/
ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
{
ssize_t len;
mutex_lock(&hmcdrv_ftp_mutex);
if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
} else {
len = -ENXIO;
}
mutex_unlock(&hmcdrv_ftp_mutex);
return len;
}
EXPORT_SYMBOL(hmcdrv_ftp_do);
/**
* hmcdrv_ftp_probe() - probe for the HMC drive FTP service
*
* Return: 0 if service is available, else a (negative) error code
*/
int hmcdrv_ftp_probe(void)
{
int rc;
struct hmcdrv_ftp_cmdspec ftp = {
.id = HMCDRV_FTP_NOOP,
.ofs = 0,
.fname = "",
.len = PAGE_SIZE
};
ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ftp.buf)
return -ENOMEM;
rc = hmcdrv_ftp_startup();
if (rc)
goto out_free; /* avoid leaking the probe buffer */
rc = hmcdrv_ftp_do(&ftp);
hmcdrv_ftp_shutdown();
out_free:
free_page((unsigned long) ftp.buf);
switch (rc) {
case -ENOENT: /* no such file/media or currently busy, */
case -EBUSY: /* but service seems to be available */
rc = 0;
break;
default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
if (rc > 0)
rc = 0; /* clear length (success) */
break;
} /* switch */
return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_probe);
/**
* hmcdrv_ftp_cmd() - perform an HMC drive FTP, with data from user-space
*
* @cmd: FTP command string "<cmd> <filename>"
* @offset: file position to read/write
* @buf: user-space buffer for read/written directory/file
* @len: size of @buf (read/dir) or number of bytes to write
*
* This function must not be called before hmcdrv_ftp_startup() has been called.
*
* Return: number of bytes read/written or a negative error code
*/
ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len)
{
int order;
struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);
if (retlen)
return retlen;
order = get_order(ftp.len);
ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
if (!ftp.buf)
return -ENOMEM;
switch (ftp.id) {
case HMCDRV_FTP_DIR:
case HMCDRV_FTP_NLIST:
case HMCDRV_FTP_GET:
retlen = hmcdrv_ftp_do(&ftp);
if ((retlen >= 0) &&
copy_to_user(buf, ftp.buf, retlen))
retlen = -EFAULT;
break;
case HMCDRV_FTP_PUT:
case HMCDRV_FTP_APPEND:
if (!copy_from_user(ftp.buf, buf, ftp.len))
retlen = hmcdrv_ftp_do(&ftp);
else
retlen = -EFAULT;
break;
case HMCDRV_FTP_DELETE:
retlen = hmcdrv_ftp_do(&ftp);
break;
default:
retlen = -EOPNOTSUPP;
break;
}
free_pages((unsigned long) ftp.buf, order);
return retlen;
}
/**
* hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
* dedicated (owner) instance
*
* Return: 0 on success, else a (negative) error code
*/
int hmcdrv_ftp_startup(void)
{
static struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
.startup = diag_ftp_startup,
.shutdown = diag_ftp_shutdown,
.transfer = diag_ftp_cmd
};
static struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
.startup = sclp_ftp_startup,
.shutdown = sclp_ftp_shutdown,
.transfer = sclp_ftp_cmd
};
int rc = 0;
mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
if (hmcdrv_ftp_refcnt == 0) {
if (MACHINE_IS_VM)
hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
else
rc = -EOPNOTSUPP;
if (hmcdrv_ftp_funcs)
rc = hmcdrv_ftp_funcs->startup();
}
if (!rc)
++hmcdrv_ftp_refcnt;
mutex_unlock(&hmcdrv_ftp_mutex);
return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_startup);
/**
* hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
* dedicated (owner) instance
*/
void hmcdrv_ftp_shutdown(void)
{
mutex_lock(&hmcdrv_ftp_mutex);
--hmcdrv_ftp_refcnt;
if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
hmcdrv_ftp_funcs->shutdown();
mutex_unlock(&hmcdrv_ftp_mutex);
}
EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
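The command lookup in hmcdrv_ftp_cmd_getid() is effectively a tiny perfect hash: crc16() over the command word, taken modulo the prime table size 7, is expected to land every supported verb in its own slot. A hypothetical user-space re-check, assuming the same bit-reflected CRC-16 (polynomial 0xA001) as the kernel's crc16() and the CRC values noted in the table comments:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bitwise equivalent of the kernel's table-driven crc16() */
static uint16_t crc16(uint16_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xa001 : 0);
        }
        return crc;
}

int main(void)
{
        const char *cmds[] = { "get", "dir", "delete", "nls", "put", "append" };

        for (size_t i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++) {
                uint16_t crc = crc16(0xffff, (const uint8_t *) cmds[i],
                                     strlen(cmds[i]));
                /* slots 0..5 should come out distinct, slot 6 stays unused */
                printf("%-6s crc=0x%04x slot=%u\n", cmds[i], crc, crc % 7);
        }
        return 0;
}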

View file

@ -0,0 +1,63 @@
/*
* SE/HMC Drive FTP Services
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __HMCDRV_FTP_H__
#define __HMCDRV_FTP_H__
#include <linux/types.h> /* size_t, loff_t */
/*
* HMC drive FTP Service max. length of path (w/ EOS)
*/
#define HMCDRV_FTP_FIDENT_MAX 192
/**
* enum hmcdrv_ftp_cmdid - HMC drive FTP commands
* @HMCDRV_FTP_NOOP: do nothing (only for probing)
* @HMCDRV_FTP_GET: read a file
* @HMCDRV_FTP_PUT: (over-) write a file
* @HMCDRV_FTP_APPEND: append to a file
* @HMCDRV_FTP_DIR: list directory long (ls -l)
* @HMCDRV_FTP_NLIST: list files, no directories (name list)
* @HMCDRV_FTP_DELETE: delete a file
* @HMCDRV_FTP_CANCEL: cancel operation (SCLP/LPAR only)
*/
enum hmcdrv_ftp_cmdid {
HMCDRV_FTP_NOOP = 0,
HMCDRV_FTP_GET = 1,
HMCDRV_FTP_PUT = 2,
HMCDRV_FTP_APPEND = 3,
HMCDRV_FTP_DIR = 4,
HMCDRV_FTP_NLIST = 5,
HMCDRV_FTP_DELETE = 6,
HMCDRV_FTP_CANCEL = 7
};
/**
* struct hmcdrv_ftp_cmdspec - FTP command specification
* @id: FTP command ID
* @ofs: offset in file
* @fname: filename (ASCII), null-terminated
* @buf: kernel-space transfer data buffer, 4k aligned
* @len: (max) number of bytes to transfer from/to @buf
*/
struct hmcdrv_ftp_cmdspec {
enum hmcdrv_ftp_cmdid id;
loff_t ofs;
const char *fname;
void __kernel *buf;
size_t len;
};
int hmcdrv_ftp_startup(void);
void hmcdrv_ftp_shutdown(void);
int hmcdrv_ftp_probe(void);
ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp);
ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
char __user *buf, size_t len);
#endif /* __HMCDRV_FTP_H__ */

View file

@ -0,0 +1,64 @@
/*
* HMC Drive DVD Module
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/stat.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_dev.h"
#include "hmcdrv_cache.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Copyright 2013 IBM Corporation");
MODULE_DESCRIPTION("HMC drive DVD access");
/*
* module parameter 'cachesize'
*/
static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
/**
* hmcdrv_mod_init() - module init function
*/
static int __init hmcdrv_mod_init(void)
{
int rc = hmcdrv_ftp_probe(); /* perform w/o cache */
if (rc)
return rc;
rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);
if (rc)
return rc;
rc = hmcdrv_dev_init();
if (rc)
hmcdrv_cache_shutdown();
return rc;
}
/**
* hmcdrv_mod_exit() - module exit function
*/
static void __exit hmcdrv_mod_exit(void)
{
hmcdrv_dev_exit();
hmcdrv_cache_shutdown();
}
module_init(hmcdrv_mod_init);
module_exit(hmcdrv_mod_exit);

View file

@ -0,0 +1,559 @@
/*
* ebcdic keycode functions for s390 console drivers
*
* S390 version
* Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/consolemap.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <asm/uaccess.h>
#include "keyboard.h"
/*
* Handler Tables.
*/
#define K_HANDLERS\
k_self, k_fn, k_spec, k_ignore,\
k_dead, k_ignore, k_ignore, k_ignore,\
k_ignore, k_ignore, k_ignore, k_ignore,\
k_ignore, k_ignore, k_ignore, k_ignore
typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };
/* maximum values each key_handler can handle */
static const int kbd_max_vals[] = {
255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
static unsigned char ret_diacr[NR_DEAD] = {
'`', '\'', '^', '~', '"', ','
};
/*
* Alloc/free of kbd_data structures.
*/
struct kbd_data *
kbd_alloc(void)
{
struct kbd_data *kbd;
int i;
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
goto out;
kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
if (!kbd->key_maps)
goto out_kbd;
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
if (key_maps[i]) {
kbd->key_maps[i] = kmemdup(key_maps[i],
sizeof(u_short) * NR_KEYS,
GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
}
}
kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
if (!kbd->func_table)
goto out_maps;
for (i = 0; i < ARRAY_SIZE(func_table); i++) {
if (func_table[i]) {
kbd->func_table[i] = kstrdup(func_table[i],
GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
}
}
kbd->fn_handler =
kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
kbd->accent_table = kmemdup(accent_table,
sizeof(struct kbdiacruc) * MAX_DIACR,
GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
kbd->accent_table_size = accent_table_size;
return kbd;
out_fn_handler:
kfree(kbd->fn_handler);
out_func:
for (i = 0; i < ARRAY_SIZE(func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
out_maps:
for (i = 0; i < ARRAY_SIZE(key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
out_kbd:
kfree(kbd);
out:
return NULL;
}
void
kbd_free(struct kbd_data *kbd)
{
int i;
kfree(kbd->accent_table);
kfree(kbd->fn_handler);
for (i = 0; i < ARRAY_SIZE(func_table); i++)
kfree(kbd->func_table[i]);
kfree(kbd->func_table);
for (i = 0; i < ARRAY_SIZE(key_maps); i++)
kfree(kbd->key_maps[i]);
kfree(kbd->key_maps);
kfree(kbd);
}
/*
* Generate ascii -> ebcdic translation table from kbd_data.
*/
void
kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
{
unsigned short *keymap, keysym;
int i, j, k;
memset(ascebc, 0x40, 256);
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
for (j = 0; j < NR_KEYS; j++) {
k = ((i & 1) << 7) + j;
keysym = keymap[j];
if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
KTYP(keysym) == (KT_LETTER | 0xf0))
ascebc[KVAL(keysym)] = k;
else if (KTYP(keysym) == (KT_DEAD | 0xf0))
ascebc[ret_diacr[KVAL(keysym)]] = k;
}
}
}
#if 0
/*
* Generate ebcdic -> ascii translation table from kbd_data.
*/
void
kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
{
unsigned short *keymap, keysym;
int i, j, k;
memset(ebcasc, ' ', 256);
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
keymap = kbd->key_maps[i];
if (!keymap)
continue;
for (j = 0; j < NR_KEYS; j++) {
keysym = keymap[j];
k = ((i & 1) << 7) + j;
if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
KTYP(keysym) == (KT_LETTER | 0xf0))
ebcasc[k] = KVAL(keysym);
else if (KTYP(keysym) == (KT_DEAD | 0xf0))
ebcasc[k] = ret_diacr[KVAL(keysym)];
}
}
}
#endif
/*
* We have a combining character DIACR here, followed by the character CH.
* If the combination occurs in the table, return the corresponding value.
* Otherwise, if CH is a space or equals DIACR, return DIACR.
* Otherwise, conclude that DIACR was not combining after all,
* queue it and return CH.
*/
static unsigned int
handle_diacr(struct kbd_data *kbd, unsigned int ch)
{
int i, d;
d = kbd->diacr;
kbd->diacr = 0;
for (i = 0; i < kbd->accent_table_size; i++) {
if (kbd->accent_table[i].diacr == d &&
kbd->accent_table[i].base == ch)
return kbd->accent_table[i].result;
}
if (ch == ' ' || ch == d)
return d;
kbd_put_queue(kbd->port, d);
return ch;
}
/*
* Handle dead key.
*/
static void
k_dead(struct kbd_data *kbd, unsigned char value)
{
value = ret_diacr[value];
kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
}
/*
* Normal character handler.
*/
static void
k_self(struct kbd_data *kbd, unsigned char value)
{
if (kbd->diacr)
value = handle_diacr(kbd, value);
kbd_put_queue(kbd->port, value);
}
/*
* Special key handlers
*/
static void
k_ignore(struct kbd_data *kbd, unsigned char value)
{
}
/*
* Function key handler.
*/
static void
k_fn(struct kbd_data *kbd, unsigned char value)
{
if (kbd->func_table[value])
kbd_puts_queue(kbd->port, kbd->func_table[value]);
}
static void
k_spec(struct kbd_data *kbd, unsigned char value)
{
if (value >= NR_FN_HANDLER)
return;
if (kbd->fn_handler[value])
kbd->fn_handler[value](kbd);
}
/*
* Put utf8 character to tty flip buffer.
* UTF-8 is defined for words of up to 31 bits,
* but we need only 16 bits here
*/
static void
to_utf8(struct tty_port *port, ushort c)
{
if (c < 0x80)
/* 0******* */
kbd_put_queue(port, c);
else if (c < 0x800) {
/* 110***** 10****** */
kbd_put_queue(port, 0xc0 | (c >> 6));
kbd_put_queue(port, 0x80 | (c & 0x3f));
} else {
/* 1110**** 10****** 10****** */
kbd_put_queue(port, 0xe0 | (c >> 12));
kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f));
kbd_put_queue(port, 0x80 | (c & 0x3f));
}
}
/*
* Process keycode.
*/
void
kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
{
unsigned short keysym;
unsigned char type, value;
if (!kbd)
return;
if (keycode >= 384)
keysym = kbd->key_maps[5][keycode - 384];
else if (keycode >= 256)
keysym = kbd->key_maps[4][keycode - 256];
else if (keycode >= 128)
keysym = kbd->key_maps[1][keycode - 128];
else
keysym = kbd->key_maps[0][keycode];
type = KTYP(keysym);
if (type >= 0xf0) {
type -= 0xf0;
if (type == KT_LETTER)
type = KT_LATIN;
value = KVAL(keysym);
#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
handle_sysrq(value);
return;
}
if (value == '-') {
kbd->sysrq = K(KT_LATIN, '-');
return;
}
/* Incomplete sysrq sequence. */
(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
kbd->sysrq = 0;
} else if ((type == KT_LATIN && value == '^') ||
(type == KT_DEAD && ret_diacr[value] == '^')) {
kbd->sysrq = K(type, value);
return;
}
#endif
(*k_handler[type])(kbd, value);
} else
to_utf8(kbd->port, keysym);
}
/*
* Ioctl stuff.
*/
static int
do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
int cmd, int perm)
{
struct kbentry tmp;
ushort *key_map, val, ov;
if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
return -EFAULT;
#if NR_KEYS < 256
if (tmp.kb_index >= NR_KEYS)
return -EINVAL;
#endif
#if MAX_NR_KEYMAPS < 256
if (tmp.kb_table >= MAX_NR_KEYMAPS)
return -EINVAL;
#endif
switch (cmd) {
case KDGKBENT:
key_map = kbd->key_maps[tmp.kb_table];
if (key_map) {
val = U(key_map[tmp.kb_index]);
if (KTYP(val) >= KBD_NR_TYPES)
val = K_HOLE;
} else
val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
return put_user(val, &user_kbe->kb_value);
case KDSKBENT:
if (!perm)
return -EPERM;
if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
/* disallocate map */
key_map = kbd->key_maps[tmp.kb_table];
if (key_map) {
kbd->key_maps[tmp.kb_table] = NULL;
kfree(key_map);
}
break;
}
if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
return -EINVAL;
if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
return -EINVAL;
if (!(key_map = kbd->key_maps[tmp.kb_table])) {
int j;
key_map = kmalloc(sizeof(plain_map),
GFP_KERNEL);
if (!key_map)
return -ENOMEM;
kbd->key_maps[tmp.kb_table] = key_map;
for (j = 0; j < NR_KEYS; j++)
key_map[j] = U(K_HOLE);
}
ov = U(key_map[tmp.kb_index]);
if (tmp.kb_value == ov)
break; /* nothing to do */
/*
* Attention Key.
*/
if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
key_map[tmp.kb_index] = U(tmp.kb_value);
break;
}
return 0;
}
static int
do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
int cmd, int perm)
{
unsigned char kb_func;
char *p;
int len;
/* Get u_kbs->kb_func. */
if (get_user(kb_func, &u_kbs->kb_func))
return -EFAULT;
#if MAX_NR_FUNC < 256
if (kb_func >= MAX_NR_FUNC)
return -EINVAL;
#endif
switch (cmd) {
case KDGKBSENT:
p = kbd->func_table[kb_func];
if (p) {
len = strlen(p);
if (len >= sizeof(u_kbs->kb_string))
len = sizeof(u_kbs->kb_string) - 1;
if (copy_to_user(u_kbs->kb_string, p, len))
return -EFAULT;
} else
len = 0;
if (put_user('\0', u_kbs->kb_string + len))
return -EFAULT;
break;
case KDSKBSENT:
if (!perm)
return -EPERM;
len = strnlen_user(u_kbs->kb_string,
sizeof(u_kbs->kb_string) - 1);
if (!len)
return -EFAULT;
if (len > sizeof(u_kbs->kb_string) - 1)
return -EINVAL;
p = kmalloc(len + 1, GFP_KERNEL);
if (!p)
return -ENOMEM;
if (copy_from_user(p, u_kbs->kb_string, len)) {
kfree(p);
return -EFAULT;
}
p[len] = 0;
kfree(kbd->func_table[kb_func]);
kbd->func_table[kb_func] = p;
break;
}
return 0;
}
int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
{
struct tty_struct *tty;
void __user *argp;
unsigned int ct;
int perm;
argp = (void __user *)arg;
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
*/
tty = tty_port_tty_get(kbd->port);
/* FIXME this test is pretty racy */
perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG);
tty_kref_put(tty);
switch (cmd) {
case KDGKBTYPE:
return put_user(KB_101, (char __user *)argp);
case KDGKBENT:
case KDSKBENT:
return do_kdsk_ioctl(kbd, argp, cmd, perm);
case KDGKBSENT:
case KDSKBSENT:
return do_kdgkb_ioctl(kbd, argp, cmd, perm);
case KDGKBDIACR:
{
struct kbdiacrs __user *a = argp;
struct kbdiacr diacr;
int i;
if (put_user(kbd->accent_table_size, &a->kb_cnt))
return -EFAULT;
for (i = 0; i < kbd->accent_table_size; i++) {
diacr.diacr = kbd->accent_table[i].diacr;
diacr.base = kbd->accent_table[i].base;
diacr.result = kbd->accent_table[i].result;
if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
return -EFAULT;
}
return 0;
}
case KDGKBDIACRUC:
{
struct kbdiacrsuc __user *a = argp;
ct = kbd->accent_table_size;
if (put_user(ct, &a->kb_cnt))
return -EFAULT;
if (copy_to_user(a->kbdiacruc, kbd->accent_table,
ct * sizeof(struct kbdiacruc)))
return -EFAULT;
return 0;
}
case KDSKBDIACR:
{
struct kbdiacrs __user *a = argp;
struct kbdiacr diacr;
int i;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
kbd->accent_table_size = ct;
for (i = 0; i < ct; i++) {
if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
return -EFAULT;
kbd->accent_table[i].diacr = diacr.diacr;
kbd->accent_table[i].base = diacr.base;
kbd->accent_table[i].result = diacr.result;
}
return 0;
}
case KDSKBDIACRUC:
{
struct kbdiacrsuc __user *a = argp;
if (!perm)
return -EPERM;
if (get_user(ct, &a->kb_cnt))
return -EFAULT;
if (ct >= MAX_DIACR)
return -EINVAL;
kbd->accent_table_size = ct;
if (copy_from_user(kbd->accent_table, a->kbdiacruc,
ct * sizeof(struct kbdiacruc)))
return -EFAULT;
return 0;
}
default:
return -ENOIOCTLCMD;
}
}
EXPORT_SYMBOL(kbd_ioctl);
EXPORT_SYMBOL(kbd_ascebc);
EXPORT_SYMBOL(kbd_free);
EXPORT_SYMBOL(kbd_alloc);
EXPORT_SYMBOL(kbd_keycode);

56
drivers/s390/char/keyboard.h Normal file
View file

@ -0,0 +1,56 @@
/*
* ebcdic keycode functions for s390 console drivers
*
* Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/keyboard.h>
#define NR_FN_HANDLER 20
struct kbd_data;
typedef void (fn_handler_fn)(struct kbd_data *);
/*
* FIXME: explain key_maps tricks.
*/
struct kbd_data {
struct tty_port *port;
unsigned short **key_maps;
char **func_table;
fn_handler_fn **fn_handler;
struct kbdiacruc *accent_table;
unsigned int accent_table_size;
unsigned int diacr;
unsigned short sysrq;
};
struct kbd_data *kbd_alloc(void);
void kbd_free(struct kbd_data *);
void kbd_ascebc(struct kbd_data *, unsigned char *);
void kbd_keycode(struct kbd_data *, unsigned int);
int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
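/*
 * Usage sketch (hypothetical caller, for illustration only): a console
 * driver allocates the keyboard state once per device, attaches it to the
 * device's tty_port, derives its ascii -> ebcdic table and then feeds
 * received keycodes into kbd_keycode():
 *
 *	struct kbd_data *kbd = kbd_alloc();
 *
 *	if (!kbd)
 *		return -ENOMEM;
 *	kbd->port = port;
 *	kbd_ascebc(kbd, ascebc);
 *	...
 *	kbd_keycode(kbd, keycode);
 *	...
 *	kbd_free(kbd);
 */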
/*
* Helper Functions.
*/
static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
tty_insert_flip_char(port, ch, 0);
tty_schedule_flip(port);
}
static inline void
kbd_puts_queue(struct tty_port *port, char *cp)
{
while (*cp)
tty_insert_flip_char(port, *cp++, 0);
tty_schedule_flip(port);
}

651
drivers/s390/char/monreader.c Normal file
View file

@ -0,0 +1,651 @@
/*
* Character device driver for reading z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2004, 2009
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#define KMSG_COMPONENT "monreader"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
#define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT 0x40
#define MON_SERVICE "*MONITOR"
#define MON_IN_USE 0x01
#define MON_MSGLIM 255
static char mon_dcss_name[9] = "MONDCSS\0";
struct mon_msg {
u32 pos;
u32 mca_offset;
struct iucv_message msg;
char msglim_reached;
char replied_msglim;
};
struct mon_private {
struct iucv_path *path;
struct mon_msg *msg_array[MON_MSGLIM];
unsigned int write_index;
unsigned int read_index;
atomic_t msglim_count;
atomic_t read_ready;
atomic_t iucv_connected;
atomic_t iucv_severed;
};
static unsigned long mon_in_use = 0;
static unsigned long mon_dcss_start;
static unsigned long mon_dcss_end;
static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
static u8 user_data_connect[16] = {
/* Version code, must be 0x01 for shared mode */
0x01,
/* what to collect */
MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
/* DCSS name in EBCDIC, 8 bytes padded with blanks */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
static u8 user_data_sever[16] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};
static struct device *monreader_device;
/******************************************************************************
* helper functions *
*****************************************************************************/
/*
 * Create the 8-byte EBCDIC DCSS segment name from
 * an ASCII name, including padding.
 */
static void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
int i;
for (i = 0; i < 8; i++) {
if (ascii_name[i] == '\0')
break;
ebcdic_name[i] = toupper(ascii_name[i]);
}
for (; i < 8; i++)
ebcdic_name[i] = ' ';
ASCEBC(ebcdic_name, 8);
}
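/*
 * Example (illustrative): for the default segment name "MONDCSS",
 * dcss_mkname() produces the blank-padded 8 bytes "MONDCSS " and converts
 * them in place to EBCDIC, which is the form the *MONITOR service expects
 * in bytes 8-15 of the IUCV connect user data.
 */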
static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg;
}
static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg[4];
}
static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
}
static inline u32 mon_mca_size(struct mon_msg *monmsg)
{
return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}
static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
}
static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
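/*
 * Layout sketch derived from the accessors above: each monitor control
 * area element is 12 bytes,
 *
 *	bytes 0..3	record type flags		(mon_mca_type)
 *	bytes 4..7	address of first record byte	(mon_rec_start)
 *	bytes 8..11	address of last record byte	(mon_rec_end)
 *
 * which is why mon_check_mca() below requires the MCA size to be a
 * multiple of 12.
 */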
static int mon_check_mca(struct mon_msg *monmsg)
{
if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
(mon_rec_start(monmsg) < mon_dcss_start) ||
(mon_rec_end(monmsg) > mon_dcss_end) ||
(mon_mca_type(monmsg, 0) == 0) ||
(mon_mca_size(monmsg) % 12 != 0) ||
(mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
(mon_mca_end(monmsg) > mon_dcss_end) ||
(mon_mca_start(monmsg) < mon_dcss_start) ||
((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
return -EINVAL;
return 0;
}
static int mon_send_reply(struct mon_msg *monmsg,
struct mon_private *monpriv)
{
int rc;
rc = iucv_message_reply(monpriv->path, &monmsg->msg,
IUCV_IPRMDATA, NULL, 0);
atomic_dec(&monpriv->msglim_count);
if (likely(!monmsg->msglim_reached)) {
monmsg->pos = 0;
monmsg->mca_offset = 0;
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
} else
monmsg->replied_msglim = 1;
if (rc) {
pr_err("Reading monitor data failed with rc=%i\n", rc);
return -EIO;
}
return 0;
}
static void mon_free_mem(struct mon_private *monpriv)
{
int i;
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
kfree(monpriv);
}
static struct mon_private *mon_alloc_mem(void)
{
int i;
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return NULL;
for (i = 0; i < MON_MSGLIM; i++) {
monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
GFP_KERNEL);
if (!monpriv->msg_array[i]) {
mon_free_mem(monpriv);
return NULL;
}
}
return monpriv;
}
static inline void mon_next_mca(struct mon_msg *monmsg)
{
if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
return;
monmsg->mca_offset += 12;
monmsg->pos = 0;
}
static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
struct mon_msg *monmsg;
if (!atomic_read(&monpriv->read_ready))
return NULL;
monmsg = monpriv->msg_array[monpriv->read_index];
if (unlikely(monmsg->replied_msglim)) {
monmsg->replied_msglim = 0;
monmsg->msglim_reached = 0;
monmsg->pos = 0;
monmsg->mca_offset = 0;
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
return ERR_PTR(-EOVERFLOW);
}
return monmsg;
}
/******************************************************************************
* IUCV handler *
*****************************************************************************/
static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
struct mon_private *monpriv = path->private;
atomic_set(&monpriv->iucv_connected, 1);
wake_up(&mon_conn_wait_queue);
}
static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
struct mon_private *monpriv = path->private;
pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
ipuser[0]);
iucv_path_sever(path, NULL);
atomic_set(&monpriv->iucv_severed, 1);
wake_up(&mon_conn_wait_queue);
wake_up_interruptible(&mon_read_wait_queue);
}
static void mon_iucv_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct mon_private *monpriv = path->private;
memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
pr_warning("The read queue for monitor data is full\n");
monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
}
monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
atomic_inc(&monpriv->read_ready);
wake_up_interruptible(&mon_read_wait_queue);
}
static struct iucv_handler monreader_iucv_handler = {
.path_complete = mon_iucv_path_complete,
.path_severed = mon_iucv_path_severed,
.message_pending = mon_iucv_message_pending,
};
/******************************************************************************
* file operations *
*****************************************************************************/
static int mon_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
int rc;
/*
* only one user allowed
*/
rc = -EBUSY;
if (test_and_set_bit(MON_IN_USE, &mon_in_use))
goto out;
rc = -ENOMEM;
monpriv = mon_alloc_mem();
if (!monpriv)
goto out_use;
/*
* Connect to *MONITOR service
*/
monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
if (!monpriv->path)
goto out_priv;
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
pr_err("Connecting to the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
rc = -EIO;
goto out_path;
}
/*
* Wait for connection confirmation
*/
wait_event(mon_conn_wait_queue,
atomic_read(&monpriv->iucv_connected) ||
atomic_read(&monpriv->iucv_severed));
if (atomic_read(&monpriv->iucv_severed)) {
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
rc = -EIO;
goto out_path;
}
filp->private_data = monpriv;
dev_set_drvdata(monreader_device, monpriv);
return nonseekable_open(inode, filp);
out_path:
iucv_path_free(monpriv->path);
out_priv:
mon_free_mem(monpriv);
out_use:
clear_bit(MON_IN_USE, &mon_in_use);
out:
return rc;
}
static int mon_close(struct inode *inode, struct file *filp)
{
int rc, i;
struct mon_private *monpriv = filp->private_data;
/*
* Close IUCV connection and unregister
*/
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
pr_warning("Disconnecting the z/VM *MONITOR system "
"service failed with rc=%i\n", rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
dev_set_drvdata(monreader_device, NULL);
for (i = 0; i < MON_MSGLIM; i++)
kfree(monpriv->msg_array[i]);
kfree(monpriv);
clear_bit(MON_IN_USE, &mon_in_use);
return 0;
}
static ssize_t mon_read(struct file *filp, char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
struct mon_msg *monmsg;
int ret;
u32 mce_start;
monmsg = mon_next_message(monpriv);
if (IS_ERR(monmsg))
return PTR_ERR(monmsg);
if (!monmsg) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(mon_read_wait_queue,
atomic_read(&monpriv->read_ready) ||
atomic_read(&monpriv->iucv_severed));
if (ret)
return ret;
if (unlikely(atomic_read(&monpriv->iucv_severed)))
return -EIO;
monmsg = monpriv->msg_array[monpriv->read_index];
}
if (!monmsg->pos)
monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
if (mon_check_mca(monmsg))
goto reply;
/* read monitor control element (12 bytes) first */
mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
count = min(count, (size_t) mce_start + 12 - monmsg->pos);
ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
count);
if (ret)
return -EFAULT;
monmsg->pos += count;
if (monmsg->pos == mce_start + 12)
monmsg->pos = mon_rec_start(monmsg);
goto out_copy;
}
/* read records */
if (monmsg->pos <= mon_rec_end(monmsg)) {
count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+ 1);
ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
count);
if (ret)
return -EFAULT;
monmsg->pos += count;
if (monmsg->pos > mon_rec_end(monmsg))
mon_next_mca(monmsg);
goto out_copy;
}
reply:
ret = mon_send_reply(monmsg, monpriv);
return ret;
out_copy:
*ppos += count;
return count;
}
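/*
 * Userspace usage sketch (hypothetical): each *MONITOR message is
 * delivered as a 12 byte monitor control element followed by the record
 * data; a read() returning 0 marks the end of one message, and an
 * EOVERFLOW error indicates that the message limit was reached and
 * records may have been lost. Reading one message could look like:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) != 0) {
 *		if (n < 0)
 *			handle_error(errno);	// e.g. EOVERFLOW
 *		else
 *			consume(buf, n);
 *	}
 */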
static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
poll_wait(filp, &mon_read_wait_queue, p);
if (unlikely(atomic_read(&monpriv->iucv_severed)))
return POLLERR;
if (atomic_read(&monpriv->read_ready))
return POLLIN | POLLRDNORM;
return 0;
}
static const struct file_operations mon_fops = {
.owner = THIS_MODULE,
.open = &mon_open,
.release = &mon_close,
.read = &mon_read,
.poll = &mon_poll,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monreader",
.fops = &mon_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/******************************************************************************
* suspend / resume *
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
return 0;
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
pr_warning("Disconnecting the z/VM *MONITOR system "
"service failed with rc=%i\n", rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
atomic_set(&monpriv->msglim_count, 0);
monpriv->write_index = 0;
monpriv->read_index = 0;
monpriv->path = NULL;
return 0;
}
static int monreader_thaw(struct device *dev)
{
struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
return 0;
rc = -ENOMEM;
monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
if (!monpriv->path)
goto out;
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
pr_err("Connecting to the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
goto out_path;
}
wait_event(mon_conn_wait_queue,
atomic_read(&monpriv->iucv_connected) ||
atomic_read(&monpriv->iucv_severed));
if (atomic_read(&monpriv->iucv_severed))
goto out_path;
return 0;
out_path:
rc = -EIO;
iucv_path_free(monpriv->path);
monpriv->path = NULL;
out:
atomic_set(&monpriv->iucv_severed, 1);
return rc;
}
static int monreader_restore(struct device *dev)
{
int rc;
segment_unload(mon_dcss_name);
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
panic("fatal monreader resume error: no monitor dcss\n");
}
return monreader_thaw(dev);
}
static const struct dev_pm_ops monreader_pm_ops = {
.freeze = monreader_freeze,
.thaw = monreader_thaw,
.restore = monreader_restore,
};
static struct device_driver monreader_driver = {
.name = "monreader",
.bus = &iucv_bus,
.pm = &monreader_pm_ops,
};
/******************************************************************************
* module init/exit *
*****************************************************************************/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM) {
pr_err("The z/VM *MONITOR record device driver cannot be "
"loaded without z/VM\n");
return -ENODEV;
}
/*
* Register with IUCV and connect to *MONITOR service
*/
rc = iucv_register(&monreader_iucv_handler, 1);
if (rc) {
pr_err("The z/VM *MONITOR record device driver failed to "
"register with IUCV\n");
return rc;
}
rc = driver_register(&monreader_driver);
if (rc)
goto out_iucv;
monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!monreader_device) {
rc = -ENOMEM;
goto out_driver;
}
dev_set_name(monreader_device, "monreader-dev");
monreader_device->bus = &iucv_bus;
monreader_device->parent = iucv_root;
monreader_device->driver = &monreader_driver;
monreader_device->release = (void (*)(struct device *))kfree;
rc = device_register(monreader_device);
if (rc) {
put_device(monreader_device);
goto out_driver;
}
rc = segment_type(mon_dcss_name);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
goto out_device;
}
if (rc != SEG_TYPE_SC) {
pr_err("The specified *MONITOR DCSS %s does not have the "
"required type SC\n", mon_dcss_name);
rc = -EINVAL;
goto out_device;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
goto out_device;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
rc = misc_register(&mon_dev);
if (rc < 0)
goto out;
return 0;
out:
segment_unload(mon_dcss_name);
out_device:
device_unregister(monreader_device);
out_driver:
driver_unregister(&monreader_driver);
out_iucv:
iucv_unregister(&monreader_iucv_handler, 1);
return rc;
}
static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
misc_deregister(&mon_dev);
device_unregister(monreader_device);
driver_unregister(&monreader_driver);
iucv_unregister(&monreader_iucv_handler, 1);
return;
}
module_init(mon_init);
module_exit(mon_exit);
module_param_string(mondcss, mon_dcss_name, 9, 0444);
MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
"service, max. 8 chars. Default is MONDCSS");
MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
"monitor service records.");
MODULE_LICENSE("GPL");

399
drivers/s390/char/monwriter.c Normal file
View file

@ -0,0 +1,399 @@
/*
* Character device driver for writing z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/
#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
#define MONWRITE_MAX_DATALEN 4010
static int mon_max_bufs = 255;
static int mon_buf_count;
struct mon_buf {
struct list_head list;
struct monwrite_hdr hdr;
int diag_done;
char *data;
};
static LIST_HEAD(mon_priv_list);
struct mon_private {
struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
struct mutex thread_mutex;
};
/*
* helper functions
*/
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
struct appldata_product_id id;
int rc;
strncpy(id.prod_nr, "LNXAPPL", 7);
id.prod_fn = myhdr->applid;
id.record_nr = myhdr->record_num;
id.version_nr = myhdr->version;
id.release_nr = myhdr->release;
id.mod_lvl = myhdr->mod_level;
rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
if (rc <= 0)
return rc;
pr_err("Writing monitor data failed with rc=%i\n", rc);
if (rc == 5)
return -EPERM;
return -EINVAL;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
struct monwrite_hdr *monhdr)
{
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list)
if ((entry->hdr.mon_function == monhdr->mon_function ||
monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
entry->hdr.applid == monhdr->applid &&
entry->hdr.record_num == monhdr->record_num &&
entry->hdr.version == monhdr->version &&
entry->hdr.release == monhdr->release &&
entry->hdr.mod_level == monhdr->mod_level)
return entry;
return NULL;
}
static int monwrite_new_hdr(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf;
int rc = 0;
if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
monhdr->mon_function > MONWRITE_START_CONFIG ||
monhdr->hdrlen != sizeof(struct monwrite_hdr))
return -EINVAL;
monbuf = NULL;
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
monbuf = monwrite_find_hdr(monpriv, monhdr);
if (monbuf) {
if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
monhdr->datalen = monbuf->hdr.datalen;
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_STOP_REC);
list_del(&monbuf->list);
mon_buf_count--;
kfree(monbuf->data);
kfree(monbuf);
monbuf = NULL;
}
} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
if (mon_buf_count >= mon_max_bufs)
return -ENOSPC;
monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
if (!monbuf)
return -ENOMEM;
monbuf->data = kzalloc(monhdr->datalen,
GFP_KERNEL | GFP_DMA);
if (!monbuf->data) {
kfree(monbuf);
return -ENOMEM;
}
monbuf->hdr = *monhdr;
list_add_tail(&monbuf->list, &monpriv->list);
if (monhdr->mon_function != MONWRITE_GEN_EVENT)
mon_buf_count++;
}
monpriv->current_buf = monbuf;
return rc;
}
static int monwrite_new_data(struct mon_private *monpriv)
{
struct monwrite_hdr *monhdr = &monpriv->hdr;
struct mon_buf *monbuf = monpriv->current_buf;
int rc = 0;
switch (monhdr->mon_function) {
case MONWRITE_START_INTERVAL:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_START_CONFIG:
if (!monbuf->diag_done) {
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
monbuf->diag_done = 1;
}
break;
case MONWRITE_GEN_EVENT:
rc = monwrite_diag(monhdr, monbuf->data,
APPLDATA_GEN_EVENT_REC);
list_del(&monpriv->current_buf->list);
kfree(monpriv->current_buf->data);
kfree(monpriv->current_buf);
monpriv->current_buf = NULL;
break;
default:
/* monhdr->mon_function is checked in monwrite_new_hdr */
BUG();
}
return rc;
}
/*
* file operations
*/
static int monwrite_open(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv)
return -ENOMEM;
INIT_LIST_HEAD(&monpriv->list);
monpriv->hdr_to_read = sizeof(monpriv->hdr);
mutex_init(&monpriv->thread_mutex);
filp->private_data = monpriv;
list_add_tail(&monpriv->priv_list, &mon_priv_list);
return nonseekable_open(inode, filp);
}
static int monwrite_close(struct inode *inode, struct file *filp)
{
struct mon_private *monpriv = filp->private_data;
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list) {
if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&entry->hdr, entry->data,
APPLDATA_STOP_REC);
mon_buf_count--;
list_del(&entry->list);
kfree(entry->data);
kfree(entry);
}
list_del(&monpriv->priv_list);
kfree(monpriv);
return 0;
}
static ssize_t monwrite_write(struct file *filp, const char __user *data,
size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
size_t len, written;
void *to;
int rc;
mutex_lock(&monpriv->thread_mutex);
for (written = 0; written < count; ) {
if (monpriv->hdr_to_read) {
len = min(count - written, monpriv->hdr_to_read);
to = (char *) &monpriv->hdr +
sizeof(monpriv->hdr) - monpriv->hdr_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->hdr_to_read -= len;
written += len;
if (monpriv->hdr_to_read > 0)
continue;
rc = monwrite_new_hdr(monpriv);
if (rc)
goto out_error;
monpriv->data_to_read = monpriv->current_buf ?
monpriv->current_buf->hdr.datalen : 0;
}
if (monpriv->data_to_read) {
len = min(count - written, monpriv->data_to_read);
to = monpriv->current_buf->data +
monpriv->hdr.datalen - monpriv->data_to_read;
if (copy_from_user(to, data + written, len)) {
rc = -EFAULT;
goto out_error;
}
monpriv->data_to_read -= len;
written += len;
if (monpriv->data_to_read > 0)
continue;
rc = monwrite_new_data(monpriv);
if (rc)
goto out_error;
}
monpriv->hdr_to_read = sizeof(monpriv->hdr);
}
mutex_unlock(&monpriv->thread_mutex);
return written;
out_error:
monpriv->data_to_read = 0;
monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
mutex_unlock(&monpriv->thread_mutex);
return rc;
}
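/*
 * Userspace usage sketch (hypothetical, applid chosen for illustration):
 * each record is a struct monwrite_hdr followed by hdr.datalen bytes of
 * data; the state machine above reassembles records even across partial
 * writes:
 *
 *	struct monwrite_hdr hdr = {
 *		.mon_function	= MONWRITE_START_INTERVAL,
 *		.applid		= 0x10,
 *		.hdrlen		= sizeof(struct monwrite_hdr),
 *		.datalen	= sizeof(sample),
 *	};
 *
 *	write(fd, &hdr, sizeof(hdr));
 *	write(fd, &sample, sizeof(sample));
 */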
static const struct file_operations monwrite_fops = {
.owner = THIS_MODULE,
.open = &monwrite_open,
.release = &monwrite_close,
.write = &monwrite_write,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monwriter",
.fops = &monwrite_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/*
* suspend/resume
*/
static int monwriter_freeze(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_STOP_REC);
}
}
return 0;
}
static int monwriter_restore(struct device *dev)
{
struct mon_private *monpriv;
struct mon_buf *monbuf;
list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
list_for_each_entry(monbuf, &monpriv->list, list) {
if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_INTERVAL_REC);
if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
monwrite_diag(&monbuf->hdr, monbuf->data,
APPLDATA_START_CONFIG_REC);
}
}
return 0;
}
static int monwriter_thaw(struct device *dev)
{
return monwriter_restore(dev);
}
static const struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
};
static struct platform_driver monwriter_pdrv = {
.driver = {
.name = "monwriter",
.owner = THIS_MODULE,
.pm = &monwriter_pm_ops,
},
};
static struct platform_device *monwriter_pdev;
/*
* module init/exit
*/
static int __init mon_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return -ENODEV;
rc = platform_driver_register(&monwriter_pdrv);
if (rc)
return rc;
monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
0);
if (IS_ERR(monwriter_pdev)) {
rc = PTR_ERR(monwriter_pdev);
goto out_driver;
}
/*
* misc_register() has to be the last action in module_init(), because
* file operations will be available right after this.
*/
rc = misc_register(&mon_dev);
if (rc)
goto out_device;
return 0;
out_device:
platform_device_unregister(monwriter_pdev);
out_driver:
platform_driver_unregister(&monwriter_pdrv);
return rc;
}
static void __exit mon_exit(void)
{
misc_deregister(&mon_dev);
platform_device_unregister(monwriter_pdev);
platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);
module_exit(mon_exit);
module_param_named(max_bufs, mon_max_bufs, int, 0644);
MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
"that can be active at one time");
MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
MODULE_DESCRIPTION("Character device driver for writing z/VM "
"APPLDATA monitor records.");
MODULE_LICENSE("GPL");

1393
drivers/s390/char/raw3270.c Normal file

File diff suppressed because it is too large

288
drivers/s390/char/raw3270.h Normal file
View file

@ -0,0 +1,288 @@
/*
* IBM/3270 Driver
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <asm/idals.h>
#include <asm/ioctl.h>
/* ioctls for fullscreen 3270 */
#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
#define TUBSETMOD _IO('3',12) /* FIXME: what does it do? */
#define TUBGETMOD _IO('3',13) /* FIXME: what does it do? */
/* Local Channel Commands */
#define TC_WRITE 0x01 /* Write */
#define TC_RDBUF 0x02 /* Read Buffer */
#define TC_EWRITE 0x05 /* Erase write */
#define TC_READMOD 0x06 /* Read modified */
#define TC_EWRITEA 0x0d /* Erase write alternate */
#define TC_WRITESF 0x11 /* Write structured field */
/* Buffer Control Orders */
#define TO_SF 0x1d /* Start field */
#define TO_SBA 0x11 /* Set buffer address */
#define TO_IC 0x13 /* Insert cursor */
#define TO_PT 0x05 /* Program tab */
#define TO_RA 0x3c /* Repeat to address */
#define TO_SFE 0x29 /* Start field extended */
#define TO_EUA 0x12 /* Erase unprotected to address */
#define TO_MF 0x2c /* Modify field */
#define TO_SA 0x28 /* Set attribute */
/* Field Attribute Bytes */
#define TF_INPUT 0x40 /* Visible input */
#define TF_INPUTN 0x4c /* Invisible input */
#define TF_INMDT 0xc1 /* Visible, Set-MDT */
#define TF_LOG 0x60
/* Character Attribute Bytes */
#define TAT_RESET 0x00
#define TAT_FIELD 0xc0
#define TAT_EXTHI 0x41
#define TAT_COLOR 0x42
#define TAT_CHARS 0x43
#define TAT_TRANS 0x46
/* Extended-Highlighting Bytes */
#define TAX_RESET 0x00
#define TAX_BLINK 0xf1
#define TAX_REVER 0xf2
#define TAX_UNDER 0xf4
/* Reset value */
#define TAR_RESET 0x00
/* Color values */
#define TAC_RESET 0x00
#define TAC_BLUE 0xf1
#define TAC_RED 0xf2
#define TAC_PINK 0xf3
#define TAC_GREEN 0xf4
#define TAC_TURQ 0xf5
#define TAC_YELLOW 0xf6
#define TAC_WHITE 0xf7
#define TAC_DEFAULT 0x00
/* Write Control Characters */
#define TW_NONE 0x40 /* No particular action */
#define TW_KR 0xc2 /* Keyboard restore */
#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
#define RAW3270_FIRSTMINOR 1 /* First minor number */
#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */
/* For TUBGETMOD and TUBSETMOD. Should include. */
struct raw3270_iocb {
short model;
short line_cnt;
short col_cnt;
short pf_cnt;
short re_cnt;
short map;
};
struct raw3270;
struct raw3270_view;
extern struct class *class3270;
/* 3270 CCW request */
struct raw3270_request {
struct list_head list; /* list head for request queueing. */
struct raw3270_view *view; /* view of this request */
struct ccw1 ccw; /* single ccw. */
void *buffer; /* output buffer. */
size_t size; /* size of output buffer. */
int rescnt; /* residual count from devstat. */
int rc; /* return code for this request. */
/* Callback for delivering final status. */
void (*callback)(struct raw3270_request *, void *);
void *callback_data;
};
struct raw3270_request *raw3270_request_alloc(size_t size);
struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
void raw3270_request_free(struct raw3270_request *);
void raw3270_request_reset(struct raw3270_request *);
void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
static inline int
raw3270_request_final(struct raw3270_request *rq)
{
return list_empty(&rq->list);
}
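/*
 * Usage sketch (hypothetical view code): a request is allocated once,
 * (re)filled with a channel command and output data, and then started on a
 * view; the callback, if set, runs when the final status arrives:
 *
 *	struct raw3270_request *rq = raw3270_request_alloc(BUFSIZE);
 *
 *	raw3270_request_reset(rq);
 *	raw3270_request_set_cmd(rq, TC_EWRITE);
 *	raw3270_request_add_data(rq, str, len);
 *	rc = raw3270_start(view, rq);
 */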
void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
/* Return value of *intv (see raw3270_fn below) can be one of the following: */
#define RAW3270_IO_DONE 0 /* request finished */
#define RAW3270_IO_BUSY 1 /* request still active */
#define RAW3270_IO_RETRY 2 /* retry current request */
#define RAW3270_IO_STOP 3 /* kill current request */
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
int (*activate)(struct raw3270_view *);
void (*deactivate)(struct raw3270_view *);
int (*intv)(struct raw3270_view *,
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
void (*resize)(struct raw3270_view *, int, int, int);
};
/*
* View structure chaining. The raw3270_view structure is meant to
* be embedded at the start of the real view data structure, e.g.:
* struct example {
* struct raw3270_view view;
* ...
* };
*/
struct raw3270_view {
struct list_head list;
spinlock_t lock;
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
unsigned int model;
unsigned int rows, cols; /* # of rows & columns of the view */
unsigned char *ascebc; /* ascii -> ebcdic table */
};
int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
int raw3270_start(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
int raw3270_reset(struct raw3270_view *);
struct raw3270_view *raw3270_view(struct raw3270_view *);
int raw3270_view_active(struct raw3270_view *);
/* Reference count inliner for view structures. */
static inline void
raw3270_get_view(struct raw3270_view *view)
{
atomic_inc(&view->ref_count);
}
extern wait_queue_head_t raw3270_wait_queue;
static inline void
raw3270_put_view(struct raw3270_view *view)
{
if (atomic_dec_return(&view->ref_count) == 0)
wake_up(&raw3270_wait_queue);
}
struct raw3270 *raw3270_setup_console(void);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
struct raw3270_notifier {
struct list_head list;
void (*create)(int minor);
void (*destroy)(int minor);
};
int raw3270_register_notifier(struct raw3270_notifier *);
void raw3270_unregister_notifier(struct raw3270_notifier *);
void raw3270_pm_unfreeze(struct raw3270_view *);
/*
* Little memory allocator for string objects.
*/
struct string
{
struct list_head list;
struct list_head update;
unsigned long size;
unsigned long len;
char string[0];
} __attribute__ ((aligned(8)));
static inline struct string *
alloc_string(struct list_head *free_list, unsigned long len)
{
struct string *cs, *tmp;
unsigned long size;
size = (len + 7L) & -8L;
list_for_each_entry(cs, free_list, list) {
if (cs->size < size)
continue;
if (cs->size > size + sizeof(struct string)) {
char *endaddr = (char *) (cs + 1) + cs->size;
tmp = (struct string *) (endaddr - size) - 1;
tmp->size = size;
cs->size -= size + sizeof(struct string);
cs = tmp;
} else
list_del(&cs->list);
cs->len = len;
INIT_LIST_HEAD(&cs->list);
INIT_LIST_HEAD(&cs->update);
return cs;
}
return NULL;
}
static inline unsigned long
free_string(struct list_head *free_list, struct string *cs)
{
struct string *tmp;
struct list_head *p, *left;
/* Find out the left neighbour in free memory list. */
left = free_list;
list_for_each(p, free_list) {
if (list_entry(p, struct string, list) > cs)
break;
left = p;
}
/* Try to merge with right neighbour = next element from left. */
if (left->next != free_list) {
tmp = list_entry(left->next, struct string, list);
if ((char *) (cs + 1) + cs->size == (char *) tmp) {
list_del(&tmp->list);
cs->size += tmp->size + sizeof(struct string);
}
}
/* Try to merge with left neighbour. */
if (left != free_list) {
tmp = list_entry(left, struct string, list);
if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
tmp->size += cs->size + sizeof(struct string);
return tmp->size;
}
}
__list_add(&cs->list, left, left->next);
return cs->size;
}
static inline void
add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
{
struct string *cs;
cs = (struct string *) mem;
cs->size = size - sizeof(struct string);
free_string(free_list, cs);
}
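/*
 * Usage sketch (hypothetical): a view hands the allocator one chunk of
 * memory up front and then carves variable sized strings out of it;
 * free_string() coalesces a freed string with its free neighbours:
 *
 *	LIST_HEAD(free_strings);
 *	struct string *s;
 *
 *	add_string_memory(&free_strings, buf, buf_size);
 *	s = alloc_string(&free_strings, len);
 *	if (!s)
 *		...	// string memory exhausted
 *	free_string(&free_strings, s);
 */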

1273
drivers/s390/char/sclp.c Normal file

File diff suppressed because it is too large

244
drivers/s390/char/sclp.h Normal file
View file

@ -0,0 +1,244 @@
/*
* Copyright IBM Corp. 1999,2012
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_H__
#define __SCLP_H__
#include <linux/types.h>
#include <linux/list.h>
#include <asm/sclp.h>
#include <asm/ebcdic.h>
/* maximum number of pages concerning our own memory management */
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define SCLP_CONSOLE_PAGES 6
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
#define EVTYP_DIAG_TEST 0x07
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
#define EVTYP_CNTLPROGOPCMD 0x20
#define EVTYP_CNTLPROGIDENT 0x0B
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
#define EVTYP_ASYNC 0x0A
#define EVTYP_OCF 0x1E
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
#define EVTYP_DIAG_TEST_MASK 0x02000000
#define EVTYP_STATECHANGE_MASK 0x01000000
#define EVTYP_PMSGCMD_MASK 0x00800000
#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
#define EVTYP_CTLPROGIDENT_MASK 0x00200000
#define EVTYP_SIGQUIESCE_MASK 0x00000008
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
#define EVTYP_ASYNC_MASK 0x00400000
#define EVTYP_OCF_MASK 0x00000004
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
#define GNRLMSGFLGS_HOLDMSG 0x2000
#define LNTPFLGS_CNTLTEXT 0x8000
#define LNTPFLGS_LABELTEXT 0x4000
#define LNTPFLGS_DATATEXT 0x2000
#define LNTPFLGS_ENDTEXT 0x1000
#define LNTPFLGS_PROMPTTEXT 0x0800
typedef unsigned int sclp_cmdw_t;
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
#define GDS_ID_MDSMU 0x1310
#define GDS_ID_MDSROUTEINFO 0x1311
#define GDS_ID_AGUNWRKCORR 0x1549
#define GDS_ID_SNACONDREPORT 0x1532
#define GDS_ID_CPMSU 0x1212
#define GDS_ID_ROUTTARGINSTR 0x154D
#define GDS_ID_OPREQ 0x8070
#define GDS_ID_TEXTCMD 0x1320
#define GDS_KEY_SELFDEFTEXTMSG 0x31
enum sclp_pm_event {
SCLP_PM_EVENT_FREEZE,
SCLP_PM_EVENT_THAW,
SCLP_PM_EVENT_RESTORE,
};
#define SCLP_PANIC_PRIO 1
#define SCLP_PANIC_PRIO_CLIENT 0
typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
struct sccb_header {
u16 length;
u8 function_code;
u8 control_mask[3];
u16 response_code;
} __attribute__((packed));
struct init_sccb {
struct sccb_header header;
u16 _reserved;
u16 mask_length;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
sccb_mask_t sclp_receive_mask;
sccb_mask_t sclp_send_mask;
} __attribute__((packed));
extern u64 sclp_facilities;
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
struct gds_subvector {
u8 length;
u8 key;
} __attribute__((packed));
struct gds_vector {
u16 length;
u16 gds_id;
} __attribute__((packed));
struct evbuf_header {
u16 length;
u8 type;
u8 flags;
u16 _reserved;
} __attribute__((packed));
struct sclp_req {
struct list_head list; /* list_head for request queueing. */
sclp_cmdw_t command; /* sclp command to execute */
void *sccb; /* pointer to the sccb to execute */
char status; /* status of this request */
int start_count; /* number of SVCs done for this req */
/* Callback that is called after reaching final status. */
void (*callback)(struct sclp_req *, void *data);
void *callback_data;
int queue_timeout; /* request queue timeout (sec), set by
caller of sclp_add_request(), if
needed */
/* Internal fields */
unsigned long queue_expires; /* request queue timeout (jiffies) */
};
#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
#define SCLP_REQ_QUEUED_TIMEOUT 0x06 /* request on queue timed out */
#define SCLP_QUEUE_INTERVAL 5 /* timeout interval for request queue */
/* function pointers that a high level driver has to use for registration */
/* of some routines it wants to be called from the low level driver */
struct sclp_register {
struct list_head list;
/* User wants to receive: */
sccb_mask_t receive_mask;
/* User wants to send: */
sccb_mask_t send_mask;
/* H/W can receive: */
sccb_mask_t sclp_receive_mask;
/* H/W can send: */
sccb_mask_t sclp_send_mask;
/* called if event type availability changes */
void (*state_change_fn)(struct sclp_register *);
/* called for events in cp_receive_mask/sclp_receive_mask */
void (*receiver_fn)(struct evbuf_header *);
/* called for power management events */
void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
/* pm event posted flag */
int pm_event_posted;
};
/* externals from sclp.c */
int sclp_add_request(struct sclp_req *req);
void sclp_sync_wait(void);
int sclp_register(struct sclp_register *reg);
void sclp_unregister(struct sclp_register *reg);
int sclp_remove_processed(struct sccb_header *sccb);
int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void);
void sclp_sdias_exit(void);
extern int sclp_console_pages;
extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern u8 sclp_fac84;
extern unsigned long long sclp_rzm;
extern unsigned long long sclp_rnmax;
/* useful inlines */
/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
/* translate single character from ASCII to EBCDIC */
static inline unsigned char
sclp_ascebc(unsigned char ch)
{
return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
}
/* translate string from EBCDIC to ASCII */
static inline void
sclp_ebcasc_str(unsigned char *str, int nr)
{
(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
sclp_ascebc_str(unsigned char *str, int nr)
{
(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
static inline struct gds_vector *
sclp_find_gds_vector(void *start, void *end, u16 id)
{
struct gds_vector *v;
for (v = start; (void *) v < end; v = (void *) v + v->length)
if (v->gds_id == id)
return v;
return NULL;
}
static inline struct gds_subvector *
sclp_find_gds_subvector(void *start, void *end, u8 key)
{
struct gds_subvector *sv;
for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length)
if (sv->key == key)
return sv;
return NULL;
}
#endif /* __SCLP_H__ */

211
drivers/s390/char/sclp_async.c Normal file
View file

@ -0,0 +1,211 @@
/*
* Enable Asynchronous Notification via SCLP.
*
* Copyright IBM Corp. 2009
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include "sclp.h"
static int callhome_enabled;
static struct sclp_req *request;
static struct sclp_async_sccb *sccb;
static int sclp_async_send_wait(char *message);
static struct ctl_table_header *callhome_sysctl_header;
static DEFINE_SPINLOCK(sclp_async_lock);
#define SCLP_NORMAL_WRITE 0x00
struct async_evbuf {
struct evbuf_header header;
u64 reserved;
u8 rflags;
u8 empty;
u8 rtype;
u8 otype;
char comp_id[12];
char data[3000]; /* there is still some space left */
} __attribute__((packed));
struct sclp_async_sccb {
struct sccb_header header;
struct async_evbuf evbuf;
} __attribute__((packed));
static struct sclp_register sclp_async_register = {
.send_mask = EVTYP_ASYNC_MASK,
};
static int call_home_on_panic(struct notifier_block *self,
unsigned long event, void *data)
{
strncat(data, init_utsname()->nodename,
sizeof(init_utsname()->nodename));
sclp_async_send_wait(data);
return NOTIFY_DONE;
}
static struct notifier_block call_home_panic_nb = {
.notifier_call = call_home_on_panic,
.priority = INT_MAX,
};
static int proc_handler_callhome(struct ctl_table *ctl, int write,
void __user *buffer, size_t *count,
loff_t *ppos)
{
unsigned long val;
int len, rc;
char buf[3];
if (!*count || (*ppos && !write)) {
*count = 0;
return 0;
}
if (!write) {
len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
rc = copy_to_user(buffer, buf, sizeof(buf));
if (rc != 0)
return -EFAULT;
} else {
len = *count;
rc = kstrtoul_from_user(buffer, len, 0, &val);
if (rc)
return rc;
if (val != 0 && val != 1)
return -EINVAL;
callhome_enabled = val;
}
*count = len;
*ppos += len;
return 0;
}
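/*
 * Usage sketch (shell session, for illustration): this handler backs the
 * /proc/sys/kernel/callhome file registered from sclp_async_init() via
 * kern_dir_table below:
 *
 *	# echo 1 > /proc/sys/kernel/callhome	(enable call home on panic)
 *	# cat /proc/sys/kernel/callhome
 *	1
 */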
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
.mode = 0644,
.proc_handler = proc_handler_callhome,
},
{}
};
static struct ctl_table kern_dir_table[] = {
{
.procname = "kernel",
.maxlen = 0,
.mode = 0555,
.child = callhome_table,
},
{}
};
/*
 * Transfer an asynchronous notification record and wait for
 * send completion.
 */
static int sclp_async_send_wait(char *message)
{
struct async_evbuf *evb;
int rc;
unsigned long flags;
if (!callhome_enabled)
return 0;
sccb->evbuf.header.type = EVTYP_ASYNC;
sccb->evbuf.rtype = 0xA5;
sccb->evbuf.otype = 0x00;
evb = &sccb->evbuf;
request->command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
/*
* Retain Queue
* e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
*/
strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
sccb->evbuf.header.length = sizeof(sccb->evbuf);
sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
sccb->header.function_code = SCLP_NORMAL_WRITE;
rc = sclp_add_request(request);
if (rc)
return rc;
spin_lock_irqsave(&sclp_async_lock, flags);
while (request->status != SCLP_REQ_DONE &&
request->status != SCLP_REQ_FAILED) {
sclp_sync_wait();
}
spin_unlock_irqrestore(&sclp_async_lock, flags);
if (request->status != SCLP_REQ_DONE)
return -EIO;
rc = ((struct sclp_async_sccb *)
request->sccb)->header.response_code;
if (rc != 0x0020)
return -EIO;
if (evb->header.flags != 0x80)
return -EIO;
return rc;
}
static int __init sclp_async_init(void)
{
int rc;
rc = sclp_register(&sclp_async_register);
if (rc)
return rc;
rc = -EOPNOTSUPP;
if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
goto out_sclp;
rc = -ENOMEM;
callhome_sysctl_header = register_sysctl_table(kern_dir_table);
if (!callhome_sysctl_header)
goto out_sclp;
request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!request || !sccb)
goto out_mem;
rc = atomic_notifier_chain_register(&panic_notifier_list,
&call_home_panic_nb);
if (!rc)
goto out;
out_mem:
kfree(request);
free_page((unsigned long) sccb);
unregister_sysctl_table(callhome_sysctl_header);
out_sclp:
sclp_unregister(&sclp_async_register);
out:
return rc;
}
module_init(sclp_async_init);
static void __exit sclp_async_exit(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list,
&call_home_panic_nb);
unregister_sysctl_table(callhome_sysctl_header);
sclp_unregister(&sclp_async_register);
free_page((unsigned long) sccb);
kfree(request);
}
module_exit(sclp_async_exit);
MODULE_AUTHOR("Copyright IBM Corp. 2009");
MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");

716
drivers/s390/char/sclp_cmd.c Normal file
View file

@ -0,0 +1,716 @@
/*
* Copyright IBM Corp. 2007,2012
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include "sclp.h"
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
return sclp_sync_request_timeout(cmd, sccb, 0);
}
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
struct completion completion;
struct sclp_req *request;
int rc;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
if (timeout)
request->queue_timeout = timeout;
request->command = cmd;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
request->callback = sclp_sync_callback;
request->callback_data = &completion;
init_completion(&completion);
/* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
/* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warning("sync request failed (cmd=0x%08x, "
"status=0x%02x)\n", cmd, request->status);
rc = -EIO;
}
out:
kfree(request);
return rc;
}
/*
* CPU configuration related functions.
*/
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
struct read_cpu_info_sccb {
struct sccb_header header;
u16 nr_configured;
u16 offset_configured;
u16 nr_standby;
u16 offset_standby;
u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
struct read_cpu_info_sccb *sccb)
{
char *page = (char *) sccb;
memset(info, 0, sizeof(*info));
info->configured = sccb->nr_configured;
info->standby = sccb->nr_standby;
info->combined = sccb->nr_configured + sccb->nr_standby;
info->has_cpu_type = sclp_fac84 & 0x1;
memcpy(&info->cpu, page + sccb->offset_configured,
info->combined * sizeof(struct sclp_cpu_entry));
}
int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
int rc;
struct read_cpu_info_sccb *sccb;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warning("readcpuinfo failed (response=0x%04x)\n",
sccb->header.response_code);
rc = -EIO;
goto out;
}
sclp_fill_cpu_info(info, sccb);
out:
free_page((unsigned long) sccb);
return rc;
}
struct cpu_configure_sccb {
struct sccb_header header;
} __attribute__((packed, aligned(8)));
static int do_cpu_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
int rc;
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
* This is not going to cross a page boundary since we force
* kmalloc to have a minimum alignment of 8 bytes on s390.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warning("configure cpu failed (cmd=0x%08x, "
"response=0x%04x)\n", cmd,
sccb->header.response_code);
rc = -EIO;
break;
}
out:
kfree(sccb);
return rc;
}
int sclp_cpu_configure(u8 cpu)
{
return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}
int sclp_cpu_deconfigure(u8 cpu)
{
return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;
struct memory_increment {
struct list_head list;
u16 rn;
int standby;
};
struct assign_storage_sccb {
struct sccb_header header;
u16 rn;
} __packed;
int arch_get_memory_phys_device(unsigned long start_pfn)
{
if (!sclp_rzm)
return 0;
return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm);
}
static unsigned long long rn2addr(u16 rn)
{
return (unsigned long long) (rn - 1) * sclp_rzm;
}
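/*
 * Worked example (illustrative): increment numbers are 1-based, so with an
 * increment size of sclp_rzm = 256MB, rn 1 maps to address 0 and rn 3 maps
 * to (3 - 1) * 256MB = 512MB.
 */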
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
struct assign_storage_sccb *sccb;
int rc;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->rn = rn;
rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warning("assign storage failed (cmd=0x%08x, "
"response=0x%04x, rn=0x%04x)\n", cmd,
sccb->header.response_code, rn);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_assign_storage(u16 rn)
{
unsigned long long start;
int rc;
rc = do_assign_storage(0x000d0001, rn);
if (rc)
return rc;
start = rn2addr(rn);
storage_key_init_range(start, start + sclp_rzm);
return 0;
}
static int sclp_unassign_storage(u16 rn)
{
return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
struct sccb_header header;
u16 :16;
u16 assigned;
u32 :32;
u32 entries[0];
} __packed;
static int sclp_attach_storage(u8 id)
{
struct attach_storage_sccb *sccb;
int rc;
int i;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
SCLP_QUEUE_INTERVAL);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (sccb->entries[i])
sclp_unassign_storage(sccb->entries[i] >> 16);
}
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_mem_change_state(unsigned long start, unsigned long size,
int online)
{
struct memory_increment *incr;
unsigned long long istart;
int rc = 0;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (start + size - 1 < istart)
break;
if (start > istart + sclp_rzm - 1)
continue;
if (online)
rc |= sclp_assign_storage(incr->rn);
else
sclp_unassign_storage(incr->rn);
}
return rc ? -EIO : 0;
}
static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long start, size;
struct memory_notify *arg;
unsigned char id;
int rc = 0;
arg = data;
start = arg->start_pfn << PAGE_SHIFT;
size = arg->nr_pages << PAGE_SHIFT;
mutex_lock(&sclp_mem_mutex);
for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
sclp_attach_storage(id);
switch (action) {
case MEM_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_OFFLINE:
break;
case MEM_GOING_ONLINE:
rc = sclp_mem_change_state(start, size, 1);
break;
case MEM_CANCEL_ONLINE:
sclp_mem_change_state(start, size, 0);
break;
case MEM_OFFLINE:
sclp_mem_change_state(start, size, 0);
break;
default:
rc = -EINVAL;
break;
}
if (!rc)
sclp_mem_state_changed = 1;
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
static void __init add_memory_merged(u16 rn)
{
static u16 first_rn, num;
unsigned long long start, size;
if (rn && first_rn && (first_rn + num == rn)) {
num++;
return;
}
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
size = (unsigned long long) num * sclp_rzm;
if (start >= VMEM_MAX_PHYS)
goto skip_add;
if (start + size > VMEM_MAX_PHYS)
size = VMEM_MAX_PHYS - start;
if (memory_end_set && (start >= memory_end))
goto skip_add;
if (memory_end_set && (start + size > memory_end))
size = memory_end - start;
add_memory(0, start, size);
skip_add:
first_rn = rn;
num = 1;
}
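/*
 * Example (illustrative): for standby increments with rn 5, 6, 7 and 9,
 * the calls for 5, 6 and 7 accumulate into one contiguous region; the call
 * for 9 registers that three-increment region and starts a new one; the
 * final add_memory_merged(0) from sclp_add_standby_memory() flushes the
 * last pending region.
 */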
static void __init sclp_add_standby_memory(void)
{
struct memory_increment *incr;
list_for_each_entry(incr, &sclp_mem_list, list)
if (incr->standby)
add_memory_merged(incr->rn);
add_memory_merged(0);
}
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
struct list_head *prev;
u16 last_rn;
new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
if (!new_incr)
return;
new_incr->rn = rn;
new_incr->standby = standby;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
if (assigned && incr->rn > rn)
break;
if (!assigned && incr->rn - last_rn > 1)
break;
last_rn = incr->rn;
prev = &incr->list;
}
if (!assigned)
new_incr->rn = last_rn + 1;
if (new_incr->rn > sclp_rnmax) {
kfree(new_incr);
return;
}
list_add(&new_incr->list, prev);
}
static int sclp_mem_freeze(struct device *dev)
{
if (!sclp_mem_state_changed)
return 0;
pr_err("Memory hotplug state changed, suspend refused.\n");
return -EPERM;
}
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
static struct platform_driver sclp_mem_pdrv = {
.driver = {
.name = "sclp_mem",
.pm = &sclp_mem_pm_ops,
},
};
static int __init sclp_detect_standby_memory(void)
{
struct platform_device *sclp_pdev;
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
if (OLDMEM_BASE) /* No standby memory in kdump mode */
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out;
assigned = 0;
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
rc = sclp_sync_request(0x00040001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0010:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 0, 1);
}
break;
case 0x0310:
break;
case 0x0410:
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 1, 1);
}
break;
default:
rc = -EIO;
break;
}
if (!rc)
sclp_max_storage_id = sccb->max_id;
}
if (rc || list_empty(&sclp_mem_list))
goto out;
for (i = 1; i <= sclp_rnmax - assigned; i++)
insert_increment(0, 1, 0);
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
rc = platform_driver_register(&sclp_mem_pdrv);
if (rc)
goto out;
sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
rc = PTR_ERR_OR_ZERO(sclp_pdev);
if (rc)
goto out_driver;
sclp_add_standby_memory();
goto out;
out_driver:
platform_driver_unregister(&sclp_mem_pdrv);
out:
free_page((unsigned long) sccb);
return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* PCI I/O adapter configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_RECONFIG_PCI_ATYPE 2
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
struct pci_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_PCI_RECONFIG)
return -EOPNOTSUPP;
sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->atype = SCLP_RECONFIG_PCI_ATYPE;
sccb->aid = fid;
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
cmd, sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
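/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * toggle the configuration state of a PCI function identified by its
 * function id. Error handling beyond the return code is left to the
 * caller.
 */
static int __maybe_unused sclp_pci_toggle_example(u32 fid, bool configure)
{
return configure ? sclp_pci_configure(fid) : sclp_pci_deconfigure(fid);
}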
/*
* Channel path configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
struct chp_cfg_sccb {
struct sccb_header header;
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
case 0x0440:
case 0x0450:
break;
default:
pr_warning("configure channel-path failed "
"(cmd=0x%08x, response=0x%04x)\n", cmd,
sccb->header.response_code);
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
/**
* sclp_chp_configure - perform configure channel-path sclp command
* @chpid: channel-path ID
*
* Perform the configure channel-path sclp command for the specified
* chpid and wait for completion. Return 0 on success, non-zero otherwise.
*/
int sclp_chp_configure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
/**
* sclp_chp_deconfigure - perform deconfigure channel-path sclp command
* @chpid: channel-path ID
*
* Perform the deconfigure channel-path sclp command for the specified
* chpid and wait for completion. On success return 0, non-zero otherwise.
*/
int sclp_chp_deconfigure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
struct chp_info_sccb {
struct sccb_header header;
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
*
* Perform read channel-path information sclp command and wait for completion.
* On success, store channel-path information in @info and return 0. Return
* non-zero otherwise.
*/
int sclp_chp_read_info(struct sclp_chp_info *info)
{
struct chp_info_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warning("read channel-path info failed "
"(response=0x%04x)\n", sccb->header.response_code);
rc = -EIO;
goto out;
}
memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
free_page((unsigned long) sccb);
return rc;
}
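/*
 * Illustrative sketch (not part of the original file): configure a
 * channel path only if it is currently in standby. The bit layout
 * within the masks (one bit per chpid, most significant bit first) is
 * an assumption here; see the SCLP documentation for the exact format.
 */
static int __maybe_unused sclp_chp_configure_if_standby(struct chp_id chpid)
{
struct sclp_chp_info info;
int rc;
rc = sclp_chp_read_info(&info);
if (rc)
return rc;
/* assumed mask layout: one bit per chpid, MSB first */
if (!(info.standby[chpid.id / 8] & (0x80 >> (chpid.id % 8))))
return 0;
return sclp_chp_configure(chpid);
}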
bool sclp_has_sprp(void)
{
return !!(sclp_fac84 & 0x2);
}

View file

@ -0,0 +1,356 @@
/*
* SCLP line mode console driver
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
#define sclp_console_major 4 /* TTYAUX_MAJOR */
#define sclp_console_minor 64
#define sclp_console_name "ttyS"
/* Lock to guard over changes to global variables */
static spinlock_t sclp_con_lock;
/* List of free pages that can be used for console output buffering */
static struct list_head sclp_con_pages;
/* List of full struct sclp_buffer structures ready for output */
static struct list_head sclp_con_outqueue;
/* Pointer to current console buffer */
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
/* Suspend mode flag */
static int sclp_con_suspended;
/* Flag that output queue is currently running */
static int sclp_con_queue_running;
/* Output format for console messages */
static unsigned short sclp_con_columns;
static unsigned short sclp_con_width_htab;
static void
sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
{
unsigned long flags;
void *page;
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_con_lock, flags);
/* Remove buffer from outqueue */
list_del(&buffer->list);
list_add_tail((struct list_head *) page, &sclp_con_pages);
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_con_outqueue))
buffer = list_first_entry(&sclp_con_outqueue,
struct sclp_buffer, list);
if (!buffer || sclp_con_suspended) {
sclp_con_queue_running = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
break;
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
/*
* Finalize and emit first pending buffer.
*/
static void sclp_conbuf_emit(void)
{
struct sclp_buffer* buffer;
unsigned long flags;
int rc;
spin_lock_irqsave(&sclp_con_lock, flags);
if (sclp_conbuf)
list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
sclp_conbuf = NULL;
if (sclp_con_queue_running || sclp_con_suspended)
goto out_unlock;
if (list_empty(&sclp_con_outqueue))
goto out_unlock;
buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
list);
sclp_con_queue_running = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
if (rc)
sclp_conbuf_callback(buffer, rc);
return;
out_unlock:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* Wait until the output queue is empty
*/
static void sclp_console_sync_queue(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer))
del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
}
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* When this routine is called from the timer, we flush the temporary
* write buffer without waiting any longer for a final new line.
*/
static void
sclp_console_timeout(unsigned long data)
{
sclp_conbuf_emit();
}
/*
* Drop oldest console buffer if sclp_con_drop is set
*/
static int
sclp_console_drop_buffer(void)
{
struct list_head *list;
struct sclp_buffer *buffer;
void *page;
if (!sclp_console_drop)
return 0;
list = sclp_con_outqueue.next;
if (sclp_con_queue_running)
/* The first element is in I/O */
list = list->next;
if (list == &sclp_con_outqueue)
return 0;
list_del(list);
buffer = list_entry(list, struct sclp_buffer, list);
page = sclp_unmake_buffer(buffer);
list_add_tail((struct list_head *) page, &sclp_con_pages);
return 1;
}
/*
* Write the given message to the S390 system console
*/
static void
sclp_console_write(struct console *console, const char *message,
unsigned int count)
{
unsigned long flags;
void *page;
int written;
if (count == 0)
return;
spin_lock_irqsave(&sclp_con_lock, flags);
/*
* process escape characters, write message into buffer,
* send buffer to SCLP
*/
do {
/* make sure we have a console output buffer */
if (sclp_conbuf == NULL) {
if (list_empty(&sclp_con_pages))
sclp_console_full++;
while (list_empty(&sclp_con_pages)) {
if (sclp_con_suspended)
goto out;
if (sclp_console_drop_buffer())
break;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_con_lock, flags);
}
page = sclp_con_pages.next;
list_del((struct list_head *) page);
sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
sclp_con_width_htab);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_conbuf, (const unsigned char *)
message, count);
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
spin_lock_irqsave(&sclp_con_lock, flags);
message += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
init_timer(&sclp_con_timer);
sclp_con_timer.function = sclp_console_timeout;
sclp_con_timer.data = 0UL;
sclp_con_timer.expires = jiffies + HZ/10;
add_timer(&sclp_con_timer);
}
out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
static struct tty_driver *
sclp_console_device(struct console *c, int *index)
{
*index = c->index;
return sclp_tty_driver;
}
/*
* Make sure that all buffers will be flushed to the SCLP.
*/
static void
sclp_console_flush(void)
{
sclp_conbuf_emit();
sclp_console_sync_queue();
}
/*
* Resume console: If there are cached messages, emit them.
*/
static void sclp_console_resume(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
sclp_con_suspended = 0;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_conbuf_emit();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_console_suspend(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_con_lock, flags);
sclp_con_suspended = 1;
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_console_flush();
}
static int sclp_console_notify(struct notifier_block *self,
unsigned long event, void *data)
{
sclp_console_flush();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_console_notify,
.priority = SCLP_PANIC_PRIO_CLIENT,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_console_notify,
.priority = 1,
};
/*
* Used to register the SCLP console with the kernel and to give
* printk the necessary information
*/
static struct console sclp_console =
{
.name = sclp_console_name,
.write = sclp_console_write,
.device = sclp_console_device,
.flags = CON_PRINTBUFFER,
.index = 0 /* ttyS0 */
};
/*
* This function is called for SCLP suspend and resume events.
*/
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_FREEZE:
sclp_console_suspend();
break;
case SCLP_PM_EVENT_RESTORE:
case SCLP_PM_EVENT_THAW:
sclp_console_resume();
break;
}
}
/*
* Called by console_init() in drivers/tty/tty_io.c at boot time.
*/
static int __init
sclp_console_init(void)
{
void *page;
int i;
int rc;
if (!CONSOLE_IS_SCLP)
return 0;
rc = sclp_rw_init();
if (rc)
return rc;
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < sclp_console_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
break;
list_add_tail(page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
sclp_conbuf = NULL;
init_timer(&sclp_con_timer);
/* Set output format */
if (MACHINE_IS_VM)
/*
* save 4 characters for the CPU number
* written at start of each line by VM/CP
*/
sclp_con_columns = 76;
else
sclp_con_columns = 80;
sclp_con_width_htab = 8;
/* enable printk-access to this driver */
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_console);
return 0;
}
console_initcall(sclp_console_init);

View file

@ -0,0 +1,77 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_config"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
#include "sclp.h"
struct conf_mgm_data {
u8 reserved;
u8 ev_qualifier;
} __attribute__((packed));
#define EV_QUAL_CPU_CHANGE 1
#define EV_QUAL_CAP_CHANGE 3
static struct work_struct sclp_cpu_capability_work;
static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
struct device *dev;
s390_adjust_jiffies();
pr_info("CPU capability may have changed\n");
get_online_cpus();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
smp_rescan_cpus();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
{
struct conf_mgm_data *cdata;
cdata = (struct conf_mgm_data *)(evbuf + 1);
switch (cdata->ev_qualifier) {
case EV_QUAL_CPU_CHANGE:
schedule_work(&sclp_cpu_change_work);
break;
case EV_QUAL_CAP_CHANGE:
schedule_work(&sclp_cpu_capability_work);
break;
}
}
static struct sclp_register sclp_conf_register =
{
.receive_mask = EVTYP_CONFMGMDATA_MASK,
.receiver_fn = sclp_conf_receiver_fn,
};
static int __init sclp_conf_init(void)
{
INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
return sclp_register(&sclp_conf_register);
}
__initcall(sclp_conf_init);

View file

@ -0,0 +1,40 @@
/*
* SCLP control program identification
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Michael Ernst <mernst@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include "sclp_cpi_sys.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Identify this operating system instance "
"to the System z hardware");
MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, "
"Michael Ernst <mernst@de.ibm.com>");
static char *system_name = "";
static char *sysplex_name = "";
module_param(system_name, charp, 0);
MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
module_param(sysplex_name, charp, 0);
MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
static int __init cpi_module_init(void)
{
return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
LINUX_VERSION_CODE);
}
static void __exit cpi_module_exit(void)
{
}
module_init(cpi_module_init);
module_exit(cpi_module_exit);

View file

@ -0,0 +1,429 @@
/*
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Michael Ernst <mernst@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cpi"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_cpi_sys.h"
#define CPI_LENGTH_NAME 8
#define CPI_LENGTH_LEVEL 16
static DEFINE_MUTEX(sclp_cpi_mutex);
struct cpi_evbuf {
struct evbuf_header header;
u8 id_format;
u8 reserved0;
u8 system_type[CPI_LENGTH_NAME];
u64 reserved1;
u8 system_name[CPI_LENGTH_NAME];
u64 reserved2;
u64 system_level;
u64 reserved3;
u8 sysplex_name[CPI_LENGTH_NAME];
u8 reserved4[16];
} __attribute__((packed));
struct cpi_sccb {
struct sccb_header header;
struct cpi_evbuf cpi_evbuf;
} __attribute__((packed));
static struct sclp_register sclp_cpi_event = {
.send_mask = EVTYP_CTLPROGIDENT_MASK,
};
static char system_name[CPI_LENGTH_NAME + 1];
static char sysplex_name[CPI_LENGTH_NAME + 1];
static char system_type[CPI_LENGTH_NAME + 1];
static u64 system_level;
static void set_data(char *field, char *data)
{
memset(field, ' ', CPI_LENGTH_NAME);
memcpy(field, data, strlen(data));
sclp_ascebc_str(field, CPI_LENGTH_NAME);
}
static void cpi_callback(struct sclp_req *req, void *data)
{
struct completion *completion = data;
complete(completion);
}
static struct sclp_req *cpi_prepare_req(void)
{
struct sclp_req *req;
struct cpi_sccb *sccb;
struct cpi_evbuf *evb;
req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb) {
kfree(req);
return ERR_PTR(-ENOMEM);
}
/* setup SCCB for Control-Program Identification */
sccb->header.length = sizeof(struct cpi_sccb);
sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
sccb->cpi_evbuf.header.type = 0x0b;
evb = &sccb->cpi_evbuf;
/* set system type */
set_data(evb->system_type, system_type);
/* set system name */
set_data(evb->system_name, system_name);
/* set system level */
evb->system_level = system_level;
/* set sysplex name */
set_data(evb->sysplex_name, sysplex_name);
/* prepare request data structure presented to SCLP driver */
req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req->sccb = sccb;
req->status = SCLP_REQ_FILLED;
req->callback = cpi_callback;
return req;
}
static void cpi_free_req(struct sclp_req *req)
{
free_page((unsigned long) req->sccb);
kfree(req);
}
static int cpi_req(void)
{
struct completion completion;
struct sclp_req *req;
int rc;
int response;
rc = sclp_register(&sclp_cpi_event);
if (rc)
goto out;
if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
rc = -EOPNOTSUPP;
goto out_unregister;
}
req = cpi_prepare_req();
if (IS_ERR(req)) {
rc = PTR_ERR(req);
goto out_unregister;
}
init_completion(&completion);
req->callback_data = &completion;
/* Add request to sclp queue */
rc = sclp_add_request(req);
if (rc)
goto out_free_req;
wait_for_completion(&completion);
if (req->status != SCLP_REQ_DONE) {
pr_warning("request failed (status=0x%02x)\n",
req->status);
rc = -EIO;
goto out_free_req;
}
response = ((struct cpi_sccb *) req->sccb)->header.response_code;
if (response != 0x0020) {
pr_warning("request failed with response code 0x%x\n",
response);
rc = -EIO;
}
out_free_req:
cpi_free_req(req);
out_unregister:
sclp_unregister(&sclp_cpi_event);
out:
return rc;
}
static int check_string(const char *attr, const char *str)
{
size_t len;
size_t i;
len = strlen(str);
if ((len > 0) && (str[len - 1] == '\n'))
len--;
if (len > CPI_LENGTH_NAME)
return -EINVAL;
for (i = 0; i < len ; i++) {
if (isalpha(str[i]) || isdigit(str[i]) ||
strchr("$@# ", str[i]))
continue;
return -EINVAL;
}
return 0;
}
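/*
 * Example (illustrative): "LINUX01" passes the check above, and so does
 * "linux01" (lower-case letters are accepted here and upper-cased later
 * by set_string()), while "linux_01" is rejected because '_' is not in
 * the allowed set of alphanumerics plus "$@# ".
 */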
static void set_string(char *attr, const char *value)
{
size_t len;
size_t i;
len = strlen(value);
if ((len > 0) && (value[len - 1] == '\n'))
len--;
for (i = 0; i < CPI_LENGTH_NAME; i++) {
if (i < len)
attr[i] = toupper(value[i]);
else
attr[i] = ' ';
}
}
static ssize_t system_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("system_name", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_name_attr =
__ATTR(system_name, 0644, system_name_show, system_name_store);
static ssize_t sysplex_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t sysplex_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("sysplex_name", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(sysplex_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute sysplex_name_attr =
__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
static ssize_t system_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_type_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
int rc;
rc = check_string("system_type", buf);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_type, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_type_attr =
__ATTR(system_type, 0644, system_type_show, system_type_store);
static ssize_t system_level_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
unsigned long long level;
mutex_lock(&sclp_cpi_mutex);
level = system_level;
mutex_unlock(&sclp_cpi_mutex);
return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
}
static ssize_t system_level_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
size_t len)
{
unsigned long long level;
char *endp;
level = simple_strtoull(buf, &endp, 16);
if (endp == buf)
return -EINVAL;
if (*endp == '\n')
endp++;
if (*endp)
return -EINVAL;
mutex_lock(&sclp_cpi_mutex);
system_level = level;
mutex_unlock(&sclp_cpi_mutex);
return len;
}
static struct kobj_attribute system_level_attr =
__ATTR(system_level, 0644, system_level_show, system_level_store);
static ssize_t set_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
if (rc)
return rc;
return len;
}
static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
static struct attribute *cpi_attrs[] = {
&system_name_attr.attr,
&sysplex_name_attr.attr,
&system_type_attr.attr,
&system_level_attr.attr,
&set_attr.attr,
NULL,
};
static struct attribute_group cpi_attr_group = {
.attrs = cpi_attrs,
};
static struct kset *cpi_kset;
int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
const u64 level)
{
int rc;
rc = check_string("system_name", system);
if (rc)
return rc;
rc = check_string("sysplex_name", sysplex);
if (rc)
return rc;
rc = check_string("system_type", type);
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, system);
set_string(sysplex_name, sysplex);
set_string(system_type, type);
system_level = level;
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
EXPORT_SYMBOL(sclp_cpi_set_data);
static int __init cpi_init(void)
{
int rc;
cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
if (!cpi_kset)
return -ENOMEM;
rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
if (rc)
kset_unregister(cpi_kset);
return rc;
}
__initcall(cpi_init);
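/*
 * Usage note (illustrative): the attribute group above appears under
 * /sys/firmware/cpi. Writing the identifiers to system_name,
 * sysplex_name, system_type and system_level and then writing to the
 * "set" attribute triggers cpi_req(), which transmits the data via the
 * SCLP Control-Program Identification event.
 */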

View file

@ -0,0 +1,14 @@
/*
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2007
* Author(s): Michael Ernst <mernst@de.ibm.com>
*/
#ifndef __SCLP_CPI_SYS_H__
#define __SCLP_CPI_SYS_H__
int sclp_cpi_set_data(const char *system, const char *sysplex,
const char *type, u64 level);
#endif /* __SCLP_CPI_SYS_H__ */

View file

@ -0,0 +1,144 @@
/*
* IOCTL interface for SCLP
*
* Copyright IBM Corp. 2012
*
* Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <asm/compat.h>
#include <asm/sclp_ctl.h>
#include <asm/sclp.h>
#include "sclp.h"
/*
* Supported command words
*/
static unsigned int sclp_ctl_sccb_wlist[] = {
0x00400002,
0x00410002,
};
/*
* Check if command word is supported
*/
static int sclp_ctl_cmdw_supported(unsigned int cmdw)
{
int i;
for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) {
if (cmdw == sclp_ctl_sccb_wlist[i])
return 1;
}
return 0;
}
static void __user *u64_to_uptr(u64 value)
{
if (is_compat_task())
return compat_ptr(value);
else
return (void __user *)(unsigned long)value;
}
/*
* Start SCLP request
*/
static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
return -EFAULT;
if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
return -EOPNOTSUPP;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
rc = -EFAULT;
goto out_free;
}
if (sccb->length > PAGE_SIZE || sccb->length < 8) {
rc = -EINVAL;
goto out_free;
}
if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
rc = -EFAULT;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
if (rc)
goto out_free;
if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
rc = -EFAULT;
out_free:
free_page((unsigned long) sccb);
return rc;
}
/*
* SCLP SCCB ioctl function
*/
static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
void __user *argp;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (void __user *) arg;
switch (cmd) {
case SCLP_CTL_SCCB:
return sclp_ctl_ioctl_sccb(argp);
default: /* unknown ioctl number */
return -ENOTTY;
}
}
/*
* File operations
*/
static const struct file_operations sclp_ctl_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.unlocked_ioctl = sclp_ctl_ioctl,
.compat_ioctl = sclp_ctl_ioctl,
.llseek = no_llseek,
};
/*
* Misc device definition
*/
static struct miscdevice sclp_ctl_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "sclp",
.fops = &sclp_ctl_fops,
};
/*
* Register sclp_ctl misc device
*/
static int __init sclp_ctl_init(void)
{
return misc_register(&sclp_ctl_device);
}
module_init(sclp_ctl_init);
/*
* Deregister sclp_ctl misc device
*/
static void __exit sclp_ctl_exit(void)
{
misc_deregister(&sclp_ctl_device);
}
module_exit(sclp_ctl_exit);
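/*
 * Illustrative user-space sketch (not part of the original file; the
 * SCLP_CTL_SCCB ioctl number and the struct sclp_ctl_sccb layout come
 * from asm/sclp_ctl.h, and the SCCB payload is only a skeleton here):
 *
 *	int fd = open("/dev/sclp", O_RDWR);
 *	struct sclp_ctl_sccb ctl_sccb = {
 *		.cmdw = 0x00400002,
 *		.sccb = (__u64)(unsigned long) sccb_buf,
 *	};
 *	if (fd >= 0 && ioctl(fd, SCLP_CTL_SCCB, &ctl_sccb) == 0)
 *		... sccb_buf now holds the response ...
 */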

View file

@ -0,0 +1,89 @@
/*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef _SCLP_DIAG_H
#define _SCLP_DIAG_H
#include <linux/types.h>
/* return codes for Diagnostic Test FTP Service, as indicated in member
* sclp_diag_ftp::ldflg
*/
#define SCLP_DIAG_FTP_OK 0x80U /* success */
#define SCLP_DIAG_FTP_LDFAIL 0x01U /* load failed */
#define SCLP_DIAG_FTP_LDNPERM 0x02U /* not allowed */
#define SCLP_DIAG_FTP_LDRUNS 0x03U /* LD runs */
#define SCLP_DIAG_FTP_LDNRUNS 0x04U /* LD does not run */
#define SCLP_DIAG_FTP_XPCX 0x80 /* PCX communication code */
#define SCLP_DIAG_FTP_ROUTE 4 /* routing code for new FTP service */
/*
* length of Diagnostic Test FTP Service event buffer
*/
#define SCLP_DIAG_FTP_EVBUF_LEN \
(offsetof(struct sclp_diag_evbuf, mdd) + \
sizeof(struct sclp_diag_ftp))
/**
* struct sclp_diag_ftp - Diagnostic Test FTP Service model-dependent data
* @pcx: code for PCX communication (should be 0x80)
* @ldflg: load flag (see defines above)
* @cmd: FTP command
* @pgsize: page size (0 = 4kB, 1 = large page size)
* @srcflg: source flag
* @spare: reserved (zeroes)
* @offset: file offset
* @fsize: file size
* @length: buffer size on input, number of bytes transferred on completion
* @failaddr: failing address
* @bufaddr: buffer address, virtual
* @asce: region or segment table designation
* @fident: file name (ASCII, zero-terminated)
*/
struct sclp_diag_ftp {
u8 pcx;
u8 ldflg;
u8 cmd;
u8 pgsize;
u8 srcflg;
u8 spare;
u64 offset;
u64 fsize;
u64 length;
u64 failaddr;
u64 bufaddr;
u64 asce;
u8 fident[256];
} __packed;
/**
* struct sclp_diag_evbuf - Diagnostic Test (ET7) Event Buffer
* @hdr: event buffer header
* @route: diagnostic route
* @mdd: model-dependent data (@route dependent)
*/
struct sclp_diag_evbuf {
struct evbuf_header hdr;
u16 route;
union {
struct sclp_diag_ftp ftp;
} mdd;
} __packed;
/**
* struct sclp_diag_sccb - Diagnostic Test (ET7) SCCB
* @hdr: SCCB header
* @evbuf: event buffer
*/
struct sclp_diag_sccb {
struct sccb_header hdr;
struct sclp_diag_evbuf evbuf;
} __packed;
#endif /* _SCLP_DIAG_H */

View file

@ -0,0 +1,315 @@
/*
* SCLP early driver
*
* Copyright IBM Corp. 2013
*/
#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
struct read_info_sccb {
struct sccb_header header; /* 0-7 */
u16 rnmax; /* 8-9 */
u8 rnsize; /* 10 */
u8 _reserved0[16 - 11]; /* 11-15 */
u16 ncpurl; /* 16-17 */
u16 cpuoff; /* 18-19 */
u8 _reserved7[24 - 20]; /* 20-23 */
u8 loadparm[8]; /* 24-31 */
u8 _reserved1[48 - 32]; /* 32-47 */
u64 facilities; /* 48-55 */
u8 _reserved2a[76 - 56]; /* 56-75 */
u32 ibc; /* 76-79 */
u8 _reserved2b[84 - 80]; /* 80-83 */
u8 fac84; /* 84 */
u8 fac85; /* 85 */
u8 _reserved3[91 - 86]; /* 86-90 */
u8 flags; /* 91 */
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
u8 _reserved5[120 - 112]; /* 112-119 */
u16 hcpua; /* 120-121 */
u8 _reserved6[4096 - 122]; /* 122-4095 */
} __packed __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static unsigned int sclp_con_has_vt220 __initdata;
static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
static unsigned int sclp_max_cpu;
static struct sclp_ipl_info sclp_ipl_info;
static unsigned char sclp_siif;
static u32 sclp_ibc;
u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
unsigned long long sclp_rnmax;
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
int rc;
__ctl_set_bit(0, 9);
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
local_irq_disable();
out:
/* Contents of the sccb might have changed. */
barrier();
__ctl_clear_bit(0, 9);
return rc;
}
static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
int rc, i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
for (i = 0; i < ARRAY_SIZE(commands); i++) {
do {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
rc = sclp_cmd_sync_early(commands[i], sccb);
} while (rc == -EBUSY);
if (rc)
break;
if (sccb->header.response_code == 0x10)
return 0;
if (sccb->header.response_code != 0x1f0)
break;
}
return -EIO;
}
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
struct sclp_cpu_entry *cpue;
u16 boot_cpu_address, cpu;
if (sclp_read_info_early(sccb))
return;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp_rzm <<= 20;
sclp_ibc = sccb->ibc;
if (!sccb->hcpua) {
if (MACHINE_IS_VM)
sclp_max_cpu = 64;
else
sclp_max_cpu = sccb->ncpurl;
} else {
sclp_max_cpu = sccb->hcpua + 1;
}
boot_cpu_address = stap();
cpue = (void *)sccb + sccb->cpuoff;
for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
if (boot_cpu_address != cpue->address)
continue;
sclp_siif = cpue->siif;
break;
}
/* Save IPL information */
sclp_ipl_info.is_valid = 1;
if (sccb->flags & 0x2)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
}
bool __init sclp_has_linemode(void)
{
return !!sclp_con_has_linemode;
}
bool __init sclp_has_vt220(void)
{
return !!sclp_con_has_vt220;
}
unsigned long long sclp_get_rnmax(void)
{
return sclp_rnmax;
}
unsigned long long sclp_get_rzm(void)
{
return sclp_rzm;
}
unsigned int sclp_get_max_cpu(void)
{
return sclp_max_cpu;
}
int sclp_has_siif(void)
{
return sclp_siif;
}
EXPORT_SYMBOL(sclp_has_siif);
unsigned int sclp_get_ibc(void)
{
return sclp_ibc;
}
EXPORT_SYMBOL(sclp_get_ibc);
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. The sclp_facilities_detect() function retrieves
* and saves the IPL information.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
*info = sclp_ipl_info;
}
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
int rc;
do {
rc = sclp_cmd_sync_early(cmd, sccb);
} while (rc == -EBUSY);
if (rc)
return -EIO;
if (((struct sccb_header *) sccb)->response_code != 0x0020)
return -EIO;
return 0;
}
static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
memset(sccb, 0, sizeof(*sccb));
sccb->hdr.length = sizeof(*sccb);
sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb->evbuf.hdr.type = EVTYP_SDIAS;
sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb->evbuf.event_id = 4712;
sccb->evbuf.dbs = 1;
}
static int __init sclp_set_event_mask(struct init_sccb *sccb,
unsigned long receive_mask,
unsigned long send_mask)
{
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->mask_length = sizeof(sccb_mask_t);
sccb->receive_mask = receive_mask;
sccb->send_mask = send_mask;
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
if (sccb->evbuf.blk_cnt == 0)
return 0;
return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
memset(sccb, 0, PAGE_SIZE);
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
return -EIO;
if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
return 0;
return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
unsigned long sclp_get_hsa_size(void)
{
return sclp_hsa_size;
}
static void __init sclp_hsa_size_detect(void *sccb)
{
long size;
/* First try synchronous interface (LPAR) */
if (sclp_set_event_mask(sccb, 0, 0x40000010))
return;
size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
if (size != 0)
goto out;
/* Then try asynchronous interface (z/VM) */
if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
return;
size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
size = sclp_hsa_copy_wait(sccb);
if (size < 0)
return;
out:
sclp_hsa_size = size;
}
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
return 0;
if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
}
static void __init sclp_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
return;
if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
sclp_con_has_vt220 = 1;
if (sclp_con_check_linemode(sccb))
sclp_con_has_linemode = 1;
}
void __init sclp_early_detect(void)
{
void *sccb = &sccb_early;
sclp_facilities_detect(sccb);
sclp_hsa_size_detect(sccb);
/* Turn off SCLP event notifications. Also save remote masks in the
* sccb. These are sufficient to detect sclp console capabilities.
*/
sclp_set_event_mask(sccb, 0, 0);
sclp_console_detect(sccb);
}

View file

@ -0,0 +1,275 @@
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include "sclp.h"
#include "sclp_diag.h"
#include "sclp_ftp.h"
static DECLARE_COMPLETION(sclp_ftp_rx_complete);
static u8 sclp_ftp_ldflg;
static u64 sclp_ftp_fsize;
static u64 sclp_ftp_length;
/**
* sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
*/
static void sclp_ftp_txcb(struct sclp_req *req, void *data)
{
struct completion *completion = data;
#ifdef DEBUG
pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
req->sccb, 24, req->sccb);
#endif
complete(completion);
}
/**
* sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
*/
static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
{
struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;
/*
* Check for Diagnostic Test FTP Service
*/
if (evbuf->type != EVTYP_DIAG_TEST ||
diag->route != SCLP_DIAG_FTP_ROUTE ||
diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
return;
#ifdef DEBUG
pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
evbuf, 24, evbuf);
#endif
/*
* Because the event buffer is located in a page which is owned
* by the SCLP core, all data of interest must be copied. The
* error indication is in 'sclp_ftp_ldflg'
*/
sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
sclp_ftp_fsize = diag->mdd.ftp.fsize;
sclp_ftp_length = diag->mdd.ftp.length;
complete(&sclp_ftp_rx_complete);
}
/**
* sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
* @ftp: pointer to FTP descriptor
*
* Return: 0 on success, else a (negative) error code
*/
static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
{
struct completion completion;
struct sclp_diag_sccb *sccb;
struct sclp_req *req;
size_t len;
int rc;
req = kzalloc(sizeof(*req), GFP_KERNEL);
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!req || !sccb) {
rc = -ENOMEM;
goto out_free;
}
sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
sizeof(struct sccb_header);
sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
sccb->evbuf.mdd.ftp.srcflg = 0;
sccb->evbuf.mdd.ftp.pgsize = 0;
sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
sccb->evbuf.mdd.ftp.fsize = 0;
sccb->evbuf.mdd.ftp.cmd = ftp->id;
sccb->evbuf.mdd.ftp.offset = ftp->ofs;
sccb->evbuf.mdd.ftp.length = ftp->len;
sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
if (len >= HMCDRV_FTP_FIDENT_MAX) {
rc = -EINVAL;
goto out_free;
}
req->command = SCLP_CMDW_WRITE_EVENT_DATA;
req->sccb = sccb;
req->status = SCLP_REQ_FILLED;
req->callback = sclp_ftp_txcb;
req->callback_data = &completion;
init_completion(&completion);
rc = sclp_add_request(req);
if (rc)
goto out_free;
/* Wait for end of ftp sclp command. */
wait_for_completion(&completion);
#ifdef DEBUG
pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
sccb->hdr.response_code, sccb->evbuf.hdr.flags);
#endif
/*
* Check if sclp accepted the request. The data transfer runs
* asynchronously and the completion is indicated with an
* sclp ET7 event.
*/
if (req->status != SCLP_REQ_DONE ||
(sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
(sccb->hdr.response_code & 0xffU) != 0x20U) {
rc = -EIO;
}
out_free:
free_page((unsigned long) sccb);
kfree(req);
return rc;
}
/**
* sclp_ftp_cmd() - executes a HMC related SCLP Diagnose (ET7) FTP command
* @ftp: pointer to FTP command specification
* @fsize: return of file size (or NULL if undesirable)
*
* Attention: Notice that this function is not reentrant - so the caller
* must ensure locking.
*
* Return: number of bytes read/written or a (negative) error code
*/
ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
ssize_t len;
#ifdef DEBUG
unsigned long start_jiffies;
pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
start_jiffies = jiffies;
#endif
init_completion(&sclp_ftp_rx_complete);
/* Start ftp sclp command. */
len = sclp_ftp_et7(ftp);
if (len)
goto out_unlock;
/*
* There is no way to cancel the sclp ET7 request, the code
* needs to wait unconditionally until the transfer is complete.
*/
wait_for_completion(&sclp_ftp_rx_complete);
#ifdef DEBUG
pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
(jiffies - start_jiffies) * 1000 / HZ);
pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
#endif
switch (sclp_ftp_ldflg) {
case SCLP_DIAG_FTP_OK:
len = sclp_ftp_length;
if (fsize)
*fsize = sclp_ftp_fsize;
break;
case SCLP_DIAG_FTP_LDNPERM:
len = -EPERM;
break;
case SCLP_DIAG_FTP_LDRUNS:
len = -EBUSY;
break;
case SCLP_DIAG_FTP_LDFAIL:
len = -ENOENT;
break;
default:
len = -EIO;
break;
}
out_unlock:
return len;
}
/*
* ET7 event listener
*/
static struct sclp_register sclp_ftp_event = {
.send_mask = EVTYP_DIAG_TEST_MASK, /* want tx events */
.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
.receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */
.state_change_fn = NULL,
.pm_event_fn = NULL,
};
/**
* sclp_ftp_startup() - startup of FTP services, when running on LPAR
*/
int sclp_ftp_startup(void)
{
#ifdef DEBUG
unsigned long info;
#endif
int rc;
rc = sclp_register(&sclp_ftp_event);
if (rc)
return rc;
#ifdef DEBUG
info = get_zeroed_page(GFP_KERNEL);
if (info != 0) {
struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
info222->name[sizeof(info222->name) - 1] = '\0';
EBCASC_500(info222->name, sizeof(info222->name) - 1);
pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
info222->lpar_number, info222->name);
}
free_page(info);
}
#endif /* DEBUG */
return 0;
}
/**
* sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
*/
void sclp_ftp_shutdown(void)
{
sclp_unregister(&sclp_ftp_event);
}
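/*
 * Illustrative sketch (not part of the original file): read a file via
 * the Diagnostic Test FTP service into a caller-provided buffer.
 * HMCDRV_FTP_GET is assumed to be the appropriate command id from
 * hmcdrv_ftp.h; serialization is left to the caller, as required by the
 * comment on sclp_ftp_cmd().
 */
static ssize_t __maybe_unused sclp_ftp_read_example(void *buf, size_t len,
char *fname)
{
struct hmcdrv_ftp_cmdspec ftp = {
.id = HMCDRV_FTP_GET, /* assumed command id */
.ofs = 0,
.len = len,
.buf = buf,
.fname = fname,
};
return sclp_ftp_cmd(&ftp, NULL); /* remote file size not needed */
}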

View file

@ -0,0 +1,21 @@
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
*
* Note that none of the functions exported here are reentrant.
* Usage therefore has to be exclusive, which the caller must ensure
* (e.g. by using a mutex).
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __SCLP_FTP_H__
#define __SCLP_FTP_H__
#include "hmcdrv_ftp.h"
int sclp_ftp_startup(void);
void sclp_ftp_shutdown(void);
ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
#endif /* __SCLP_FTP_H__ */

View file

@ -0,0 +1,144 @@
/*
* SCLP OCF communication parameters sysfs interface
*
* Copyright IBM Corp. 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_ocf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#define OCF_LENGTH_HMC_NETWORK 8UL
#define OCF_LENGTH_CPC_NAME 8UL
static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
static DEFINE_SPINLOCK(sclp_ocf_lock);
static struct work_struct sclp_ocf_change_work;
static struct kset *ocf_kset;
static void sclp_ocf_change_notify(struct work_struct *work)
{
kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
}
/* Handler for OCF event. Look for the CPC image name. */
static void sclp_ocf_handler(struct evbuf_header *evbuf)
{
struct gds_vector *v;
struct gds_subvector *sv, *netid, *cpc;
size_t size;
/* Find the 0x9f00 block. */
v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
0x9f00);
if (!v)
return;
/* Find the 0x9f22 block inside the 0x9f00 block. */
v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
if (!v)
return;
/* Find the 0x81 block inside the 0x9f22 block. */
sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
if (!sv)
return;
/* Find the 0x01 block inside the 0x81 block. */
netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
/* Find the 0x02 block inside the 0x81 block. */
cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
/* Copy network name and cpc name. */
spin_lock(&sclp_ocf_lock);
if (netid) {
size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
memcpy(hmc_network, netid + 1, size);
EBCASC(hmc_network, size);
hmc_network[size] = 0;
}
if (cpc) {
size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
memcpy(cpc_name, cpc + 1, size);
EBCASC(cpc_name, size);
cpc_name[size] = 0;
}
spin_unlock(&sclp_ocf_lock);
schedule_work(&sclp_ocf_change_work);
}
static struct sclp_register sclp_ocf_event = {
.receive_mask = EVTYP_OCF_MASK,
.receiver_fn = sclp_ocf_handler,
};
static ssize_t cpc_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
spin_lock_irq(&sclp_ocf_lock);
rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
spin_unlock_irq(&sclp_ocf_lock);
return rc;
}
static struct kobj_attribute cpc_name_attr =
__ATTR(cpc_name, 0444, cpc_name_show, NULL);
static ssize_t hmc_network_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
spin_lock_irq(&sclp_ocf_lock);
rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
spin_unlock_irq(&sclp_ocf_lock);
return rc;
}
static struct kobj_attribute hmc_network_attr =
__ATTR(hmc_network, 0444, hmc_network_show, NULL);
static struct attribute *ocf_attrs[] = {
&cpc_name_attr.attr,
&hmc_network_attr.attr,
NULL,
};
static struct attribute_group ocf_attr_group = {
.attrs = ocf_attrs,
};
static int __init ocf_init(void)
{
int rc;
INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
if (!ocf_kset)
return -ENOMEM;
rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
if (rc) {
kset_unregister(ocf_kset);
return rc;
}
return sclp_register(&sclp_ocf_event);
}
device_initcall(ocf_init);

View file

@ -0,0 +1,84 @@
/*
* signal quiesce handler
*
* Copyright IBM Corp. 1999, 2004
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include "sclp.h"
static void (*old_machine_restart)(char *);
static void (*old_machine_halt)(void);
static void (*old_machine_power_off)(void);
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void do_machine_quiesce(void)
{
psw_t quiesce_psw;
smp_send_stop();
quiesce_psw.mask =
PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
/* Handler for quiesce event. Start shutdown procedure. */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
if (_machine_restart != (void *) do_machine_quiesce) {
old_machine_restart = _machine_restart;
old_machine_halt = _machine_halt;
old_machine_power_off = _machine_power_off;
_machine_restart = (void *) do_machine_quiesce;
_machine_halt = do_machine_quiesce;
_machine_power_off = do_machine_quiesce;
}
ctrl_alt_del();
}
/* Undo machine restart/halt/power_off modification on resume */
static void sclp_quiesce_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_RESTORE:
if (old_machine_restart) {
_machine_restart = old_machine_restart;
_machine_halt = old_machine_halt;
_machine_power_off = old_machine_power_off;
old_machine_restart = NULL;
old_machine_halt = NULL;
old_machine_power_off = NULL;
}
break;
case SCLP_PM_EVENT_FREEZE:
case SCLP_PM_EVENT_THAW:
break;
}
}
static struct sclp_register sclp_quiesce_event = {
.receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler,
.pm_event_fn = sclp_quiesce_pm_event
};
/* Initialize quiesce driver. */
static int __init sclp_quiesce_init(void)
{
return sclp_register(&sclp_quiesce_event);
}
module_init(sclp_quiesce_init);

469
drivers/s390/char/sclp_rw.c Normal file
View file

@ -0,0 +1,469 @@
/*
* driver: reading from and writing to system console on S/390 via SCLP
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include "sclp.h"
#include "sclp_rw.h"
/*
* The room for the SCCB (only for writing) is not equal to a page's size
* (which is specified as the maximum size in the SCLP documentation)
* because of the struct sclp_buffer bookkeeping data kept at the end of
* the page (see sclp_make_buffer() below).
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
static void sclp_rw_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
sclp_console_pm_event(sclp_pm_event);
}
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
.send_mask = EVTYP_MSG_MASK,
.pm_event_fn = sclp_rw_pm_event,
};
/*
* Set up an sclp write buffer. Takes a page (4K) as input and returns
* a pointer to a struct sclp_buffer located at the end of that page.
* This reduces the usable buffer space by a few bytes but simplifies
* things.
*/
struct sclp_buffer *
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
{
struct sclp_buffer *buffer;
struct write_sccb *sccb;
sccb = (struct write_sccb *) page;
/*
* We keep the struct sclp_buffer structure at the end
* of the sccb page.
*/
buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
buffer->sccb = sccb;
buffer->retry_count = 0;
buffer->mto_number = 0;
buffer->mto_char_sum = 0;
buffer->current_line = NULL;
buffer->current_length = 0;
buffer->columns = columns;
buffer->htab = htab;
/* initialize sccb */
memset(sccb, 0, sizeof(struct write_sccb));
sccb->header.length = sizeof(struct write_sccb);
sccb->msg_buf.header.length = sizeof(struct msg_buf);
sccb->msg_buf.header.type = EVTYP_MSG;
sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
sccb->msg_buf.mdb.header.type = 1;
sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
sccb->msg_buf.mdb.header.revision_code = 1;
sccb->msg_buf.mdb.go.length = sizeof(struct go);
sccb->msg_buf.mdb.go.type = 1;
return buffer;
}
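/*
 * Resulting page layout (illustration):
 *
 * page start: struct write_sccb (SCCB header plus message event buffer),
 * followed by the MTOs appended by sclp_write()
 * page end: struct sclp_buffer (driver bookkeeping, not part of the SCCB)
 */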
/*
* Return a pointer to the original page that has been used to create
* the buffer.
*/
void *
sclp_unmake_buffer(struct sclp_buffer *buffer)
{
return buffer->sccb;
}
/*
* Initialize a new Message Text Object (MTO) at the end of the provided buffer
* with enough room for max_len characters. Return 0 on success.
*/
static int
sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
{
struct write_sccb *sccb;
struct mto *mto;
int mto_size;
/* max size of new Message Text Object including message text */
mto_size = sizeof(struct mto) + max_len;
/* check if current buffer sccb can contain the mto */
sccb = buffer->sccb;
if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
return -ENOMEM;
/* find address of new message text object */
mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
/*
* fill the new Message-Text Object,
* starting behind the former last byte of the SCCB
*/
memset(mto, 0, sizeof(struct mto));
mto->length = sizeof(struct mto);
mto->type = 4; /* message text object */
mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
/* set pointer to first byte after struct mto. */
buffer->current_line = (char *) (mto + 1);
buffer->current_length = 0;
return 0;
}
/*
* Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
* MTO, enclosing MDB, event buffer and SCCB.
*/
static void
sclp_finalize_mto(struct sclp_buffer *buffer)
{
struct write_sccb *sccb;
struct mto *mto;
int str_len, mto_size;
str_len = buffer->current_length;
buffer->current_line = NULL;
buffer->current_length = 0;
/* real size of new Message Text Object including message text */
mto_size = sizeof(struct mto) + str_len;
/* find address of new message text object */
sccb = buffer->sccb;
mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
/* set size of message text object */
mto->length = mto_size;
/*
* update values of sizes
* (SCCB, Event(Message) Buffer, Message Data Block)
*/
sccb->header.length += mto_size;
sccb->msg_buf.header.length += mto_size;
sccb->msg_buf.mdb.header.length += mto_size;
/*
* count number of buffered messages (= number of Message Text
* Objects) and number of buffered characters
* for the SCCB currently used for buffering and at all
*/
buffer->mto_number++;
buffer->mto_char_sum += str_len;
}
/*
* Process a message including escape characters. Returns the number of
* characters written to the output sccb ("processed" means that it is
* not guaranteed that the characters have already been sent to the SCLP,
* but that this will be done at least the next time the SCLP is not
* busy).
*/
int
sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
{
int spaces, i_msg;
int rc;
/*
* Parse msg for escape sequences (\t, \v, ...) and put the formatted
* msg into an mto (created by sclp_initialize_mto).
*
* We have to do this work ourselves because there is no support for
* these characters on the native machine and only partial support
* under VM (why does VM interpret \n but the native machine doesn't?).
*
* Depending on the i/o-control setting, the message is either written
* immediately or we wait for a final new line that may come with the
* next message. In addition, a buffer overrun is avoided by emitting
* the buffer's content once it is full.
*
* RESTRICTIONS:
*
* \r and \b work within one line because we are not able to modify
* previous output that has already been accepted by the SCLP.
*
* \t combined with a following \r is not correctly represented because
* \t is expanded to some spaces but \r does not know about a previous
* \t and decreases the current position by one column. This keeps the
* implementation slim and quick.
*/
for (i_msg = 0; i_msg < count; i_msg++) {
switch (msg[i_msg]) {
case '\n': /* new line, line feed (ASCII) */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer, 0);
if (rc)
return i_msg;
}
sclp_finalize_mto(buffer);
break;
case '\a': /* bell, one for several times */
/* set SCLP sound alarm bit in General Object */
buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
GNRLMSGFLGS_SNDALRM;
break;
case '\t': /* horizontal tabulator */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
/* "go to (next htab-boundary + 1, same line)" */
do {
if (buffer->current_length >= buffer->columns)
break;
/* ok, add a blank */
*buffer->current_line++ = 0x40;
buffer->current_length++;
} while (buffer->current_length % buffer->htab);
break;
case '\f': /* form feed */
case '\v': /* vertical tabulator */
/* "go to (actual column, actual line + 1)" */
/* = new line, leading spaces */
if (buffer->current_line != NULL) {
spaces = buffer->current_length;
sclp_finalize_mto(buffer);
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
memset(buffer->current_line, 0x40, spaces);
buffer->current_line += spaces;
buffer->current_length = spaces;
} else {
/* on an empty line this is the same as \n */
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
sclp_finalize_mto(buffer);
}
break;
case '\b': /* backspace */
/* "go to (actual column - 1, actual line)" */
/* decrement counter indicating position, */
/* do not remove last character */
if (buffer->current_line != NULL &&
buffer->current_length > 0) {
buffer->current_length--;
buffer->current_line--;
}
break;
case 0x00: /* end of string */
/* transfer current line to SCCB */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* skip the rest of the message including the 0 byte */
i_msg = count - 1;
break;
default: /* no escape character */
/* do not output unprintable characters */
if (!isprint(msg[i_msg]))
break;
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
buffer->current_length++;
break;
}
/* check if current mto is full */
if (buffer->current_line != NULL &&
buffer->current_length >= buffer->columns)
sclp_finalize_mto(buffer);
}
/* return number of processed characters */
return i_msg;
}
/*
* Return the number of free bytes in the sccb
*/
int
sclp_buffer_space(struct sclp_buffer *buffer)
{
int count;
count = MAX_SCCB_ROOM - buffer->sccb->header.length;
if (buffer->current_line != NULL)
count -= sizeof(struct mto) + buffer->current_length;
return count;
}
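/*
 * Illustrative sketch, not part of the driver: a caller can use
 * sclp_buffer_space() to check whether a string still fits before calling
 * sclp_write(). The helper name and the worst-case estimate (one new mto
 * header plus the text itself) are assumptions for illustration only.
 */
static inline int example_string_fits(struct sclp_buffer *buffer, int len)
{
	return sclp_buffer_space(buffer) >= (int) (sizeof(struct mto) + len);
}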
/*
* Return number of characters in buffer
*/
int
sclp_chars_in_buffer(struct sclp_buffer *buffer)
{
int count;
count = buffer->mto_char_sum;
if (buffer->current_line != NULL)
count += buffer->current_length;
return count;
}
/*
 * set or provide values that influence the driver's behaviour
 */
void
sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
{
buffer->columns = columns;
if (buffer->current_line != NULL &&
buffer->current_length > buffer->columns)
sclp_finalize_mto(buffer);
}
void
sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
{
buffer->htab = htab;
}
/*
* called by sclp_console_init and/or sclp_tty_init
*/
int
sclp_rw_init(void)
{
static int init_done = 0;
int rc;
if (init_done)
return 0;
rc = sclp_register(&sclp_rw_event);
if (rc == 0)
init_done = 1;
return rc;
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
 * Second half of the Write Event Data function, executed after the
 * interrupt that indicates completion of the Service Call.
 */
static void
sclp_writedata_callback(struct sclp_req *request, void *data)
{
int rc;
struct sclp_buffer *buffer;
struct write_sccb *sccb;
buffer = (struct sclp_buffer *) data;
sccb = buffer->sccb;
if (request->status == SCLP_REQ_FAILED) {
if (buffer->callback != NULL)
buffer->callback(buffer, -EIO);
return;
}
/* check SCLP response code and choose suitable action */
switch (sccb->header.response_code) {
case 0x0020 :
/* Normal completion, buffer processed, message(s) sent */
rc = 0;
break;
case 0x0340: /* Contained SCLP equipment check */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* not all buffers were processed */
sccb->header.response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
} else
rc = 0;
break;
case 0x0040: /* SCLP equipment check */
case 0x05f0: /* Target resource in improper state */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* retry request */
sccb->header.response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
break;
default:
if (sccb->header.response_code == 0x71f0)
rc = -ENOMEM;
else
rc = -EINVAL;
break;
}
if (buffer->callback != NULL)
buffer->callback(buffer, rc);
}
/*
* Setup the request structure in the struct sclp_buffer to do SCLP Write
* Event Data and pass the request to the core SCLP loop. Return zero on
* success, non-zero otherwise.
*/
int
sclp_emit_buffer(struct sclp_buffer *buffer,
void (*callback)(struct sclp_buffer *, int))
{
struct write_sccb *sccb;
/* add current line if there is one */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* Are there messages in the output buffer? */
if (buffer->mto_number == 0)
return -EIO;
sccb = buffer->sccb;
/* Use normal write message */
sccb->msg_buf.header.type = EVTYP_MSG;
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
buffer->request.status = SCLP_REQ_FILLED;
buffer->request.callback = sclp_writedata_callback;
buffer->request.callback_data = buffer;
buffer->request.sccb = sccb;
buffer->callback = callback;
return sclp_add_request(&buffer->request);
}
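/*
 * Illustrative usage sketch (hypothetical helpers, not part of the
 * driver): fill a buffer and emit it, recycling the page in the
 * completion callback. Real users such as the tty and console drivers
 * below additionally keep a queue of outstanding buffers. Assumes the
 * page was obtained with get_zeroed_page().
 */
static void example_write_done(struct sclp_buffer *buffer, int rc)
{
	void *page = sclp_unmake_buffer(buffer);

	/* rc < 0 means the write failed; the page can be reused anyway */
	free_page((unsigned long) page);
}

static int example_emit_line(void *page, const unsigned char *s, int len)
{
	struct sclp_buffer *buffer;

	/* 80 columns, horizontal tabs every 8 columns */
	buffer = sclp_make_buffer(page, 80, 8);
	/* a full caller would re-check the return value of sclp_write() */
	sclp_write(buffer, s, len);
	return sclp_emit_buffer(buffer, example_write_done);
}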

101
drivers/s390/char/sclp_rw.h Normal file

@ -0,0 +1,101 @@
/*
* interface to the SCLP-read/write driver
*
* Copyright IBM Corporation 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_RW_H__
#define __SCLP_RW_H__
#include <linux/list.h>
struct mto {
u16 length;
u16 type;
u16 line_type_flags;
u8 alarm_control;
u8 _reserved[3];
} __attribute__((packed));
struct go {
u16 length;
u16 type;
u32 domid;
u8 hhmmss_time[8];
u8 th_time[3];
u8 reserved_0;
u8 dddyyyy_date[7];
u8 _reserved_1;
u16 general_msg_flags;
u8 _reserved_2[10];
u8 originating_system_name[8];
u8 job_guest_name[8];
} __attribute__((packed));
struct mdb_header {
u16 length;
u16 type;
u32 tag;
u32 revision_code;
} __attribute__((packed));
struct mdb {
struct mdb_header header;
struct go go;
} __attribute__((packed));
struct msg_buf {
struct evbuf_header header;
struct mdb mdb;
} __attribute__((packed));
struct write_sccb {
struct sccb_header header;
struct msg_buf msg_buf;
} __attribute__((packed));
/* The number of empty mto buffers that can be contained in a single sccb. */
#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
sizeof(struct write_sccb)) / sizeof(struct mto))
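/*
 * Illustrative layout sketch (assuming PAGE_SIZE == 4096): a buffer page
 * holds the growing write_sccb at its start and the struct sclp_buffer
 * bookkeeping at its end, so NR_EMPTY_MTO_PER_SCCB is simply the space
 * in between divided by sizeof(struct mto):
 *
 *   +-------------------+---------------------+--------------------+
 *   | struct write_sccb | mto 0 | mto 1 | ... | struct sclp_buffer |
 *   +-------------------+---------------------+--------------------+
 *   page start                                             page end
 */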
/*
 * data structure for information about a list of SCCBs (only for writing);
 * it is located at the end of the SCCB's page
 */
struct sclp_buffer {
struct list_head list; /* list_head for sccb_info chain */
struct sclp_req request;
struct write_sccb *sccb;
char *current_line;
int current_length;
int retry_count;
/* output format settings */
unsigned short columns;
unsigned short htab;
/* statistics about this buffer */
unsigned int mto_char_sum; /* # chars in sccb */
unsigned int mto_number; /* # mtos in sccb */
/* Callback that is called after reaching final status. */
void (*callback)(struct sclp_buffer *, int);
};
int sclp_rw_init(void);
struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);
#ifdef CONFIG_SCLP_CONSOLE
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
#else
static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
#endif
#endif /* __SCLP_RW_H__ */

281
drivers/s390/char/sclp_sdias.c Normal file

@ -0,0 +1,281 @@
/*
* SCLP "store data in absolute storage"
*
* Copyright IBM Corp. 2003, 2013
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "sclp_sdias"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/sched.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"
#include "sclp_rw.h"
#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
#define SDIAS_RETRIES 300
#define SDIAS_SLEEP_TICKS 50
static struct debug_info *sdias_dbf;
static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
static struct sdias_sccb sccb __attribute__((aligned(4096)));
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
static DECLARE_COMPLETION(evbuf_done);
static DEFINE_MUTEX(sdias_mutex);
/*
* Called by SCLP base when read event data has been completed (async mode only)
*/
static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
{
memcpy(&sdias_evbuf, evbuf,
min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
complete(&evbuf_done);
TRACE("sclp_sdias_receiver_fn done\n");
}
/*
* Called by SCLP base when sdias event has been accepted
*/
static void sdias_callback(struct sclp_req *request, void *data)
{
complete(&evbuf_accepted);
TRACE("callback done\n");
}
static int sdias_sclp_send(struct sclp_req *req)
{
int retries;
int rc;
for (retries = SDIAS_RETRIES; retries; retries--) {
TRACE("add request\n");
rc = sclp_add_request(req);
if (rc) {
/* not initiated, wait some time and retry */
set_current_state(TASK_INTERRUPTIBLE);
TRACE("add request failed: rc = %i\n",rc);
schedule_timeout(SDIAS_SLEEP_TICKS);
continue;
}
/* initiated, wait for completion of service call */
wait_for_completion(&evbuf_accepted);
if (req->status == SCLP_REQ_FAILED) {
TRACE("sclp request failed\n");
continue;
}
/* if not accepted, retry */
if (!(sccb.evbuf.hdr.flags & 0x80)) {
TRACE("sclp request failed: flags=%x\n",
sccb.evbuf.hdr.flags);
continue;
}
/*
* for the sync interface the response is in the initial sccb
*/
if (!sclp_sdias_register.receiver_fn) {
memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
TRACE("sync request done\n");
return 0;
}
/* otherwise we wait for completion */
wait_for_completion(&evbuf_done);
TRACE("request done\n");
return 0;
}
return -EIO;
}
/*
* Get number of blocks (4K) available in the HSA
*/
int sclp_sdias_blk_count(void)
{
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
memset(&sccb, 0, sizeof(sccb));
memset(&request, 0, sizeof(request));
sccb.hdr.length = sizeof(sccb);
sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb.evbuf.hdr.type = EVTYP_SDIAS;
sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
sccb.evbuf.dbs = 1;
request.sccb = &sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
rc = sdias_sclp_send(&request);
if (rc) {
pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
if (sccb.hdr.response_code != 0x0020) {
TRACE("send failed: %x\n", sccb.hdr.response_code);
rc = -EIO;
goto out;
}
switch (sdias_evbuf.event_status) {
case 0:
rc = sdias_evbuf.blk_cnt;
break;
default:
pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
rc = -EIO;
goto out;
}
TRACE("%i blocks\n", rc);
out:
mutex_unlock(&sdias_mutex);
return rc;
}
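/*
 * Illustrative usage sketch (hypothetical helper): since each HSA block
 * covers 4KB, the total HSA size in bytes follows directly from the
 * block count.
 */
static inline long example_hsa_size(void)
{
	int blocks = sclp_sdias_blk_count();

	return blocks < 0 ? blocks : (long) blocks * PAGE_SIZE;
}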
/*
* Copy from HSA to absolute storage (not reentrant):
*
* @dest : Address of buffer where data should be copied
* @start_blk: Start Block (beginning with 1)
* @nr_blks : Number of 4K blocks to copy
*
* Return Value: 0 : Requested 'number' of blocks of data copied
* <0: ERROR - negative event status
*/
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
struct sclp_req request;
int rc;
mutex_lock(&sdias_mutex);
memset(&sccb, 0, sizeof(sccb));
memset(&request, 0, sizeof(request));
sccb.hdr.length = sizeof(sccb);
sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb.evbuf.hdr.type = EVTYP_SDIAS;
sccb.evbuf.hdr.flags = 0;
sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
#ifdef CONFIG_64BIT
sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
#else
sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32;
#endif
sccb.evbuf.event_status = 0;
sccb.evbuf.blk_cnt = nr_blks;
sccb.evbuf.asa = (unsigned long)dest;
sccb.evbuf.fbn = start_blk;
sccb.evbuf.lbn = 0;
sccb.evbuf.dbs = 1;
request.sccb = &sccb;
request.command = SCLP_CMDW_WRITE_EVENT_DATA;
request.status = SCLP_REQ_FILLED;
request.callback = sdias_callback;
rc = sdias_sclp_send(&request);
if (rc) {
pr_err("sclp_send failed: %x\n", rc);
goto out;
}
if (sccb.hdr.response_code != 0x0020) {
TRACE("copy failed: %x\n", sccb.hdr.response_code);
rc = -EIO;
goto out;
}
switch (sdias_evbuf.event_status) {
case SDIAS_EVSTATE_ALL_STORED:
TRACE("all stored\n");
break;
case SDIAS_EVSTATE_PART_STORED:
TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
break;
case SDIAS_EVSTATE_NO_DATA:
TRACE("no data\n");
/* fall through */
default:
pr_err("Error from SCLP while copying hsa. Event status = %x\n",
sdias_evbuf.event_status);
rc = -EIO;
}
out:
mutex_unlock(&sdias_mutex);
return rc;
}
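/*
 * Illustrative usage sketch (hypothetical helper): copy the first
 * nr_blks 4KB blocks of the HSA into a kernel buffer, one block per
 * call. Note that block numbering starts at 1, not 0.
 */
static int example_read_hsa(void *buf, int nr_blks)
{
	int blk, rc;

	for (blk = 1; blk <= nr_blks; blk++) {
		rc = sclp_sdias_copy(buf + (blk - 1) * PAGE_SIZE, blk, 1);
		if (rc)
			return rc;
	}
	return 0;
}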
static int __init sclp_sdias_register_check(void)
{
int rc;
rc = sclp_register(&sclp_sdias_register);
if (rc)
return rc;
if (sclp_sdias_blk_count() == 0) {
sclp_unregister(&sclp_sdias_register);
return -ENODEV;
}
return 0;
}
static int __init sclp_sdias_init_sync(void)
{
TRACE("Try synchronous mode\n");
sclp_sdias_register.receive_mask = 0;
sclp_sdias_register.receiver_fn = NULL;
return sclp_sdias_register_check();
}
static int __init sclp_sdias_init_async(void)
{
TRACE("Try asynchronous mode\n");
sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
return sclp_sdias_register_check();
}
int __init sclp_sdias_init(void)
{
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return 0;
sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
if (sclp_sdias_init_sync() == 0)
goto out;
if (sclp_sdias_init_async() == 0)
goto out;
TRACE("init failed\n");
return -ENODEV;
out:
TRACE("init done\n");
return 0;
}
void __exit sclp_sdias_exit(void)
{
debug_unregister(sdias_dbf);
sclp_unregister(&sclp_sdias_register);
}

46
drivers/s390/char/sclp_sdias.h Normal file

@ -0,0 +1,46 @@
/*
* SCLP "store data in absolute storage"
*
* Copyright IBM Corp. 2003, 2013
*/
#ifndef SCLP_SDIAS_H
#define SCLP_SDIAS_H
#include "sclp.h"
#define SDIAS_EQ_STORE_DATA 0x0
#define SDIAS_EQ_SIZE 0x1
#define SDIAS_DI_FCP_DUMP 0x0
#define SDIAS_ASA_SIZE_32 0x0
#define SDIAS_ASA_SIZE_64 0x1
#define SDIAS_EVSTATE_ALL_STORED 0x0
#define SDIAS_EVSTATE_NO_DATA 0x3
#define SDIAS_EVSTATE_PART_STORED 0x10
struct sdias_evbuf {
struct evbuf_header hdr;
u8 event_qual;
u8 data_id;
u64 reserved2;
u32 event_id;
u16 reserved3;
u8 asa_size;
u8 event_status;
u32 reserved4;
u32 blk_cnt;
u64 asa;
u32 reserved5;
u32 fbn;
u32 reserved6;
u32 lbn;
u16 reserved7;
u16 dbs;
} __packed;
struct sdias_sccb {
struct sccb_header hdr;
struct sdias_evbuf evbuf;
} __packed;
#endif /* SCLP_SDIAS_H */

576
drivers/s390/char/sclp_tty.c Normal file

@ -0,0 +1,576 @@
/*
* SCLP line mode terminal driver.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include "ctrlchar.h"
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
/*
* size of a buffer that collects single characters coming in
* via sclp_tty_put_char()
*/
#define SCLP_TTY_BUF_SIZE 512
/*
* There is exactly one SCLP terminal, so we can keep things simple
* and allocate all variables statically.
*/
/* Lock to guard over changes to global variables. */
static spinlock_t sclp_tty_lock;
/* List of free pages that can be used for console output buffering. */
static struct list_head sclp_tty_pages;
/* List of full struct sclp_buffer structures ready for output. */
static struct list_head sclp_tty_outqueue;
/* Counter how many buffers are emitted. */
static int sclp_tty_buffer_count;
/* Pointer to current console buffer. */
static struct sclp_buffer *sclp_ttybuf;
/* Timer for delayed output of console messages. */
static struct timer_list sclp_tty_timer;
static struct tty_port sclp_port;
static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
static int sclp_tty_tolower;
static int sclp_tty_columns = 80;
#define SPACES_PER_TAB 8
#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
/* This routine is called whenever we try to open a SCLP terminal. */
static int
sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
tty_port_tty_set(&sclp_port, tty);
tty->driver_data = NULL;
sclp_port.low_latency = 0;
return 0;
}
/* This routine is called when the SCLP terminal is closed. */
static void
sclp_tty_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count > 1)
return;
tty_port_tty_set(&sclp_port, NULL);
}
/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is applied. It is not an exact number because not every
 * character needs the same space in the sccb. The worst case is
 * a string of newlines: every newline creates a new mto, which
 * needs sizeof(struct mto) bytes.
 */
static int
sclp_tty_write_room (struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
list_for_each(l, &sclp_tty_pages)
count += NR_EMPTY_MTO_PER_SCCB;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
static void
sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
{
unsigned long flags;
void *page;
do {
page = sclp_unmake_buffer(buffer);
spin_lock_irqsave(&sclp_tty_lock, flags);
/* Remove buffer from outqueue */
list_del(&buffer->list);
sclp_tty_buffer_count--;
list_add_tail((struct list_head *) page, &sclp_tty_pages);
/* Check if there is a pending buffer on the out queue. */
buffer = NULL;
if (!list_empty(&sclp_tty_outqueue))
buffer = list_entry(sclp_tty_outqueue.next,
struct sclp_buffer, list);
spin_unlock_irqrestore(&sclp_tty_lock, flags);
} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
tty_port_tty_wakeup(&sclp_port);
}
static inline void
__sclp_ttybuf_emit(struct sclp_buffer *buffer)
{
unsigned long flags;
int count;
int rc;
spin_lock_irqsave(&sclp_tty_lock, flags);
list_add_tail(&buffer->list, &sclp_tty_outqueue);
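	/*
	 * If another buffer was already queued, its completion callback
	 * will pick this one up from the outqueue; only start a new
	 * request when the queue was empty before.
	 */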
count = sclp_tty_buffer_count++;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (count)
return;
rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
if (rc)
sclp_ttybuf_callback(buffer, rc);
}
/*
 * When this routine is called from the timer, flush the
 * temporary write buffer.
 */
static void
sclp_tty_timeout(unsigned long data)
{
unsigned long flags;
struct sclp_buffer *buf;
spin_lock_irqsave(&sclp_tty_lock, flags);
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (buf != NULL) {
__sclp_ttybuf_emit(buf);
}
}
/*
* Write a string to the sclp tty.
*/
static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
struct sclp_buffer *buf;
if (count <= 0)
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_tty_lock, flags);
do {
/* Create a sclp output buffer if none exists yet */
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (may_fail)
goto out;
else
sclp_sync_wait();
spin_lock_irqsave(&sclp_tty_lock, flags);
}
page = sclp_tty_pages.next;
list_del((struct list_head *) page);
sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
SPACES_PER_TAB);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_ttybuf, str, count);
overall_written += written;
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
__sclp_ttybuf_emit(buf);
spin_lock_irqsave(&sclp_tty_lock, flags);
str += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
init_timer(&sclp_tty_timer);
sclp_tty_timer.function = sclp_tty_timeout;
sclp_tty_timer.data = 0UL;
sclp_tty_timer.expires = jiffies + HZ/10;
add_timer(&sclp_tty_timer);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
return overall_written;
}
/*
* This routine is called by the kernel to write a series of characters to the
* tty device. The characters may come from user space or kernel space. This
* routine will return the number of characters actually accepted for writing.
*/
static int
sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
return sclp_tty_write_string(buf, count, 1);
}
/*
* This routine is called by the kernel to write a single character to the tty
* device. If the kernel uses this routine, it must call the flush_chars()
* routine (if defined) when it is done stuffing characters into the driver.
*
* Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
* If the given character is a '\n' the contents of the SCLP write buffer
* - including previous characters from sclp_tty_put_char() and strings from
* sclp_write() without final '\n' - will be written.
*/
static int
sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
sclp_tty_chars[sclp_tty_chars_count++] = ch;
if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
return 1;
}
/*
* This routine is called by the kernel after it has written a series of
* characters to the tty device using put_char().
*/
static void
sclp_tty_flush_chars(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
/*
* This routine returns the number of characters in the write buffer of the
* SCLP driver. The provided number includes all characters that are stored
* in the SCCB (will be written next time the SCLP is not busy) as well as
* characters in the write buffer (will not be written as long as there is a
* final line feed missing).
*/
static int
sclp_tty_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_buffer *t;
int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_chars_in_buffer(sclp_ttybuf);
list_for_each(l, &sclp_tty_outqueue) {
t = list_entry(l, struct sclp_buffer, list);
count += sclp_chars_in_buffer(t);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
/*
 * Push all buffered characters out to the SCLP.
 */
static void
sclp_tty_flush_buffer(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
/*
* push input to tty
*/
static void
sclp_tty_input(unsigned char* buf, unsigned int count)
{
struct tty_struct *tty = tty_port_tty_get(&sclp_port);
unsigned int cchar;
/*
* If this tty driver is currently closed
* then throw the received input away.
*/
if (tty == NULL)
return;
cchar = ctrlchar_handle(buf, count, tty);
switch (cchar & CTRLCHAR_MASK) {
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
tty_flip_buffer_push(&sclp_port);
break;
case CTRLCHAR_NONE:
/* send (normal) input to line discipline */
if (count < 2 ||
(strncmp((const char *) buf + count - 2, "^n", 2) &&
strncmp((const char *) buf + count - 2, "\252n", 2))) {
/* add the auto \n */
tty_insert_flip_string(&sclp_port, buf, count);
tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
} else
tty_insert_flip_string(&sclp_port, buf, count - 2);
tty_flip_buffer_push(&sclp_port);
break;
}
tty_kref_put(tty);
}
/*
 * Take an EBCDIC string in upper/lower case and switch the case of the
 * characters enclosed by a special delimiter character, modifying the
 * original string. Returns the length of the resulting string.
 */
static int sclp_switch_cases(unsigned char *buf, int count)
{
unsigned char *ip, *op;
int toggle;
/* initially changing case is off */
toggle = 0;
ip = op = buf;
while (count-- > 0) {
/* compare with special character */
if (*ip == CASE_DELIMITER) {
/* followed by another special character? */
if (count && ip[1] == CASE_DELIMITER) {
/*
* ... then put a single copy of the special
* character to the output string
*/
*op++ = *ip++;
count--;
} else
/*
 * ... a special character followed by a normal
 * character toggles the case change behaviour
 */
toggle = ~toggle;
/* skip special character */
ip++;
} else
/* not the special character */
if (toggle)
/* but case switching is on */
if (sclp_tty_tolower)
/* switch to uppercase */
*op++ = _ebc_toupper[(int) *ip++];
else
/* switch to lowercase */
*op++ = _ebc_tolower[(int) *ip++];
else
/* no case switching, copy the character */
*op++ = *ip++;
}
/* return length of reformatted string. */
return op - buf;
}
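/*
 * Worked example (hypothetical input): with sclp_tty_tolower set, input
 * is first folded to lower case in sclp_get_input() below; sections
 * between case delimiters are then switched back to upper case here, so
 * abc%def%ghi becomes abcDEFghi, while a doubled delimiter %% yields a
 * single literal %.
 */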
static void sclp_get_input(struct gds_subvector *sv)
{
unsigned char *str;
int count;
str = (unsigned char *) (sv + 1);
count = sv->length - sizeof(*sv);
if (sclp_tty_tolower)
EBC_TOLOWER(str, count);
count = sclp_switch_cases(str, count);
/* convert EBCDIC to ASCII (modify original input in SCCB) */
sclp_ebcasc_str(str, count);
/* transfer input to high level driver */
sclp_tty_input(str, count);
}
static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
{
void *end;
end = (void *) sv + sv->length;
for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length)
if (sv->key == 0x30)
sclp_get_input(sv);
}
static inline void sclp_eval_textcmd(struct gds_vector *v)
{
struct gds_subvector *sv;
void *end;
end = (void *) v + v->length;
for (sv = (struct gds_subvector *) (v + 1);
(void *) sv < end; sv = (void *) sv + sv->length)
if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
sclp_eval_selfdeftextmsg(sv);
}
static inline void sclp_eval_cpmsu(struct gds_vector *v)
{
void *end;
end = (void *) v + v->length;
for (v = v + 1; (void *) v < end; v = (void *) v + v->length)
if (v->gds_id == GDS_ID_TEXTCMD)
sclp_eval_textcmd(v);
}
static inline void sclp_eval_mdsmu(struct gds_vector *v)
{
v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU);
if (v)
sclp_eval_cpmsu(v);
}
static void sclp_tty_receiver(struct evbuf_header *evbuf)
{
struct gds_vector *v;
v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
GDS_ID_MDSMU);
if (v)
sclp_eval_mdsmu(v);
}
static void
sclp_tty_state_change(struct sclp_register *reg)
{
}
static struct sclp_register sclp_input_event =
{
.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
.state_change_fn = sclp_tty_state_change,
.receiver_fn = sclp_tty_receiver
};
static const struct tty_operations sclp_ops = {
.open = sclp_tty_open,
.close = sclp_tty_close,
.write = sclp_tty_write,
.put_char = sclp_tty_put_char,
.flush_chars = sclp_tty_flush_chars,
.write_room = sclp_tty_write_room,
.chars_in_buffer = sclp_tty_chars_in_buffer,
.flush_buffer = sclp_tty_flush_buffer,
};
static int __init
sclp_tty_init(void)
{
struct tty_driver *driver;
void *page;
int i;
int rc;
if (!CONSOLE_IS_SCLP)
return 0;
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
rc = sclp_rw_init();
if (rc) {
put_tty_driver(driver);
return rc;
}
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_tty_pages);
for (i = 0; i < MAX_KMEM_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (page == NULL) {
put_tty_driver(driver);
return -ENOMEM;
}
list_add_tail((struct list_head *) page, &sclp_tty_pages);
}
INIT_LIST_HEAD(&sclp_tty_outqueue);
spin_lock_init(&sclp_tty_lock);
init_timer(&sclp_tty_timer);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
if (MACHINE_IS_VM) {
/*
* save 4 characters for the CPU number
* written at start of each line by VM/CP
*/
sclp_tty_columns = 76;
/* case input lines to lowercase */
sclp_tty_tolower = 1;
}
sclp_tty_chars_count = 0;
rc = sclp_register(&sclp_input_event);
if (rc) {
put_tty_driver(driver);
return rc;
}
tty_port_init(&sclp_port);
driver->driver_name = "sclp_line";
driver->name = "sclp_line";
driver->major = TTY_MAJOR;
driver->minor_start = 64;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->init_termios.c_iflag = IGNBRK | IGNPAR;
driver->init_termios.c_oflag = ONLCR;
driver->init_termios.c_lflag = ISIG | ECHO;
driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(driver, &sclp_ops);
tty_port_link_device(&sclp_port, driver, 0);
rc = tty_register_driver(driver);
if (rc) {
put_tty_driver(driver);
tty_port_destroy(&sclp_port);
return rc;
}
sclp_tty_driver = driver;
return 0;
}
module_init(sclp_tty_init);

17
drivers/s390/char/sclp_tty.h Normal file

@ -0,0 +1,17 @@
/*
* interface to the SCLP-read/write driver
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_TTY_H__
#define __SCLP_TTY_H__
#include <linux/tty_driver.h>
extern struct tty_driver *sclp_tty_driver;
#endif /* __SCLP_TTY_H__ */

849
drivers/s390/char/sclp_vt220.c Normal file

@ -0,0 +1,849 @@
/*
* SCLP VT220 terminal driver.
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "sclp.h"
#define SCLP_VT220_MAJOR TTY_MAJOR
#define SCLP_VT220_MINOR 65
#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
#define SCLP_VT220_DEVICE_NAME "ttysclp"
#define SCLP_VT220_CONSOLE_NAME "ttyS"
#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
/* Representation of a single write request */
struct sclp_vt220_request {
struct list_head list;
struct sclp_req sclp_req;
int retry_count;
};
/* VT220 SCCB */
struct sclp_vt220_sccb {
struct sccb_header header;
struct evbuf_header evbuf;
};
#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
sizeof(struct sclp_vt220_request) - \
sizeof(struct sclp_vt220_sccb))
/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;
static struct tty_port sclp_vt220_port;
/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;
/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;
/* List of pending requests */
static struct list_head sclp_vt220_outqueue;
/* Suspend mode flag */
static int sclp_vt220_suspended;
/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;
/* Timer used for delaying write requests to merge subsequent messages into
* a single buffer */
static struct timer_list sclp_vt220_timer;
/* Pointer to current request buffer which has been partially filled but not
* yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;
/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;
/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;
/* Flag indicating that sclp_vt220_current_request should really
* have been already queued but wasn't because the SCLP was processing
* another buffer */
static int sclp_vt220_flush_later;
static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);
/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
.send_mask = EVTYP_VT220MSG_MASK,
.pm_event_fn = sclp_vt220_pm_event_fn,
};
/* Registration structure for SCLP input event buffers */
static struct sclp_register sclp_vt220_register_input = {
.receive_mask = EVTYP_VT220MSG_MASK,
.receiver_fn = sclp_vt220_receiver_fn,
};
/*
* Put provided request buffer back into queue and check emit pending
* buffers if necessary.
*/
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
unsigned long flags;
void *page;
do {
/* Put buffer back to list of empty buffers */
page = request->sclp_req.sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
/* Move request from outqueue to empty queue */
list_del(&request->list);
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
/* Check if there is a pending buffer on the out queue. */
request = NULL;
if (!list_empty(&sclp_vt220_outqueue))
request = list_entry(sclp_vt220_outqueue.next,
struct sclp_vt220_request, list);
if (!request || sclp_vt220_suspended) {
sclp_vt220_queue_running = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
break;
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
} while (__sclp_vt220_emit(request));
if (request == NULL && sclp_vt220_flush_later)
sclp_vt220_emit_current();
tty_port_tty_wakeup(&sclp_vt220_port);
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
* Callback through which the result of a write request is reported by the
* SCLP.
*/
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
struct sclp_vt220_request *vt220_request;
struct sclp_vt220_sccb *sccb;
vt220_request = (struct sclp_vt220_request *) data;
if (request->status == SCLP_REQ_FAILED) {
sclp_vt220_process_queue(vt220_request);
return;
}
sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
/* Check SCLP response code and choose suitable action */
switch (sccb->header.response_code) {
case 0x0020 :
break;
case 0x05f0: /* Target resource in improper state */
break;
case 0x0340: /* Contained SCLP equipment check */
if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
break;
/* Remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* Not all buffers were processed */
sccb->header.response_code = 0x0000;
vt220_request->sclp_req.status = SCLP_REQ_FILLED;
if (sclp_add_request(request) == 0)
return;
}
break;
case 0x0040: /* SCLP equipment check */
if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
break;
sccb->header.response_code = 0x0000;
vt220_request->sclp_req.status = SCLP_REQ_FILLED;
if (sclp_add_request(request) == 0)
return;
break;
default:
break;
}
sclp_vt220_process_queue(vt220_request);
}
/*
* Emit vt220 request buffer to SCLP. Return zero on success, non-zero
* otherwise.
*/
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sclp_req.status = SCLP_REQ_FILLED;
request->sclp_req.callback = sclp_vt220_callback;
request->sclp_req.callback_data = (void *) request;
return sclp_add_request(&request->sclp_req);
}
/*
* Queue and emit current request.
*/
static void
sclp_vt220_emit_current(void)
{
unsigned long flags;
struct sclp_vt220_request *request;
struct sclp_vt220_sccb *sccb;
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (sclp_vt220_current_request) {
sccb = (struct sclp_vt220_sccb *)
sclp_vt220_current_request->sclp_req.sccb;
/* Only emit buffers with content */
if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
list_add_tail(&sclp_vt220_current_request->list,
&sclp_vt220_outqueue);
sclp_vt220_current_request = NULL;
if (timer_pending(&sclp_vt220_timer))
del_timer(&sclp_vt220_timer);
}
sclp_vt220_flush_later = 0;
}
if (sclp_vt220_queue_running || sclp_vt220_suspended)
goto out_unlock;
if (list_empty(&sclp_vt220_outqueue))
goto out_unlock;
request = list_first_entry(&sclp_vt220_outqueue,
struct sclp_vt220_request, list);
sclp_vt220_queue_running = 1;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
if (__sclp_vt220_emit(request))
sclp_vt220_process_queue(request);
return;
out_unlock:
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
#define SCLP_NORMAL_WRITE 0x00
/*
* Helper function to initialize a page with the sclp request structure.
*/
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
struct sclp_vt220_request *request;
struct sclp_vt220_sccb *sccb;
/* Place request structure at end of page */
request = ((struct sclp_vt220_request *)
((addr_t) page + PAGE_SIZE)) - 1;
request->retry_count = 0;
request->sclp_req.sccb = page;
/* SCCB goes at start of page */
sccb = (struct sclp_vt220_sccb *) page;
memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
sccb->header.length = sizeof(struct sclp_vt220_sccb);
sccb->header.function_code = SCLP_NORMAL_WRITE;
sccb->header.response_code = 0x0000;
sccb->evbuf.type = EVTYP_VT220MSG;
sccb->evbuf.length = sizeof(struct evbuf_header);
return request;
}
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
struct sclp_vt220_sccb *sccb;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
sccb->header.length;
}
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
struct sclp_vt220_sccb *sccb;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
return sccb->evbuf.length - sizeof(struct evbuf_header);
}
/*
* Add msg to buffer associated with request. Return the number of characters
* added.
*/
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
const unsigned char *msg, int count, int convertlf)
{
struct sclp_vt220_sccb *sccb;
void *buffer;
unsigned char c;
int from;
int to;
if (count > sclp_vt220_space_left(request))
count = sclp_vt220_space_left(request);
if (count <= 0)
return 0;
sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
buffer = (void *) ((addr_t) sccb + sccb->header.length);
if (convertlf) {
/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
for (from=0, to=0;
(from < count) && (to < sclp_vt220_space_left(request));
from++) {
/* Retrieve character */
c = msg[from];
/* Perform conversion */
if (c == 0x0a) {
if (to + 1 < sclp_vt220_space_left(request)) {
((unsigned char *) buffer)[to++] = c;
((unsigned char *) buffer)[to++] = 0x0d;
} else
break;
} else
((unsigned char *) buffer)[to++] = c;
}
sccb->header.length += to;
sccb->evbuf.length += to;
return from;
} else {
memcpy(buffer, (const void *) msg, count);
sccb->header.length += count;
sccb->evbuf.length += count;
return count;
}
}
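/*
 * Illustrative note: with convertlf set, "a\nb" is stored as
 * a 0x0a 0x0d b. The 0x0d is appended only if both bytes still fit,
 * so a converted line feed is never split across two request buffers.
 */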
/*
* Emit buffer after having waited long enough for more data to arrive.
*/
static void
sclp_vt220_timeout(unsigned long data)
{
sclp_vt220_emit_current();
}
#define BUFFER_MAX_DELAY HZ/20
/*
* Drop oldest console buffer if sclp_con_drop is set
*/
static int
sclp_vt220_drop_buffer(void)
{
struct list_head *list;
struct sclp_vt220_request *request;
void *page;
if (!sclp_console_drop)
return 0;
list = sclp_vt220_outqueue.next;
if (sclp_vt220_queue_running)
/* The first element is in I/O */
list = list->next;
if (list == &sclp_vt220_outqueue)
return 0;
list_del(list);
request = list_entry(list, struct sclp_vt220_request, list);
page = request->sclp_req.sccb;
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
return 1;
}
/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF to the SCLP interface. If the data does not fit into
 * the current write buffer, emit the current one and allocate a new one.
 * If there are no more empty buffers available, wait until one gets
 * emptied. If DO_SCHEDULE is non-zero, the buffer will be scheduled for
 * emitting after a timeout - otherwise the user has to explicitly call the
 * flush function. A non-zero CONVERTLF parameter indicates that 0x0a
 * characters in the message buffer should be converted to 0x0a 0x0d. After
 * completion, return the number of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
int convertlf, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
if (count <= 0)
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags);
do {
/* Create an sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) {
if (list_empty(&sclp_vt220_empty))
sclp_console_full++;
while (list_empty(&sclp_vt220_empty)) {
if (may_fail || sclp_vt220_suspended)
goto out;
if (sclp_vt220_drop_buffer())
break;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
page = (void *) sclp_vt220_empty.next;
list_del((struct list_head *) page);
sclp_vt220_current_request =
sclp_vt220_initialize_page(page);
}
/* Try to write the string to the current request buffer */
written = sclp_vt220_add_msg(sclp_vt220_current_request,
buf, count, convertlf);
overall_written += written;
if (written == count)
break;
/*
* Not all characters could be written to the current
* output buffer. Emit the buffer, create a new buffer
* and then output the rest of the string.
*/
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
buf += written;
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after some time */
if (sclp_vt220_current_request != NULL &&
!timer_pending(&sclp_vt220_timer) && do_schedule) {
sclp_vt220_timer.function = sclp_vt220_timeout;
sclp_vt220_timer.data = 0UL;
sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
add_timer(&sclp_vt220_timer);
}
out:
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return overall_written;
}
/*
* This routine is called by the kernel to write a series of
* characters to the tty device. The characters may come from
* user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*/
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
return __sclp_vt220_write(buf, count, 1, 0, 1);
}
#define SCLP_VT220_SESSION_ENDED 0x01
#define SCLP_VT220_SESSION_STARTED 0x80
#define SCLP_VT220_SESSION_DATA 0x00
/*
* Called by the SCLP to report incoming event buffers.
*/
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
char *buffer;
unsigned int count;
buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
count = evbuf->length - sizeof(struct evbuf_header);
switch (*buffer) {
case SCLP_VT220_SESSION_ENDED:
case SCLP_VT220_SESSION_STARTED:
break;
case SCLP_VT220_SESSION_DATA:
/* Send input to line discipline */
buffer++;
count--;
tty_insert_flip_string(&sclp_vt220_port, buffer, count);
tty_flip_buffer_push(&sclp_vt220_port);
break;
}
}
/*
* This routine is called when a particular tty device is opened.
*/
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1) {
tty_port_tty_set(&sclp_vt220_port, tty);
sclp_vt220_port.low_latency = 0;
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = 24;
tty->winsize.ws_col = 80;
}
}
return 0;
}
/*
* This routine is called when a particular tty device is closed.
*/
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
if (tty->count == 1)
tty_port_tty_set(&sclp_vt220_port, NULL);
}
/*
* This routine is called by the kernel to write a single
* character to the tty device. If the kernel uses this routine,
* it must call the flush_chars() routine (if defined) when it is
* done stuffing characters into the driver.
*/
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
* This routine is called by the kernel after it has written a
* series of characters to the tty device using put_char().
*/
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
if (!sclp_vt220_queue_running)
sclp_vt220_emit_current();
else
sclp_vt220_flush_later = 1;
}
/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is applied.
 */
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_space_left(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_empty)
count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
* Return number of buffered chars.
*/
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
struct sclp_vt220_request *r;
int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_chars_stored(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_outqueue) {
r = list_entry(l, struct sclp_vt220_request, list);
count += sclp_vt220_chars_stored(r);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
 * Queue all pending buffers for output to the hardware.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
sclp_vt220_emit_current();
}
/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
struct list_head *page, *p;
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
free_page((unsigned long) page);
}
}
/* Release memory and unregister from sclp core. Controlled by init counting -
* only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
sclp_vt220_init_count--;
if (sclp_vt220_init_count != 0)
return;
sclp_unregister(&sclp_vt220_register);
__sclp_vt220_free_pages();
tty_port_destroy(&sclp_vt220_port);
}
/* Allocate buffer pages and register with sclp core. Controlled by init
* counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
void *page;
int i;
int rc;
sclp_vt220_init_count++;
if (sclp_vt220_init_count != 1)
return 0;
spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue);
init_timer(&sclp_vt220_timer);
tty_port_init(&sclp_vt220_port);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page)
goto out;
list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
if (rc) {
__sclp_vt220_free_pages();
sclp_vt220_init_count--;
tty_port_destroy(&sclp_vt220_port);
}
return rc;
}
static const struct tty_operations sclp_vt220_ops = {
.open = sclp_vt220_open,
.close = sclp_vt220_close,
.write = sclp_vt220_write,
.put_char = sclp_vt220_put_char,
.flush_chars = sclp_vt220_flush_chars,
.write_room = sclp_vt220_write_room,
.chars_in_buffer = sclp_vt220_chars_in_buffer,
.flush_buffer = sclp_vt220_flush_buffer,
};
/*
* Register driver with SCLP and Linux and initialize internal tty structures.
*/
static int __init sclp_vt220_tty_init(void)
{
struct tty_driver *driver;
int rc;
/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
* symmetry between VM and LPAR systems regarding ttyS1. */
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
rc = __sclp_vt220_init(MAX_KMEM_PAGES);
if (rc)
goto out_driver;
driver->driver_name = SCLP_VT220_DRIVER_NAME;
driver->name = SCLP_VT220_DEVICE_NAME;
driver->major = SCLP_VT220_MAJOR;
driver->minor_start = SCLP_VT220_MINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(driver, &sclp_vt220_ops);
tty_port_link_device(&sclp_vt220_port, driver, 0);
rc = tty_register_driver(driver);
if (rc)
goto out_init;
rc = sclp_register(&sclp_vt220_register_input);
if (rc)
goto out_reg;
sclp_vt220_driver = driver;
return 0;
out_reg:
tty_unregister_driver(driver);
out_init:
__sclp_vt220_cleanup();
out_driver:
put_tty_driver(driver);
return rc;
}
__initcall(sclp_vt220_tty_init);
static void __sclp_vt220_flush_buffer(void)
{
unsigned long flags;
sclp_vt220_emit_current();
spin_lock_irqsave(&sclp_vt220_lock, flags);
if (timer_pending(&sclp_vt220_timer))
del_timer(&sclp_vt220_timer);
while (sclp_vt220_queue_running) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_sync_wait();
spin_lock_irqsave(&sclp_vt220_lock, flags);
}
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
/*
* Resume console: If there are cached messages, emit them.
*/
static void sclp_vt220_resume(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_vt220_lock, flags);
sclp_vt220_suspended = 0;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
sclp_vt220_emit_current();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_vt220_suspend(void)
{
unsigned long flags;
spin_lock_irqsave(&sclp_vt220_lock, flags);
sclp_vt220_suspended = 1;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
__sclp_vt220_flush_buffer();
}
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_FREEZE:
sclp_vt220_suspend();
break;
case SCLP_PM_EVENT_RESTORE:
case SCLP_PM_EVENT_THAW:
sclp_vt220_resume();
break;
}
}
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
*index = 0;
return sclp_vt220_driver;
}
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)
{
__sclp_vt220_flush_buffer();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = sclp_vt220_notify,
.priority = 1,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = sclp_vt220_notify,
.priority = 1,
};
/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
.name = SCLP_VT220_CONSOLE_NAME,
.write = sclp_vt220_con_write,
.device = sclp_vt220_con_device,
.flags = CON_PRINTBUFFER,
.index = SCLP_VT220_CONSOLE_INDEX
};
static int __init
sclp_vt220_con_init(void)
{
int rc;
rc = __sclp_vt220_init(sclp_console_pages);
if (rc)
return rc;
/* Attach linux console */
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_vt220_console);
return 0;
}
console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */

369
drivers/s390/char/tape.h Normal file

@ -0,0 +1,369 @@
/*
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#ifndef _TAPE_H
#define _TAPE_H
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
struct gendisk;
/*
* Define DBF_LIKE_HELL for lots of messages in the debug feature.
*/
#define DBF_LIKE_HELL
#ifdef DBF_LIKE_HELL
#define DBF_LH(level, str, ...) \
do { \
debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
} while (0)
#else
#define DBF_LH(level, str, ...) do {} while(0)
#endif
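/*
 * Illustrative usage (hypothetical message and level): with DBF_LIKE_HELL
 * defined, DBF_LH() records an event in the s390 debug feature, e.g.
 *
 *	DBF_LH(6, "%08x: free request\n", device->cdev_id);
 *
 * and compiles to an empty statement when DBF_LIKE_HELL is undefined.
 */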
/*
* macros s390 debug feature (dbf)
*/
#define DBF_EVENT(d_level, d_str...) \
do { \
debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
} while (0)
#define DBF_EXCEPTION(d_level, d_str...) \
do { \
debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
} while (0)
#define TAPE_VERSION_MAJOR 2
#define TAPE_VERSION_MINOR 0
#define TAPE_MAGIC "tape"
#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
#define TAPEBLOCK_HSEC_SIZE 2048
#define TAPEBLOCK_HSEC_S2B 2
#define TAPEBLOCK_RETRIES 5
enum tape_medium_state {
MS_UNKNOWN,
MS_LOADED,
MS_UNLOADED,
MS_SIZE
};
enum tape_state {
TS_UNUSED=0,
TS_IN_USE,
TS_BLKUSE,
TS_INIT,
TS_NOT_OPER,
TS_SIZE
};
enum tape_op {
TO_BLOCK, /* Block read */
TO_BSB, /* Backward space block */
TO_BSF, /* Backward space filemark */
TO_DSE, /* Data security erase */
TO_FSB, /* Forward space block */
TO_FSF, /* Forward space filemark */
TO_LBL, /* Locate block label */
TO_NOP, /* No operation */
TO_RBA, /* Read backward */
TO_RBI, /* Read block information */
TO_RFO, /* Read forward */
TO_REW, /* Rewind tape */
TO_RUN, /* Rewind and unload tape */
TO_WRI, /* Write block */
TO_WTM, /* Write tape mark */
TO_MSEN, /* Medium sense */
TO_LOAD, /* Load tape */
TO_READ_CONFIG, /* Read configuration data */
TO_READ_ATTMSG, /* Read attention message */
TO_DIS, /* Tape display */
TO_ASSIGN, /* Assign tape to channel path */
TO_UNASSIGN, /* Unassign tape from channel path */
TO_CRYPT_ON, /* Enable encryption */
TO_CRYPT_OFF, /* Disable encryption */
TO_KEKL_SET, /* Set KEK label */
TO_KEKL_QUERY, /* Query KEK label */
TO_RDC, /* Read device characteristics */
TO_SIZE, /* #entries in tape_op_t */
};
/* Forward declaration */
struct tape_device;
/* tape_request->status can be: */
enum tape_request_status {
TAPE_REQUEST_INIT, /* request is ready to be processed */
TAPE_REQUEST_QUEUED, /* request is queued to be processed */
TAPE_REQUEST_IN_IO, /* request is currently in IO */
TAPE_REQUEST_DONE, /* request is completed. */
TAPE_REQUEST_CANCEL, /* request should be canceled. */
TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
};
/* Tape CCW request */
struct tape_request {
struct list_head list; /* list head for request queueing. */
struct tape_device *device; /* tape device of this request */
struct ccw1 *cpaddr; /* address of the channel program. */
void *cpdata; /* pointer to ccw data. */
enum tape_request_status status;/* status of this request */
int options; /* options for execution. */
int retries; /* retry counter for error recovery. */
int rescnt; /* residual count from devstat. */
/* Callback for delivering final status. */
void (*callback)(struct tape_request *, void *);
void *callback_data;
enum tape_op op;
int rc;
};
/* Function type for magnetic tape commands */
typedef int (*tape_mtop_fn)(struct tape_device *, int);
/* Size of the array containing the mtops for a discipline */
#define TAPE_NR_MTOPS (MTMKPART+1)
/* Tape Discipline */
struct tape_discipline {
struct module *owner;
int (*setup_device)(struct tape_device *);
void (*cleanup_device)(struct tape_device *);
int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
struct tape_request *(*read_block)(struct tape_device *, size_t);
struct tape_request *(*write_block)(struct tape_device *, size_t);
void (*process_eov)(struct tape_device*);
/* ioctl function for additional ioctls. */
int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
/* Array of tape commands with TAPE_NR_MTOPS entries */
tape_mtop_fn *mtop_array;
};
/*
 * The discipline irq function either returns an error code (<0), meaning
 * that the request has failed, or one of the following:
 */
#define TAPE_IO_SUCCESS 0 /* request successful */
#define TAPE_IO_PENDING 1 /* request still running */
#define TAPE_IO_RETRY 2 /* retry to current request */
#define TAPE_IO_STOP 3 /* stop the running request */
#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
/* Char Frontend Data */
struct tape_char_data {
struct idal_buffer *idal_buf; /* idal buffer for user char data */
int block_size; /* of size block_size. */
};
/* Tape Info */
struct tape_device {
/* entry in tape_device_list */
struct list_head node;
int cdev_id;
struct ccw_device * cdev;
struct tape_class_device * nt;
struct tape_class_device * rt;
/* Device mutex to serialize tape commands. */
struct mutex mutex;
/* Device discipline information. */
struct tape_discipline * discipline;
void * discdata;
/* Generic status flags */
long tape_generic_status;
/* Device state information. */
wait_queue_head_t state_change_wq;
enum tape_state tape_state;
enum tape_medium_state medium_state;
unsigned char * modeset_byte;
/* Reference count. */
atomic_t ref_count;
/* Request queue. */
struct list_head req_queue;
/* Request wait queue. */
wait_queue_head_t wait_queue;
/* Each tape device has (currently) two minor numbers. */
int first_minor;
/* Number of tapemarks required for correct termination. */
int required_tapemarks;
/* Block ID of the BOF */
unsigned int bof;
/* Character device frontend data */
struct tape_char_data char_data;
/* Function to start or stop the next request later. */
struct delayed_work tape_dnr;
/* Timer for long busy */
struct timer_list lb_timeout;
};
/* Externals from tape_core.c */
extern struct tape_request *tape_alloc_request(int cplength, int datasize);
extern void tape_free_request(struct tape_request *);
extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
extern int tape_cancel_io(struct tape_device *, struct tape_request *);
void tape_hotplug_event(struct tape_device *, int major, int action);
static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request)
{
int rc;
rc = tape_do_io(device, request);
tape_free_request(request);
return rc;
}
static inline void
tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
{
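/* Hand the request to tape_free_request() as the completion callback so
 * it is freed when the I/O finishes; the unused callback_data argument
 * is simply ignored. */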
request->callback = (void *) tape_free_request;
request->callback_data = NULL;
tape_do_io_async(device, request);
}
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
extern int tape_release(struct tape_device *);
extern int tape_mtop(struct tape_device *, int, int);
extern void tape_state_set(struct tape_device *, enum tape_state);
extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
extern int tape_generic_offline(struct ccw_device *);
extern int tape_generic_pm_suspend(struct ccw_device *);
/* Externals from tape_devmap.c */
extern int tape_generic_probe(struct ccw_device *);
extern void tape_generic_remove(struct ccw_device *);
extern struct tape_device *tape_find_device(int devindex);
extern struct tape_device *tape_get_device(struct tape_device *);
extern void tape_put_device(struct tape_device *);
/* Externals from tape_char.c */
extern int tapechar_init(void);
extern void tapechar_exit(void);
extern int tapechar_setup_device(struct tape_device *);
extern void tapechar_cleanup_device(struct tape_device *);
/* tape initialisation functions */
#ifdef CONFIG_PROC_FS
extern void tape_proc_init (void);
extern void tape_proc_cleanup (void);
#else
static inline void tape_proc_init (void) { }
static inline void tape_proc_cleanup (void) { }
#endif
/* a function for dumping device sense info */
extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
struct irb *);
/* functions for handling the status of a device */
extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
/* The debug area */
extern debug_info_t *TAPE_DBF_AREA;
/* functions for building ccws */
static inline struct ccw1 *
tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = memsize;
ccw->cda = (__u32)(addr_t) cda;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = memsize;
ccw->cda = (__u32)(addr_t) cda;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = 0;
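/* Control command without data transfer: point cda at the ccw itself
 * as a harmless dummy address. */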
ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
{
while (count-- > 0) {
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = 0;
ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
ccw++;
}
return ccw;
}
static inline struct ccw1 *
tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
idal_buffer_set_cda(idal, ccw);
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
idal_buffer_set_cda(idal, ccw);
return ccw + 1;
}
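/*
 * Example of use (mirrors the discipline code in tape_std.c): a channel
 * program is assembled by chaining these helpers; each call fills one
 * CCW (or 'count' CCWs for tape_ccw_repeat) and returns the next free
 * slot.
 *
 *	struct ccw1 *ccw = request->cpaddr;
 *	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
 *	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
 *	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
 */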
/* Global vars */
extern const char *tape_state_verbose[];
extern const char *tape_op_verbose[];
#endif /* for ifdef tape.h */

File diff suppressed because it is too large

File diff suppressed because it is too large

174
drivers/s390/char/tape_3590.h Normal file
View file

@ -0,0 +1,174 @@
/*
* tape device discipline for 3590 tapes.
*
* Copyright IBM Corp. 2001, 2006
* Author(s): Stefan Bader <shbader@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_3590_H
#define _TAPE_3590_H
#define MEDIUM_SENSE 0xc2
#define READ_PREVIOUS 0x0a
#define MODE_SENSE 0xcf
#define PERFORM_SS_FUNC 0x77
#define READ_SS_DATA 0x3e
#define PREP_RD_SS_DATA 0x18
#define RD_ATTMSG 0x3
#define SENSE_BRA_PER 0
#define SENSE_BRA_CONT 1
#define SENSE_BRA_RE 2
#define SENSE_BRA_DRE 3
#define SENSE_FMT_LIBRARY 0x23
#define SENSE_FMT_UNSOLICITED 0x40
#define SENSE_FMT_COMMAND_REJ 0x41
#define SENSE_FMT_COMMAND_EXEC0 0x50
#define SENSE_FMT_COMMAND_EXEC1 0x51
#define SENSE_FMT_EVENT0 0x60
#define SENSE_FMT_EVENT1 0x61
#define SENSE_FMT_MIM 0x70
#define SENSE_FMT_SIM 0x71
#define MSENSE_UNASSOCIATED 0x00
#define MSENSE_ASSOCIATED_MOUNT 0x01
#define MSENSE_ASSOCIATED_UMOUNT 0x02
#define MSENSE_CRYPT_MASK 0x00000010
#define TAPE_3590_MAX_MSG 0xb0
/* Datatypes */
struct tape_3590_disc_data {
struct tape390_crypt_info crypt_info;
int read_back_op;
};
#define TAPE_3590_CRYPT_INFO(device) \
((struct tape_3590_disc_data*)(device->discdata))->crypt_info
#define TAPE_3590_READ_BACK_OP(device) \
((struct tape_3590_disc_data*)(device->discdata))->read_back_op
struct tape_3590_sense {
unsigned int command_rej:1;
unsigned int interv_req:1;
unsigned int bus_out_check:1;
unsigned int eq_check:1;
unsigned int data_check:1;
unsigned int overrun:1;
unsigned int def_unit_check:1;
unsigned int assgnd_elsew:1;
unsigned int locate_fail:1;
unsigned int inst_online:1;
unsigned int reserved:1;
unsigned int blk_seq_err:1;
unsigned int begin_part:1;
unsigned int wr_mode:1;
unsigned int wr_prot:1;
unsigned int not_cap:1;
unsigned int bra:2;
unsigned int lc:3;
unsigned int vlf_active:1;
unsigned int stm:1;
unsigned int med_pos:1;
unsigned int rac:8;
unsigned int rc_rqc:16;
unsigned int mc:8;
unsigned int sense_fmt:8;
union {
struct {
unsigned int emc:4;
unsigned int smc:4;
unsigned int sev:2;
unsigned int reserved:6;
unsigned int md:8;
unsigned int refcode:8;
unsigned int mid:16;
unsigned int mp:16;
unsigned char volid[6];
unsigned int fid:8;
} f70;
struct {
unsigned int emc:4;
unsigned int smc:4;
unsigned int sev:2;
unsigned int reserved1:5;
unsigned int mdf:1;
unsigned char md[3];
unsigned int simid:8;
unsigned int uid:16;
unsigned int refcode1:16;
unsigned int refcode2:16;
unsigned int refcode3:16;
unsigned int reserved2:8;
} f71;
unsigned char data[14];
} fmt;
unsigned char pad[10];
} __attribute__ ((packed));
struct tape_3590_med_sense {
unsigned int macst:4;
unsigned int masst:4;
char pad1[7];
unsigned int flags;
char pad2[116];
} __attribute__ ((packed));
struct tape_3590_rdc_data {
char data[64];
} __attribute__ ((packed));
/* Datastructures for 3592 encryption support */
struct tape3592_kekl {
__u8 flags;
char label[64];
} __attribute__ ((packed));
struct tape3592_kekl_pair {
__u8 count;
struct tape3592_kekl kekl[2];
} __attribute__ ((packed));
struct tape3592_kekl_query_data {
__u16 len;
__u8 fmt;
__u8 mc;
__u32 id;
__u8 flags;
struct tape3592_kekl_pair kekls;
char reserved[116];
} __attribute__ ((packed));
struct tape3592_kekl_query_order {
__u8 code;
__u8 flags;
char reserved1[2];
__u8 max_count;
char reserved2[35];
} __attribute__ ((packed));
struct tape3592_kekl_set_order {
__u8 code;
__u8 flags;
char reserved1[2];
__u8 op;
struct tape3592_kekl_pair kekls;
char reserved2[120];
} __attribute__ ((packed));
#endif /* _TAPE_3590_H */

500
drivers/s390/char/tape_char.c Normal file
View file

@ -0,0 +1,500 @@
/*
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
#include "tape_class.h"
#define TAPECHAR_MAJOR 0 /* get dynamic major */
/*
* file operation structure for tape character frontend
*/
static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif
static const struct file_operations tape_fops =
{
.owner = THIS_MODULE,
.read = tapechar_read,
.write = tapechar_write,
.unlocked_ioctl = tapechar_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = tapechar_compat_ioctl,
#endif
.open = tapechar_open,
.release = tapechar_release,
.llseek = no_llseek,
};
static int tapechar_major = TAPECHAR_MAJOR;
/*
* This function is called for every new tapedevice
*/
int
tapechar_setup_device(struct tape_device * device)
{
char device_name[20];
sprintf(device_name, "ntibm%i", device->first_minor / 2);
device->nt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor),
&tape_fops,
device_name,
"non-rewinding"
);
device_name[0] = 'r';
device->rt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor + 1),
&tape_fops,
device_name,
"rewinding"
);
return 0;
}
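/*
 * Resulting minor layout (illustration): tape device N owns minors 2*N
 * and 2*N+1 and surfaces as /dev/ntibmN (non-rewinding) and /dev/rtibmN
 * (rewinding).
 */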
void
tapechar_cleanup_device(struct tape_device *device)
{
unregister_tape_dev(&device->cdev->dev, device->rt);
device->rt = NULL;
unregister_tape_dev(&device->cdev->dev, device->nt);
device->nt = NULL;
}
static int
tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
{
struct idal_buffer *new;
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == block_size)
return 0;
if (block_size > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
block_size, MAX_BLOCKSIZE);
return -EINVAL;
}
/* The current idal buffer is not correct. Allocate a new one. */
new = idal_buffer_alloc(block_size, 0);
if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
return 0;
}
/*
* Tape device read function
*/
static ssize_t
tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
int rc;
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
/*
 * If the tape isn't terminated yet, do it now. Since we are then
 * positioned at the end of the tape, there wouldn't be anything to
 * read anyway, so return immediately.
 */
if (device->required_tapemarks) {
return tape_std_terminate_write(device);
}
/* Find out block size to use */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:read smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
} else {
block_size = count;
}
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
/* Let the discipline build the ccw chain. */
request = device->discipline->read_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
/* Execute it. */
rc = tape_do_io(device, request);
if (rc == 0) {
rc = block_size - request->rescnt;
DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
/* Copy data from idal buffer to user space. */
if (idal_buffer_to_user(device->char_data.idal_buf,
data, rc) != 0)
rc = -EFAULT;
}
tape_free_request(request);
return rc;
}
/*
* Tape device write function
*/
static ssize_t
tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
size_t written;
int nblocks;
int i, rc;
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:write smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
nblocks = count / block_size;
} else {
block_size = count;
nblocks = 1;
}
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
/* Let the discipline build the ccw chain. */
request = device->discipline->write_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
rc = 0;
written = 0;
for (i = 0; i < nblocks; i++) {
/* Copy data from user space to idal buffer. */
if (idal_buffer_from_user(device->char_data.idal_buf,
data, block_size)) {
rc = -EFAULT;
break;
}
rc = tape_do_io(device, request);
if (rc)
break;
DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
block_size - request->rescnt);
written += block_size - request->rescnt;
if (request->rescnt != 0)
break;
data += block_size;
}
tape_free_request(request);
if (rc == -ENOSPC) {
/*
* Ok, the device has no more space. It has NOT written
* the block.
*/
if (device->discipline->process_eov)
device->discipline->process_eov(device);
if (written > 0)
rc = 0;
}
/*
* After doing a write we always need two tapemarks to correctly
* terminate the tape (one to terminate the file, the second to
* flag the end of recorded data).
* Since process_eov positions the tape in front of the written
* tapemark it doesn't hurt to write two marks again.
*/
if (!rc)
device->required_tapemarks = 2;
return rc ? rc : written;
}
/*
* Character frontend tape device open function.
*/
static int
tapechar_open (struct inode *inode, struct file *filp)
{
struct tape_device *device;
int minor, rc;
DBF_EVENT(6, "TCHAR:open: %i:%i\n",
imajor(file_inode(filp)),
iminor(file_inode(filp)));
if (imajor(file_inode(filp)) != tapechar_major)
return -ENODEV;
minor = iminor(file_inode(filp));
device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
return PTR_ERR(device);
}
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
nonseekable_open(inode, filp);
} else
tape_put_device(device);
return rc;
}
/*
* Character frontend tape device release function.
*/
static int
tapechar_release(struct inode *inode, struct file *filp)
{
struct tape_device *device;
DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
device = (struct tape_device *) filp->private_data;
/*
* If this is the rewinding tape minor then rewind. In that case we
* write all required tapemarks. Otherwise only one to terminate the
* file.
*/
if ((iminor(inode) & 1) != 0) {
if (device->required_tapemarks)
tape_std_terminate_write(device);
tape_mtop(device, MTREW, 1);
} else {
if (device->required_tapemarks > 1) {
if (tape_mtop(device, MTWEOF, 1) == 0)
device->required_tapemarks--;
}
}
if (device->char_data.idal_buf != NULL) {
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = NULL;
}
tape_release(device);
filp->private_data = NULL;
tape_put_device(device);
return 0;
}
/*
* Tape device io controls.
*/
static int
__tapechar_ioctl(struct tape_device *device,
unsigned int no, unsigned long data)
{
int rc;
if (no == MTIOCTOP) {
struct mtop op;
if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
return -EFAULT;
if (op.mt_count < 0)
return -EINVAL;
/*
* Operations that change tape position should write final
* tapemarks.
*/
switch (op.mt_op) {
case MTFSF:
case MTBSF:
case MTFSR:
case MTBSR:
case MTREW:
case MTOFFL:
case MTEOM:
case MTRETEN:
case MTBSFM:
case MTFSFM:
case MTSEEK:
if (device->required_tapemarks)
tape_std_terminate_write(device);
default:
;
}
rc = tape_mtop(device, op.mt_op, op.mt_count);
if (op.mt_op == MTWEOF && rc == 0) {
if (op.mt_count > device->required_tapemarks)
device->required_tapemarks = 0;
else
device->required_tapemarks -= op.mt_count;
}
return rc;
}
if (no == MTIOCPOS) {
/* MTIOCPOS: query the tape position. */
struct mtpos pos;
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
pos.mt_blkno = rc;
if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
return -EFAULT;
return 0;
}
if (no == MTIOCGET) {
/* MTIOCGET: query the tape drive status. */
struct mtget get;
memset(&get, 0, sizeof(get));
get.mt_type = MT_ISUNKNOWN;
get.mt_resid = 0 /* device->devstat.rescnt */;
get.mt_dsreg =
((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
& MT_ST_BLKSIZE_MASK);
/* FIXME: mt_gstat, mt_erreg, mt_fileno */
get.mt_gstat = 0;
get.mt_erreg = 0;
get.mt_fileno = 0;
get.mt_gstat = device->tape_generic_status;
if (device->medium_state == MS_LOADED) {
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
if (rc == 0)
get.mt_gstat |= GMT_BOT(~0);
get.mt_blkno = rc;
}
if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
return -EFAULT;
return 0;
}
/* Try the discipline ioctl function. */
if (device->discipline->ioctl_fn == NULL)
return -EINVAL;
return device->discipline->ioctl_fn(device, no, data);
}
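/*
 * Illustrative user-space invocation of the MTIOCTOP path above, using
 * the standard <linux/mtio.h> interface (device node name assumed):
 *
 *	struct mtop op = { .mt_op = MTWEOF, .mt_count = 2 };
 *	int fd = open("/dev/ntibm0", O_RDWR);
 *	if (ioctl(fd, MTIOCTOP, &op) < 0)
 *		perror("MTIOCTOP");
 */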
static long
tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device;
long rc;
DBF_EVENT(6, "TCHAR:ioct\n");
device = (struct tape_device *) filp->private_data;
mutex_lock(&device->mutex);
rc = __tapechar_ioctl(device, no, data);
mutex_unlock(&device->mutex);
return rc;
}
#ifdef CONFIG_COMPAT
static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
unsigned long argp;
/* The 'arg' argument of any ioctl function may only be used for
* pointers because of the compat pointer conversion.
* Consider this when adding new ioctls.
*/
argp = (unsigned long) compat_ptr(data);
if (device->discipline->ioctl_fn) {
mutex_lock(&device->mutex);
rval = device->discipline->ioctl_fn(device, no, argp);
mutex_unlock(&device->mutex);
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
}
return rval;
}
#endif /* CONFIG_COMPAT */
/*
* Initialize character device frontend.
*/
int
tapechar_init (void)
{
dev_t dev;
int rc;
rc = alloc_chrdev_region(&dev, 0, 256, "tape");
if (rc)
return rc;
tapechar_major = MAJOR(dev);
return 0;
}
/*
* cleanup
*/
void
tapechar_exit(void)
{
unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
}

132
drivers/s390/char/tape_class.c Normal file
View file

@ -0,0 +1,132 @@
/*
* Copyright IBM Corp. 2004
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
"Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
static struct class *tape_class;
/*
 * Register a tape device and return a pointer to the tape class device
 * created by the call.
 *
 * device
 * The pointer to the struct device of the physical (base) device.
 * dev
 * The intended major/minor number. The major number may be 0 to
 * get a dynamic major number.
 * fops
 * The pointer to the driver's file operations for the tape device.
 * device_name
 * The pointer to the name of the character device.
 * mode_name
 * The name of the tape mode; used for the sysfs link from the
 * physical device to the class device.
 */
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name)
{
struct tape_class_device * tcd;
int rc;
char * s;
tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
if (!tcd)
return ERR_PTR(-ENOMEM);
strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
tcd->char_device = cdev_alloc();
if (!tcd->char_device) {
rc = -ENOMEM;
goto fail_with_tcd;
}
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
tcd->char_device->dev = dev;
rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
if (rc)
goto fail_with_cdev;
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = PTR_RET(tcd->class_device);
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
&device->kobj,
&tcd->class_device->kobj,
tcd->mode_name
);
if (rc)
goto fail_with_class_device;
return tcd;
fail_with_class_device:
device_destroy(tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
fail_with_tcd:
kfree(tcd);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
}
EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
tape_class = class_create(THIS_MODULE, "tape390");
if (IS_ERR(tape_class))
return PTR_ERR(tape_class);
return 0;
}
static void __exit tape_exit(void)
{
class_destroy(tape_class);
tape_class = NULL;
}
postcore_initcall(tape_init);
module_exit(tape_exit);

58
drivers/s390/char/tape_class.h Normal file
View file

@ -0,0 +1,58 @@
/*
* Copyright IBM Corp. 2004 All Rights Reserved.
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#ifndef __TAPE_CLASS_H__
#define __TAPE_CLASS_H__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#define TAPECLASS_NAME_LEN 32
struct tape_class_device {
struct cdev *char_device;
struct device *class_device;
char device_name[TAPECLASS_NAME_LEN];
char mode_name[TAPECLASS_NAME_LEN];
};
/*
* Register a tape device and return a pointer to the tape class device
* created by the call.
*
* device
* The pointer to the struct device of the physical (base) device.
* dev
* The intended major/minor number. The major number may be 0 to
* get a dynamic major number.
* fops
* The pointer to the drivers file operations for the tape device.
* device_name
* Pointer to the logical device name (will also be used as kobject name
* of the cdev). This can also be called the name of the tape class
* device.
* mode_name
* Points to the name of the tape mode. This creates a link with that
* name from the physical device to the logical device (class).
*/
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name
);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
#endif /* __TAPE_CLASS_H__ */

File diff suppressed because it is too large

143
drivers/s390/char/tape_proc.c Normal file
View file

@ -0,0 +1,143 @@
/*
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
* PROCFS Functions
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
static const char *tape_med_st_verbose[MS_SIZE] =
{
[MS_UNKNOWN] = "UNKNOWN ",
[MS_LOADED] = "LOADED ",
[MS_UNLOADED] = "UNLOADED"
};
/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
/*
* Show function for /proc/tapedevices
*/
static int tape_proc_show(struct seq_file *m, void *v)
{
struct tape_device *device;
struct tape_request *request;
const char *str;
unsigned long n;
n = (unsigned long) v - 1;
if (!n) {
seq_printf(m, "TapeNo\tBusID CuType/Model\t"
"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
}
device = tape_find_device(n);
if (IS_ERR(device))
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
seq_printf(m, "%d\t", (int) n);
seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
seq_printf(m, "%04X/", device->cdev->id.cu_type);
seq_printf(m, "%02X\t", device->cdev->id.cu_model);
seq_printf(m, "%04X/", device->cdev->id.dev_type);
seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
if (device->char_data.block_size == 0)
seq_printf(m, "auto\t");
else
seq_printf(m, "%i\t", device->char_data.block_size);
if (device->tape_state >= 0 &&
device->tape_state < TS_SIZE)
str = tape_state_verbose[device->tape_state];
else
str = "UNKNOWN";
seq_printf(m, "%s\t", str);
if (!list_empty(&device->req_queue)) {
request = list_entry(device->req_queue.next,
struct tape_request, list);
str = tape_op_verbose[request->op];
} else
str = "---";
seq_printf(m, "%s\t", str);
seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_put_device(device);
return 0;
}
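/*
 * Sample /proc/tapedevices output (values illustrative only):
 *
 * TapeNo	BusID      CuType/Model	DevType/Model	BlkSize	State	Op	MedState
 * 0	0.0.0150   3490/10	3490/40		auto	UNUSED	---	LOADED
 */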
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= 256 / TAPE_MINORS_PER_DEV)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return tape_proc_start(m, pos);
}
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations tape_proc_seq = {
.start = tape_proc_start,
.next = tape_proc_next,
.stop = tape_proc_stop,
.show = tape_proc_show,
};
static int tape_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &tape_proc_seq);
}
static const struct file_operations tape_proc_ops =
{
.owner = THIS_MODULE,
.open = tape_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* Initialize procfs stuff on startup
*/
void
tape_proc_init(void)
{
tape_proc_devices =
proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
&tape_proc_ops);
if (tape_proc_devices == NULL) {
return;
}
}
/*
* Cleanup all stuff registered to the procfs
*/
void
tape_proc_cleanup(void)
{
if (tape_proc_devices != NULL)
remove_proc_entry ("tapedevices", NULL);
}

750
drivers/s390/char/tape_std.c Normal file
View file

@ -0,0 +1,750 @@
/*
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2002
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/timer.h>
#include <asm/types.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/tape390.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
/*
* tape_std_assign
*/
static void
tape_std_assign_timeout(unsigned long data)
{
struct tape_request * request;
struct tape_device * device;
int rc;
request = (struct tape_request *) data;
device = request->device;
BUG_ON(!device);
DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
device->cdev_id);
rc = tape_cancel_io(device, request);
if (rc)
DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
"%i\n", device->cdev_id, rc);
}
int
tape_std_assign(struct tape_device *device)
{
int rc;
struct timer_list timeout;
struct tape_request *request;
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_ASSIGN;
tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/*
* The assign command sometimes blocks if the device is assigned
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
init_timer_on_stack(&timeout);
timeout.function = tape_std_assign_timeout;
timeout.data = (unsigned long) request;
timeout.expires = jiffies + 2 * HZ;
add_timer(&timeout);
rc = tape_do_io_interruptible(device, request);
del_timer_sync(&timeout);
destroy_timer_on_stack(&timeout);
if (rc != 0) {
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
device->cdev_id);
} else {
DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
}
tape_free_request(request);
return rc;
}
/*
* tape_std_unassign
*/
int
tape_std_unassign (struct tape_device *device)
{
int rc;
struct tape_request *request;
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(3, "(%08x): Can't unassign device\n",
device->cdev_id);
return -EIO;
}
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_UNASSIGN;
tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
if ((rc = tape_do_io(device, request)) != 0) {
DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
} else {
DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
}
tape_free_request(request);
return rc;
}
/*
* TAPE390_DISPLAY: Show a string on the tape display.
*/
int
tape_std_display(struct tape_device *device, struct display_struct *disp)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(2, 17);
if (IS_ERR(request)) {
DBF_EVENT(3, "TAPE: load display failed\n");
return PTR_ERR(request);
}
request->op = TO_DIS;
*(unsigned char *) request->cpdata = disp->cntrl;
DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
rc = tape_do_io_interruptible(device, request);
tape_free_request(request);
return rc;
}
/*
* Read block id.
*/
int
tape_std_read_block_id(struct tape_device *device, __u64 *id)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(3, 8);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RBI;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0)
/* Get result from read buffer. */
*id = *(__u64 *) request->cpdata;
tape_free_request(request);
return rc;
}
int
tape_std_terminate_write(struct tape_device *device)
{
int rc;
if (device->required_tapemarks == 0)
return 0;
DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
device->required_tapemarks);
rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
if (rc)
return rc;
device->required_tapemarks = 0;
return tape_mtop(device, MTBSR, 1);
}
/*
* MTLOAD: Loads the tape.
* The default implementation just waits until the tape medium state changes
* to MS_LOADED.
*/
int
tape_std_mtload(struct tape_device *device, int count)
{
return wait_event_interruptible(device->state_change_wq,
(device->medium_state == MS_LOADED));
}
/*
* MTSETBLK: Set block size.
*/
int
tape_std_mtsetblk(struct tape_device *device, int count)
{
struct idal_buffer *new;
DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
if (count <= 0) {
/*
* Just set block_size to 0. tapechar_read/tapechar_write
* will realloc the idal buffer if a bigger one than the
* current is needed.
*/
device->char_data.block_size = 0;
return 0;
}
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == count)
/* We already have an idal buffer of that size. */
return 0;
if (count > MAX_BLOCKSIZE) {
DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
count, MAX_BLOCKSIZE);
return -EINVAL;
}
/* Allocate a new idal buffer. */
new = idal_buffer_alloc(count, 0);
if (IS_ERR(new))
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
device->char_data.block_size = count;
DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
return 0;
}
/*
* MTRESET: Set block size to 0.
*/
int
tape_std_mtreset(struct tape_device *device, int count)
{
DBF_EVENT(6, "TCHAR:devreset:\n");
device->char_data.block_size = 0;
return 0;
}
/*
* MTFSF: Forward space over 'count' file marks. The tape is positioned
* at the EOT (End of Tape) side of the file mark.
*/
int
tape_std_mtfsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTFSR: Forward space over 'count' tape blocks (blocksize is set
* via MTSETBLK).
*/
int
tape_std_mtfsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0 && request->rescnt > 0) {
DBF_LH(3, "FSR over tapemark\n");
rc = 1;
}
tape_free_request(request);
return rc;
}
/*
* MTBSR: Backward space over 'count' tape blocks.
* (blocksize is set via MTSETBLK).
*/
int
tape_std_mtbsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0 && request->rescnt > 0) {
DBF_LH(3, "BSR over tapemark\n");
rc = 1;
}
tape_free_request(request);
return rc;
}
/*
* MTWEOF: Write 'count' file marks at the current position.
*/
int
tape_std_mtweof(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_WTM;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSFM: Backward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side of the
* last skipped file mark.
*/
int
tape_std_mtbsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSF: Backward space over 'count' file marks. The tape is positioned at
* the EOT (End of Tape) side of the last skipped file mark.
*/
int
tape_std_mtbsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io_free(device, request);
if (rc == 0) {
rc = tape_mtop(device, MTFSR, 1);
if (rc > 0)
rc = 0;
}
return rc;
}
/*
* MTFSFM: Forward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side
* of the last skipped file mark.
*/
int
tape_std_mtfsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io_free(device, request);
if (rc == 0) {
rc = tape_mtop(device, MTBSR, 1);
if (rc > 0)
rc = 0;
}
return rc;
}
/*
* MTREW: Rewind the tape.
*/
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_REW;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTOFFL: Rewind the tape and put the drive off-line.
* Implement 'rewind unload'
*/
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RUN;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTNOP: 'No operation'.
*/
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTEOM: positions at the end of the portion of the tape already used
* for recording data. MTEOM positions after the last file mark, ready for
* appending another file.
*/
int
tape_std_mteom(struct tape_device *device, int mt_count)
{
int rc;
/*
* Seek from the beginning of tape (rewind).
*/
if ((rc = tape_mtop(device, MTREW, 1)) < 0)
return rc;
/*
* The logical end of volume is given by two sequential tapemarks.
* Look for this by skipping to the next file (over one tapemark)
* and then test for another one (fsr returns 1 if a tapemark was
* encountered).
*/
do {
if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
return rc;
if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
return rc;
} while (rc == 0);
return tape_mtop(device, MTBSR, 1);
}
/*
* MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
*/
int
tape_std_mtreten(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(4, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
/* execute it, MTRETEN rc gets ignored */
tape_do_io_interruptible(device, request);
tape_free_request(request);
return tape_mtop(device, MTREW, 1);
}
/*
* MTERASE: erases the tape.
*/
int
tape_std_mterase(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(6, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_DSE;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTUNLOAD: Rewind the tape and unload it.
*/
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
return tape_mtop(device, MTOFFL, mt_count);
}
/*
* MTCOMPRESSION: used to enable compression.
* Sets the IDRC on/off.
*/
int
tape_std_mtcompression(struct tape_device *device, int mt_count)
{
struct tape_request *request;
if (mt_count < 0 || mt_count > 1) {
DBF_EXCEPTION(6, "xcom parm\n");
return -EINVAL;
}
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
if (mt_count == 0)
*device->modeset_byte &= ~0x08;
else
*device->modeset_byte |= 0x08;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
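/*
 * Illustrative user-space trigger for the function above via the
 * standard mtio interface (open file descriptor assumed):
 *
 *	struct mtop op = { .mt_op = MTCOMPRESSION, .mt_count = 1 };
 *	ioctl(fd, MTIOCTOP, &op);
 */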
/*
* Read Block
*/
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
/*
* We have to alloc 4 ccws in order to be able to transform request
* into a read backward request in error case.
*/
request = tape_alloc_request(4, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xrbl fail");
return request;
}
request->op = TO_RFO;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
device->char_data.idal_buf);
DBF_EVENT(6, "xrbl ccwg\n");
return request;
}
/*
* Read Block backward transformation function.
*/
void
tape_std_read_backward(struct tape_device *device, struct tape_request *request)
{
/*
* We have allocated 4 ccws in tape_std_read, so we can now
* transform the request to a read backward, followed by a
* forward space block.
*/
request->op = TO_RBA;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
device->char_data.idal_buf);
tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
DBF_EVENT(6, "xrop ccwg");}
/*
* Write Block
*/
struct tape_request *
tape_std_write_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xwbl fail\n");
return request;
}
request->op = TO_WRI;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
device->char_data.idal_buf);
DBF_EVENT(6, "xwbl ccwg\n");
return request;
}
/*
* This routine is called by the frontend after an ENOSPC on write
*/
void
tape_std_process_eov(struct tape_device *device)
{
/*
* End of volume: We have to backspace the last written record, then
* we TRY to write a tapemark and then backspace over the written TM
*/
if (tape_mtop(device, MTBSR, 1) == 0 &&
tape_mtop(device, MTWEOF, 1) == 0) {
tape_mtop(device, MTBSR, 1);
}
}
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);

154
drivers/s390/char/tape_std.h Normal file
View file

@ -0,0 +1,154 @@
/*
* standard tape device functions for ibm tapes.
*
* Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_STD_H
#define _TAPE_STD_H
#include <asm/tape390.h>
/*
* Biggest block size to handle. Currently 64K because we only build
* channel programs without data chaining.
*/
#define MAX_BLOCKSIZE 65535
/*
* The CCW commands for the Tape type of command.
*/
#define INVALID_00 0x00 /* Invalid cmd */
#define BACKSPACEBLOCK 0x27 /* Back Space block */
#define BACKSPACEFILE 0x2f /* Back Space file */
#define DATA_SEC_ERASE 0x97 /* Data security erase */
#define ERASE_GAP 0x17 /* Erase Gap */
#define FORSPACEBLOCK 0x37 /* Forward space block */
#define FORSPACEFILE 0x3F /* Forward Space file */
#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
#define NOP 0x03 /* No operation */
#define READ_FORWARD 0x02 /* Read forward */
#define REWIND 0x07 /* Rewind */
#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
#define SENSE 0x04 /* Sense */
#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
#define WRITE_CMD 0x01 /* Write */
#define WRITETAPEMARK 0x1F /* Write Tape Mark */
#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
#define CONTROL_ACCESS 0xE3 /* Set high speed */
#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
#define MODE_SET_C3 0xC3 /* for 3420 */
#define MODE_SET_CB 0xCB /* for 3420 */
#define MODE_SET_D3 0xD3 /* for 3420 */
#define READ_BACKWARD 0x0C /* Read backward */
#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
#define READ_DEV_CHAR 0x64 /* Read device characteristics */
#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
#define SYNC 0x43 /* Synchronize (flush buffer) */
#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
#define SENSE_COMMAND_REJECT 0x80
#define SENSE_INTERVENTION_REQUIRED 0x40
#define SENSE_BUS_OUT_CHECK 0x20
#define SENSE_EQUIPMENT_CHECK 0x10
#define SENSE_DATA_CHECK 0x08
#define SENSE_OVERRUN 0x04
#define SENSE_DEFERRED_UNIT_CHECK 0x02
#define SENSE_ASSIGNED_ELSEWHERE 0x01
#define SENSE_LOCATE_FAILURE 0x80
#define SENSE_DRIVE_ONLINE 0x40
#define SENSE_RESERVED 0x20
#define SENSE_RECORD_SEQUENCE_ERR 0x10
#define SENSE_BEGINNING_OF_TAPE 0x08
#define SENSE_WRITE_MODE 0x04
#define SENSE_WRITE_PROTECT 0x02
#define SENSE_NOT_CAPABLE 0x01
#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
#define SENSE_CHANNEL_ADAPTER_LOC 0x10
#define SENSE_REPORTING_CU 0x08
#define SENSE_AUTOMATIC_LOADER 0x04
#define SENSE_TAPE_SYNC_MODE 0x02
#define SENSE_TAPE_POSITIONING 0x01
/* discipline functions */
struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
void tape_std_check_locate(struct tape_device *, struct tape_request *);
/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
int tape_std_unassign(struct tape_device *);
int tape_std_read_block_id(struct tape_device *device, __u64 *id);
int tape_std_display(struct tape_device *, struct display_struct *disp);
int tape_std_terminate_write(struct tape_device *);
/* Standard magnetic tape commands. */
int tape_std_mtbsf(struct tape_device *, int);
int tape_std_mtbsfm(struct tape_device *, int);
int tape_std_mtbsr(struct tape_device *, int);
int tape_std_mtcompression(struct tape_device *, int);
int tape_std_mteom(struct tape_device *, int);
int tape_std_mterase(struct tape_device *, int);
int tape_std_mtfsf(struct tape_device *, int);
int tape_std_mtfsfm(struct tape_device *, int);
int tape_std_mtfsr(struct tape_device *, int);
int tape_std_mtload(struct tape_device *, int);
int tape_std_mtnop(struct tape_device *, int);
int tape_std_mtoffl(struct tape_device *, int);
int tape_std_mtreset(struct tape_device *, int);
int tape_std_mtreten(struct tape_device *, int);
int tape_std_mtrew(struct tape_device *, int);
int tape_std_mtsetblk(struct tape_device *, int);
int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int);
/* Event handlers */
void tape_std_default_handler(struct tape_device *);
void tape_std_unexpect_uchk_handler(struct tape_device *);
void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *);
/* the error recovery stuff: */
void tape_std_error_recovery(struct tape_device *);
void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
void tape_std_error_recovery_succeded(struct tape_device *);
void tape_std_error_recovery_do_retry(struct tape_device *);
void tape_std_error_recovery_read_opposite(struct tape_device *);
void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
/* S390 tape types */
enum s390_tape_type {
tape_3480,
tape_3490,
tape_3590,
tape_3592,
};
#endif /* _TAPE_STD_H */

1918
drivers/s390/char/tty3270.c Normal file

File diff suppressed because it is too large

14
drivers/s390/char/tty3270.h Normal file
View file

@ -0,0 +1,14 @@
/*
* Copyright IBM Corp. 2007
*
*/
#ifndef __DRIVERS_S390_CHAR_TTY3270_H
#define __DRIVERS_S390_CHAR_TTY3270_H
#include <linux/tty.h>
#include <linux/tty_driver.h>
extern struct tty_driver *tty3270_driver;
#endif /* __DRIVERS_S390_CHAR_TTY3270_H */

213
drivers/s390/char/vmcp.c Normal file
View file

@ -0,0 +1,213 @@
/*
* Copyright IBM Corp. 2004, 2010
* Interface implementation for communication with the z/VM control program
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* z/VM's CP offers the possibility to issue commands via diagnose code 8.
* This driver implements a character device that issues these commands and
* returns CP's answer.
*
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
#include "vmcp.h"
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
session = kmalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;
session->bufsize = PAGE_SIZE;
session->response = NULL;
session->resp_size = 0;
mutex_init(&session->mutex);
file->private_data = session;
return nonseekable_open(inode, file);
}
static int vmcp_release(struct inode *inode, struct file *file)
{
struct vmcp_session *session;
session = file->private_data;
file->private_data = NULL;
free_pages((unsigned long)session->response, get_order(session->bufsize));
kfree(session);
return 0;
}
static ssize_t
vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
ssize_t ret;
size_t size;
struct vmcp_session *session;
session = file->private_data;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
if (!session->response) {
mutex_unlock(&session->mutex);
return 0;
}
size = min_t(size_t, session->resp_size, session->bufsize);
ret = simple_read_from_buffer(buff, count, ppos,
session->response, size);
mutex_unlock(&session->mutex);
return ret;
}
static ssize_t
vmcp_write(struct file *file, const char __user *buff, size_t count,
loff_t *ppos)
{
char *cmd;
struct vmcp_session *session;
if (count > 240)
return -EINVAL;
cmd = kmalloc(count + 1, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
if (copy_from_user(cmd, buff, count)) {
kfree(cmd);
return -EFAULT;
}
cmd[count] = '\0';
session = file->private_data;
if (mutex_lock_interruptible(&session->mutex)) {
kfree(cmd);
return -ERESTARTSYS;
}
if (!session->response)
session->response = (char *)__get_free_pages(GFP_KERNEL
| __GFP_REPEAT | GFP_DMA,
get_order(session->bufsize));
if (!session->response) {
mutex_unlock(&session->mutex);
kfree(cmd);
return -ENOMEM;
}
debug_text_event(vmcp_debug, 1, cmd);
session->resp_size = cpcmd(cmd, session->response, session->bufsize,
&session->resp_code);
mutex_unlock(&session->mutex);
kfree(cmd);
*ppos = 0; /* reset the file pointer after a command */
return count;
}
/*
* These ioctls are available, as the semantics of the diagnose 8 call
* do not fit very well into a Linux call. Diagnose X'08' is described in
* CP Programming Services SC24-6084-00
*
* VMCP_GETCODE: gives the CP return code back to user space
* VMCP_SETBUF: sets the response buffer for the next write call. Diagnose 8
* expects adjacent pages in real storage and, to make matters worse, we
* don't know the size of the response. Therefore we default to PAGE_SIZE and
* let userspace change the response size if a bigger response is
* expected.
*/
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
int __user *argp;
int temp;
session = file->private_data;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (int __user *)arg;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
temp = session->resp_code;
mutex_unlock(&session->mutex);
return put_user(temp, argp);
case VMCP_SETBUF:
free_pages((unsigned long)session->response,
get_order(session->bufsize));
session->response = NULL;
temp = get_user(session->bufsize, argp);
if (get_order(session->bufsize) > 8) {
session->bufsize = PAGE_SIZE;
temp = -EINVAL;
}
mutex_unlock(&session->mutex);
return temp;
case VMCP_GETSIZE:
temp = session->resp_size;
mutex_unlock(&session->mutex);
return put_user(temp, argp);
default:
mutex_unlock(&session->mutex);
return -ENOIOCTLCMD;
}
}
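/*
 * Typical user-space sequence against /dev/vmcp (sketch, error handling
 * omitted): write the CP command, fetch the CP return code, then read
 * the response.
 *
 *	write(fd, "QUERY USERS", 11);
 *	ioctl(fd, VMCP_GETCODE, &cprc);
 *	read(fd, buf, sizeof(buf));
 */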
static const struct file_operations vmcp_fops = {
.owner = THIS_MODULE,
.open = vmcp_open,
.release = vmcp_release,
.read = vmcp_read,
.write = vmcp_write,
.unlocked_ioctl = vmcp_ioctl,
.compat_ioctl = vmcp_ioctl,
.llseek = no_llseek,
};
static struct miscdevice vmcp_dev = {
.name = "vmcp",
.minor = MISC_DYNAMIC_MINOR,
.fops = &vmcp_fops,
};
static int __init vmcp_init(void)
{
int ret;
if (!MACHINE_IS_VM)
return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
return -ENOMEM;
ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
if (ret) {
debug_unregister(vmcp_debug);
return ret;
}
ret = misc_register(&vmcp_dev);
if (ret)
debug_unregister(vmcp_debug);
return ret;
}
device_initcall(vmcp_init);

30
drivers/s390/char/vmcp.h Normal file
View file

@ -0,0 +1,30 @@
/*
* Copyright IBM Corp. 2004, 2005
* Interface implementation for communication with the z/VM control program
* Version 1.0
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
*
*
* z/VM's CP offers the possibility to issue commands via diagnose code 8.
* This driver implements a character device that issues these commands and
* returns CP's answer.
*
* The idea of this driver is based on cpint from Neale Ferguson
*/
#include <linux/ioctl.h>
#include <linux/mutex.h>
#define VMCP_GETCODE _IOR(0x10, 1, int)
#define VMCP_SETBUF _IOW(0x10, 2, int)
#define VMCP_GETSIZE _IOR(0x10, 3, int)
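/*
 * Example with hypothetical values: enlarge the response buffer to
 * 64 KiB before issuing a command with a long reply. The driver rejects
 * sizes above order 8 (1 MiB with 4 KiB pages), falling back to
 * PAGE_SIZE and returning -EINVAL.
 *
 *	int size = 65536;
 *	ioctl(fd, VMCP_SETBUF, &size);
 */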
struct vmcp_session {
unsigned int bufsize;
char *response;
int resp_size;
int resp_code;
/* As we use copy_from/to_user, which might sleep,
 * we cannot use a spinlock here. */
struct mutex mutex;
};

907
drivers/s390/char/vmlogrdr.c Normal file
View file

@ -0,0 +1,907 @@
/*
 * character device driver for reading z/VM system service records,
 * Version 1.0
 *
 * Copyright IBM Corp. 2004, 2009
 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 * Stefan Weinhuber <wein@de.ibm.com>
 *
 */
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
MODULE_AUTHOR
("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
" Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
"system service records.");
MODULE_LICENSE("GPL");
/*
* The size of the buffer for iucv data transfer is one page,
* but in addition to the data we read from iucv we also
* place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
* one page.
*/
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
/*
* The elements that are concurrently accessed by bottom halves are
* connection_established, iucv_path_severed, local_interrupt_buffer
* and receive_ready. The first three can be protected by
* priv_lock. receive_ready is atomic, so it can be incremented and
* decremented without holding a lock.
* The variable dev_in_use needs to be protected by the lock, since
* it's a flag used by open to make sure that the device is opened only
* by one user at the same time.
*/
struct vmlogrdr_priv_t {
char system_service[8];
char internal_name[8];
char recording_name[8];
struct iucv_path *path;
int connection_established;
int iucv_path_severed;
struct iucv_message local_interrupt_buffer;
atomic_t receive_ready;
int minor_num;
char * buffer;
char * current_position;
int remaining;
ulong residual_length;
int buffer_free;
int dev_in_use; /* 1: already opened, 0: not opened*/
spinlock_t priv_lock;
struct device *device;
struct device *class_device;
int autorecording;
int autopurge;
};
/*
* File operation structure for vmlogrdr devices
*/
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
size_t count, loff_t * ppos);
static const struct file_operations vmlogrdr_fops = {
.owner = THIS_MODULE,
.open = vmlogrdr_open,
.release = vmlogrdr_release,
.read = vmlogrdr_read,
.llseek = no_llseek,
};
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
struct iucv_message *);
static struct iucv_handler vmlogrdr_iucv_handler = {
.path_complete = vmlogrdr_iucv_path_complete,
.path_severed = vmlogrdr_iucv_path_severed,
.message_pending = vmlogrdr_iucv_message_pending,
};
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
* pointer to system service private structure
* minor number 0 --> logrec
* minor number 1 --> account
* minor number 2 --> symptom
*/
static struct vmlogrdr_priv_t sys_ser[] = {
{ .system_service = "*LOGREC ",
.internal_name = "logrec",
.recording_name = "EREP",
.minor_num = 0,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
{ .system_service = "*ACCOUNT",
.internal_name = "account",
.recording_name = "ACCOUNT",
.minor_num = 1,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
.autorecording = 1,
.autopurge = 1,
},
{ .system_service = "*SYMPTOM",
.internal_name = "symptom",
.recording_name = "SYMPTOM",
.minor_num = 2,
.buffer_free = 1,
.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
.autorecording = 1,
.autopurge = 1,
}
};
#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
struct vmlogrdr_priv_t * logptr = path->private;
spin_lock(&logptr->priv_lock);
logptr->connection_established = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
}
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
struct vmlogrdr_priv_t * logptr = path->private;
u8 reason = (u8) ipuser[8];
pr_err("vmlogrdr: connection severed with reason %i\n", reason);
iucv_path_sever(path, NULL);
kfree(path);
logptr->path = NULL;
spin_lock(&logptr->priv_lock);
logptr->connection_established = 0;
logptr->iucv_path_severed = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
/* just in case we're sleeping waiting for a record */
wake_up_interruptible(&read_wait_queue);
}
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct vmlogrdr_priv_t * logptr = path->private;
/*
* This function is the bottom half so it should be quick.
* Copy the external interrupt data into our local eib and increment
* the usage count
*/
spin_lock(&logptr->priv_lock);
memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
atomic_inc(&logptr->receive_ready);
spin_unlock(&logptr->priv_lock);
wake_up_interruptible(&read_wait_queue);
}
static int vmlogrdr_get_recording_class_AB(void)
{
static const char cp_command[] = "QUERY COMMAND RECORDING ";
char cp_response[80];
char *tail;
int len,i;
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
len = strnlen(cp_response,sizeof(cp_response));
// now the parsing
tail=strnchr(cp_response,len,'=');
if (!tail)
return 0;
tail++;
if (!strncmp("ANY",tail,3))
return 1;
if (!strncmp("NONE",tail,4))
return 0;
/*
	 * expect a comma-separated list of classes here; if one of them
	 * is A or B, return 1, otherwise 0
*/
for (i=tail-cp_response; i<len; i++)
if ( cp_response[i]=='A' || cp_response[i]=='B' )
return 1;
return 0;
}
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
int action, int purge)
{
char cp_command[80];
char cp_response[160];
char *onoff, *qid_string;
int rc;
onoff = ((action == 1) ? "ON" : "OFF");
qid_string = ((recording_class_AB == 1) ? " QID * " : "");
/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
* Purging has to be done as separate step, because recording
* can't be switched on as long as records are on the queue.
* Doing both at the same time doesn't work.
*/
if (purge && (action == 1)) {
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE %s",
logptr->recording_name,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
}
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
logptr->recording_name,
onoff,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but if the specific service has never been connected
	 * before, there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than strncmp.
	 */
if (strstr(cp_response,"Command complete"))
rc = 0;
else
rc = -EIO;
/*
* If we turn recording off, we have to purge any remaining records
* afterwards, as a large number of queued records may impact z/VM
* performance.
*/
if (purge && (action == 0)) {
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE %s",
logptr->recording_name,
qid_string);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
}
return rc;
}
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
int dev_num = 0;
struct vmlogrdr_priv_t * logptr = NULL;
int connect_rc = 0;
int ret;
dev_num = iminor(inode);
if (dev_num >= MAXMINOR)
return -ENODEV;
logptr = &sys_ser[dev_num];
/*
 * only allow the device to be opened for blocking reads
*/
if (filp->f_flags & O_NONBLOCK)
return -EOPNOTSUPP;
	/* Be sure this device hasn't already been opened */
spin_lock_bh(&logptr->priv_lock);
if (logptr->dev_in_use) {
spin_unlock_bh(&logptr->priv_lock);
return -EBUSY;
}
logptr->dev_in_use = 1;
logptr->connection_established = 0;
logptr->iucv_path_severed = 0;
atomic_set(&logptr->receive_ready, 0);
logptr->buffer_free = 1;
spin_unlock_bh(&logptr->priv_lock);
/* set the file options */
filp->private_data = logptr;
/* start recording for this service*/
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
if (ret)
pr_warning("vmlogrdr: failed to start "
"recording automatically\n");
}
/* create connection to the system service */
logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
if (!logptr->path)
goto out_dev;
connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
logptr->system_service, NULL, NULL,
logptr);
if (connect_rc) {
pr_err("vmlogrdr: iucv connection to %s "
"failed with rc %i \n",
logptr->system_service, connect_rc);
goto out_path;
}
/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered interrupt
* before we can continue to process.
*/
wait_event(conn_wait_queue, (logptr->connection_established)
|| (logptr->iucv_path_severed));
if (logptr->iucv_path_severed)
goto out_record;
nonseekable_open(inode, filp);
return 0;
out_record:
if (logptr->autorecording)
vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
kfree(logptr->path); /* kfree(NULL) is ok. */
logptr->path = NULL;
out_dev:
logptr->dev_in_use = 0;
return -EIO;
}
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
int ret;
struct vmlogrdr_priv_t * logptr = filp->private_data;
iucv_path_sever(logptr->path, NULL);
kfree(logptr->path);
logptr->path = NULL;
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
pr_warning("vmlogrdr: failed to stop "
"recording automatically\n");
}
logptr->dev_in_use = 0;
return 0;
}
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
int rc, *temp;
/* we need to keep track of two data sizes here:
* The number of bytes we need to receive from iucv and
* the total number of bytes we actually write into the buffer.
*/
int user_data_count, iucv_data_count;
char * buffer;
if (atomic_read(&priv->receive_ready)) {
spin_lock_bh(&priv->priv_lock);
if (priv->residual_length){
/* receive second half of a record */
iucv_data_count = priv->residual_length;
user_data_count = 0;
buffer = priv->buffer;
} else {
/* receive a new record:
* We need to return the total length of the record
* + size of FENCE in the first 4 bytes of the buffer.
*/
iucv_data_count = priv->local_interrupt_buffer.length;
user_data_count = sizeof(int);
temp = (int*)priv->buffer;
*temp= iucv_data_count + sizeof(FENCE);
buffer = priv->buffer + sizeof(int);
}
/*
* If the record is bigger than our buffer, we receive only
* a part of it. We can get the rest later.
*/
if (iucv_data_count > NET_BUFFER_SIZE)
iucv_data_count = NET_BUFFER_SIZE;
rc = iucv_message_receive(priv->path,
&priv->local_interrupt_buffer,
0, buffer, iucv_data_count,
&priv->residual_length);
spin_unlock_bh(&priv->priv_lock);
/* An rc of 5 indicates that the record was bigger than
* the buffer, which is OK for us. A 9 indicates that the
	 * record was purged before we could receive it.
*/
if (rc == 5)
rc = 0;
if (rc == 9)
atomic_set(&priv->receive_ready, 0);
} else {
rc = 1;
}
if (!rc) {
priv->buffer_free = 0;
user_data_count += iucv_data_count;
priv->current_position = priv->buffer;
if (priv->residual_length == 0){
/* the whole record has been captured,
* now add the fence */
atomic_dec(&priv->receive_ready);
buffer = priv->buffer + user_data_count;
memcpy(buffer, FENCE, sizeof(FENCE));
user_data_count += sizeof(FENCE);
}
priv->remaining = user_data_count;
}
return rc;
}
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
size_t count, loff_t * ppos)
{
int rc;
struct vmlogrdr_priv_t * priv = filp->private_data;
while (priv->buffer_free) {
rc = vmlogrdr_receive_data(priv);
if (rc) {
rc = wait_event_interruptible(read_wait_queue,
atomic_read(&priv->receive_ready));
if (rc)
return rc;
}
}
/* copy only up to end of record */
if (count > priv->remaining)
count = priv->remaining;
if (copy_to_user(data, priv->current_position, count))
return -EFAULT;
*ppos += count;
priv->current_position += count;
priv->remaining -= count;
/* if all data has been transferred, set buffer free */
if (priv->remaining == 0)
priv->buffer_free = 1;
return count;
}
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
case '0':
priv->autopurge=0;
break;
case '1':
priv->autopurge=1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static ssize_t vmlogrdr_autopurge_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autopurge);
}
static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
vmlogrdr_autopurge_store);
static ssize_t vmlogrdr_purge_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
char cp_command[80];
char cp_response[80];
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
if (buf[0] != '1')
return -EINVAL;
memset(cp_command, 0x00, sizeof(cp_command));
memset(cp_response, 0x00, sizeof(cp_response));
/*
* The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
* Other guests will not recognize the command and we have to
* issue the same command without the QID parameter.
*/
if (recording_class_AB)
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE QID * ",
priv->recording_name);
else
snprintf(cp_command, sizeof(cp_command),
"RECORDING %s PURGE ",
priv->recording_name);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
return count;
}
static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
static ssize_t vmlogrdr_autorecording_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret = count;
switch (buf[0]) {
case '0':
priv->autorecording=0;
break;
case '1':
priv->autorecording=1;
break;
default:
ret = -EINVAL;
}
return ret;
}
static ssize_t vmlogrdr_autorecording_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", priv->autorecording);
}
static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
vmlogrdr_autorecording_store);
static ssize_t vmlogrdr_recording_store(struct device * dev,
struct device_attribute *attr,
const char * buf, size_t count)
{
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
ssize_t ret;
switch (buf[0]) {
case '0':
ret = vmlogrdr_recording(priv,0,0);
break;
case '1':
ret = vmlogrdr_recording(priv,1,0);
break;
default:
ret = -EINVAL;
}
if (ret)
return ret;
else
return count;
}
static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
char *buf)
{
static const char cp_command[] = "QUERY RECORDING ";
int len;
cpcmd(cp_command, buf, 4096, NULL);
len = strlen(buf);
return len;
}
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
NULL);
static struct attribute *vmlogrdr_drv_attrs[] = {
&driver_attr_recording_status.attr,
NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
&vmlogrdr_drv_attr_group,
NULL,
};
static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_autopurge.attr,
&dev_attr_purge.attr,
&dev_attr_autorecording.attr,
&dev_attr_recording.attr,
NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
&vmlogrdr_attr_group,
NULL,
};
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
spin_lock_bh(&priv->priv_lock);
if (priv->dev_in_use)
rc = -EBUSY;
spin_unlock_bh(&priv->priv_lock);
}
if (rc)
pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
dev_name(dev));
return rc;
}
static const struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
.pm = &vmlogrdr_pm_ops,
.groups = vmlogrdr_drv_attr_groups,
};
static int vmlogrdr_register_driver(void)
{
int ret;
/* Register with iucv driver */
ret = iucv_register(&vmlogrdr_iucv_handler, 1);
if (ret)
goto out;
ret = driver_register(&vmlogrdr_driver);
if (ret)
goto out_iucv;
vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
ret = PTR_ERR(vmlogrdr_class);
vmlogrdr_class = NULL;
goto out_driver;
}
return 0;
out_driver:
driver_unregister(&vmlogrdr_driver);
out_iucv:
iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
return ret;
}
static void vmlogrdr_unregister_driver(void)
{
class_destroy(vmlogrdr_class);
vmlogrdr_class = NULL;
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
struct device *dev;
int ret;
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (dev) {
dev_set_name(dev, "%s", priv->internal_name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
dev->groups = vmlogrdr_attr_groups;
dev_set_drvdata(dev, priv);
/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
dev->release = (void (*)(struct device *))kfree;
} else
return -ENOMEM;
ret = device_register(dev);
if (ret) {
put_device(dev);
return ret;
}
priv->class_device = device_create(vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
priv, "%s", dev_name(dev));
if (IS_ERR(priv->class_device)) {
ret = PTR_ERR(priv->class_device);
priv->class_device=NULL;
device_unregister(dev);
return ret;
}
priv->device = dev;
return 0;
}
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
device_unregister(priv->device);
priv->device=NULL;
}
return 0;
}
static int vmlogrdr_register_cdev(dev_t dev)
{
int rc = 0;
vmlogrdr_cdev = cdev_alloc();
if (!vmlogrdr_cdev) {
return -ENOMEM;
}
vmlogrdr_cdev->owner = THIS_MODULE;
vmlogrdr_cdev->ops = &vmlogrdr_fops;
vmlogrdr_cdev->dev = dev;
rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
if (!rc)
return 0;
// cleanup: cdev is not fully registered, no cdev_del here!
kobject_put(&vmlogrdr_cdev->kobj);
vmlogrdr_cdev=NULL;
return rc;
}
static void vmlogrdr_cleanup(void)
{
int i;
if (vmlogrdr_cdev) {
cdev_del(vmlogrdr_cdev);
vmlogrdr_cdev=NULL;
}
for (i=0; i < MAXMINOR; ++i ) {
vmlogrdr_unregister_device(&sys_ser[i]);
free_page((unsigned long)sys_ser[i].buffer);
}
vmlogrdr_unregister_driver();
if (vmlogrdr_major) {
unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
vmlogrdr_major=0;
}
}
static int __init vmlogrdr_init(void)
{
int rc;
int i;
dev_t dev;
if (! MACHINE_IS_VM) {
pr_err("not running under VM, driver not loaded.\n");
return -ENODEV;
}
recording_class_AB = vmlogrdr_get_recording_class_AB();
rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
if (rc)
return rc;
vmlogrdr_major = MAJOR(dev);
rc=vmlogrdr_register_driver();
if (rc)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
}
sys_ser[i].current_position = sys_ser[i].buffer;
rc=vmlogrdr_register_device(&sys_ser[i]);
if (rc)
break;
}
if (rc)
goto cleanup;
rc = vmlogrdr_register_cdev(dev);
if (rc)
goto cleanup;
return 0;
cleanup:
vmlogrdr_cleanup();
return rc;
}
static void __exit vmlogrdr_exit(void)
{
vmlogrdr_cleanup();
return;
}
module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);
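/*
 * Hypothetical userspace sketch (not part of this driver): it reads one
 * record from a vmlogrdr node. Per vmlogrdr_receive_data(), each record
 * starts with an int holding the length of the record data plus the
 * 4-byte "EOR" fence. The /dev/logrec path assumes udev names the node
 * after the class device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int len;
	ssize_t n, r;
	char *rec;
	int fd = open("/dev/logrec", O_RDONLY);	/* O_NONBLOCK is rejected */

	if (fd < 0)
		return 1;
	/* first 4 bytes: length of record data incl. the "EOR" fence */
	if (read(fd, &len, sizeof(len)) != sizeof(len))
		return 1;
	rec = malloc(len);
	if (!rec)
		return 1;
	/* records larger than one page arrive across several reads */
	for (n = 0; n < len; n += r) {
		r = read(fd, rec + n, len - n);
		if (r <= 0)
			return 1;
	}
	printf("got %d byte record\n", len);
	free(rec);
	close(fd);
	return 0;
}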

1070
drivers/s390/char/vmur.c Normal file

File diff suppressed because it is too large

110
drivers/s390/char/vmur.h Normal file
View file

@ -0,0 +1,110 @@
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
*
* Copyright IBM Corp. 2001, 2007
* Authors: Malcolm Beattie <beattiem@uk.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Frank Munzert <munzert@de.ibm.com>
*/
#ifndef _VMUR_H_
#define _VMUR_H_
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
/*
* we only support z/VM's default unit record devices:
* both in SPOOL directory control statement and in CP DEFINE statement
* RDR defaults to 2540 reader
* PUN defaults to 2540 punch
* PRT defaults to 1403 printer
*/
#define READER_PUNCH_DEVTYPE 0x2540
#define PRINTER_DEVTYPE 0x1403
/* z/VM spool file control block SFBLOK */
struct file_control_block {
char reserved_1[8];
char user_owner[8];
char user_orig[8];
__s32 data_recs;
__s16 rec_len;
__s16 file_num;
__u8 file_stat;
__u8 dev_type;
char reserved_2[6];
char file_name[12];
char file_type[12];
char create_date[8];
char create_time[8];
char reserved_3[6];
__u8 file_class;
__u8 sfb_lok;
__u64 distr_code;
__u32 reserved_4;
__u8 current_starting_copy_number;
__u8 sfblock_cntrl_flags;
__u8 reserved_5;
__u8 more_status_flags;
char rest[200];
} __attribute__ ((packed));
#define FLG_SYSTEM_HOLD 0x04
#define FLG_CP_DUMP 0x10
#define FLG_USER_HOLD 0x20
#define FLG_IN_USE 0x80
/*
* A struct urdev is created for each ur device that is made available
* via the ccw_device driver model.
*/
struct urdev {
struct ccw_device *cdev; /* Backpointer to ccw device */
struct mutex io_mutex; /* Serialises device IO */
struct completion *io_done; /* do_ur_io waits; irq completes */
struct device *device;
struct cdev *char_device;
struct ccw_dev_id dev_id; /* device id */
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
atomic_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
};
/*
* A struct urfile is allocated at open() time for each device and
* freed on release().
*/
struct urfile {
struct urdev *urd;
unsigned int flags;
size_t dev_reclen;
__u16 file_reclen;
};
/*
* Device major/minor definitions.
*/
#define UR_MAJOR 0 /* get dynamic major */
/*
* We map minor numbers directly to device numbers (0-FFFF) for simplicity.
* This avoids having to allocate (and manage) slot numbers.
*/
#define NUM_MINORS 65536
/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
#define MAX_RECS_PER_IO 511
#define WRITE_CCW_CMD 0x01
#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
#define CCWDEV_CU_DI(cutype, di) \
CCW_DEVICE(cutype, 0x00), .driver_info = (di)
#define FILE_RECLEN_OFFSET 4064 /* reclen offset in spool data block */
#endif /* _VMUR_H_ */
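/*
 * Hypothetical sketch (not part of this header) of the arithmetic behind
 * MAX_RECS_PER_IO: a CCW is 8 bytes, so 511 read/write CCWs plus one NOP
 * fill exactly one 4 KiB page. CCW_SIZE and ur_nr_ios() are illustrative.
 */
#include <linux/bug.h>
#include <linux/kernel.h>

#define CCW_SIZE 8	/* bytes per channel command word */

static inline unsigned long ur_nr_ios(unsigned long recs)
{
	/* one channel program, i.e. one I/O, per 511 records */
	BUILD_BUG_ON((MAX_RECS_PER_IO + 1) * CCW_SIZE != 4096);
	return DIV_ROUND_UP(recs, MAX_RECS_PER_IO);
}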

782
drivers/s390/char/zcore.c Normal file
View file

@ -0,0 +1,782 @@
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
* For more information please refer to Documentation/s390/zfcpdump.txt
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/switch_to.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
#define TO_USER 1
#define TO_KERNEL 0
#define CHUNK_INFO_SIZE 34 /* two 16-digit hex numbers, each followed by a blank */
enum arch_id {
ARCH_S390 = 0,
ARCH_S390X = 1,
};
/* dump system info */
struct sys_info {
enum arch_id arch;
unsigned long sa_base;
u32 sa_size;
int cpu_map[NR_CPUS];
unsigned long mem_size;
struct save_area lc_mask;
};
struct ipib_info {
unsigned long ipib;
u32 checksum;
} __attribute__((packed));
static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *ipl_block;
/*
* Copy memory from HSA to kernel or user memory (not reentrant):
*
* @dest: Kernel or user buffer where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
* @mode: Either TO_KERNEL or TO_USER
*/
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
int offs, blk_num;
static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
if (!hsa_available)
return -ENODATA;
if (count == 0)
return 0;
/* copy first block */
offs = 0;
if ((src % PAGE_SIZE) != 0) {
blk_num = src / PAGE_SIZE + 2;
if (sclp_sdias_copy(buf, blk_num, 1)) {
TRACE("sclp_sdias_copy() failed\n");
return -EIO;
}
offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest,
buf + (src % PAGE_SIZE), offs))
return -EFAULT;
} else
memcpy(dest, buf + (src % PAGE_SIZE), offs);
}
if (offs == count)
goto out;
/* copy middle */
for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
blk_num = (src + offs) / PAGE_SIZE + 2;
if (sclp_sdias_copy(buf, blk_num, 1)) {
TRACE("sclp_sdias_copy() failed\n");
return -EIO;
}
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest + offs,
buf, PAGE_SIZE))
return -EFAULT;
} else
memcpy(dest + offs, buf, PAGE_SIZE);
}
if (offs == count)
goto out;
/* copy last block */
blk_num = (src + offs) / PAGE_SIZE + 2;
if (sclp_sdias_copy(buf, blk_num, 1)) {
TRACE("sclp_sdias_copy() failed\n");
return -EIO;
}
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest + offs, buf,
count - offs))
return -EFAULT;
} else
memcpy(dest + offs, buf, count - offs);
out:
return 0;
}
static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}
static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
return memcpy_hsa(dest, src, count, TO_KERNEL);
}
static int __init init_cpu_info(enum arch_id arch)
{
struct save_area_ext *sa_ext;
/* get info for boot cpu from lowcore, stored in the HSA */
sa_ext = dump_save_area_create(0);
if (!sa_ext)
return -ENOMEM;
if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,
sys_info.sa_size) < 0) {
TRACE("could not copy from HSA\n");
kfree(sa_ext);
return -EIO;
}
if (MACHINE_HAS_VX)
save_vx_regs_safe(sa_ext->vx_regs);
return 0;
}
static DEFINE_MUTEX(zcore_mutex);
#define DUMP_VERSION 0x5
#define DUMP_MAGIC 0xa8190173618f23fdULL
#define DUMP_ARCH_S390X 2
#define DUMP_ARCH_S390 1
#define HEADER_SIZE 4096
/* dump header dumped according to s390 crash dump format */
struct zcore_header {
u64 magic;
u32 version;
u32 header_size;
u32 dump_level;
u32 page_size;
u64 mem_size;
u64 mem_start;
u64 mem_end;
u32 num_pages;
u32 pad1;
u64 tod;
struct cpuid cpu_id;
u32 arch_id;
u32 volnr;
u32 build_arch;
u64 rmem_size;
u8 mvdump;
u16 cpu_cnt;
u16 real_cpu_cnt;
u8 end_pad1[0x200-0x061];
u64 mvdump_sign;
u64 mvdump_zipl_time;
u8 end_pad2[0x800-0x210];
u32 lc_vec[512];
} __attribute__((packed,__aligned__(16)));
static struct zcore_header zcore_header = {
.magic = DUMP_MAGIC,
.version = DUMP_VERSION,
.header_size = 4096,
.dump_level = 0,
.page_size = PAGE_SIZE,
.mem_start = 0,
#ifdef CONFIG_64BIT
.build_arch = DUMP_ARCH_S390X,
#else
.build_arch = DUMP_ARCH_S390,
#endif
};
/*
* Copy lowcore info to buffer. Use map in order to copy only register parts.
*
* @buf: User buffer
* @sa: Pointer to save area
* @sa_off: Offset in save area to copy
* @len: Number of bytes to copy
*/
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
int i;
char *lc_mask = (char*)&sys_info.lc_mask;
for (i = 0; i < len; i++) {
if (!lc_mask[i + sa_off])
continue;
if (copy_to_user(buf + i, sa + sa_off + i, 1))
return -EFAULT;
}
return 0;
}
/*
 * Copy lowcore info to buffer, if necessary
 *
 * @buf: User buffer
 * @start: Start address of buffer in dump memory
 * @count: Size of buffer
*/
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
unsigned long end;
int i;
if (count == 0)
return 0;
end = start + count;
for (i = 0; i < dump_save_areas.count; i++) {
unsigned long cp_start, cp_end; /* copy range */
unsigned long sa_start, sa_end; /* save area range */
unsigned long prefix;
unsigned long sa_off, len, buf_off;
struct save_area *save_area = &dump_save_areas.areas[i]->sa;
prefix = save_area->pref_reg;
sa_start = prefix + sys_info.sa_base;
sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
if ((end < sa_start) || (start > sa_end))
continue;
cp_start = max(start, sa_start);
cp_end = min(end, sa_end);
buf_off = cp_start - start;
sa_off = cp_start - sa_start;
len = cp_end - cp_start;
TRACE("copy_lc for: %lx\n", start);
if (copy_lc(buf + buf_off, save_area, sa_off, len))
return -EFAULT;
}
return 0;
}
/*
* Release the HSA
*/
static void release_hsa(void)
{
diag308(DIAG308_REL_HSA, NULL);
hsa_available = 0;
}
/*
* Read routine for zcore character device
 * First 4K is the dump header
 * The next sclp_get_hsa_size() bytes are read from HSA memory
 * The rest is read from absolute memory
*/
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
unsigned long mem_start; /* Start address in memory */
size_t mem_offs; /* Offset in dump memory */
size_t hdr_count; /* Size of header part of output buffer */
size_t size;
int rc;
mutex_lock(&zcore_mutex);
if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
rc = -EINVAL;
goto fail;
}
count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
/* Copy dump header */
if (*ppos < HEADER_SIZE) {
size = min(count, (size_t) (HEADER_SIZE - *ppos));
	if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
rc = -EFAULT;
goto fail;
}
hdr_count = size;
mem_start = 0;
} else {
hdr_count = 0;
mem_start = *ppos - HEADER_SIZE;
}
mem_offs = 0;
/* Copy from HSA data */
if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) {
size = min((count - hdr_count),
(size_t) (sclp_get_hsa_size() - mem_start));
rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
if (rc)
goto fail;
mem_offs += size;
}
/* Copy from real mem */
size = count - mem_offs - hdr_count;
rc = copy_to_user_real(buf + hdr_count + mem_offs,
(void *) mem_start + mem_offs, size);
if (rc)
goto fail;
/*
* Since s390 dump analysis tools like lcrash or crash
* expect register sets in the prefix pages of the cpus,
* we copy them into the read buffer, if necessary.
* buf + hdr_count: Start of memory part of output buffer
* mem_start: Start memory address to copy from
* count - hdr_count: Size of memory area to copy
*/
if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
rc = -EFAULT;
goto fail;
}
*ppos += count;
fail:
mutex_unlock(&zcore_mutex);
return (rc < 0) ? rc : count;
}
static int zcore_open(struct inode *inode, struct file *filp)
{
if (!hsa_available)
return -ENODATA;
else
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
static int zcore_release(struct inode *inode, struct file *filep)
{
if (hsa_available)
release_hsa();
return 0;
}
static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
loff_t rc;
mutex_lock(&zcore_mutex);
switch (orig) {
case 0:
file->f_pos = offset;
rc = file->f_pos;
break;
case 1:
file->f_pos += offset;
rc = file->f_pos;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&zcore_mutex);
return rc;
}
static const struct file_operations zcore_fops = {
.owner = THIS_MODULE,
.llseek = zcore_lseek,
.read = zcore_read,
.open = zcore_open,
.release = zcore_release,
};
static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, filp->private_data,
memblock.memory.cnt * CHUNK_INFO_SIZE);
}
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
struct memblock_region *reg;
char *buf;
int i = 0;
buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL);
if (!buf) {
return -ENOMEM;
}
for_each_memblock(memory, reg) {
sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
(unsigned long long) reg->base,
(unsigned long long) reg->size);
}
filp->private_data = buf;
return nonseekable_open(inode, filp);
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
kfree(filp->private_data);
return 0;
}
static const struct file_operations zcore_memmap_fops = {
.owner = THIS_MODULE,
.read = zcore_memmap_read,
.open = zcore_memmap_open,
.release = zcore_memmap_release,
.llseek = no_llseek,
};
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
if (ipl_block) {
diag308(DIAG308_SET, ipl_block);
diag308(DIAG308_IPL, NULL);
}
return count;
}
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
return nonseekable_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
return 0;
}
static const struct file_operations zcore_reipl_fops = {
.owner = THIS_MODULE,
.write = zcore_reipl_write,
.open = zcore_reipl_open,
.release = zcore_reipl_release,
.llseek = no_llseek,
};
static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
static char str[18];
if (hsa_available)
snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size());
else
snprintf(str, sizeof(str), "0\n");
return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
}
static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
char value;
if (*ppos != 0)
return -EPIPE;
if (copy_from_user(&value, buf, 1))
return -EFAULT;
if (value != '0')
return -EINVAL;
release_hsa();
return count;
}
static const struct file_operations zcore_hsa_fops = {
.owner = THIS_MODULE,
.write = zcore_hsa_write,
.read = zcore_hsa_read,
.open = nonseekable_open,
.llseek = no_llseek,
};
#ifdef CONFIG_32BIT
static void __init set_lc_mask(struct save_area *map)
{
memset(&map->ext_save, 0xff, sizeof(map->ext_save));
memset(&map->timer, 0xff, sizeof(map->timer));
memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
memset(&map->psw, 0xff, sizeof(map->psw));
memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
#else /* CONFIG_32BIT */
static void __init set_lc_mask(struct save_area *map)
{
memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
memset(&map->psw, 0xff, sizeof(map->psw));
memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
memset(&map->timer, 0xff, sizeof(map->timer));
memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
#endif /* CONFIG_32BIT */
/*
* Initialize dump globals for a given architecture
*/
static int __init sys_info_init(enum arch_id arch, unsigned long mem_end)
{
int rc;
switch (arch) {
case ARCH_S390X:
pr_alert("DETECTED 'S390X (64 bit) OS'\n");
break;
case ARCH_S390:
pr_alert("DETECTED 'S390 (32 bit) OS'\n");
break;
default:
pr_alert("0x%x is an unknown architecture.\n",arch);
return -EINVAL;
}
sys_info.sa_base = SAVE_AREA_BASE;
sys_info.sa_size = sizeof(struct save_area);
sys_info.arch = arch;
set_lc_mask(&sys_info.lc_mask);
rc = init_cpu_info(arch);
if (rc)
return rc;
sys_info.mem_size = mem_end;
return 0;
}
static int __init check_sdias(void)
{
if (!sclp_get_hsa_size()) {
TRACE("Could not determine HSA size\n");
return -ENODEV;
}
return 0;
}
static int __init get_mem_info(unsigned long *mem, unsigned long *end)
{
struct memblock_region *reg;
for_each_memblock(memory, reg) {
*mem += reg->size;
*end = max_t(unsigned long, *end, reg->base + reg->size);
}
return 0;
}
static void __init zcore_header_init(int arch, struct zcore_header *hdr,
unsigned long mem_size)
{
u32 prefix;
int i;
if (arch == ARCH_S390X)
hdr->arch_id = DUMP_ARCH_S390X;
else
hdr->arch_id = DUMP_ARCH_S390;
hdr->mem_size = mem_size;
hdr->rmem_size = mem_size;
hdr->mem_end = sys_info.mem_size;
hdr->num_pages = mem_size / PAGE_SIZE;
hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
for (i = 0; i < dump_save_areas.count; i++) {
prefix = dump_save_areas.areas[i]->sa.pref_reg;
hdr->real_cpu_cnt++;
if (!prefix)
continue;
hdr->lc_vec[hdr->cpu_cnt] = prefix;
hdr->cpu_cnt++;
}
}
/*
* Provide IPL parameter information block from either HSA or memory
* for future reipl
*/
static int __init zcore_reipl_init(void)
{
struct ipib_info ipib_info;
int rc;
rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
if (rc)
return rc;
if (ipib_info.ipib == 0)
return 0;
ipl_block = (void *) __get_free_page(GFP_KERNEL);
if (!ipl_block)
return -ENOMEM;
if (ipib_info.ipib < sclp_get_hsa_size())
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) ipl_block);
ipl_block = NULL;
}
return 0;
}
static int __init zcore_init(void)
{
unsigned long mem_size, mem_end;
unsigned char arch;
int rc;
mem_size = mem_end = 0;
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return -ENODATA;
if (OLDMEM_BASE)
return -ENODATA;
zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
debug_register_view(zcore_dbf, &debug_sprintf_view);
debug_set_level(zcore_dbf, 6);
TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
rc = sclp_sdias_init();
if (rc)
goto fail;
rc = check_sdias();
if (rc)
goto fail;
hsa_available = 1;
rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
if (rc)
goto fail;
#ifdef CONFIG_64BIT
if (arch == ARCH_S390) {
pr_alert("The 64-bit dump tool cannot be used for a "
"32-bit system\n");
rc = -EINVAL;
goto fail;
}
#else /* CONFIG_64BIT */
if (arch == ARCH_S390X) {
pr_alert("The 32-bit dump tool cannot be used for a "
"64-bit system\n");
rc = -EINVAL;
goto fail;
}
#endif /* CONFIG_64BIT */
rc = get_mem_info(&mem_size, &mem_end);
if (rc)
goto fail;
rc = sys_info_init(arch, mem_end);
if (rc)
goto fail;
zcore_header_init(arch, &zcore_header, mem_size);
rc = zcore_reipl_init();
if (rc)
goto fail;
zcore_dir = debugfs_create_dir("zcore" , NULL);
if (!zcore_dir) {
rc = -ENOMEM;
goto fail;
}
zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
&zcore_fops);
if (!zcore_file) {
rc = -ENOMEM;
goto fail_dir;
}
zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
NULL, &zcore_memmap_fops);
if (!zcore_memmap_file) {
rc = -ENOMEM;
goto fail_file;
}
zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
NULL, &zcore_reipl_fops);
if (!zcore_reipl_file) {
rc = -ENOMEM;
goto fail_memmap_file;
}
zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
NULL, &zcore_hsa_fops);
if (!zcore_hsa_file) {
rc = -ENOMEM;
goto fail_reipl_file;
}
return 0;
fail_reipl_file:
debugfs_remove(zcore_reipl_file);
fail_memmap_file:
debugfs_remove(zcore_memmap_file);
fail_file:
debugfs_remove(zcore_file);
fail_dir:
debugfs_remove(zcore_dir);
fail:
diag308(DIAG308_REL_HSA, NULL);
return rc;
}
static void __exit zcore_exit(void)
{
debug_unregister(zcore_dbf);
sclp_sdias_exit();
free_page((unsigned long) ipl_block);
debugfs_remove(zcore_hsa_file);
debugfs_remove(zcore_reipl_file);
debugfs_remove(zcore_memmap_file);
debugfs_remove(zcore_file);
debugfs_remove(zcore_dir);
diag308(DIAG308_REL_HSA, NULL);
}
MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");
subsys_initcall(zcore_init);
module_exit(zcore_exit);
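/*
 * Hypothetical userspace sketch (not part of this module): it pulls the
 * 4 KiB dump header from the "zcore/mem" debugfs file and checks the magic
 * number defined above. The /sys/kernel/debug mount point is an assumption.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char hdr[4096];
	uint64_t magic;
	int fd = open("/sys/kernel/debug/zcore/mem", O_RDONLY);

	if (fd < 0 || read(fd, hdr, sizeof(hdr)) != (ssize_t) sizeof(hdr))
		return 1;
	memcpy(&magic, hdr, sizeof(magic));	/* native byte order on s390 */
	printf("dump header magic %s\n",
	       magic == 0xa8190173618f23fdULL ? "ok" : "bad");
	close(fd);
	return 0;
}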

16
drivers/s390/cio/Makefile Normal file
View file

@ -0,0 +1,16 @@
#
# Makefile for the S/390 common i/o drivers
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o ccwreq.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_EADM_SCH) += eadm_sch.o
obj-$(CONFIG_SCM_BUS) += scm.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o

275
drivers/s390/cio/airq.c Normal file
View file

@ -0,0 +1,275 @@
/*
* Support for adapter interruptions
*
* Copyright IBM Corp. 1999, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
/**
* register_adapter_interrupt() - register adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
*
 * Returns 0 on success, or -EINVAL / -ENOMEM on failure.
*/
int register_adapter_interrupt(struct airq_struct *airq)
{
char dbf_txt[32];
if (!airq->handler || airq->isc > MAX_ISC)
return -EINVAL;
if (!airq->lsi_ptr) {
airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
if (!airq->lsi_ptr)
return -ENOMEM;
airq->flags |= AIRQ_PTR_ALLOCATED;
}
if (!airq->lsi_mask)
airq->lsi_mask = 0xff;
snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
CIO_TRACE_EVENT(4, dbf_txt);
isc_register(airq->isc);
spin_lock(&airq_lists_lock);
hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
spin_unlock(&airq_lists_lock);
return 0;
}
EXPORT_SYMBOL(register_adapter_interrupt);
/**
* unregister_adapter_interrupt - unregister adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
*/
void unregister_adapter_interrupt(struct airq_struct *airq)
{
char dbf_txt[32];
if (hlist_unhashed(&airq->list))
return;
snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
CIO_TRACE_EVENT(4, dbf_txt);
spin_lock(&airq_lists_lock);
hlist_del_rcu(&airq->list);
spin_unlock(&airq_lists_lock);
synchronize_rcu();
isc_unregister(airq->isc);
if (airq->flags & AIRQ_PTR_ALLOCATED) {
kfree(airq->lsi_ptr);
airq->lsi_ptr = NULL;
airq->flags &= ~AIRQ_PTR_ALLOCATED;
}
}
EXPORT_SYMBOL(unregister_adapter_interrupt);
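/*
 * Hypothetical kernel-side sketch of the registration API above; the
 * handler, the ISC value and all names are illustrative. Real users embed
 * struct airq_struct in their own per-adapter data and pick their
 * subsystem's interruption subclass.
 */
static void my_airq_handler(struct airq_struct *airq)
{
	/* called whenever *airq->lsi_ptr & airq->lsi_mask is non-zero */
}

static struct airq_struct my_airq = {
	.handler = my_airq_handler,
	.isc = 2,	/* illustrative interruption subclass */
};

static int __init my_airq_init(void)
{
	/* lsi_ptr left NULL: a local summary byte is allocated for us */
	return register_adapter_interrupt(&my_airq);
}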
static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
struct tpi_info *tpi_info;
struct airq_struct *airq;
struct hlist_head *head;
set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
head = &airq_lists[tpi_info->isc];
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
airq->handler(airq);
rcu_read_unlock();
return IRQ_HANDLED;
}
static struct irqaction airq_interrupt = {
.name = "AIO",
.handler = do_airq_interrupt,
};
void __init init_airq_interrupts(void)
{
irq_set_chip_and_handler(THIN_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
* @flags: allocation flags
*
 * Returns a pointer to an interrupt vector structure, or NULL on failure
*/
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
{
struct airq_iv *iv;
unsigned long size;
iv = kzalloc(sizeof(*iv), GFP_KERNEL);
if (!iv)
goto out;
iv->bits = bits;
size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
iv->vector = kzalloc(size, GFP_KERNEL);
if (!iv->vector)
goto out_free;
if (flags & AIRQ_IV_ALLOC) {
iv->avail = kmalloc(size, GFP_KERNEL);
if (!iv->avail)
goto out_free;
memset(iv->avail, 0xff, size);
iv->end = 0;
} else
iv->end = bits;
if (flags & AIRQ_IV_BITLOCK) {
iv->bitlock = kzalloc(size, GFP_KERNEL);
if (!iv->bitlock)
goto out_free;
}
if (flags & AIRQ_IV_PTR) {
size = bits * sizeof(unsigned long);
iv->ptr = kzalloc(size, GFP_KERNEL);
if (!iv->ptr)
goto out_free;
}
if (flags & AIRQ_IV_DATA) {
size = bits * sizeof(unsigned int);
iv->data = kzalloc(size, GFP_KERNEL);
if (!iv->data)
goto out_free;
}
spin_lock_init(&iv->lock);
return iv;
out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
kfree(iv->vector);
kfree(iv);
out:
return NULL;
}
EXPORT_SYMBOL(airq_iv_create);
/**
* airq_iv_release - release an interrupt vector
* @iv: pointer to interrupt vector structure
*/
void airq_iv_release(struct airq_iv *iv)
{
kfree(iv->data);
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->vector);
kfree(iv->avail);
kfree(iv);
}
EXPORT_SYMBOL(airq_iv_release);
/**
* airq_iv_alloc - allocate irq bits from an interrupt vector
* @iv: pointer to an interrupt vector structure
* @num: number of consecutive irq bits to allocate
*
* Returns the bit number of the first irq in the allocated block of irqs,
* or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
* specified
*/
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
unsigned long bit, i, flags;
if (!iv->avail || num == 0)
return -1UL;
spin_lock_irqsave(&iv->lock, flags);
bit = find_first_bit_inv(iv->avail, iv->bits);
while (bit + num <= iv->bits) {
for (i = 1; i < num; i++)
if (!test_bit_inv(bit + i, iv->avail))
break;
if (i >= num) {
/* Found a suitable block of irqs */
for (i = 0; i < num; i++)
clear_bit_inv(bit + i, iv->avail);
if (bit + num >= iv->end)
iv->end = bit + num + 1;
break;
}
bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
}
if (bit + num > iv->bits)
bit = -1UL;
spin_unlock_irqrestore(&iv->lock, flags);
return bit;
}
EXPORT_SYMBOL(airq_iv_alloc);
/**
* airq_iv_free - free irq bits of an interrupt vector
* @iv: pointer to interrupt vector structure
* @bit: number of the first irq bit to free
* @num: number of consecutive irq bits to free
*/
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
unsigned long i, flags;
if (!iv->avail || num == 0)
return;
spin_lock_irqsave(&iv->lock, flags);
for (i = 0; i < num; i++) {
/* Clear (possibly left over) interrupt bit */
clear_bit_inv(bit + i, iv->vector);
/* Make the bit positions available again */
set_bit_inv(bit + i, iv->avail);
}
if (bit + num >= iv->end) {
/* Find new end of bit-field */
while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
iv->end--;
}
spin_unlock_irqrestore(&iv->lock, flags);
}
EXPORT_SYMBOL(airq_iv_free);
/**
* airq_iv_scan - scan interrupt vector for non-zero bits
* @iv: pointer to interrupt vector structure
* @start: bit number to start the search
* @end: bit number to end the search
*
* Returns the bit number of the next non-zero interrupt bit, or
 * -1UL if the scan completed without finding any more non-zero bits.
*/
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end)
{
unsigned long bit;
	/* Find next non-zero bit starting from 'start'. */
bit = find_next_bit_inv(iv->vector, end, start);
if (bit >= end)
return -1UL;
clear_bit_inv(bit, iv->vector);
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
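/*
 * Hypothetical sketch tying the airq_iv helpers together: create a vector
 * with managed bit allocation, claim a bit per interrupt source, and drain
 * pending bits from the adapter interrupt handler. Sizes and names are
 * illustrative.
 */
static struct airq_iv *my_iv;

static int my_iv_setup(void)
{
	unsigned long bit;

	my_iv = airq_iv_create(64, AIRQ_IV_ALLOC);	/* up to 64 sources */
	if (!my_iv)
		return -ENOMEM;
	bit = airq_iv_alloc(my_iv, 1);			/* claim one bit */
	if (bit == -1UL) {
		airq_iv_release(my_iv);
		return -ENOSPC;
	}
	return 0;
}

static void my_iv_handler(struct airq_struct *airq)
{
	unsigned long bit;

	/* airq_iv_scan() clears and returns each pending bit in turn */
	for (bit = airq_iv_scan(my_iv, 0, my_iv->end); bit != -1UL;
	     bit = airq_iv_scan(my_iv, bit + 1, my_iv->end))
		; /* dispatch interrupt source 'bit' here */
}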

421
drivers/s390/cio/blacklist.c Normal file
View file

@ -0,0 +1,421 @@
/*
* S/390 common I/O routines -- blacklisting of specific devices
*
* Copyright IBM Corp. 1999, 2013
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ipl.h>
#include "blacklist.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
/*
* "Blacklisting" of certain devices:
 * Device numbers given on the command line as cio_ignore=... won't be known
* to Linux.
*
* These can be single devices or ranges of devices
*/
/* 65536 bits for each set to indicate if a devno is blacklisted or not */
#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
typedef enum {add, free} range_action;
/*
* Function: blacklist_range
* (Un-)blacklist the devices from-to
*/
static int blacklist_range(range_action action, unsigned int from_ssid,
unsigned int to_ssid, unsigned int from,
unsigned int to, int msgtrigger)
{
if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
if (msgtrigger)
pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
"range for cio_ignore\n", from_ssid, from,
to_ssid, to);
return 1;
}
while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
(from <= to))) {
if (action == add)
set_bit(from, bl_dev[from_ssid]);
else
clear_bit(from, bl_dev[from_ssid]);
from++;
if (from > __MAX_SUBCHANNEL) {
from_ssid++;
from = 0;
}
}
return 0;
}
static int pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
diff = 0;
*val = 0;
while (diff <= max_digit) {
int value = hex_to_bin(**cp);
if (value < 0)
break;
*val = *val * 16 + value;
(*cp)++;
diff++;
}
if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
return 1;
return 0;
}
static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
unsigned int *devno, int msgtrigger)
{
char *str_work;
int val, rc, ret;
rc = 1;
if (*str == '\0')
goto out;
/* old style */
str_work = str;
val = simple_strtoul(str, &str_work, 16);
if (*str_work == '\0') {
if (val <= __MAX_SUBCHANNEL) {
*devno = val;
*ssid = 0;
*cssid = 0;
rc = 0;
}
goto out;
}
/* new style */
str_work = str;
ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
if (ret || (str_work[0] != '\0'))
goto out;
rc = 0;
out:
if (rc && msgtrigger)
pr_warning("%s is not a valid device for the cio_ignore "
"kernel parameter\n", str);
return rc;
}
static int blacklist_parse_parameters(char *str, range_action action,
int msgtrigger)
{
unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
int rc, totalrc;
char *parm;
range_action ra;
totalrc = 0;
while ((parm = strsep(&str, ","))) {
rc = 0;
ra = action;
if (*parm == '!') {
if (ra == add)
ra = free;
else
ra = add;
parm++;
}
if (strcmp(parm, "all") == 0) {
from_cssid = 0;
from_ssid = 0;
from = 0;
to_cssid = __MAX_CSSID;
to_ssid = __MAX_SSID;
to = __MAX_SUBCHANNEL;
} else if (strcmp(parm, "ipldev") == 0) {
if (ipl_info.type == IPL_TYPE_CCW) {
from_cssid = 0;
from_ssid = ipl_info.data.ccw.dev_id.ssid;
from = ipl_info.data.ccw.dev_id.devno;
} else if (ipl_info.type == IPL_TYPE_FCP ||
ipl_info.type == IPL_TYPE_FCP_DUMP) {
from_cssid = 0;
from_ssid = ipl_info.data.fcp.dev_id.ssid;
from = ipl_info.data.fcp.dev_id.devno;
} else {
continue;
}
to_cssid = from_cssid;
to_ssid = from_ssid;
to = from;
} else if (strcmp(parm, "condev") == 0) {
if (console_devno == -1)
continue;
from_cssid = to_cssid = 0;
from_ssid = to_ssid = 0;
from = to = console_devno;
} else {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from, msgtrigger);
if (!rc) {
if (parm != NULL)
rc = parse_busid(parm, &to_cssid,
&to_ssid, &to,
msgtrigger);
else {
to_cssid = from_cssid;
to_ssid = from_ssid;
to = from;
}
}
}
if (!rc) {
rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
msgtrigger);
if (rc)
totalrc = -EINVAL;
} else
totalrc = -EINVAL;
}
return totalrc;
}
static int __init
blacklist_setup (char *str)
{
CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
if (blacklist_parse_parameters(str, add, 1))
return 0;
return 1;
}
__setup ("cio_ignore=", blacklist_setup);
/* Checking if devices are blacklisted */
/*
* Function: is_blacklisted
 * Returns 1 if the given device number can be found in the blacklist,
* otherwise 0.
* Used by validate_subchannel()
*/
int
is_blacklisted (int ssid, int devno)
{
return test_bit (devno, bl_dev[ssid]);
}
#ifdef CONFIG_PROC_FS
/*
* Function: blacklist_parse_proc_parameters
* parse the stuff which is piped to /proc/cio_ignore
*/
static int blacklist_parse_proc_parameters(char *buf)
{
int rc;
char *parm;
parm = strsep(&buf, " ");
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
css_schedule_eval_all_unreg(0);
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
return rc;
}
/* Iterator struct for all devices. */
struct ccwdev_iter {
int devno;
int ssid;
int in_range;
};
static void *
cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
struct ccwdev_iter *iter = s->private;
if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
memset(iter, 0, sizeof(*iter));
iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
return iter;
}
static void
cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
{
}
static void *
cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
{
struct ccwdev_iter *iter;
if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
iter = it;
if (iter->devno == __MAX_SUBCHANNEL) {
iter->devno = 0;
iter->ssid++;
if (iter->ssid > __MAX_SSID)
return NULL;
} else
iter->devno++;
(*offset)++;
return iter;
}
static int
cio_ignore_proc_seq_show(struct seq_file *s, void *it)
{
struct ccwdev_iter *iter;
iter = it;
if (!is_blacklisted(iter->ssid, iter->devno))
/* Not blacklisted, nothing to output. */
return 0;
if (!iter->in_range) {
/* First device in range. */
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1))
/* Singular device. */
return seq_printf(s, "0.%x.%04x\n",
iter->ssid, iter->devno);
iter->in_range = 1;
return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
}
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1)) {
/* Last device in range. */
iter->in_range = 0;
return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
}
return 0;
}
static ssize_t
cio_ignore_write(struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
char *buf;
ssize_t rc, ret, i;
if (*offset)
return -EINVAL;
if (user_len > 65536)
user_len = 65536;
buf = vzalloc(user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
rc = -EFAULT;
goto out_free;
}
i = user_len - 1;
while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
buf[i] = '\0';
i--;
}
ret = blacklist_parse_proc_parameters(buf);
if (ret)
rc = ret;
else
rc = user_len;
out_free:
vfree (buf);
return rc;
}
static const struct seq_operations cio_ignore_proc_seq_ops = {
.start = cio_ignore_proc_seq_start,
.stop = cio_ignore_proc_seq_stop,
.next = cio_ignore_proc_seq_next,
.show = cio_ignore_proc_seq_show,
};
static int
cio_ignore_proc_open(struct inode *inode, struct file *file)
{
return seq_open_private(file, &cio_ignore_proc_seq_ops,
sizeof(struct ccwdev_iter));
}
static const struct file_operations cio_ignore_proc_fops = {
.open = cio_ignore_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.write = cio_ignore_write,
};
static int
cio_ignore_proc_init (void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
&cio_ignore_proc_fops);
if (!entry)
return -ENOENT;
return 0;
}
__initcall (cio_ignore_proc_init);
#endif /* CONFIG_PROC_FS */
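/*
 * Hypothetical userspace sketch of the /proc/cio_ignore interface parsed
 * above: "add" and "free" take the same device ranges as the cio_ignore=
 * kernel parameter, while "purge" drops already-known devices that are now
 * on the blacklist. The example range is illustrative.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int cio_ignore_ctl(const char *cmd)
{
	int fd = open("/proc/cio_ignore", O_WRONLY);
	int rc;

	if (fd < 0)
		return -1;
	rc = write(fd, cmd, strlen(cmd)) < 0 ? -1 : 0;
	close(fd);
	return rc;
}

/* e.g. cio_ignore_ctl("add 0.0.0200-0.0.02ff") or cio_ignore_ctl("purge") */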

6
drivers/s390/cio/blacklist.h Normal file
View file

@ -0,0 +1,6 @@
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H
extern int is_blacklisted (int ssid, int devno);
#endif

641
drivers/s390/cio/ccwgroup.c Normal file
View file

@ -0,0 +1,641 @@
/*
* bus driver for ccwgroup
*
* Copyright IBM Corp. 2002, 2012
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include "device.h"
#define CCW_BUS_ID_SIZE 10
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
* one of its functions, namely the translation of single subchannels
* to devices that use multiple subchannels.
*/
static struct bus_type ccwgroup_bus_type;
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
int i;
char str[8];
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
}
}
/*
* Remove references from ccw devices to ccw group device and from
* ccw group device to ccw devices.
*/
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
struct ccw_device *cdev;
int i;
for (i = 0; i < gdev->count; i++) {
cdev = gdev->cdev[i];
if (!cdev)
continue;
spin_lock_irq(cdev->ccwlock);
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irq(cdev->ccwlock);
gdev->cdev[i] = NULL;
put_device(&cdev->dev);
}
}
/**
* ccwgroup_set_online() - enable a ccwgroup device
* @gdev: target ccwgroup device
*
* This function attempts to put the ccwgroup device into the online state.
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_ONLINE)
goto out;
if (gdrv->set_online)
ret = gdrv->set_online(gdev);
if (ret)
goto out;
gdev->state = CCWGROUP_ONLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
EXPORT_SYMBOL(ccwgroup_set_online);
/**
* ccwgroup_set_offline() - disable a ccwgroup device
* @gdev: target ccwgroup device
*
* This function attempts to put the ccwgroup device into the offline state.
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
if (gdrv->set_offline)
ret = gdrv->set_offline(gdev);
if (ret)
goto out;
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
EXPORT_SYMBOL(ccwgroup_set_offline);
static ssize_t ccwgroup_online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
unsigned long value;
int ret;
device_lock(dev);
if (!dev->driver) {
ret = -EINVAL;
goto out;
}
ret = kstrtoul(buf, 0, &value);
if (ret)
goto out;
if (value == 1)
ret = ccwgroup_set_online(gdev);
else if (value == 0)
ret = ccwgroup_set_offline(gdev);
else
ret = -EINVAL;
out:
device_unlock(dev);
return (ret == 0) ? count : ret;
}
static ssize_t ccwgroup_online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
int online;
online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}
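/*
 * Illustrative sysfs usage (the device path is a made-up example):
 * writing "1" or "0" to the attribute drives the group device online or
 * offline via the store method above:
 *
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f5f0/online
 */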
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentally created. Saves memory :)
*/
static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
__ccwgroup_remove_cdev_refs(gdev);
}
mutex_unlock(&gdev->reg_mutex);
}
static ssize_t ccwgroup_ungroup_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
int rc = 0;
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state != CCWGROUP_OFFLINE) {
rc = -EINVAL;
goto out;
}
if (device_remove_file_self(dev, attr))
ccwgroup_ungroup(gdev);
else
rc = -ENODEV;
out:
if (rc) {
/* Release onoff "lock" when ungrouping failed. */
atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
}
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
static struct attribute *ccwgroup_attrs[] = {
&dev_attr_online.attr,
&dev_attr_ungroup.attr,
NULL,
};
static struct attribute_group ccwgroup_attr_group = {
.attrs = ccwgroup_attrs,
};
static const struct attribute_group *ccwgroup_attr_groups[] = {
&ccwgroup_attr_group,
NULL,
};
static void ccwgroup_ungroup_workfn(struct work_struct *work)
{
struct ccwgroup_device *gdev =
container_of(work, struct ccwgroup_device, ungroup_work);
ccwgroup_ungroup(gdev);
put_device(&gdev->dev);
}
static void ccwgroup_release(struct device *dev)
{
kfree(to_ccwgroupdev(dev));
}
static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[8];
int i, rc;
for (i = 0; i < gdev->count; i++) {
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
&gdev->dev.kobj, "group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
rc = sysfs_create_link(&gdev->dev.kobj,
&gdev->cdev[i]->dev.kobj, str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
for (i = 0; i < gdev->count; i++)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
return 0;
}
static int __get_next_id(const char **buf, struct ccw_dev_id *id)
{
unsigned int cssid, ssid, devno;
int ret = 0, len;
char *start, *end;
start = (char *)*buf;
end = strchr(start, ',');
if (!end) {
/* Last entry. Strip trailing newline, if applicable. */
end = strchr(start, '\n');
if (end)
*end = '\0';
len = strlen(start) + 1;
} else {
len = end - start + 1;
end++;
}
if (len <= CCW_BUS_ID_SIZE) {
if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
ret = -EINVAL;
} else
ret = -EINVAL;
if (!ret) {
id->ssid = ssid;
id->devno = devno;
}
*buf = end;
return ret;
}
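/*
 * Example (illustrative): given buf = "0.0.8000,0.0.8001\n", the first
 * call consumes "0.0.8000" and leaves *buf pointing at "0.0.8001\n";
 * the second call strips the trailing newline and consumes the rest.
 * A token longer than CCW_BUS_ID_SIZE or one that does not match the
 * cssid.ssid.devno pattern yields -EINVAL.
 */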
/**
* ccwgroup_create_dev() - create and register a ccw group device
* @parent: parent device for the new device
* @gdrv: driver for the new group device
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
* Create and register a new ccw group device as a child of @parent. Slave
* devices are obtained from the list of bus ids given in @buf.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf)
{
struct ccwgroup_device *gdev;
struct ccw_dev_id dev_id;
int rc, i;
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
GFP_KERNEL);
if (!gdev)
return -ENOMEM;
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
gdev->dev.parent = parent;
gdev->dev.release = ccwgroup_release;
device_initialize(&gdev->dev);
for (i = 0; i < num_devices && buf; i++) {
rc = __get_next_id(&buf, &dev_id);
if (rc != 0)
goto error;
gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
/*
* All devices have to be of the same type in
* order to be grouped.
*/
if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
goto error;
}
/* Don't allow a device to belong to more than one group. */
spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
spin_unlock_irq(gdev->cdev[i]->ccwlock);
rc = -EINVAL;
goto error;
}
dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
spin_unlock_irq(gdev->cdev[i]->ccwlock);
}
/* Check for sufficient number of bus ids. */
if (i < num_devices) {
rc = -EINVAL;
goto error;
}
/* Check for trailing stuff. */
if (i == num_devices && strlen(buf) > 0) {
rc = -EINVAL;
goto error;
}
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
gdev->dev.groups = ccwgroup_attr_groups;
if (gdrv) {
gdev->dev.driver = &gdrv->driver;
rc = gdrv->setup ? gdrv->setup(gdev) : 0;
if (rc)
goto error;
}
rc = device_add(&gdev->dev);
if (rc)
goto error;
rc = __ccwgroup_create_symlinks(gdev);
if (rc) {
device_del(&gdev->dev);
goto error;
}
mutex_unlock(&gdev->reg_mutex);
return 0;
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i] = NULL;
}
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return rc;
}
EXPORT_SYMBOL(ccwgroup_create_dev);
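/*
 * Sketch of a typical caller (hypothetical driver, for illustration
 * only - example_root_dev, example_gdrv and EXAMPLE_NR_DEVS are
 * placeholders, not symbols from this file). Group drivers usually
 * wire this up behind a driver-level "group" sysfs attribute:
 *
 *	static ssize_t group_store(struct device_driver *drv,
 *				   const char *buf, size_t count)
 *	{
 *		int rc;
 *
 *		rc = ccwgroup_create_dev(example_root_dev, &example_gdrv,
 *					 EXAMPLE_NR_DEVS, buf);
 *		return rc ? rc : count;
 *	}
 */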
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(data);
if (action == BUS_NOTIFY_UNBIND_DRIVER) {
get_device(&gdev->dev);
schedule_work(&gdev->ungroup_work);
}
return NOTIFY_OK;
}
static struct notifier_block ccwgroup_nb = {
.notifier_call = ccwgroup_notifier
};
static int __init init_ccwgroup(void)
{
int ret;
ret = bus_register(&ccwgroup_bus_type);
if (ret)
return ret;
ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
if (ret)
bus_unregister(&ccwgroup_bus_type);
return ret;
}
static void __exit cleanup_ccwgroup(void)
{
bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
bus_unregister(&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
static int ccwgroup_remove(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return 0;
if (gdrv->remove)
gdrv->remove(gdev);
return 0;
}
static void ccwgroup_shutdown(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return;
if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
static int ccwgroup_pm_prepare(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
/* Fail while device is being set online/offline. */
if (atomic_read(&gdev->onoff))
return -EAGAIN;
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}
static void ccwgroup_pm_complete(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return;
if (gdrv->complete)
gdrv->complete(gdev);
}
static int ccwgroup_pm_freeze(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}
static int ccwgroup_pm_thaw(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}
static int ccwgroup_pm_restore(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
return 0;
return gdrv->restore ? gdrv->restore(gdev) : 0;
}
static const struct dev_pm_ops ccwgroup_pm_ops = {
.prepare = ccwgroup_pm_prepare,
.complete = ccwgroup_pm_complete,
.freeze = ccwgroup_pm_freeze,
.thaw = ccwgroup_pm_thaw,
.restore = ccwgroup_pm_restore,
};
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
.pm = &ccwgroup_pm_ops,
};
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
*
* This function is mainly a wrapper around driver_register().
*/
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
return driver_register(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_register);
static int __ccwgroup_match_all(struct device *dev, void *data)
{
return 1;
}
/**
* ccwgroup_driver_unregister() - deregister a ccw group driver
* @cdriver: driver to be deregistered
*
* This function is mainly a wrapper around driver_unregister().
*/
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
__ccwgroup_match_all))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
ccwgroup_ungroup(gdev);
put_device(dev);
}
driver_unregister(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
* This is a dummy probe function for ccw devices that are slave devices in
* a ccw group device.
* Returns:
* always %0
*/
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
* @cdev: ccw device to be removed
*
* This is a remove function for ccw devices that are slave devices in a ccw
* group device. It sets the ccw device offline and also deregisters the
* embedding ccw group device.
*/
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
spin_lock_irq(cdev->ccwlock);
gdev = dev_get_drvdata(&cdev->dev);
if (!gdev) {
spin_unlock_irq(cdev->ccwlock);
return;
}
/* Get ccwgroup device reference for local processing. */
get_device(&gdev->dev);
spin_unlock_irq(cdev->ccwlock);
/* Unregister group device. */
ccwgroup_ungroup(gdev);
/* Release ccwgroup device reference for local processing. */
put_device(&gdev->dev);
}
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
MODULE_LICENSE("GPL");

367
drivers/s390/cio/ccwreq.c Normal file
View file

@ -0,0 +1,367 @@
/*
* Handling of internal CCW device requests.
*
* Copyright IBM Corp. 2009, 2011
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"
/**
* lpm_adjust - adjust path mask
* @lpm: path mask to adjust
* @mask: mask of available paths
*
* Shift @lpm right until @lpm and @mask have at least one bit in common or
* until @lpm is zero. Return the resulting lpm.
*/
int lpm_adjust(int lpm, int mask)
{
while (lpm && ((lpm & mask) == 0))
lpm >>= 1;
return lpm;
}
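/*
 * Example (illustrative): lpm_adjust(0x80, 0x29) returns 0x20 - the
 * candidate bit is shifted right until it overlaps the available-path
 * mask 0x29 (bits 0x20, 0x08 and 0x01). Likewise lpm_adjust(0x02, 0x29)
 * returns 0x01, and lpm_adjust(0x04, 0x00) returns 0 (no path left).
 */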
/*
* Adjust path mask to use next path and reset retry count. Return resulting
* path mask.
*/
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
if (!req->singlepath) {
req->mask = 0;
goto out;
}
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask >> 1, req->lpm);
out:
return req->mask;
}
/*
* Clean up device state and report to callback.
*/
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
struct ccw_request *req = &cdev->private->req;
if (req->done)
return;
req->done = 1;
ccw_device_set_timeout(cdev, 0);
memset(&cdev->private->irb, 0, sizeof(struct irb));
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
}
/*
* (Re-)Start the operation until retries and paths are exhausted.
*/
static void ccwreq_do(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw1 *cp = req->cp;
int rc = -EACCES;
while (req->mask) {
if (req->retries-- == 0) {
/* Retries exhausted, try next path. */
ccwreq_next_path(cdev);
continue;
}
/* Perform start function. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
/* I/O started successfully. */
ccw_device_set_timeout(cdev, req->timeout);
return;
}
if (rc == -ENODEV) {
/* Permanent device error. */
break;
}
if (rc == -EACCES) {
/* Permanent path error. */
ccwreq_next_path(cdev);
continue;
}
/* Temporary improper status. */
rc = cio_clear(sch);
if (rc)
break;
return;
}
ccwreq_stop(cdev, rc);
}
/**
* ccw_request_start - perform I/O request
* @cdev: ccw device
*
* Perform the I/O request specified by cdev->req.
*/
void ccw_request_start(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
if (req->singlepath) {
/* Try all paths twice to counter link flapping. */
req->mask = 0x8080;
} else
req->mask = req->lpm;
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask, req->lpm);
req->drc = 0;
req->done = 0;
req->cancel = 0;
if (!req->mask)
goto out_nopath;
ccwreq_do(cdev);
return;
out_nopath:
ccwreq_stop(cdev, -EACCES);
}
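/*
 * Sketch of how an internal request is typically issued (illustrative;
 * the concrete values and example_callback are placeholders, not taken
 * from this file):
 *
 *	struct ccw_request *req = &cdev->private->req;
 *
 *	memset(req, 0, sizeof(*req));
 *	req->cp = cp;				/* channel program */
 *	req->timeout = 10 * HZ;			/* per start attempt */
 *	req->maxretries = 5;			/* retries per path */
 *	req->lpm = sch->schib.pmcw.pam;		/* candidate paths */
 *	req->singlepath = 1;			/* one path at a time */
 *	req->callback = example_callback;	/* completion handler */
 *	ccw_request_start(cdev);
 */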
/**
* ccw_request_cancel - cancel running I/O request
* @cdev: ccw device
*
* Cancel the I/O request specified by cdev->req. Return non-zero if request
* has already finished, zero otherwise.
*/
int ccw_request_cancel(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
int rc;
if (req->done)
return 1;
req->cancel = 1;
rc = cio_clear(sch);
if (rc)
ccwreq_stop(cdev, rc);
return 0;
}
/*
* Return the status of the internal I/O started on the specified ccw device.
* Perform BASIC SENSE if required.
*/
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
struct irb *irb = &cdev->private->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
enum uc_todo todo;
/* Perform BASIC SENSE if needed. */
if (ccw_device_accumulate_and_sense(cdev, lcirb))
return IO_RUNNING;
/* Check for halt/clear interrupt. */
if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
return IO_KILLED;
/* Check for path error. */
if (scsw->cc == 3 || scsw->pno)
return IO_PATH_ERROR;
/* Handle BASIC SENSE data. */
if (irb->esw.esw0.erw.cons) {
CIO_TRACE_EVENT(2, "sensedata");
CIO_HEX_EVENT(2, &cdev->private->dev_id,
sizeof(struct ccw_dev_id));
CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
/* Ask the driver what to do */
if (cdev->drv && cdev->drv->uc_handler) {
todo = cdev->drv->uc_handler(cdev, lcirb);
CIO_TRACE_EVENT(2, "uc_response");
CIO_HEX_EVENT(2, &todo, sizeof(todo));
switch (todo) {
case UC_TODO_RETRY:
return IO_STATUS_ERROR;
case UC_TODO_RETRY_ON_NEW_PATH:
return IO_PATH_ERROR;
case UC_TODO_STOP:
return IO_REJECTED;
default:
return IO_STATUS_ERROR;
}
}
/* Assume that unexpected SENSE data implies an error. */
return IO_STATUS_ERROR;
}
/* Check for channel errors. */
if (scsw->cstat != 0)
return IO_STATUS_ERROR;
/* Check for device errors. */
if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
return IO_STATUS_ERROR;
/* Check for final state. */
if (!(scsw->dstat & DEV_STAT_DEV_END))
return IO_RUNNING;
/* Check for other improper status. */
if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
return IO_STATUS_ERROR;
return IO_DONE;
}
/*
* Log ccw request status.
*/
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
struct ccw_request *req = &cdev->private->req;
struct {
struct ccw_dev_id dev_id;
u16 retries;
u8 lpm;
u8 status;
} __attribute__ ((packed)) data;
data.dev_id = cdev->private->dev_id;
data.retries = req->retries;
data.lpm = (u8) req->mask;
data.status = (u8) status;
CIO_TRACE_EVENT(2, "reqstat");
CIO_HEX_EVENT(2, &data, sizeof(data));
}
/**
* ccw_request_handler - interrupt handler for I/O request procedure.
* @cdev: ccw device
*
* Handle interrupt during I/O request procedure.
*/
void ccw_request_handler(struct ccw_device *cdev)
{
struct irb *irb = this_cpu_ptr(&cio_irb);
struct ccw_request *req = &cdev->private->req;
enum io_status status;
int rc = -EOPNOTSUPP;
/* Check status of I/O request. */
status = ccwreq_status(cdev, irb);
if (req->filter)
status = req->filter(cdev, req->data, irb, status);
if (status != IO_RUNNING)
ccw_device_set_timeout(cdev, 0);
if (status != IO_DONE && status != IO_RUNNING)
ccwreq_log_status(cdev, status);
switch (status) {
case IO_DONE:
break;
case IO_RUNNING:
return;
case IO_REJECTED:
goto err;
case IO_PATH_ERROR:
goto out_next_path;
case IO_STATUS_ERROR:
goto out_restart;
case IO_KILLED:
/* Check if request was cancelled on purpose. */
if (req->cancel) {
rc = -EIO;
goto err;
}
goto out_restart;
}
/* Check back with request initiator. */
if (!req->check)
goto out;
switch (req->check(cdev, req->data)) {
case 0:
break;
case -EAGAIN:
goto out_restart;
case -EACCES:
goto out_next_path;
default:
goto err;
}
out:
ccwreq_stop(cdev, 0);
return;
out_next_path:
/* Try next path and restart I/O. */
if (!ccwreq_next_path(cdev)) {
rc = -EACCES;
goto err;
}
out_restart:
/* Restart. */
ccwreq_do(cdev);
return;
err:
ccwreq_stop(cdev, rc);
}
/**
* ccw_request_timeout - timeout handler for I/O request procedure
* @cdev: ccw device
*
* Handle timeout during I/O request procedure.
*/
void ccw_request_timeout(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
int rc = -ENODEV, chp;
if (cio_update_schib(sch))
goto err;
for (chp = 0; chp < 8; chp++) {
if ((0x80 >> chp) & sch->schib.pmcw.lpum)
pr_warning("%s: No interrupt was received within %lus "
"(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
dev_name(&cdev->dev), req->timeout / HZ,
scsw_cstat(&sch->schib.scsw),
scsw_dstat(&sch->schib.scsw),
sch->schid.cssid,
sch->schib.pmcw.chpid[chp]);
}
if (!ccwreq_next_path(cdev)) {
/* set the final return code for this request */
req->drc = -ETIME;
}
rc = cio_clear(sch);
if (rc)
goto err;
return;
err:
ccwreq_stop(cdev, rc);
}
/**
* ccw_request_notoper - notoper handler for I/O request procedure
* @cdev: ccw device
*
* Handle notoper during I/O request procedure.
*/
void ccw_request_notoper(struct ccw_device *cdev)
{
ccwreq_stop(cdev, -ENODEV);
}

792
drivers/s390/cio/chp.c Normal file
View file

@ -0,0 +1,792 @@
/*
* Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "ioasm.h"
#include "cio_debug.h"
#include "chp.h"
#define to_channelpath(device) container_of(device, struct channel_path, dev)
#define CHP_INFO_UPDATE_INTERVAL 1*HZ
enum cfg_task_t {
cfg_none,
cfg_configure,
cfg_deconfigure
};
/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
static DEFINE_MUTEX(cfg_lock);
static int cfg_busy;
/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
static DEFINE_MUTEX(info_lock);
/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;
/* Workqueue to perform pending configure tasks. */
static struct workqueue_struct *chp_wq;
static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
chpid_to_chp(chpid)->state = onoff;
}
/* On success return 0 if channel-path is varied offline, 1 if it is varied
* online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}
/**
* chp_get_sch_opm - return opm for subchannel
* @sch: subchannel
*
* Calculate and return the operational path mask (opm) based on the chpids
* used by the subchannel and the status of the associated channel-paths.
*/
u8 chp_get_sch_opm(struct subchannel *sch)
{
struct chp_id chpid;
int opm;
int i;
opm = 0;
chp_id_init(&chpid);
for (i = 0; i < 8; i++) {
opm <<= 1;
chpid.id = sch->schib.pmcw.chpid[i];
if (chp_get_status(chpid) != 0)
opm |= 1;
}
return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);
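/*
 * Example (illustrative): with all eight chpid positions populated and
 * only the chpid in position 1 varied offline (status 0), the loop
 * above yields opm = 0xbf - bit 0x40 cleared, all other bits set. Note
 * that unregistered chpids (-ENODEV) also set their bit, since the
 * test is merely "status != 0".
 */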
/**
* chp_is_registered - check if a channel-path is registered
* @chpid: channel-path ID
*
* Return non-zero if a channel-path with the given chpid is registered,
* zero otherwise.
*/
int chp_is_registered(struct chp_id chpid)
{
return chpid_to_chp(chpid) != NULL;
}
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
*/
static int s390_vary_chpid(struct chp_id chpid, int on)
{
char dbf_text[15];
int status;
sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
chpid.id);
CIO_TRACE_EVENT(2, dbf_text);
status = chp_get_status(chpid);
if (!on && !status)
return 0;
set_chp_logically_online(chpid, on);
chsc_chp_vary(chpid, on);
return 0;
}
/*
* Channel measurement related functions
*/
static ssize_t chp_measurement_chars_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct device *device;
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
if (!chp->cmg_chars)
return 0;
return memory_read_from_buffer(buf, count, &off,
chp->cmg_chars, sizeof(struct cmg_chars));
}
static struct bin_attribute chp_measurement_chars_attr = {
.attr = {
.name = "measurement_chars",
.mode = S_IRUSR,
},
.size = sizeof(struct cmg_chars),
.read = chp_measurement_chars_read,
};
static void chp_measurement_copy_block(struct cmg_entry *buf,
struct channel_subsystem *css,
struct chp_id chpid)
{
void *area;
struct cmg_entry *entry, reference_buf;
int idx;
if (chpid.id < 128) {
area = css->cub_addr1;
idx = chpid.id;
} else {
area = css->cub_addr2;
idx = chpid.id - 128;
}
entry = area + (idx * sizeof(struct cmg_entry));
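/*
 * The hardware may update the measurement block concurrently. Copy the
 * entry and re-read it into a reference buffer; retry until the first
 * value word matches, i.e. a consistent snapshot was obtained.
 */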
do {
memcpy(buf, entry, sizeof(*entry));
memcpy(&reference_buf, entry, sizeof(*entry));
} while (reference_buf.values[0] != buf->values[0]);
}
static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct channel_subsystem *css;
struct device *device;
unsigned int size;
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
css = to_css(chp->dev.parent);
size = sizeof(struct cmg_entry);
/* Only allow single reads. */
if (off || count < size)
return 0;
chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
count = size;
return count;
}
static struct bin_attribute chp_measurement_attr = {
.attr = {
.name = "measurement",
.mode = S_IRUSR,
},
.size = sizeof(struct cmg_entry),
.read = chp_measurement_read,
};
void chp_remove_cmg_attr(struct channel_path *chp)
{
device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}
int chp_add_cmg_attr(struct channel_path *chp)
{
int ret;
ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
if (ret)
return ret;
ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
if (ret)
device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
return ret;
}
/*
* Files for the channel path entries.
*/
static ssize_t chp_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
int status;
mutex_lock(&chp->lock);
status = chp->state;
mutex_unlock(&chp->lock);
return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}
static ssize_t chp_status_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_path *cp = to_channelpath(dev);
char cmd[10];
int num_args;
int error;
num_args = sscanf(buf, "%5s", cmd);
if (!num_args)
return count;
if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);
mutex_unlock(&cp->lock);
} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 0);
mutex_unlock(&cp->lock);
} else
error = -EINVAL;
return error < 0 ? error : count;
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
static ssize_t chp_configure_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *cp;
int status;
cp = to_channelpath(dev);
status = chp_info_get_status(cp->chpid);
if (status < 0)
return status;
return snprintf(buf, PAGE_SIZE, "%d\n", status);
}
static int cfg_wait_idle(void);
static ssize_t chp_configure_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_path *cp;
int val;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
cp = to_channelpath(dev);
chp_cfg_schedule(cp->chpid, val);
cfg_wait_idle();
return count;
}
static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
u8 type;
mutex_lock(&chp->lock);
type = chp->desc.desc;
mutex_unlock(&chp->lock);
return sprintf(buf, "%x\n", type);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
if (chp->cmg == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
return sprintf(buf, "%x\n", chp->cmg);
}
static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
static ssize_t chp_shared_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
if (chp->shared == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
return sprintf(buf, "%x\n", chp->shared);
}
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
ssize_t rc;
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
else
rc = 0;
mutex_unlock(&chp->lock);
return rc;
}
static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
static ssize_t chp_chid_external_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
ssize_t rc;
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
else
rc = 0;
mutex_unlock(&chp->lock);
return rc;
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
&dev_attr_cmg.attr,
&dev_attr_shared.attr,
&dev_attr_chid.attr,
&dev_attr_chid_external.attr,
NULL,
};
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
&chp_attr_group,
NULL,
};
static void chp_release(struct device *dev)
{
struct channel_path *cp;
cp = to_channelpath(dev);
kfree(cp);
}
/**
* chp_update_desc - update channel-path description
* @chp: channel-path
*
* Update the channel-path description of the specified channel-path.
* Return zero on success, non-zero otherwise.
*/
int chp_update_desc(struct channel_path *chp)
{
int rc;
rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
if (rc)
return rc;
rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
return rc;
}
/**
* chp_new - register a new channel-path
* @chpid: channel-path ID
*
* Create and register data structure representing new channel-path. Return
* zero on success, non-zero otherwise.
*/
int chp_new(struct chp_id chpid)
{
struct channel_path *chp;
int ret;
if (chp_is_registered(chpid))
return 0;
chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
if (!chp)
return -ENOMEM;
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
ret = -ENODEV;
goto out_free;
}
/* Get channel-measurement characteristics. */
if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
ret = chsc_get_channel_measurement_chars(chp);
if (ret)
goto out_free;
} else {
chp->cmg = -1;
}
dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
ret = device_register(&chp->dev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
put_device(&chp->dev);
goto out;
}
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
}
}
channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
out_free:
kfree(chp);
out:
return ret;
}
/**
* chp_get_chp_desc - return newly allocated channel-path description
* @chpid: channel-path ID
*
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel-path ID. Return %NULL on error.
*/
struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
{
struct channel_path *chp;
struct channel_path_desc *desc;
chp = chpid_to_chp(chpid);
if (!chp)
return NULL;
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
if (!desc)
return NULL;
mutex_lock(&chp->lock);
memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
mutex_unlock(&chp->lock);
return desc;
}
/**
* chp_process_crw - process channel-path status change
* @crw0: channel report-word to handle
* @crw1: second channel-report word (always NULL)
* @overflow: crw overflow indication
*
* Handle channel-report-words indicating that the status of a channel-path
* has changed.
*/
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
int overflow)
{
struct chp_id chpid;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
/*
* Check for solicited machine checks. These are
* created by reset channel path and need not be
* handled here.
*/
if (crw0->slct) {
CIO_CRW_EVENT(2, "solicited machine check for "
"channel path %02X\n", crw0->rsid);
return;
}
chp_id_init(&chpid);
chpid.id = crw0->rsid;
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
if (!chp_is_registered(chpid))
chp_new(chpid);
chsc_chp_online(chpid);
break;
case CRW_ERC_PERRI: /* Path has gone. */
case CRW_ERC_PERRN:
chsc_chp_offline(chpid);
break;
default:
CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
crw0->erc);
}
}
int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (!(ssd->path_mask & mask))
continue;
if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
continue;
if ((ssd->fla_valid_mask & mask) &&
((ssd->fla[i] & link->fla_mask) != link->fla))
continue;
return mask;
}
return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
static inline int info_bit_num(struct chp_id id)
{
return id.id + id.cssid * (__MAX_CHPID + 1);
}
/* Force chp_info refresh on next call to info_validate(). */
static void info_expire(void)
{
mutex_lock(&info_lock);
chp_info_expires = jiffies - 1;
mutex_unlock(&info_lock);
}
/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
int rc;
mutex_lock(&info_lock);
rc = 0;
if (time_after(jiffies, chp_info_expires)) {
/* Data is too old, update. */
rc = sclp_chp_read_info(&chp_info);
chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
}
mutex_unlock(&info_lock);
return rc;
}
/**
* chp_info_get_status - retrieve configure status of a channel-path
* @chpid: channel-path ID
*
* On success, return 0 for standby, 1 for configured, 2 for reserved,
* 3 for not recognized. Return negative error code on error.
*/
int chp_info_get_status(struct chp_id chpid)
{
int rc;
int bit;
rc = info_update();
if (rc)
return rc;
bit = info_bit_num(chpid);
mutex_lock(&info_lock);
if (!chp_test_bit(chp_info.recognized, bit))
rc = CHP_STATUS_NOT_RECOGNIZED;
else if (chp_test_bit(chp_info.configured, bit))
rc = CHP_STATUS_CONFIGURED;
else if (chp_test_bit(chp_info.standby, bit))
rc = CHP_STATUS_STANDBY;
else
rc = CHP_STATUS_RESERVED;
mutex_unlock(&info_lock);
return rc;
}
/* Return configure task for chpid. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
return chp_cfg_task[chpid.cssid][chpid.id];
}
/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
/* Perform one configure/deconfigure request. Reschedule the work function
 * until no more requests are pending. */
static void cfg_func(struct work_struct *work)
{
struct chp_id chpid;
enum cfg_task_t t;
int rc;
mutex_lock(&cfg_lock);
t = cfg_none;
chp_id_for_each(&chpid) {
t = cfg_get_task(chpid);
if (t != cfg_none) {
cfg_set_task(chpid, cfg_none);
break;
}
}
mutex_unlock(&cfg_lock);
switch (t) {
case cfg_configure:
rc = sclp_chp_configure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_online(chpid);
}
break;
case cfg_deconfigure:
rc = sclp_chp_deconfigure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_offline(chpid);
}
break;
case cfg_none:
/* Get updated information after last change. */
info_update();
mutex_lock(&cfg_lock);
cfg_busy = 0;
mutex_unlock(&cfg_lock);
wake_up_interruptible(&cfg_wait_queue);
return;
}
queue_work(chp_wq, &cfg_work);
}
/**
* chp_cfg_schedule - schedule chpid configuration request
* @chpid: channel-path ID
* @configure: non-zero for configure, zero for deconfigure
*
* Schedule a channel-path configuration/deconfiguration request.
*/
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
configure);
mutex_lock(&cfg_lock);
cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
cfg_busy = 1;
mutex_unlock(&cfg_lock);
queue_work(chp_wq, &cfg_work);
}
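/*
 * Illustrative usage (mirrors chp_configure_write() above; the chpid
 * value is a made-up example):
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;
 *	chp_cfg_schedule(chpid, 1);	/* request configure */
 *	cfg_wait_idle();		/* wait for the worker */
 */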
/**
* chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
* @chpid: channel-path ID
*
* Cancel an active channel-path deconfiguration request if it has not yet
* been performed.
*/
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
mutex_lock(&cfg_lock);
if (cfg_get_task(chpid) == cfg_deconfigure)
cfg_set_task(chpid, cfg_none);
mutex_unlock(&cfg_lock);
}
static int cfg_wait_idle(void)
{
if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
return -ERESTARTSYS;
return 0;
}
static int __init chp_init(void)
{
struct chp_id chpid;
int ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
chp_wq = create_singlethread_workqueue("cio_chp");
if (!chp_wq) {
crw_unregister_handler(CRW_RSC_CPATH);
return -ENOMEM;
}
INIT_WORK(&cfg_work, cfg_func);
init_waitqueue_head(&cfg_wait_queue);
if (info_update())
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
chp_new(chpid);
}
return 0;
}
subsys_initcall(chp_init);

72
drivers/s390/cio/chp.h Normal file
View file

@ -0,0 +1,72 @@
/*
* Copyright IBM Corp. 2007, 2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef S390_CHP_H
#define S390_CHP_H S390_CHP_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"
#define CHP_STATUS_STANDBY 0
#define CHP_STATUS_CONFIGURED 1
#define CHP_STATUS_RESERVED 2
#define CHP_STATUS_NOT_RECOGNIZED 3
#define CHP_ONLINE 0
#define CHP_OFFLINE 1
#define CHP_VARY_ON 2
#define CHP_VARY_OFF 3
struct chp_link {
struct chp_id chpid;
u32 fla_mask;
u16 fla;
};
static inline int chp_test_bit(u8 *bitmap, int num)
{
int byte = num >> 3;
int mask = 128 >> (num & 7);
return (bitmap[byte] & mask) ? 1 : 0;
}
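/*
 * Example (illustrative): bits are numbered MSB-first within each byte,
 * so chp_test_bit(bitmap, 0) tests bit 0x80 of bitmap[0] and
 * chp_test_bit(bitmap, 9) tests bit 0x40 of bitmap[1].
 */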
struct channel_path {
struct device dev;
struct chp_id chpid;
struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
struct channel_path_desc_fmt1 desc_fmt1;
/* Channel-measurement related stuff: */
int cmg;
int shared;
void *cmg_chars;
};
/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
return channel_subsystems[chpid.cssid]->chps[chpid.id];
}
int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
int chp_info_get_status(struct chp_id chpid);
int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
#endif /* S390_CHP_H */

1250
drivers/s390/cio/chsc.c Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff